Dataset columns:
commitId: string (length 40)
datetime: string (length 30-31)
subject: string (length 37-266)
comment: string (length 109-15.2k)
diff: string (length 238-914k)
gitVersion: string (9 classes)
43351718165579d908150e785815d1e65d439ab
Tue, 16 Apr 2024 09:24:14 -0700
[PATCH 0257/1000] [dynamo][decorator] Support disable on nn modules (#124185)
Fixes https://github.com/pytorch/pytorch/issues/123979 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124185 Approved by: https://github.com/weifengpy, https://github.com/yoyoyocmu
diff --git a/test/dynamo/test_decorators.py b/test/dynamo/test_decorators.py index ab38b26ff9..3bff8b7177 100644 --- a/test/dynamo/test_decorators.py +++ b/test/dynamo/test_decorators.py @@ -84,6 +84,56 @@ class DecoratorTests(torch._dynamo.test_case.TestCase): # to callsites of eval_frame.innermost_fn. A warning would also be very noisy. w = torch._dynamo.disable(fn=wrapper, recursive=True) + def test_disable_nn_modules_forward_hook(self): + class SimpleLinear(torch.nn.Module): + def __init__(self): + super().__init__() + self.layer0 = torch.nn.Linear(4, 4) + + def forward(self, inp): + return self.layer0(torch.sigmoid(inp)) + + class SimpleModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.layer0 = SimpleLinear() + self.layer1 = torch.nn.Linear(4, 4) + + def forward(self, inp): + z = self.layer0(torch.sin(inp)) + return self.layer1(z) + + def hook(module, args): + inp = args[0].sigmoid() + return (inp,) + + model = SimpleModel() + model.layer0.register_forward_pre_hook(hook) + + # Disable my monkeypatching + model.layer0 = torch._dynamo.disable(model.layer0) + + cnts = torch._dynamo.testing.CompileCounterWithBackend("eager") + opt_model = torch.compile(model, backend=cnts) + opt_model(torch.randn(4)) + + # check for no graph break + self.assertEqual(cnts.frame_count, 2) + + gm0 = cnts.graphs[0] + # Check that the first graph has sin node, and no sigmoid + self.assertTrue(any(node.target is torch.sin for node in gm0.graph.nodes)) + self.assertTrue( + all(node.target is not torch.sigmoid for node in gm0.graph.nodes) + ) + + gm1 = cnts.graphs[1] + # Check that the first graph does not have sigmoid. sigmoid is used in + # both hook and disabled module. + self.assertTrue( + all(node.target is not torch.sigmoid for node in gm1.graph.nodes) + ) + def test_allow_in_graph(self): cnts = torch._dynamo.testing.CompileCounter() diff --git a/torch/_dynamo/variables/builder.py b/torch/_dynamo/variables/builder.py index 8c8a9e97dd..79320002a6 100644 --- a/torch/_dynamo/variables/builder.py +++ b/torch/_dynamo/variables/builder.py @@ -137,6 +137,7 @@ from .misc import ( AutogradFunctionVariable, ComptimeVariable, DebuggingVariable, + DelayGraphBreakVariable, GetAttrVariable, GetSetDescriptorVariable, InspectSignatureVariable, @@ -972,6 +973,14 @@ class VariableBuilder: if len(value.__dict__) == 0: unimplemented(f"uninitialized nn.Module: {typestr(value)}") if istype(value, OptimizedModule): + # Check if the optimized module was disabled + if inspect.getattr_static(value.forward, "_torchdynamo_disable", False): + # This bytecode is mostly of kind LOAD_ATTR or LOAD_METHOD. If + # we graph break here, Dynamo does not know how to create + # continuation functions for such bytecodes. So, we delay the + # graph break to CALL_FUNCTION. + return DelayGraphBreakVariable(source=self.source) + self.install_guards(GuardBuilder.TYPE_MATCH) self.source = AttrSource(self.source, "_orig_mod") return self.wrap_module(value._orig_mod)
2.41.0
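The new test in the diff exercises this end to end through a forward pre-hook. As a minimal standalone sketch of the core pattern (module names and tensor sizes here are illustrative, not taken from the commit), wrapping a submodule with `torch._dynamo.disable` keeps that submodule out of the compiled graph while the rest of the model is still compiled:

```python
import torch


class Inner(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return self.linear(torch.sigmoid(x))


class Outer(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.inner = Inner()
        self.head = torch.nn.Linear(4, 4)

    def forward(self, x):
        return self.head(self.inner(torch.sin(x)))


model = Outer()

# After this patch, torch._dynamo.disable() can wrap an nn.Module directly:
# the wrapped submodule (and any hooks registered on it) runs eagerly, and
# the surrounding model is compiled around it.
model.inner = torch._dynamo.disable(model.inner)

compiled = torch.compile(model, backend="eager")
out = compiled(torch.randn(4))
```

This mirrors the structure of `test_disable_nn_modules_forward_hook` above and requires a PyTorch build that includes this change.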
4cecf06d7c3d2f38870cdb01f1e322678abea9c
Tue, 16 Apr 2024 14:03:00 -0700
[PATCH 0258/1000] Update autotune jk knobs (#124214)
Differential Revision: [D56201145](https://our.internmc.facebook.com/intern/diff/D56201145/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124214 Approved by: https://github.com/aakhundov
diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/triton_heuristics.py index aae26554c9..f5c6dae70d 100644 --- a/torch/_inductor/triton_heuristics.py +++ b/torch/_inductor/triton_heuristics.py @@ -979,10 +979,8 @@ def should_use_remote_autotune_cache(): from triton.runtime.fb_memcache import MEMCACHE_VERSION - return torch._utils_internal.justknobs_check( - "pytorch/autotune_remote_cache:enable" - ) or MEMCACHE_VERSION >= torch._utils_internal.justknobs_getval_int( - "pytorch/autotune_remote_cache:memcache_version" + return MEMCACHE_VERSION >= torch._utils_internal.justknobs_getval_int( + "pytorch/remote_cache:autotune_memcache_version" )
2.41.0
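Because the hunk above is flattened onto one line, here is the portion of `should_use_remote_autotune_cache()` touched by the diff, re-rendered for readability. It is copied from the diff rather than new behavior, and the function may have additional lines outside the hunk's context; `torch._utils_internal` and `triton.runtime.fb_memcache` are Meta-internal, so this only runs in fbcode builds:

```python
import torch


def should_use_remote_autotune_cache():
    # fbcode-only module providing the memcache client version.
    from triton.runtime.fb_memcache import MEMCACHE_VERSION

    # The old code also consulted the "pytorch/autotune_remote_cache:enable"
    # JustKnob; after this change the decision is driven solely by the
    # memcache-version knob, renamed to
    # "pytorch/remote_cache:autotune_memcache_version".
    return MEMCACHE_VERSION >= torch._utils_internal.justknobs_getval_int(
        "pytorch/remote_cache:autotune_memcache_version"
    )
```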
efdf9a6a673b2d70befe826ff8c009997efbef0
Wed, 17 Apr 2024 18:05:11 +0000
[PATCH 0259/1000] fix pytorch version for onnx in doc (#124182)
Fixes [#123845](https://github.com/pytorch/pytorch/issues/123845) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124182 Approved by: https://github.com/albanD
diff --git a/docs/source/onnx.rst b/docs/source/onnx.rst index 3a72c726bb..ffaa8ef836 100644 --- a/docs/source/onnx.rst +++ b/docs/source/onnx.rst @@ -18,7 +18,7 @@ Microsoft's `ONNX Runtime <https://www.onnxruntime.ai>`_. TorchDynamo-based ONNX Exporter ------------------------------- -*The TorchDynamo-based ONNX exporter is the newest (and Beta) exporter for PyTorch 2.0 and newer* +*The TorchDynamo-based ONNX exporter is the newest (and Beta) exporter for PyTorch 2.1 and newer* TorchDynamo engine is leveraged to hook into Python's frame evaluation API and dynamically rewrite its bytecode into an FX Graph. The resulting FX Graph is then polished before it is finally translated into an
2.41.0
726a23d4edc43db072747582031257f5f7016e7
Wed, 17 Apr 2024 19:16:32 +0000
[PATCH 0260/1000] change `tf32` thresholds for `test_per_sample_grads_embeddingnet` (#124104)
TF32 causes issues with the tolerances here; we might also consider migrating some of the `with_tf32_off` tests in this file to `tf32_on_and_off` in case it would be useful to get signal for TF32. CC @malfet @atalman Pull Request resolved: https://github.com/pytorch/pytorch/pull/124104 Approved by: https://github.com/zou3519
diff --git a/test/functorch/test_eager_transforms.py b/test/functorch/test_eager_transforms.py index ce8aa84bf3..b54b9762fb 100644 --- a/test/functorch/test_eager_transforms.py +++ b/test/functorch/test_eager_transforms.py @@ -51,7 +51,12 @@ from torch._ops import HigherOrderOperator from torch._subclasses.fake_tensor import FakeTensorMode from torch.func import functional_call, linearize, stack_module_state from torch.testing import make_tensor -from torch.testing._internal.common_cuda import SM70OrLater, TEST_CUDA, with_tf32_off +from torch.testing._internal.common_cuda import ( + SM70OrLater, + TEST_CUDA, + tf32_on_and_off, + with_tf32_off, +) from torch.testing._internal.common_device_type import ( dtypes, instantiate_device_type_tests, @@ -1688,6 +1693,7 @@ class TestVmapOfGrad(TestCase): for key in result: self.assertEqual(result[key], expected[key], atol=0, rtol=1.5e-3) + @tf32_on_and_off(0.005) @parametrize("mechanism", ["make_functional", "functional_call"]) def test_per_sample_grads_embeddingnet(self, device, mechanism): class SampleNet(nn.Module):
2.41.0
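For reference, `tf32_on_and_off(tol)` comes from `torch.testing._internal.common_cuda` (as imported in the diff); on TF32-capable CUDA devices it runs the decorated device test both with TF32 enabled and disabled, applying the looser tolerance to the TF32 pass. A hypothetical minimal test using it the same way as the patch might look like this (test name, sizes, and the matmul body are illustrative, not from the commit):

```python
import torch
from torch.testing._internal.common_cuda import tf32_on_and_off
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_utils import TestCase, run_tests


class TestMatmulTF32(TestCase):
    # On TF32-capable CUDA hardware this runs twice (TF32 on and off),
    # with the looser 0.005 tolerance used for the TF32 run; elsewhere it
    # runs once with the default tolerance.
    @tf32_on_and_off(0.005)
    def test_matmul(self, device):
        a = torch.randn(64, 64, device=device)
        b = torch.randn(64, 64, device=device)
        ref = (a.double() @ b.double()).float()
        self.assertEqual(a @ b, ref)


instantiate_device_type_tests(TestMatmulTF32, globals())

if __name__ == "__main__":
    run_tests()
```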
3e249969bd0f0625a2b06fd23f3c1ef4aa6b16b
Wed, 17 Apr 2024 19:29:30 +0000
[PATCH 0261/1000] [BE] enable `ruff` rule `RSE` and remove useless parentheses in `raise` statements (#124261)
Remove useless parentheses in `raise` statements if the exception type is raised with no argument. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124261 Approved by: https://github.com/albanD
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py index 91a8aeba39..0d6965c148 100644 --- a/benchmarks/dynamo/common.py +++ b/benchmarks/dynamo/common.py @@ -1858,7 +1858,7 @@ class TimeOutException(Exception): def alarm_handler(signum, frame): - raise TimeOutException() + raise TimeOutException def exit_after(s): @@ -2136,7 +2136,7 @@ class BenchmarkRunner: return set() def get_tolerance_and_cosine_flag(self, is_training, current_device, name): - raise NotImplementedError() + raise NotImplementedError @property def equal_nan(self): diff --git a/benchmarks/dynamo/microbenchmarks/operator_inp_utils.py b/benchmarks/dynamo/microbenchmarks/operator_inp_utils.py index f085cbca11..28139e9339 100644 --- a/benchmarks/dynamo/microbenchmarks/operator_inp_utils.py +++ b/benchmarks/dynamo/microbenchmarks/operator_inp_utils.py @@ -80,7 +80,7 @@ def serialize_sparse_tensor(e): def deserialize_sparse_tensor(size, dtype, layout, is_coalesced, nnz=None): - raise NotImplementedError() + raise NotImplementedError def deserialize_tensor(size, dtype, stride=None): diff --git a/pyproject.toml b/pyproject.toml index 24bede639f..8e9a44f388 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -126,6 +126,7 @@ select = [ "PT025", "PT026", "PYI", + "RSE", "RUF008", # mutable dataclass default "RUF015", # access first ele in constant time "RUF016", # type error non-integer index diff --git a/scripts/release_notes/commitlist.py b/scripts/release_notes/commitlist.py index 83c73be49b..a1ae0206aa 100644 --- a/scripts/release_notes/commitlist.py +++ b/scripts/release_notes/commitlist.py @@ -578,7 +578,7 @@ def main(): with open(filename, "w") as f: f.writelines(lines) return - raise AssertionError() + raise AssertionError if __name__ == "__main__": diff --git a/test/cpp/jit/tests_setup.py b/test/cpp/jit/tests_setup.py index 1edb19f21e..d0ddf31532 100644 --- a/test/cpp/jit/tests_setup.py +++ b/test/cpp/jit/tests_setup.py @@ -6,10 +6,10 @@ import torch class Setup: def setup(self): - raise NotImplementedError() + raise NotImplementedError def shutdown(self): - raise NotImplementedError() + raise NotImplementedError class FileSetup: diff --git a/test/distributed/checkpoint/e2e/test_fsdp_ep.py b/test/distributed/checkpoint/e2e/test_fsdp_ep.py index 683a62f84d..44911ab5bf 100644 --- a/test/distributed/checkpoint/e2e/test_fsdp_ep.py +++ b/test/distributed/checkpoint/e2e/test_fsdp_ep.py @@ -21,7 +21,7 @@ class Dummymodel(nn.Module): super().__init__() def forward(self, x): - raise NotImplementedError() + raise NotImplementedError class EPModel(nn.Module): @@ -31,7 +31,7 @@ class EPModel(nn.Module): self.net2 = nn.Sequential(nn.Linear(16, 16), nn.ReLU()) def forward(self, x): - raise NotImplementedError() + raise NotImplementedError class SecondTier(nn.Module): @@ -43,7 +43,7 @@ class SecondTier(nn.Module): self.net = nn.Sequential(nn.Linear(16, 16), nn.ReLU()) def forward(self, x): - raise NotImplementedError() + raise NotImplementedError class TopModel(nn.Module): @@ -55,7 +55,7 @@ class TopModel(nn.Module): self.net = nn.Sequential(nn.Linear(16, 16), nn.ReLU()) def forward(self, x): - raise NotImplementedError() + raise NotImplementedError class TestFSDPWithEP(DTensorTestBase, VerifyStateDictMixin): diff --git a/test/distributed/elastic/metrics/api_test.py b/test/distributed/elastic/metrics/api_test.py index e548cfb6f4..3505e7740e 100644 --- a/test/distributed/elastic/metrics/api_test.py +++ b/test/distributed/elastic/metrics/api_test.py @@ -34,7 +34,7 @@ class TestMetricsHandler(MetricHandler): class 
Parent(abc.ABC): @abc.abstractmethod def func(self): - raise NotImplementedError() + raise NotImplementedError def base_func(self): self.func() @@ -57,7 +57,7 @@ class MetricsApiTest(TestCase): @prof def throw(self): - raise RuntimeError() + raise RuntimeError @prof(group="torchelastic") def bar2(self): diff --git a/test/distributed/elastic/rendezvous/api_test.py b/test/distributed/elastic/rendezvous/api_test.py index b9287546b3..82990c7fed 100644 --- a/test/distributed/elastic/rendezvous/api_test.py +++ b/test/distributed/elastic/rendezvous/api_test.py @@ -197,7 +197,7 @@ class _DummyRendezvousHandler(RendezvousHandler): return "dummy_backend" def next_rendezvous(self) -> Tuple[Store, int, int]: - raise NotImplementedError() + raise NotImplementedError def is_closed(self) -> bool: return False diff --git a/test/distributed/elastic/utils/distributed_test.py b/test/distributed/elastic/utils/distributed_test.py index 65ebd4b6e7..9e9c85e8a6 100644 --- a/test/distributed/elastic/utils/distributed_test.py +++ b/test/distributed/elastic/utils/distributed_test.py @@ -38,7 +38,7 @@ def _create_c10d_store_mp(is_server, server_addr, port, world_size, wait_for_wor timeout=2, ) if store is None: - raise AssertionError() + raise AssertionError store.set(f"test_key/{os.getpid()}", b"test_value") diff --git a/test/distributed/fsdp/test_fsdp_optim_state.py b/test/distributed/fsdp/test_fsdp_optim_state.py index 4080168e42..672b71d529 100644 --- a/test/distributed/fsdp/test_fsdp_optim_state.py +++ b/test/distributed/fsdp/test_fsdp_optim_state.py @@ -363,7 +363,7 @@ class TestFSDPOptimState(FSDPTest): # these settings are not implemented since the transformer is # wrapped with FSDP at the top-level, which means that there is # only a single flat parameter, making these booleans vacuous - raise NotImplementedError() + raise NotImplementedError if group is None: group = dist.distributed_c10d._get_default_group() model = TransformerWithSharedParams.init( diff --git a/test/distributed/pipeline/sync/test_bugs.py b/test/distributed/pipeline/sync/test_bugs.py index c3dc716a64..1cb981c6a4 100644 --- a/test/distributed/pipeline/sync/test_bugs.py +++ b/test/distributed/pipeline/sync/test_bugs.py @@ -63,7 +63,7 @@ def test_exception_no_hang(setup_rpc): class Raise(nn.Module): def forward(self, x): - raise ExpectedException() + raise ExpectedException model = nn.Sequential(Pass(), Pass(), Raise()) model = Pipe(model, chunks=3) diff --git a/test/distributed/pipeline/sync/test_pipe.py b/test/distributed/pipeline/sync/test_pipe.py index b0237f8427..848a3ed7eb 100644 --- a/test/distributed/pipeline/sync/test_pipe.py +++ b/test/distributed/pipeline/sync/test_pipe.py @@ -231,7 +231,7 @@ def test_exception(setup_rpc): class Raise(nn.Module): def forward(self, *_): - raise ExpectedException() + raise ExpectedException model = nn.Sequential(Raise()) model = Pipe(model, chunks=1) @@ -265,7 +265,7 @@ def test_exception_early_stop_asap(setup_rpc): class Raise(nn.Module): def forward(self, x): - raise ExpectedException() + raise ExpectedException model = nn.Sequential(Pass(), Pass(), Counter(), Raise()) model = Pipe(model, chunks=3) diff --git a/test/dynamo/test_autograd_function.py b/test/dynamo/test_autograd_function.py index afde52b9ed..88859c894e 100644 --- a/test/dynamo/test_autograd_function.py +++ b/test/dynamo/test_autograd_function.py @@ -752,7 +752,7 @@ class AutogradFunctionTests(torch._dynamo.test_case.TestCase): new_data = args[0]._data.view(*args[1:]) return FooTensor(new_data, args[0]._config, args[0]._scale) - raise 
NotImplementedError() + raise NotImplementedError class foo_autograd_fn(torch.autograd.Function): @staticmethod diff --git a/test/dynamo/test_exc.py b/test/dynamo/test_exc.py index 2008a95869..953e8ecd0a 100644 --- a/test/dynamo/test_exc.py +++ b/test/dynamo/test_exc.py @@ -47,7 +47,7 @@ from user code: def test_internal_error_suppress_errors(self, records): def fn001(x): def f(ctx): - raise AssertionError() + raise AssertionError comptime(f) @@ -62,7 +62,7 @@ WON'T CONVERT fn001 test_exc.py line N ========== TorchDynamo Stack Trace ========== Traceback (most recent call last): File "test_exc.py", line N, in f - raise AssertionError() + raise AssertionError AssertionError: from user code: @@ -84,7 +84,7 @@ from user code: def test_not_implemented_error(self, records): def fn001(x): def f(ctx): - raise NotImplementedError() + raise NotImplementedError # Ensure graph break is not possible for i in range(3): @@ -101,7 +101,7 @@ WON'T CONVERT fn001 test_exc.py line N due to: Traceback (most recent call last): File "test_exc.py", line N, in f - raise NotImplementedError() + raise NotImplementedError torch._dynamo.exc.InternalTorchDynamoError: from user code: @@ -128,7 +128,7 @@ from user code: # NB: avoid decorator, as 3.11 changed the line number attributed # in this situation def f(ctx): - raise AssertionError() + raise AssertionError comptime(f) diff --git a/test/dynamo/test_logging.py b/test/dynamo/test_logging.py index f2f507825a..8c37ef1a7f 100644 --- a/test/dynamo/test_logging.py +++ b/test/dynamo/test_logging.py @@ -164,7 +164,7 @@ from user code: import torch._inductor.lowering def throw(x): - raise AssertionError() + raise AssertionError # inject an error in the lowerings dict_entries = {} @@ -189,7 +189,7 @@ WON'T CONVERT inductor_error_fn test_logging.py line N due to: Traceback (most recent call last): File "test_logging.py", line N, in throw - raise AssertionError() + raise AssertionError torch._dynamo.exc.BackendCompilerFailed: backend='inductor' raised: LoweringException: AssertionError: target: aten.round.default diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py index 899cebcac7..50b42b6557 100644 --- a/test/dynamo/test_repros.py +++ b/test/dynamo/test_repros.py @@ -396,12 +396,12 @@ class ListConfig: if self.resolve: x = x._dereference_node() if x._is_missing(): - raise AssertionError() + raise AssertionError self.index = self.index + 1 if isinstance(x, ListConfig.ValueNode): return x._value() - raise AssertionError() + raise AssertionError def __iter__(self): return self._iter_ex(True) @@ -410,7 +410,7 @@ class ListConfig: try: return ListConfig.ListIterator(self, resolve) except Exception: - raise AssertionError() + raise AssertionError def __init__(self): self._content = [ @@ -545,7 +545,7 @@ def apply_chunking_to_forward(forward_fn, *input_tensors): assert all(input_tensor.shape[1] == tensor_shape for input_tensor in input_tensors) num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters) if num_args_in_forward_chunk_fn != len(input_tensors): - raise ValueError() + raise ValueError return forward_fn(*input_tensors) @@ -848,7 +848,7 @@ def _merge_criteria_processor_list(default_list, custom_list): for default in default_list: for custom in custom_list: if type(custom) is type(default): - raise ValueError() + raise ValueError default_list.extend(custom_list) return default_list @@ -2573,7 +2573,7 @@ class ReproTests(torch._dynamo.test_case.TestCase): if self.i < 3: self.i += 1 return self.i - raise StopIteration() + raise StopIteration 
@torch.compile(backend="eager", fullgraph=True) def fn(x): diff --git a/test/dynamo/test_skip_non_tensor.py b/test/dynamo/test_skip_non_tensor.py index 81210f3fba..136e5b93d1 100644 --- a/test/dynamo/test_skip_non_tensor.py +++ b/test/dynamo/test_skip_non_tensor.py @@ -147,10 +147,10 @@ class SkipNonTensorTests(torch._dynamo.test_case.TestCase): class Foo(list): def __iter__(self): - raise Exception() + raise Exception def __len__(self): - raise Exception() + raise Exception x = Foo() x.append(torch.randn(4)) diff --git a/test/dynamo/test_structured_trace.py b/test/dynamo/test_structured_trace.py index c6f74c2a56..deb2a2d548 100644 --- a/test/dynamo/test_structured_trace.py +++ b/test/dynamo/test_structured_trace.py @@ -233,7 +233,7 @@ class StructuredTraceTest(TestCase): import torch._inductor.lowering def throw(x): - raise AssertionError() + raise AssertionError # inject an error in the lowerings dict_entries = {} diff --git a/test/functorch/discover_coverage.py b/test/functorch/discover_coverage.py index bbe3922b62..f6e0858148 100644 --- a/test/functorch/discover_coverage.py +++ b/test/functorch/discover_coverage.py @@ -732,12 +732,12 @@ class Operator: def any_opinfo_attr(self, attr): if not self.has_opinfo(): - raise RuntimeError() + raise RuntimeError return any(getattr(opinfo, attr) for opinfo in self.opinfos) def all_opinfo_attr(self, attr): if not self.has_opinfo(): - raise RuntimeError() + raise RuntimeError return all(getattr(opinfo, attr) for opinfo in self.opinfos) def supports_vjp(self): @@ -870,7 +870,7 @@ class OperatorSet: elif n.startswith(torch_dot): names_sanitized.append(n[len(torch_dot) :]) else: - raise AssertionError() + raise AssertionError return cls.from_names(names_sanitized) def query(self, operator_method, filter=(Support.NO, Support.YES, Support.UNKNOWN)): diff --git a/test/functorch/test_dims.py b/test/functorch/test_dims.py index 8d282f4465..0181c1c4d2 100644 --- a/test/functorch/test_dims.py +++ b/test/functorch/test_dims.py @@ -403,7 +403,7 @@ class TestMin(TestCase): # test with too many elements try: A[1, ..., 1, 1] - raise NotImplementedError() + raise NotImplementedError except IndexError: pass c, d = dims() @@ -415,7 +415,7 @@ class TestMin(TestCase): ) try: A[..., 3, ...] 
- raise NotImplementedError() + raise NotImplementedError except DimensionBindError: pass diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py index 84fc95a2c6..c9824fbdd5 100644 --- a/test/inductor/test_torchinductor_dynamic_shapes.py +++ b/test/inductor/test_torchinductor_dynamic_shapes.py @@ -285,7 +285,7 @@ class TestInductorDynamic(TestCase): @custom_ops.custom_op("test::foo") def foo(x: torch.Tensor, y: int) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl("test::foo") def foo_impl(x: torch.Tensor, y: int) -> torch.Tensor: @@ -401,7 +401,7 @@ class TestInductorDynamic(TestCase): @custom_ops.custom_op("test::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl("test::foo") def foo_impl(x: torch.Tensor) -> torch.Tensor: diff --git a/test/jit/test_freezing.py b/test/jit/test_freezing.py index 91d2bdb82c..1e74436438 100644 --- a/test/jit/test_freezing.py +++ b/test/jit/test_freezing.py @@ -2632,7 +2632,7 @@ class TestFrozenOptimizations(JitTestCase): res3 = torch._C._nn.linear(in_tensor2, self.w2, self.b2) res4 = torch._C._nn.linear(in_tensor1, self.w2, self.b1) else: - raise AssertionError() + raise AssertionError res2 = torch._C._nn.linear(in_tensor1, self.w2, self.b1) return res1, res2, res3, res4 diff --git a/test/jit/test_list_dict.py b/test/jit/test_list_dict.py index f780a9836b..f3d314dbac 100644 --- a/test/jit/test_list_dict.py +++ b/test/jit/test_list_dict.py @@ -2871,7 +2871,7 @@ class TestScriptList(JitTestCase): def __next__(self): if self.value == limit: # noqa: F821 - raise StopIteration() + raise StopIteration ret = self.value self.value += 1 diff --git a/test/lazy/test_step_closures.py b/test/lazy/test_step_closures.py index c844790972..b481d89d9c 100644 --- a/test/lazy/test_step_closures.py +++ b/test/lazy/test_step_closures.py @@ -54,7 +54,7 @@ class ClosuresTest(TestCase): torch._lazy.add_step_closure(closure) torch._lazy.mark_step() - raise AssertionError() # Should not reach here + raise AssertionError # Should not reach here except RuntimeError as e: assert flag.is_set(), "Should have caught exception from closure" @@ -79,7 +79,7 @@ class ClosuresTest(TestCase): torch._lazy.add_step_closure(closure2, run_async=True) torch._lazy.mark_step() - raise AssertionError() # Should not reach here + raise AssertionError # Should not reach here except RuntimeError as e: # Should have caught exception from closure1 pass diff --git a/test/onnx/test_operators.py b/test/onnx/test_operators.py index ed72229945..0f40f9324c 100644 --- a/test/onnx/test_operators.py +++ b/test/onnx/test_operators.py @@ -283,7 +283,7 @@ class TestOperators(common_utils.TestCase): def symbolic(g, x): # The inside of this function should never be invoked, because # we will fail due to an argument mismatch first. - raise AssertionError() + raise AssertionError @staticmethod def forward(ctx, x, y): diff --git a/test/onnx/verify.py b/test/onnx/verify.py index 0dca46764d..74b85acc2e 100644 --- a/test/onnx/verify.py +++ b/test/onnx/verify.py @@ -154,7 +154,7 @@ class Errors: NB: It is an error to "fail" without having added any errors to the error context. 
""" - raise self.exc_class() + raise self.exc_class def failWith(self, msg): """ @@ -489,7 +489,7 @@ def verify( errs.requireEqual( proto_bytes.getvalue(), alt_proto_bytes.getvalue() ) - raise AssertionError() + raise AssertionError # TODO: test that the traced model also returns the same thing... run_helper(torch_out, args, remained_onnx_input_idx) diff --git a/test/package/test_importer.py b/test/package/test_importer.py index 2148cc118c..72f582cc3b 100644 --- a/test/package/test_importer.py +++ b/test/package/test_importer.py @@ -98,7 +98,7 @@ class TestImporter(PackageTestCase): self._whichmodule_return = whichmodule_return def import_module(self, module_name): - raise NotImplementedError() + raise NotImplementedError def whichmodule(self, obj, name): return self._whichmodule_return diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py index f46087c6f0..a4269d84d3 100644 --- a/test/profiler/test_profiler.py +++ b/test/profiler/test_profiler.py @@ -1970,7 +1970,7 @@ assert KinetoStepTracker.current_step() == initial_step + 2 * niters try: with cm: x.add(y) - raise ValueError() + raise ValueError x.relu() except ValueError: pass diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index 03ca4a2f0e..6e04e8890d 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -509,7 +509,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def baz(x: Tensor) -> Tensor: - raise NotImplementedError() + raise NotImplementedError def test_unsupported_schemas(self): with self.assertRaisesRegex(ValueError, "only supports functional"): @@ -670,35 +670,35 @@ class TestCustomOp(CustomOpTestCaseBase): with self.assertRaisesRegex(ValueError, "varargs"): def foo(*args): - raise NotImplementedError() + raise NotImplementedError infer_schema(foo) with self.assertRaisesRegex(ValueError, "varkwargs"): def foo(**kwargs): - raise NotImplementedError() + raise NotImplementedError infer_schema(foo) with self.assertRaisesRegex(ValueError, "must have a type annotation"): def foo(x): - raise NotImplementedError() + raise NotImplementedError infer_schema(foo) with self.assertRaisesRegex(ValueError, "unsupported"): def foo(x: Tensor) -> Tuple[Tensor, ...]: - raise NotImplementedError() + raise NotImplementedError infer_schema(foo) with self.assertRaisesRegex(ValueError, "can be mutated"): def foo(x: Tensor, y: int) -> Tensor: - raise NotImplementedError() + raise NotImplementedError infer_schema(foo, mutates_args={"y"}) @@ -752,7 +752,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{self.test_ns}::foo") def foo(x: Tensor) -> typ: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{self.test_ns}::foo") def foo_impl(x: Tensor) -> typ: @@ -771,7 +771,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{self.test_ns}::foo") def foo(x: Tensor) -> Tuple[typ, typ]: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{self.test_ns}::foo") def foo_impl(x: Tensor) -> Tuple[typ, typ]: @@ -789,7 +789,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Tensor, y: typ) -> Tensor: - raise NotImplementedError() + raise NotImplementedError yeet = None @@ -823,7 +823,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{self.test_ns}::foo") def foo(x: torch.Tensor, sizes: Sequence[int]) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError called = 0 @@ -847,7 
+847,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Tensor, y: List[Optional[int]]) -> Tensor: - raise NotImplementedError() + raise NotImplementedError del foo @@ -855,7 +855,7 @@ class TestCustomOp(CustomOpTestCaseBase): # int[N] in Dispatcher is a bit wild, so we don't try to support it. @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Tensor, y: Tuple[int, int]) -> Tensor: - raise NotImplementedError() + raise NotImplementedError del foo @@ -863,7 +863,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Tensor, y: Callable) -> Tensor: - raise NotImplementedError() + raise NotImplementedError del foo @@ -910,7 +910,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{ns}::foo2") def foo2(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError def test_private_ctor(self): with self.assertRaisesRegex(RuntimeError, "CustomOp constructor is private"): @@ -919,7 +919,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_lifetime(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError custom_op = torch._custom_op.impl.get_op(f"{TestCustomOp.test_ns}::foo") @@ -928,7 +928,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: # noqa: F811 - raise NotImplementedError() + raise NotImplementedError # Unless we delete the original op. custom_ops._destroy(f"{TestCustomOp.test_ns}::foo") @@ -936,14 +936,14 @@ class TestCustomOp(CustomOpTestCaseBase): # Smoke test @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: # noqa: F811 - raise NotImplementedError() + raise NotImplementedError custom_ops._destroy(f"{TestCustomOp.test_ns}::foo") def test_autograd_notimplemented(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: # noqa: F811 - raise NotImplementedError() + raise NotImplementedError x = torch.randn(3, requires_grad=True) op = self.get_op(f"{self.test_ns}::foo") @@ -954,7 +954,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Sequence[torch.Tensor]) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError x = torch.randn(3, requires_grad=True) y = torch.randn(3) @@ -966,7 +966,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError x = torch.randn(3, requires_grad=True) y = torch.randn(3) @@ -978,7 +978,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_autograd_notimplemented_gradmode(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x, y): @@ -994,7 +994,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_impl_cpu(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types="cpu") def foo_cpu(x): @@ -1008,7 +1008,7 @@ class 
TestCustomOp(CustomOpTestCaseBase): def test_impl_invalid_devices(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError def foo_impl(x): return x.sin() @@ -1033,7 +1033,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_partially_registered(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): @@ -1054,7 +1054,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_save_for_backward_inputs_are_namedtuple(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): @@ -1084,7 +1084,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_returns_dict(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): @@ -1107,7 +1107,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_dict_invalid_keys(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): @@ -1130,7 +1130,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_dict_grad_for_nontensor(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, dim: int) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x, dim): @@ -1153,7 +1153,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_dict_requires_keys_for_input_tensors(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x, y): @@ -1176,7 +1176,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_dict_requires_keys_for_input_optional_tensors(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, y: Optional[torch.Tensor]) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x, y): @@ -1199,7 +1199,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_grads_are_tensor_or_none(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): @@ -1222,7 +1222,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_tensorlist_input_requires_list_grads_with_same_numel(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(xs: Sequence[torch.Tensor]) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(xs): @@ -1245,7 +1245,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_tensorlist_input_requires_list_grads_none_or_Tensor(self): 
@custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(xs: Sequence[torch.Tensor]) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(xs): @@ -1268,7 +1268,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_tensorlist_input_requires_list_grads(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(xs: Sequence[torch.Tensor]) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(xs): @@ -1291,7 +1291,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_output_differentiability_type(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(xs: Sequence[torch.Tensor]) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError with self.assertRaisesRegex(RuntimeError, "output_differentiability"): @@ -1304,7 +1304,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_output_differentiability_numel(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(xs: Sequence[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: - raise NotImplementedError() + raise NotImplementedError with self.assertRaisesRegex(RuntimeError, "output_differentiability"): @@ -1317,7 +1317,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_output_differentiability_tensorlist(self): @custom_ops.custom_op(f"{self.test_ns}::foo") def foo(x: Tensor) -> Tuple[List[Tensor], Tensor]: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{self.test_ns}::foo") def foo_impl(x): @@ -1343,7 +1343,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_backward_output_differentiability_non_tensor(self): @custom_ops.custom_op(f"{self.test_ns}::foo") def foo(x: Tensor) -> Tuple[Tensor, int]: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{self.test_ns}::foo") def foo_impl(x): @@ -1368,7 +1368,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_impl_separate(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo", device_types="cpu") def foo_cpu(x): @@ -1392,7 +1392,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_impl_multiple(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @custom_ops.impl(f"{TestCustomOp.test_ns}::foo") def foo_impl(x): @@ -1422,7 +1422,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_impl_meta(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, dim: int) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @torch.library.impl_abstract(f"{TestCustomOp.test_ns}::foo", lib=self.lib()) def foo_meta(x, dim): @@ -1438,7 +1438,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_duplicate_impl(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor, dim: int) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @torch.library.impl_abstract(f"{TestCustomOp.test_ns}::foo", lib=self.lib()) def foo_meta(x, dim): @@ -1457,7 +1457,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_new_data_dependent_symint(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - 
raise NotImplementedError() + raise NotImplementedError @torch.library.impl_abstract(f"{TestCustomOp.test_ns}::foo", lib=self.lib()) def foo_meta(x): @@ -1483,7 +1483,7 @@ class TestCustomOp(CustomOpTestCaseBase): # this one is just a sanity check. @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError @torch.library.impl_abstract(f"{TestCustomOp.test_ns}::foo", lib=self.lib()) def foo_meta(x): @@ -1497,7 +1497,7 @@ class TestCustomOp(CustomOpTestCaseBase): def test_not_implemented_error(self): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError x = torch.randn(3) op = self.get_op(f"{self.test_ns}::foo") @@ -1510,7 +1510,7 @@ class TestCustomOp(CustomOpTestCaseBase): @custom_ops.custom_op(f"{TestCustomOp.test_ns}::bar") def bar(sizes: Sequence[int]) -> torch.Tensor: - raise NotImplementedError() + raise NotImplementedError op = self.get_op(f"{self.test_ns}::bar") with self.assertRaisesRegex(NotImplementedError, "no Tensor inputs"): @@ -2021,7 +2021,7 @@ class MiniOpTest(CustomOpTestCaseBase): @staticmethod def backward(ctx, grad): - raise NotImplementedError() + raise NotImplementedError def autograd_impl(x): return Op.apply(x) diff --git a/test/test_maskedtensor.py b/test/test_maskedtensor.py index ed7f567788..87ce6db35e 100644 --- a/test/test_maskedtensor.py +++ b/test/test_maskedtensor.py @@ -492,7 +492,7 @@ class TestBinary(TestCase): mt1 = masked_tensor(data1, mask1) try: fn(mt0, mt1) - raise AssertionError() + raise AssertionError except ValueError as e: assert ( "Input masks must match. If you need support for this, please open an issue on Github." 
diff --git a/test/test_nn.py b/test/test_nn.py index e040afe3c6..720beb606e 100644 --- a/test/test_nn.py +++ b/test/test_nn.py @@ -6603,7 +6603,7 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""") elif weight_layout == torch.sparse_coo: module.weight = nn.Parameter(module.weight.to_sparse_coo()) else: - raise AssertionError() + raise AssertionError inp = torch.randn(4, requires_grad=True, device=device) res = module(inp) diff --git a/test/test_overrides.py b/test/test_overrides.py index 936303e459..d79753f78a 100644 --- a/test/test_overrides.py +++ b/test/test_overrides.py @@ -1206,7 +1206,7 @@ class TestTorchFunctionMode(TestCase): class A(TorchFunctionMode): def __torch_function__(self, *args, **kwargs): - raise ErrorA() + raise ErrorA with self.assertRaises(ErrorA): with A(): @@ -1218,7 +1218,7 @@ class TestTorchFunctionMode(TestCase): class A(TorchFunctionMode): def __torch_function__(self, *args, **kwargs): - raise ErrorA() + raise ErrorA x = A() with self.assertRaises(ErrorA): diff --git a/test/test_python_dispatch.py b/test/test_python_dispatch.py index 04c263591f..bd027ff47b 100644 --- a/test/test_python_dispatch.py +++ b/test/test_python_dispatch.py @@ -1238,7 +1238,7 @@ $3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") class AMode(TorchDispatchMode): def __torch_dispatch__(self, func, types, args=(), kwargs=None): if func.__name__ == 'randn.default': - raise RuntimeError() + raise RuntimeError return A(torch.zeros(())) with AMode(): @@ -1254,7 +1254,7 @@ $3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") class A(TorchDispatchMode): def __torch_dispatch__(self, func, types, args=(), kwargs=None): - raise ErrorA() + raise ErrorA x = A() with self.assertRaises(ErrorA): diff --git a/test/test_utils.py b/test/test_utils.py index 57651d73c0..5dd946faba 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -1081,7 +1081,7 @@ class TestTraceback(TestCase): source = '''\ def f(x): def g(x): - raise RuntimeError() # HEYA + raise RuntimeError # HEYA x = x * 3 return g(x) + 1 @@ -1099,7 +1099,7 @@ def f(x): def test_format_traceback_short(self): try: - raise RuntimeError() + raise RuntimeError except RuntimeError as e: self.assertRegex(format_traceback_short(e.__traceback__), r'.*test_utils.py:\d+ in test_format_traceback_short') diff --git a/test/test_weak.py b/test/test_weak.py index cc9ff6f58e..471049c601 100644 --- a/test/test_weak.py +++ b/test/test_weak.py @@ -525,7 +525,7 @@ class WeakKeyDictionaryTestCase(TestCase): return self def __next__(self): - raise Exc() + raise Exc self.assertRaises(Exc, d.update, badseq()) @@ -866,7 +866,7 @@ class WeakKeyDictionaryScriptObjectTestCase(TestCase): return self def __next__(self): - raise Exc() + raise Exc self.assertRaises(Exc, d.update, badseq()) diff --git a/test/torch_np/numpy_tests/core/test_multiarray.py b/test/torch_np/numpy_tests/core/test_multiarray.py index c7e2ac670c..bbb9104ff1 100644 --- a/test/torch_np/numpy_tests/core/test_multiarray.py +++ b/test/torch_np/numpy_tests/core/test_multiarray.py @@ -1136,14 +1136,14 @@ class TestCreation(TestCase): return 1 def __getitem__(self, index): - raise ValueError() + raise ValueError class Map: def __len__(self): return 1 def __getitem__(self, index): - raise KeyError() + raise KeyError a = np.array([Map()]) assert_(a.shape == (1,)) @@ -1160,7 +1160,7 @@ class TestCreation(TestCase): if ind in [0, 1]: return ind else: - raise IndexError() + raise IndexError d = np.array([Point2(), Point2(), Point2()]) assert_equal(d.dtype, np.dtype(object)) diff --git 
a/test/torch_np/numpy_tests/fft/test_pocketfft.py b/test/torch_np/numpy_tests/fft/test_pocketfft.py index ebf34f758a..7cb8597690 100644 --- a/test/torch_np/numpy_tests/fft/test_pocketfft.py +++ b/test/torch_np/numpy_tests/fft/test_pocketfft.py @@ -342,7 +342,7 @@ class TestFFT1D(TestCase): Y_res = fft(Y, axes=ax) assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) else: - raise ValueError() + raise ValueError @skipif(IS_WASM, reason="Cannot start thread") diff --git a/test/torch_np/numpy_tests/lib/test_function_base.py b/test/torch_np/numpy_tests/lib/test_function_base.py index c6f24dce08..85d69fc006 100644 --- a/test/torch_np/numpy_tests/lib/test_function_base.py +++ b/test/torch_np/numpy_tests/lib/test_function_base.py @@ -1435,7 +1435,7 @@ class TestVectorize(TestCase): try: vectorize(random.randrange) # Should succeed except Exception: - raise AssertionError() # noqa: TRY200 + raise AssertionError # noqa: TRY200 def test_keywords2_ticket_2100(self): # Test kwarg support: enhancement ticket 2100 diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py index 65ff7a50d9..b9651ea2da 100644 --- a/tools/autograd/gen_variable_type.py +++ b/tools/autograd/gen_variable_type.py @@ -1252,7 +1252,7 @@ def emit_body( if a.name == derivative_var_name: break else: - raise AssertionError() + raise AssertionError return f"grad_fn->should_compute_output({edge_off})" if is_inplace_foreach: diff --git a/torch/_custom_ops.py b/torch/_custom_ops.py index fe396da3fb..c13b0aaf33 100644 --- a/torch/_custom_ops.py +++ b/torch/_custom_ops.py @@ -61,7 +61,7 @@ def custom_op(qualname, func_or_schema=None): >>> # we will infer the types of the inputs and outputs. >>> @torch._custom_ops.custom_op("mylibrary::numpy_sin") >>> def numpy_sin(x: Tensor) -> Tensor: - >>> raise NotImplementedError() + >>> raise NotImplementedError >>> >>> # The custom op is now accessible via the torch.ops module: >>> torch.ops.mylibrary.numpy_sin @@ -143,7 +143,7 @@ def impl(qualname, *, device_types=("cpu", "cuda"), func=None): >>> # we will infer the types of the inputs and outputs. 
>>> @torch._custom_ops.custom_op("mylibrary::numpy_cos") >>> def numpy_cos(x: Tensor) -> Tensor: - >>> raise NotImplementedError() + >>> raise NotImplementedError >>> >>> # The custom op is now accessible via the torch.ops module: >>> torch.ops.mylibrary.numpy_cos @@ -207,7 +207,7 @@ def impl_abstract(qualname, *, func=None): >>> # Example 1: an operator without data-dependent output shape >>> @torch._custom_ops.custom_op("mylibrary::custom_linear") >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor: - >>> raise NotImplementedError() + >>> raise NotImplementedError >>> >>> @torch._custom_ops.impl_abstract("mylibrary::custom_linear") >>> def custom_linear_abstract(x, weight): diff --git a/torch/_dynamo/backends/debugging.py b/torch/_dynamo/backends/debugging.py index a92c8701ae..a349a3dd63 100644 --- a/torch/_dynamo/backends/debugging.py +++ b/torch/_dynamo/backends/debugging.py @@ -129,7 +129,7 @@ class TestingOnlyCompileError(Exception): def relu_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs): for node in gm.graph.nodes: if node.target == torch.relu: - raise ReluCompileError() + raise ReluCompileError return gm @@ -165,7 +165,7 @@ def non_leaf_compile_error_TESTING_ONLY(gm: torch.fx.GraphModule, example_inputs return gm for t in example_inputs: if not t.is_leaf: - raise TestingOnlyCompileError() + raise TestingOnlyCompileError return gm diff --git a/torch/_dynamo/device_interface.py b/torch/_dynamo/device_interface.py index e62efaf825..d2944bc9c0 100644 --- a/torch/_dynamo/device_interface.py +++ b/torch/_dynamo/device_interface.py @@ -39,7 +39,7 @@ class DeviceInterface(metaclass=DeviceInterfaceMeta): class device: def __new__(cls, device: _device_t): - raise NotImplementedError() + raise NotImplementedError class Worker: """ @@ -51,71 +51,71 @@ class DeviceInterface(metaclass=DeviceInterfaceMeta): @staticmethod def set_device(device: int): - raise NotImplementedError() + raise NotImplementedError @staticmethod def current_device() -> int: - raise NotImplementedError() + raise NotImplementedError @staticmethod def get_device_properties(device: _device_t = None): - raise NotImplementedError() + raise NotImplementedError @staticmethod def current_device(): - raise NotImplementedError() + raise NotImplementedError @staticmethod def set_device(device: _device_t): - raise NotImplementedError() + raise NotImplementedError @staticmethod def maybe_exchange_device(device: int) -> int: - raise NotImplementedError() + raise NotImplementedError @staticmethod def exchange_device(device: int) -> int: - raise NotImplementedError() + raise NotImplementedError @staticmethod def device_count(): - raise NotImplementedError() + raise NotImplementedError @staticmethod def is_available() -> bool: - raise NotImplementedError() + raise NotImplementedError @staticmethod def stream(stream: torch.Stream): - raise NotImplementedError() + raise NotImplementedError @staticmethod def current_stream(): - raise NotImplementedError() + raise NotImplementedError @staticmethod def set_stream(stream: torch.Stream): - raise NotImplementedError() + raise NotImplementedError @staticmethod def _set_stream_by_id(stream_id: int, device_index: int, device_type: int): - raise NotImplementedError() + raise NotImplementedError @staticmethod def get_raw_stream(): - raise NotImplementedError() + raise NotImplementedError @staticmethod def synchronize(device: _device_t = None): - raise NotImplementedError() + raise NotImplementedError @staticmethod def get_device_properties(device: _device_t = 
None): - raise NotImplementedError() + raise NotImplementedError @staticmethod def get_compute_capability(device: _device_t = None): - raise NotImplementedError() + raise NotImplementedError class DeviceGuard: diff --git a/torch/_dynamo/source.py b/torch/_dynamo/source.py index 9b890eb926..b0a5e7cb56 100644 --- a/torch/_dynamo/source.py +++ b/torch/_dynamo/source.py @@ -215,7 +215,7 @@ class EphemeralSource(Source): return f"<ephemeral{': ' + self.desc if self.desc is not None else ''}>" def make_guard(self): - raise NotImplementedError() + raise NotImplementedError def is_ephemeral(self): return True @@ -277,7 +277,7 @@ class NegateSource(ChainedSource): assert self.base is not None def reconstruct(self, codegen): - raise NotImplementedError() + raise NotImplementedError def guard_source(self): return self.base.guard_source() @@ -516,7 +516,7 @@ class ConstantSource(Source): return self.source_name def make_guard(self, fn): - raise NotImplementedError() + raise NotImplementedError @dataclasses.dataclass(frozen=True) diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py index 8a8fda775e..0c8e5898e6 100644 --- a/torch/_dynamo/symbolic_convert.py +++ b/torch/_dynamo/symbolic_convert.py @@ -1242,7 +1242,7 @@ class InstructionTranslatorBase( if ( isinstance(val, BuiltinVariable) and val.fn is StopIteration ) or isinstance(val, variables.StopIterationVariable): - raise exc.UserStopIteration() + raise exc.UserStopIteration unimplemented(f"raise {exc}") else: unimplemented("raise ... from ...") @@ -2231,7 +2231,7 @@ class InstructionTranslator(InstructionTranslatorBase): return self.f_locals[source.local_name] if isinstance(source, GlobalSource): return self.f_globals[source.global_name] - raise KeyError() + raise KeyError def run(self): super().run() @@ -2388,7 +2388,7 @@ class InstructionTranslator(InstructionTranslatorBase): else create_instruction("RETURN_CONST", argval=inst.argval) ) self.output.add_output_instructions([return_inst]) - raise ReturnValueOp() + raise ReturnValueOp def RETURN_VALUE(self, inst): self._return(inst) @@ -2637,7 +2637,7 @@ class InliningInstructionTranslator(InstructionTranslatorBase): self.output.root_tx.mutated_closure_cell_contents.add( maybe_cell.source.name() ) - raise exc.UnspecializeRestartAnalysis() + raise exc.UnspecializeRestartAnalysis unimplemented("write to __closure__ while inlining") def LOAD_DEREF(self, inst): @@ -2676,12 +2676,12 @@ class InliningInstructionTranslator(InstructionTranslatorBase): def RETURN_VALUE(self, inst): self.symbolic_result = self.pop() # type: ignore[assignment] self.instruction_pointer = None - raise ReturnValueOp() + raise ReturnValueOp def RETURN_CONST(self, inst): self.symbolic_result = self._load_const(inst) self.instruction_pointer = None - raise ReturnValueOp() + raise ReturnValueOp class InliningGeneratorInstructionTranslator(InliningInstructionTranslator): diff --git a/torch/_dynamo/variables/base.py b/torch/_dynamo/variables/base.py index 56707b0abe..80f4995a9f 100644 --- a/torch/_dynamo/variables/base.py +++ b/torch/_dynamo/variables/base.py @@ -219,17 +219,17 @@ class VariableTracker(metaclass=VariableTrackerMeta): def make_guard(self, fn): if self.source: return self.source.make_guard(fn) - raise NotImplementedError() + raise NotImplementedError def const_getattr(self, tx, name: str) -> Any: """getattr(self, name) returning a python constant""" - raise NotImplementedError() + raise NotImplementedError def var_getattr(self, tx, name: str) -> "VariableTracker": """getattr(self, name) 
returning a new variable""" value = self.const_getattr(tx, name) if not variables.ConstantVariable.is_literal(value): - raise NotImplementedError() + raise NotImplementedError source = None if self.source: source = AttrSource(self.source, name) @@ -257,7 +257,7 @@ class VariableTracker(metaclass=VariableTrackerMeta): return None def reconstruct(self, codegen): - raise NotImplementedError() + raise NotImplementedError def can_reconstruct(self, tx): """If it is possible to reconstruct the Python object this @@ -273,7 +273,7 @@ class VariableTracker(metaclass=VariableTrackerMeta): return False def unpack_var_sequence(self, tx) -> List["VariableTracker"]: - raise NotImplementedError() + raise NotImplementedError def has_unpack_var_sequence(self, tx) -> bool: try: diff --git a/torch/_dynamo/variables/constant.py b/torch/_dynamo/variables/constant.py index 36df4797fd..29a3a72a6f 100644 --- a/torch/_dynamo/variables/constant.py +++ b/torch/_dynamo/variables/constant.py @@ -122,7 +122,7 @@ class ConstantVariable(VariableTracker): ) member = getattr(self.value, name) if callable(member): - raise NotImplementedError() + raise NotImplementedError return member def call_method( @@ -212,5 +212,5 @@ class EnumVariable(VariableTracker): def const_getattr(self, tx, name): member = getattr(self.value, name) if callable(member): - raise NotImplementedError() + raise NotImplementedError return member diff --git a/torch/_dynamo/variables/dicts.py b/torch/_dynamo/variables/dicts.py index 6b86048c50..a640bba07a 100644 --- a/torch/_dynamo/variables/dicts.py +++ b/torch/_dynamo/variables/dicts.py @@ -420,7 +420,7 @@ class DictView(VariableTracker): def view_items_vt(self): # Returns an iterable of the unpacked items # Implement in the subclasses - raise NotImplementedError() + raise NotImplementedError def unpack_var_sequence(self, tx): def unwrap(x): @@ -615,7 +615,7 @@ class DataClassVariable(ConstDictVariable): assert self.is_matching_cls(user_cls) def as_proxy(self): - raise NotImplementedError() + raise NotImplementedError def reconstruct(self, codegen): codegen.extend_output([codegen._create_load_const(self.user_cls)]) @@ -737,14 +737,14 @@ class CustomizedDictVariable(ConstDictVariable): # called from builder.py @classmethod def wrap(cls, builder, obj): - raise NotImplementedError() + raise NotImplementedError def __init__(self, items, user_cls, **options): super().__init__(items, user_cls, **options) assert self.is_matching_cls(user_cls) def as_proxy(self): - raise NotImplementedError() + raise NotImplementedError # 'RETURN_VALUE triggered compile' # called from torch/_dynamo/codegen.py diff --git a/torch/_dynamo/variables/functions.py b/torch/_dynamo/variables/functions.py index 0a398e17ba..e43b8f1a14 100644 --- a/torch/_dynamo/variables/functions.py +++ b/torch/_dynamo/variables/functions.py @@ -439,7 +439,7 @@ class NestedUserFunctionVariable(BaseUserFunctionVariable): def get_function(self): if self.closure: - raise NotImplementedError() + raise NotImplementedError func = types.FunctionType( self.code.as_python_constant(), self.f_globals, diff --git a/torch/_dynamo/variables/lists.py b/torch/_dynamo/variables/lists.py index 33b2116123..cb5c641c67 100644 --- a/torch/_dynamo/variables/lists.py +++ b/torch/_dynamo/variables/lists.py @@ -160,7 +160,7 @@ class RangeVariable(BaseListVariable): elif len(items_to_map) == 3: start, stop, step = items_to_map else: - raise AssertionError() + raise AssertionError assert stop is not None super().__init__([start, stop, step], **kwargs) @@ -592,7 +592,7 @@ class 
SliceVariable(BaseListVariable): elif len(items_to_map) == 3: start, stop, step = items_to_map else: - raise AssertionError() + raise AssertionError if isinstance(start, variables.TensorVariable) or isinstance( stop, variables.TensorVariable @@ -644,7 +644,7 @@ class ListIteratorVariable(VariableTracker): assert self.mutable_local old_index = self.index if old_index >= len(self.items): - raise StopIteration() + raise StopIteration tx.output.side_effects.mutation(self) self.index += 1 return self.items[old_index] @@ -665,7 +665,7 @@ class ListIteratorVariable(VariableTracker): def as_python_constant(self): if self.index > 0: - raise NotImplementedError() + raise NotImplementedError return iter([x.as_python_constant() for x in self.items]) def unpack_var_sequence(self, tx): @@ -748,7 +748,7 @@ class RestrictedListSubclassVariable(ListVariable): return [x.as_proxy() for x in self.items] def as_python_constant(self): - raise NotImplementedError() + raise NotImplementedError def is_python_constant(self): return False diff --git a/torch/_dynamo/variables/misc.py b/torch/_dynamo/variables/misc.py index ba2d546ec3..b0ef237edd 100644 --- a/torch/_dynamo/variables/misc.py +++ b/torch/_dynamo/variables/misc.py @@ -592,13 +592,13 @@ class GetAttrVariable(VariableTracker): def const_getattr(self, tx, name): if not isinstance(self.obj, variables.NNModuleVariable): - raise NotImplementedError() + raise NotImplementedError step1 = tx.output.get_submodule(self.obj.module_key) if self.name not in step1.__dict__: - raise NotImplementedError() + raise NotImplementedError step2 = inspect.getattr_static(step1, self.name) if name not in step2.__dict__: - raise NotImplementedError() + raise NotImplementedError return inspect.getattr_static(step2, name) def reconstruct(self, codegen): diff --git a/torch/_dynamo/variables/nn_module.py b/torch/_dynamo/variables/nn_module.py index 548bebb9bb..5fbd502a3c 100644 --- a/torch/_dynamo/variables/nn_module.py +++ b/torch/_dynamo/variables/nn_module.py @@ -153,7 +153,7 @@ class NNModuleVariable(VariableTracker): # Mark the class dynamic unless its module initialization if tx.f_code.co_name != "__init__": GenerationTracker.mark_class_dynamic(type(mod)) - raise UnspecializeRestartAnalysis() + raise UnspecializeRestartAnalysis def _custom_getattr_fallback(self, base, tx, name, options): """Check for a __getattr__ and handle it specially if it is implemented""" diff --git a/torch/_dynamo/variables/optimizer.py b/torch/_dynamo/variables/optimizer.py index 62120b10c5..594226e3e8 100644 --- a/torch/_dynamo/variables/optimizer.py +++ b/torch/_dynamo/variables/optimizer.py @@ -157,7 +157,7 @@ class OptimizerVariable(UserDefinedObjectVariable): ): return self.value.param_groups[arg.source.index] - raise ArgMappingException() + raise ArgMappingException new_args = [map_arg(arg) for arg in args] new_kwargs = {k: map_arg(v) for k, v in kwargs.items()} diff --git a/torch/_dynamo/variables/tensor.py b/torch/_dynamo/variables/tensor.py index 89f56ee83f..480aa91469 100644 --- a/torch/_dynamo/variables/tensor.py +++ b/torch/_dynamo/variables/tensor.py @@ -222,7 +222,7 @@ class TensorVariable(VariableTracker): return SourcelessBuilder.create(tx, example_value) if not (self.source and self.source.subguards_allowed()): - raise NotImplementedError() + raise NotImplementedError # For local source, we associate the real value. We use this real value # for implementing getattr fallthrough on the variable tracker base class. 
@@ -238,23 +238,23 @@ class TensorVariable(VariableTracker): # Which is incorrect, and violates the invariant that all sources should be eval()-able against the scope. _input_associated_real_value = eval(self.source.name(), scope) except Exception as exc: - raise NotImplementedError() from exc + raise NotImplementedError from exc if _input_associated_real_value is None: - raise NotImplementedError() + raise NotImplementedError if object_has_getattribute(_input_associated_real_value): - raise NotImplementedError() + raise NotImplementedError if get_custom_getattr(_input_associated_real_value): - raise NotImplementedError() + raise NotImplementedError real_value = getattr(_input_associated_real_value, name) if callable(real_value): # Callables have more nuanced handling, and we should let the existing system delegate here. # Raising was past behavior and so should always be sound to fall back. # Note - at a certain point we may want to handle - raise NotImplementedError() + raise NotImplementedError from ..guards import GuardBuilder from .builder import VariableBuilder @@ -391,7 +391,7 @@ class TensorVariable(VariableTracker): result = self.dynamic_getattr(tx, name) if result is None: - raise NotImplementedError() + raise NotImplementedError return result def has_unpack_var_sequence(self, tx): @@ -1090,7 +1090,7 @@ class NumpyNdarrayVariable(TensorVariable): elif name in ["__version__"]: unimplemented("delegate np.__version__ to NumPy") if result is None: - raise NotImplementedError() + raise NotImplementedError return result @staticmethod diff --git a/torch/_functorch/pyfunctorch.py b/torch/_functorch/pyfunctorch.py index 3a7373e42f..5a78facf08 100644 --- a/torch/_functorch/pyfunctorch.py +++ b/torch/_functorch/pyfunctorch.py @@ -72,7 +72,7 @@ class FuncTorchInterpreter(ABC): return self._cptr.key() def get_state(self): - raise NotImplementedError() + raise NotImplementedError def check_state(self, state): return state == self.get_state() diff --git a/torch/_guards.py b/torch/_guards.py index 3f3cdc2777..df62ab8e83 100644 --- a/torch/_guards.py +++ b/torch/_guards.py @@ -797,17 +797,17 @@ class Source: return False def reconstruct(self, codegen): - raise NotImplementedError() + raise NotImplementedError def guard_source(self) -> GuardSource: - raise NotImplementedError() + raise NotImplementedError def name(self) -> str: - raise NotImplementedError() + raise NotImplementedError def make_guard(self, fn) -> Guard: if self.guard_source() is GuardSource.CONSTANT: - raise NotImplementedError() + raise NotImplementedError return Guard(self, fn) def is_nn_module(self) -> bool: diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py index 790ec9d60e..ba1976745c 100644 --- a/torch/_inductor/autotune_process.py +++ b/torch/_inductor/autotune_process.py @@ -422,7 +422,7 @@ class BenchmarkRequest: def make_run_fn( self, *input_tensors: torch.Tensor, output_tensor: torch.Tensor ) -> Callable[[], None]: - raise NotImplementedError() + raise NotImplementedError def cleanup_run_fn(self) -> None: pass diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index baf5869b09..07c6f93730 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -446,7 +446,7 @@ def _reduce_tensor(t): # TODO: These tensors don't currently pickle, so we can't cache a # compiled graph containing them. Just fail now. If mkldnn tensors # get pickling support, we can remove this. 
- raise BypassFxGraphCache() + raise BypassFxGraphCache # Very large tensors could be expensive to copy to cpu and hash. Let's # at least report if we find slowness. @@ -598,7 +598,7 @@ class FxGraphHashDetails: # Some configs options are callables, e.g., post_grad_custom_pre_pass, # and may not pickle. log.debug("Can't pickle inductor config: %s", e) - raise BypassFxGraphCache() from e + raise BypassFxGraphCache from e def debug_str(self) -> str: """ @@ -843,19 +843,19 @@ class FxGraphCache: """ # Freezing can embed constants that wouldn't be static across runs. if config.freezing or config.aot_inductor.use_runtime_constant_folding: - raise BypassFxGraphCache() + raise BypassFxGraphCache # The treatment of guards in the caching implementation requires that # we have a shape env. if FxGraphCache._get_shape_env() is None: log.debug("fx graph cache no shape env") - raise BypassFxGraphCache() + raise BypassFxGraphCache # HigherOrderOperators should be handled on a case-by-case basis. # Currently, we just skip caching if we have any. for node in gm.graph.nodes: if isinstance(node.target, torch._ops.HigherOrderOperator): - raise BypassFxGraphCache() + raise BypassFxGraphCache @staticmethod def load( @@ -990,7 +990,7 @@ def cpp_compiler_search(search: str) -> str: return cxx except (subprocess.SubprocessError, FileNotFoundError, ImportError): continue - raise exc.InvalidCxxCompiler() + raise exc.InvalidCxxCompiler def install_gcc_via_conda() -> str: @@ -2745,7 +2745,7 @@ def _worker_compile_triton( class CodeCacheFuture: def result(self): - raise NotImplementedError() + raise NotImplementedError class TritonFuture(CodeCacheFuture): diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py index b061e3ad1f..19b504d93d 100644 --- a/torch/_inductor/codegen/common.py +++ b/torch/_inductor/codegen/common.py @@ -88,16 +88,16 @@ device_codegens: Dict[str, DeviceCodegen] = {} class DeviceOpOverrides: def import_get_raw_stream_as(self, name): - raise NotImplementedError() + raise NotImplementedError def set_device(self, device_idx): - raise NotImplementedError() + raise NotImplementedError def synchronize(self): - raise NotImplementedError() + raise NotImplementedError def device_guard(self, device_idx): - raise NotImplementedError() + raise NotImplementedError device_op_overrides_dict: Dict[str, DeviceOpOverrides] = {} @@ -1368,7 +1368,7 @@ class Kernel(CodeGen): self.cse = cse def load(self, name: str, index: sympy.Expr) -> CSEVariable: - raise NotImplementedError() + raise NotImplementedError def indirect_load(self, name: str, index: sympy.Expr): """A load the depends on an index we have read""" @@ -1381,12 +1381,12 @@ class Kernel(CodeGen): self.loads = prior def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable): - raise NotImplementedError() + raise NotImplementedError def store( self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None ) -> None: - raise NotImplementedError() + raise NotImplementedError def reduction( self, @@ -1395,7 +1395,7 @@ class Kernel(CodeGen): reduction_type: ReductionType, value: Union[CSEVariable, Tuple[CSEVariable, ...]], ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]: - raise NotImplementedError() + raise NotImplementedError def scan( self, @@ -1405,7 +1405,7 @@ class Kernel(CodeGen): ], values: Tuple[CSEVariable, ...], ) -> Tuple[CSEVariable, ...]: - raise NotImplementedError() + raise NotImplementedError def bucketize( self, @@ -1418,11 +1418,11 @@ class Kernel(CodeGen): """ See [Note: Inductor 
bucketize op] """ - raise NotImplementedError() + raise NotImplementedError @property def assert_function(self) -> str: - raise NotImplementedError() + raise NotImplementedError def indirect_assert(self, var, lower, upper, mask=None): if lower and upper: @@ -1444,7 +1444,7 @@ class Kernel(CodeGen): return f'{self.assert_function}({cond}, "index out of bounds: {cond_print}")' def index_to_str(self, index: sympy.Expr) -> str: - raise NotImplementedError() + raise NotImplementedError def __enter__(self): # TODO: hoist this to top level @@ -1737,4 +1737,4 @@ class KernelTemplate: Generates a ChoiceCaller instance from the given arguments. """ - raise NotImplementedError() + raise NotImplementedError diff --git a/torch/_inductor/codegen/cpp.py b/torch/_inductor/codegen/cpp.py index 26a68ea837..e8ca0dd18b 100644 --- a/torch/_inductor/codegen/cpp.py +++ b/torch/_inductor/codegen/cpp.py @@ -2659,7 +2659,7 @@ class CppVecKernel(CppKernel): mean, m2, weight = reduction_project(reduction_type, next_value) return f"welford_combine({var}, {{{mean}, {m2}, {weight}}})" else: - raise NotImplementedError() + raise NotImplementedError def indirect_assert(self, var, lower, upper, mask=None): assert not mask, "do not support mask in indirect_indexing assertion" diff --git a/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py b/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py index 1e4828aab4..6a15b183f8 100644 --- a/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py +++ b/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py @@ -197,7 +197,7 @@ class CutlassEVTEpilogueTypeFormatter: return f"cutlass::epilogue::fusion::Sm90EVT<cutlass::epilogue::fusion::Sm90Compute<cutlass::maximum, ElementAcc, ElementAcc, RoundStyle>,{a}, {const_zero}>" # noqa: B950 def reduction(self, dtype, src_dtype, reduction_type, value): - raise CUTLASSEVTOpNotImplementedError() + raise CUTLASSEVTOpNotImplementedError # Add more ops here... 
def getvalue(self, result) -> str: @@ -354,7 +354,7 @@ class CutlassEVTEpilogueArgumentFormatter: return a def reduction(self, dtype, src_dtype, reduction_type, value): - raise CUTLASSEVTOpNotImplementedError() + raise CUTLASSEVTOpNotImplementedError def getvalue(self, result) -> str: return "{" + str(result) + "}" diff --git a/torch/_inductor/codegen/memory_planning.py b/torch/_inductor/codegen/memory_planning.py index d94e4723db..8b58fe049e 100644 --- a/torch/_inductor/codegen/memory_planning.py +++ b/torch/_inductor/codegen/memory_planning.py @@ -134,15 +134,15 @@ class AllocationTreeNode: def get_live_ranges(self) -> LiveRanges: """Aggregate LiveRanges for all objects below this in tree""" - raise NotImplementedError() + raise NotImplementedError def get_size_hint(self) -> int: """Number of bytes used for example inputs""" - raise NotImplementedError() + raise NotImplementedError def get_symbolic_size(self) -> sympy.Expr: """Number of bytes needed at runtime""" - raise NotImplementedError() + raise NotImplementedError def finalize(self, pool, offset) -> AllocationTreeNode: """Called after all allocations have been made""" diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index 0133c3585d..3893c34832 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -1468,7 +1468,7 @@ class TritonKernel(Kernel): def add_range(i, expr): expr = sv.simplify(expr) if not sv.statically_known_multiple_of(remaining[i], expr): - raise CantSplit() + raise CantSplit # guard on the last item out remaining[i] = FloorDiv(remaining[i], expr) new_ranges[i].append(expr) @@ -1501,7 +1501,7 @@ class TritonKernel(Kernel): if not sv.statically_known_multiple_of( size, remaining[current_group] ): - raise CantSplit() + raise CantSplit size1 = remaining[current_group] size2 = FloorDiv(size, remaining[current_group]) return_getters.append( diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py index 09b6328e26..1609a6d897 100644 --- a/torch/_inductor/codegen/wrapper.py +++ b/torch/_inductor/codegen/wrapper.py @@ -1346,7 +1346,7 @@ class WrapperCodeGen(CodeGen): self.lines.append(LineContext(ctx)) def val_to_cpp_arg_str(self, type_, val) -> str: - raise NotImplementedError() + raise NotImplementedError def val_to_arg_str(self, s): from torch.utils._triton import dtype_to_string, has_triton_package diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py index 48fed87257..404c49edde 100644 --- a/torch/_inductor/graph.py +++ b/torch/_inductor/graph.py @@ -954,10 +954,10 @@ class GraphLowering(torch.fx.Interpreter): return self.add_tensor_constant(value, target) def call_module(self, target, args, kwargs): - raise AssertionError() + raise AssertionError def call_method(self, target, args, kwargs): - raise AssertionError() + raise AssertionError def output(self, target, args, kwargs): result = super().output(target, args, kwargs) diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py index 9960c02b33..2a66ea69d3 100644 --- a/torch/_inductor/ir.py +++ b/torch/_inductor/ir.py @@ -2252,7 +2252,7 @@ class View(GenericView): size_old = size_old * modulus V.graph.sizevars.guard_equals(size_new, size_old) else: - raise AssertionError() + raise AssertionError while stack_old: size_old = stack_old.pop() @@ -2818,7 +2818,7 @@ class FlexibleLayout(Layout): "stride_ordered_for_memory_format, unsuppored memory_format: %s", memory_format, ) - raise NotImplementedError() + raise NotImplementedError @staticmethod def 
same_ordered(sizes, stride): @@ -3666,16 +3666,16 @@ class ChoiceCaller: return do_bench(lambda: algo(*args, out=out)) def call_name(self) -> str: - raise NotImplementedError() + raise NotImplementedError def to_callable(self): - raise NotImplementedError() + raise NotImplementedError def hash_key(self) -> str: - raise NotImplementedError() + raise NotImplementedError def output_node(self) -> "TensorBox": - raise NotImplementedError() + raise NotImplementedError def info_dict(self) -> Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]]: """Information returned here is logged to the autotune log file when that is enabled.""" @@ -3684,7 +3684,7 @@ class ChoiceCaller: class TritonTemplateCallerBase(ChoiceCaller): def get_make_kernel_render(self) -> Any: - raise NotImplementedError() + raise NotImplementedError class MultiTemplateBuffer(TritonTemplateBuffer): @@ -4033,7 +4033,7 @@ class ExternKernel(InputsKernel): wrapper.writeline(origin_str) def codegen(self, wrapper): - raise NotImplementedError() + raise NotImplementedError def get_kernel_name(self): return self.cpp_kernel_name if V.graph.cpp_wrapper else self.python_kernel_name @@ -4157,7 +4157,7 @@ class ExternKernel(InputsKernel): offset, index, ) - raise NotImplementedError() + raise NotImplementedError return ReinterpretView( data=x.data, diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py index 4fdd2c8c38..89e6931217 100644 --- a/torch/_inductor/lowering.py +++ b/torch/_inductor/lowering.py @@ -1629,7 +1629,7 @@ def bernoulli_p(x, *args): # This shouldn't be called in general @register_lowering(aten._foobar) def _foobar(_): - raise AssertionError() + raise AssertionError @functools.lru_cache(1) diff --git a/torch/_inductor/pattern_matcher.py b/torch/_inductor/pattern_matcher.py index b0e204c857..177d7c9466 100644 --- a/torch/_inductor/pattern_matcher.py +++ b/torch/_inductor/pattern_matcher.py @@ -218,7 +218,7 @@ class PatternExpr: def _match( self, node: torch.fx.Node, ctx: MatchContext ) -> Union[Match, FailedMatch]: - raise NotImplementedError() + raise NotImplementedError def match(self, node: torch.fx.Node) -> Union[Match, FailedMatch]: try: @@ -361,7 +361,7 @@ class _TargetExpr(PatternExpr): return isinstance(self.users, Multiple) or self.users > 1 def find_anchor_nodes(self, ctx: MatchContext, searched): - raise NotImplementedError() + raise NotImplementedError def _match_fns(self, node: torch.fx.Node): return ( @@ -803,7 +803,7 @@ class PatternEntry: extra_check: Callable[[Match], bool] def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node): - raise NotImplementedError() + raise NotImplementedError def register(self, pass_dicts, target=None, prepend=False): if target is None: @@ -1507,7 +1507,7 @@ class PatternMatcherPass: def _not_implemented(*args, **kwargs) -> NoReturn: - raise NotImplementedError() + raise NotImplementedError def fx_to_pattern( diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py index f37580a59b..573e79acd6 100644 --- a/torch/_inductor/scheduler.py +++ b/torch/_inductor/scheduler.py @@ -2514,13 +2514,13 @@ class BaseScheduling: """ Check whether node1 and node2 can be vertically fused or not. """ - raise NotImplementedError() + raise NotImplementedError def can_fuse_horizontal(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode): """ Check whether node1 and node2 can be horizontally fused or not. 
""" - raise NotImplementedError() + raise NotImplementedError def fuse(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode): """ @@ -2535,7 +2535,7 @@ class BaseScheduling: """ Process the iteration sizes in case a transformation needs to be applied. """ - raise NotImplementedError() + raise NotImplementedError def codegen_template( self, template_node: SchedulerNode, epilogue_nodes: List[SchedulerNode] @@ -2546,19 +2546,19 @@ class BaseScheduling: This function is only available for triton now. If the third-party backend behaves as a sub-class of TritonScheduling, it can override it or reuse it. """ - raise NotImplementedError() + raise NotImplementedError def codegen_node(self, node: Union[FusedSchedulerNode, SchedulerNode]): """ Generate a kernel given a list of pre-fused nodes. """ - raise NotImplementedError() + raise NotImplementedError def codegen_sync(self): """ Generate synchronization code for the kernel. This method depends on the hardware characteristics. """ - raise NotImplementedError() + raise NotImplementedError def ready_to_flush(self) -> bool: """ @@ -2571,14 +2571,14 @@ class BaseScheduling: """ Flush the generated kernel and python wrapper code to the source code file. """ - raise NotImplementedError() + raise NotImplementedError def benchmark_fused_nodes(self, nodes): """ Benchmark fused list of nodes and return the execution time in milliseconds on randomly generated inputs. """ - raise NotImplementedError() + raise NotImplementedError def get_fusion_pair_priority(self, node1, node2) -> int: """ diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index a0f847bf9a..030649df45 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -949,11 +949,11 @@ class DeferredLineBase: def __call__(self) -> Optional[str]: """Returns either self.line or None to indicate the line has been 'unwritten'""" - raise NotImplementedError() + raise NotImplementedError def _new_line(self, line: str) -> DeferredLineBase: """Returns a new deferred line with the same condition""" - raise NotImplementedError() + raise NotImplementedError def with_prefix(self, prefix): return self._new_line(f"{prefix}{self.line}") diff --git a/torch/_numpy/_funcs_impl.py b/torch/_numpy/_funcs_impl.py index 27a2c20d74..7c09288f45 100644 --- a/torch/_numpy/_funcs_impl.py +++ b/torch/_numpy/_funcs_impl.py @@ -217,7 +217,7 @@ def _split_helper_int(tensor, indices_or_sections, axis, strict=False): l, n = tensor.shape[axis], indices_or_sections if n <= 0: - raise ValueError() + raise ValueError if l % n == 0: num, sz = n, l // n diff --git a/torch/_numpy/_ndarray.py b/torch/_numpy/_ndarray.py index c93ba20058..203a12a8b9 100644 --- a/torch/_numpy/_ndarray.py +++ b/torch/_numpy/_ndarray.py @@ -512,7 +512,7 @@ def array(obj, dtype=None, *, copy=True, order="K", subok=False, ndmin=0, like=N if like is not None: raise NotImplementedError("'like' parameter is not supported.") if order != "K": - raise NotImplementedError() + raise NotImplementedError # a happy path if ( diff --git a/torch/_numpy/_normalizations.py b/torch/_numpy/_normalizations.py index 0569d2d3e7..1cf2f56eaa 100644 --- a/torch/_numpy/_normalizations.py +++ b/torch/_numpy/_normalizations.py @@ -174,7 +174,7 @@ def maybe_copy_to(out, result, promote_scalar_result=False): maybe_copy_to(o, r, promote_scalar_result) for o, r in zip(out, result) ) else: - raise AssertionError() # We should never hit this path + raise AssertionError # We should never hit this path def wrap_tensors(result): diff --git a/torch/_ops.py b/torch/_ops.py 
index 7b081e1360..03ff25a688 100644 --- a/torch/_ops.py +++ b/torch/_ops.py @@ -85,7 +85,7 @@ class OperatorBase: self.functorch_table = {} def __call__(self, *args, **kwargs): - raise NotImplementedError() + raise NotImplementedError def has_kernel_for_dispatch_key(self, k): return k in self.py_kernels @@ -165,7 +165,7 @@ class OperatorBase: return fn def name(self): - raise NotImplementedError() + raise NotImplementedError is_included_in_alias = torch._C._dispatch_is_included_in_alias diff --git a/torch/_streambase.py b/torch/_streambase.py index 1d4737563d..5a0df2c22b 100644 --- a/torch/_streambase.py +++ b/torch/_streambase.py @@ -6,27 +6,27 @@ class _StreamBase(ABC): @abstractmethod def wait_event(self, event): - raise NotImplementedError() + raise NotImplementedError @abstractmethod def wait_stream(self, stream): - raise NotImplementedError() + raise NotImplementedError @abstractmethod def record_event(self, event=None): - raise NotImplementedError() + raise NotImplementedError @abstractmethod def query(self): - raise NotImplementedError() + raise NotImplementedError @abstractmethod def synchronize(self): - raise NotImplementedError() + raise NotImplementedError @abstractmethod def __eq__(self, stream): - raise NotImplementedError() + raise NotImplementedError class _EventBase(ABC): @@ -34,12 +34,12 @@ class _EventBase(ABC): @abstractmethod def wait(self, stream=None): - raise NotImplementedError() + raise NotImplementedError @abstractmethod def query(self): - raise NotImplementedError() + raise NotImplementedError @abstractmethod def synchronize(self): - raise NotImplementedError() + raise NotImplementedError diff --git a/torch/cuda/_memory_viz.py b/torch/cuda/_memory_viz.py index a862acd731..d3838f3410 100644 --- a/torch/cuda/_memory_viz.py +++ b/torch/cuda/_memory_viz.py @@ -354,7 +354,7 @@ def trace(data): elif e['action'] == 'oom': size = e['size'] free = e['device_free'] - out.write(f'raise OutOfMemoryError() # {Bytes(size)} requested, {Bytes(free)} free in CUDA\n') + out.write(f'raise OutOfMemoryError # {Bytes(size)} requested, {Bytes(free)} free in CUDA\n') else: out.write(f'{e}\n') out.write(f"TOTAL MEM: {Bytes(count)}") diff --git a/torch/distributed/_composable/fsdp/_fsdp_param.py b/torch/distributed/_composable/fsdp/_fsdp_param.py index 15046a79c6..b6b4bf5524 100644 --- a/torch/distributed/_composable/fsdp/_fsdp_param.py +++ b/torch/distributed/_composable/fsdp/_fsdp_param.py @@ -465,7 +465,7 @@ class FSDPParam: return [_to_dtype_if_needed(sharded_param_data, self.param_dtype)] elif self.sharded_state == ShardedState.SHARDED_POST_FORWARD: if hasattr(self._sharded_local_tensor, "fsdp_pre_all_gather"): - raise NotImplementedError() + raise NotImplementedError all_gather_input = _to_dtype_if_needed( cast(torch.Tensor, self._sharded_post_forward_param_data), self.param_dtype, diff --git a/torch/distributed/_spmd/parallel_mode.py b/torch/distributed/_spmd/parallel_mode.py index a908109805..2e9c15258d 100644 --- a/torch/distributed/_spmd/parallel_mode.py +++ b/torch/distributed/_spmd/parallel_mode.py @@ -39,7 +39,7 @@ class ParallelMode(ABC): TODO(@wanchaol): some of these arguments are not necessary for partitioning, remove the unnecessary ones later. """ - raise NotImplementedError() + raise NotImplementedError @abstractmethod def transform_and_compile(self, gm: GraphModule) -> GraphModule: @@ -51,7 +51,7 @@ class ParallelMode(ABC): the distributed environment. """ # TODO: add more necessary arguments to this interface. 
- raise NotImplementedError() + raise NotImplementedError class DataParallel(ParallelMode): diff --git a/torch/distributed/_state_dict_utils.py b/torch/distributed/_state_dict_utils.py index b69be53bb2..17e8448247 100644 --- a/torch/distributed/_state_dict_utils.py +++ b/torch/distributed/_state_dict_utils.py @@ -146,7 +146,7 @@ def _iterate_state_dict( not isinstance(companion_obj, (list, tuple)) or len(companion_obj) != len(iter_object) ): - raise CompanionMismatch() + raise CompanionMismatch ret = [ _iterate_state_dict( @@ -437,7 +437,7 @@ def _check_state_dict_similarity( companion_obj: Any, ) -> torch.Tensor: if companion_obj.dtype != obj.dtype or companion_obj.size() != obj.size(): - raise CompanionMismatch() + raise CompanionMismatch return obj try: diff --git a/torch/distributed/elastic/agent/server/api.py b/torch/distributed/elastic/agent/server/api.py index cdcea11af8..c142fbf67b 100644 --- a/torch/distributed/elastic/agent/server/api.py +++ b/torch/distributed/elastic/agent/server/api.py @@ -439,7 +439,7 @@ class ElasticAgent(abc.ABC): Raises: Exception - any other failures NOT related to worker process """ - raise NotImplementedError() + raise NotImplementedError @abc.abstractmethod def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup: @@ -450,7 +450,7 @@ class ElasticAgent(abc.ABC): Implementors are encouraged (but not required) to return a defensive read-only copy. """ - raise NotImplementedError() + raise NotImplementedError class SimpleElasticAgent(ElasticAgent): @@ -477,7 +477,7 @@ class SimpleElasticAgent(ElasticAgent): This is according to worker spec for the worker group . Returns a map of ``local_rank`` to worker ``id``. """ - raise NotImplementedError() + raise NotImplementedError @abc.abstractmethod def _stop_workers(self, worker_group: WorkerGroup) -> None: @@ -487,7 +487,7 @@ class SimpleElasticAgent(ElasticAgent): ``WorkerState``. That is, it must gracefully handle stopping non-existent workers, unhealthy (stuck) workers, etc. """ - raise NotImplementedError() + raise NotImplementedError @abc.abstractmethod def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult: @@ -495,7 +495,7 @@ class SimpleElasticAgent(ElasticAgent): This function also returns the new state of the worker group. """ - raise NotImplementedError() + raise NotImplementedError @abc.abstractmethod def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None: @@ -504,7 +504,7 @@ class SimpleElasticAgent(ElasticAgent): Args: death_sig: Signal to send to the child process, SIGTERM is default """ - raise NotImplementedError() + raise NotImplementedError @staticmethod def _set_master_addr_port( diff --git a/torch/distributed/elastic/multiprocessing/api.py b/torch/distributed/elastic/multiprocessing/api.py index c657c18f4a..72c3955e7d 100644 --- a/torch/distributed/elastic/multiprocessing/api.py +++ b/torch/distributed/elastic/multiprocessing/api.py @@ -458,7 +458,7 @@ class PContext(abc.ABC): @abc.abstractmethod def _start(self) -> None: """Start processes using strategy defined in a particular context.""" - raise NotImplementedError() + raise NotImplementedError @abc.abstractmethod def _poll(self) -> Optional[RunProcsResult]: @@ -469,7 +469,7 @@ class PContext(abc.ABC): successfully or any process fails. Returns ``None`` if all processes are still running. 
""" - raise NotImplementedError() + raise NotImplementedError def wait(self, timeout: float = -1, period: float = 1) -> Optional[RunProcsResult]: """ @@ -514,7 +514,7 @@ class PContext(abc.ABC): @abc.abstractmethod def pids(self) -> Dict[int, int]: """Return pids of processes mapped by their respective local_ranks.""" - raise NotImplementedError() + raise NotImplementedError @abc.abstractmethod def _close(self, death_sig: signal.Signals, timeout: int = 30) -> None: @@ -522,7 +522,7 @@ class PContext(abc.ABC): Terminates all processes managed by this context and cleans up any meta resources (e.g. redirect, error_file files). """ - raise NotImplementedError() + raise NotImplementedError def close( self, death_sig: Optional[signal.Signals] = None, timeout: int = 30 diff --git a/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py b/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py index dd0c098d4a..b3b599cb1c 100644 --- a/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py +++ b/torch/distributed/elastic/rendezvous/dynamic_rendezvous.py @@ -655,10 +655,10 @@ class _DistributedRendezvousOpExecutor(_RendezvousOpExecutor): continue if action == _Action.ERROR_CLOSED: - raise RendezvousClosedError() + raise RendezvousClosedError if action == _Action.ERROR_TIMEOUT: - raise RendezvousTimeoutError() + raise RendezvousTimeoutError if action == _Action.SYNC: # Delay the execution by one second to avoid overloading the diff --git a/torch/distributed/elastic/rendezvous/etcd_rendezvous.py b/torch/distributed/elastic/rendezvous/etcd_rendezvous.py index f73f354128..4ece7819c9 100644 --- a/torch/distributed/elastic/rendezvous/etcd_rendezvous.py +++ b/torch/distributed/elastic/rendezvous/etcd_rendezvous.py @@ -270,7 +270,7 @@ class EtcdRendezvous: self._rendezvous_deadline = time.time() + self._timeout while True: if time.time() > self._rendezvous_deadline: - raise RendezvousTimeoutError() + raise RendezvousTimeoutError logger.info("Attempting to join next rendezvous") try: @@ -340,17 +340,17 @@ class EtcdRendezvous: logger.info("Observed existing rendezvous state: %s", state) if state["status"] == "closed": - raise RendezvousClosedError() + raise RendezvousClosedError if state["status"] == "joinable": return self.join_phase(state["version"]) if state["status"] == "final": self.handle_existing_rendezvous(state["version"]) - raise EtcdRendezvousRetryImmediately() + raise EtcdRendezvousRetryImmediately self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1) - raise EtcdRendezvousRetryableFailure() + raise EtcdRendezvousRetryableFailure def join_phase(self, expected_version): """ @@ -632,7 +632,7 @@ class EtcdRendezvous: active_version, state = self.get_rdzv_state() if state["status"] != "final" or state["version"] != expected_version: - raise EtcdRendezvousRetryImmediately() + raise EtcdRendezvousRetryImmediately # Increment counter to signal an additional waiting worker. state["num_workers_waiting"] += 1 @@ -714,7 +714,7 @@ class EtcdRendezvous: pass if time.time() > self._rendezvous_deadline: - raise RendezvousTimeoutError() + raise RendezvousTimeoutError active_version, state = self.get_rdzv_state() def handle_join_last_call(self, expected_version, deadline): @@ -832,7 +832,7 @@ class EtcdRendezvous: pass if time.time() > self._rendezvous_deadline: - raise RendezvousTimeoutError() + raise RendezvousTimeoutError # Unfortunately, we have to do another fetch in order to get last etcd_index. 
return self.get_rdzv_state() diff --git a/torch/fx/_symbolic_trace.py b/torch/fx/_symbolic_trace.py index 02c15ec395..b3524dbde4 100644 --- a/torch/fx/_symbolic_trace.py +++ b/torch/fx/_symbolic_trace.py @@ -963,7 +963,7 @@ class _PatchedFn(NamedTuple): orig_fn: Any def revert(self): - raise NotImplementedError() + raise NotImplementedError class _PatchedFnSetItem(_PatchedFn): diff --git a/torch/fx/experimental/_sym_dispatch_mode.py b/torch/fx/experimental/_sym_dispatch_mode.py index 8f6160ea41..c3385de616 100644 --- a/torch/fx/experimental/_sym_dispatch_mode.py +++ b/torch/fx/experimental/_sym_dispatch_mode.py @@ -22,7 +22,7 @@ SYM_FUNCTION_MODE: Optional["SymDispatchMode"] = None # class SymDispatchMode: def __sym_dispatch__(self, func, types, args, kwargs): - raise NotImplementedError() + raise NotImplementedError def __enter__(self): global SYM_FUNCTION_MODE diff --git a/torch/fx/passes/operator_support.py b/torch/fx/passes/operator_support.py index 2e0eab25c4..ce050f046e 100644 --- a/torch/fx/passes/operator_support.py +++ b/torch/fx/passes/operator_support.py @@ -31,7 +31,7 @@ class OperatorSupportBase(abc.ABC): def is_node_supported( self, submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node ) -> bool: - raise NotImplementedError() + raise NotImplementedError @compatibility(is_backward_compatible=False) diff --git a/torch/jit/_script.py b/torch/jit/_script.py index e9e4c404c5..a0adf60284 100644 --- a/torch/jit/_script.py +++ b/torch/jit/_script.py @@ -476,7 +476,7 @@ if _enabled: # RecursiveScriptClass. def forward_magic_method(self, method_name, *args, **kwargs): if not self._c._has_method(method_name): - raise TypeError() + raise TypeError self_method = self.__getattr__(method_name) return self_method(*args, **kwargs) @@ -865,7 +865,7 @@ if _enabled: if getattr(self_method, "__func__", None) == getattr( RecursiveScriptModule, method_name ): - raise NotImplementedError() + raise NotImplementedError return self_method(*args, **kwargs) def __iter__(self): diff --git a/torch/nn/modules/conv.py b/torch/nn/modules/conv.py index 982808c08d..806be54738 100644 --- a/torch/nn/modules/conv.py +++ b/torch/nn/modules/conv.py @@ -1184,7 +1184,7 @@ class _LazyConvXdMixin(LazyModuleMixin): # Function to return the number of spatial dims expected for inputs to the module. # This is expected to be implemented by subclasses. def _get_num_spatial_dims(self) -> int: - raise NotImplementedError() + raise NotImplementedError # LazyConv1d defines weight as a Tensor but derived class defines it as UnitializeParameter diff --git a/torch/onnx/_internal/diagnostics/infra/_infra.py b/torch/onnx/_internal/diagnostics/infra/_infra.py index 325cdc44ac..2a522b61f8 100644 --- a/torch/onnx/_internal/diagnostics/infra/_infra.py +++ b/torch/onnx/_internal/diagnostics/infra/_infra.py @@ -264,7 +264,7 @@ class Invocation: # TODO: Implement this. # Tracks top level call arguments and diagnostic options. 
def __init__(self) -> None: - raise NotImplementedError() + raise NotImplementedError @dataclasses.dataclass diff --git a/torch/overrides.py b/torch/overrides.py index a7a783cd39..39d2ba19cf 100644 --- a/torch/overrides.py +++ b/torch/overrides.py @@ -1910,7 +1910,7 @@ class TorchFunctionMode: pass def __torch_function__(self, func, types, args=(), kwargs=None): - raise NotImplementedError() + raise NotImplementedError def __enter__(self): _push_mode(self) diff --git a/torch/testing/_comparison.py b/torch/testing/_comparison.py index 42ef9be1b1..e2bad14e44 100644 --- a/torch/testing/_comparison.py +++ b/torch/testing/_comparison.py @@ -36,7 +36,7 @@ class ErrorMeta(Exception): super().__init__( "If you are a user and see this message during normal operation " "please file an issue at https://github.com/pytorch/pytorch/issues. " - "If you are a developer and working on the comparison functions, please `raise ErrorMeta().to_error()` " + "If you are a developer and working on the comparison functions, please `raise ErrorMeta.to_error()` " "for user facing errors." ) self.type = type @@ -336,7 +336,7 @@ class Pair(abc.ABC): @staticmethod def _inputs_not_supported() -> NoReturn: - raise UnsupportedInputs() + raise UnsupportedInputs @staticmethod def _check_inputs_isinstance(*inputs: Any, cls: Union[Type, Tuple[Type, ...]]): @@ -1217,7 +1217,7 @@ def not_close_error_metas( ) except ErrorMeta as error_meta: # Explicitly raising from None to hide the internal traceback - raise error_meta.to_error() from None + raise error_meta.to_error() from None # noqa: RSE102 error_metas: List[ErrorMeta] = [] for pair in pairs: diff --git a/torch/testing/_internal/distributed/distributed_test.py b/torch/testing/_internal/distributed/distributed_test.py index 9e1e0b4361..13620d2594 100644 --- a/torch/testing/_internal/distributed/distributed_test.py +++ b/torch/testing/_internal/distributed/distributed_test.py @@ -9390,7 +9390,7 @@ class DistributedTest: @staticmethod def backward(ctx, grad_output): - raise RuntimeError() + raise RuntimeError class MyModel(nn.Module): def __init__(self, device): @@ -9534,7 +9534,7 @@ class DistributedTest: @staticmethod def backward(ctx, grad_output): - raise RuntimeError() + raise RuntimeError class MyModel(torch.nn.Module): def __init__(self, device): diff --git a/torch/utils/_python_dispatch.py b/torch/utils/_python_dispatch.py index cb0be93ce4..9bd6d25f0d 100644 --- a/torch/utils/_python_dispatch.py +++ b/torch/utils/_python_dispatch.py @@ -67,7 +67,7 @@ class TorchDispatchMode: self.old_dispatch_mode_flag = False def __torch_dispatch__(self, func, types, args=(), kwargs=None): - raise NotImplementedError() + raise NotImplementedError def __enter__(self): global _is_in_torch_dispatch_mode diff --git a/torch/utils/checkpoint.py b/torch/utils/checkpoint.py index 8de5338817..259b1cd351 100644 --- a/torch/utils/checkpoint.py +++ b/torch/utils/checkpoint.py @@ -1085,7 +1085,7 @@ class _recomputation_hook(torch.autograd.graph.saved_tensors_hooks): if target_frame.early_stop and target_frame.recomp_counter[gid] == len( target_frame.weak_holders ): - raise _StopRecomputationError() + raise _StopRecomputationError # See Rule 6: [ retain_graph is True ] above return x.detach() diff --git a/torch/utils/data/_utils/fetch.py b/torch/utils/data/_utils/fetch.py index c5696b401c..553c516ff3 100644 --- a/torch/utils/data/_utils/fetch.py +++ b/torch/utils/data/_utils/fetch.py @@ -12,7 +12,7 @@ class _BaseDatasetFetcher: self.drop_last = drop_last def fetch(self, possibly_batched_index): - 
raise NotImplementedError() + raise NotImplementedError class _IterableDatasetFetcher(_BaseDatasetFetcher): diff --git a/torch/utils/data/datapipes/dataframe/dataframes.py b/torch/utils/data/datapipes/dataframe/dataframes.py index 69a14e06fc..02c824b11a 100644 --- a/torch/utils/data/datapipes/dataframe/dataframes.py +++ b/torch/utils/data/datapipes/dataframe/dataframes.py @@ -92,7 +92,7 @@ class Capture: if attrname == 'kwarg' or attrname == 'kwargs': raise Exception('no kwargs!') if attrname in ['__deepcopy__']: - raise AttributeError() + raise AttributeError result = CaptureGetAttr(self, attrname, ctx=self.ctx) return result diff --git a/torch/utils/data/sampler.py b/torch/utils/data/sampler.py index dbd91d0ac1..0f100f1858 100644 --- a/torch/utils/data/sampler.py +++ b/torch/utils/data/sampler.py @@ -79,7 +79,7 @@ class Sampler(Generic[T_co]): # Calling `len(subclass_instance)` raises: # TypeError: 'NotImplementedType' object cannot be interpreted as an integer # - # + `raise NotImplementedError()`: + # + `raise NotImplementedError`: # This prevents triggering some fallback behavior. E.g., the built-in # `list(X)` tries to call `len(X)` first, and executes a different code # path if the method is not found or `NotImplemented` is returned, while diff --git a/torchgen/dest/ufunc.py b/torchgen/dest/ufunc.py index da42149c59..ffc879afb6 100644 --- a/torchgen/dest/ufunc.py +++ b/torchgen/dest/ufunc.py @@ -507,7 +507,7 @@ def compute_ufunc_cpu_kernel(g: NativeFunctionsGroup) -> str: elif k is UfuncKey.CPUVector: compute_t = VectorizedCType(BaseCType(scalar_t)) else: - raise AssertionError() + raise AssertionError inner_ufunc_sigs = ufunc_sigs.setdefault(dtype, {}) if k not in inner_ufunc_sigs: inner_ufunc_sigs[k] = UfuncSignature( diff --git a/torchgen/gen.py b/torchgen/gen.py index 1d8e0061bf..2549fd175c 100644 --- a/torchgen/gen.py +++ b/torchgen/gen.py @@ -1151,7 +1151,7 @@ def compute_cpp_argument_yaml( arg["default"] = cpp_a.default return arg elif isinstance(cpp_a.argument, SelfArgument): - raise AssertionError() + raise AssertionError elif isinstance(cpp_a.argument, Argument): return compute_argument_yaml( cpp_a.argument,
2.41.0
a403757913689d200683a4158c565bc3dbade74b
Wed, 17 Apr 2024 20:45:35 +0000
[PATCH 0263/1000] [DeviceMesh][Test] Add 3d unit test for `get_local_rank()` (#124142)
Fixes #ISSUE_NUMBER Pull Request resolved: https://github.com/pytorch/pytorch/pull/124142 Approved by: https://github.com/xunnanxu, https://github.com/fegin, https://github.com/XilunWu
diff --git a/test/distributed/test_device_mesh.py b/test/distributed/test_device_mesh.py index a98916a922..697cc7fec0 100644 --- a/test/distributed/test_device_mesh.py +++ b/test/distributed/test_device_mesh.py @@ -195,6 +195,45 @@ class DeviceMeshTestNDim(DTensorTestBase): self.assertNotEqual(hash(mesh), hash(mesh3)) self.assertNotEqual(hash(mesh2), hash(mesh3)) + @with_comms + @run_with_both_funcol_impls + def test_get_local_rank_3d(self): + """ + If we have a 3D mesh and we want to apply dp, pp, tp to it, + mesh_dim_names = ["dp", "pp", "tp", and the mesh tensor would be: + mesh_3d_tensor = [ + [ + [0, 1], + [2, 3], + ], + [ + [4, 5], + [6, 7], + ] + + ] + """ + mesh_shape = (2, 2, 2) + mesh_3d = init_device_mesh( + self.device_type, mesh_shape, mesh_dim_names=("dp", "pp", "tp") + ) + + # tp_rank_0: [0, 2, 4, 6], tp_rank_1: [1, 3, 5, 7] + tp_rank = mesh_3d.get_local_rank("tp") + print(f"{self.rank=}, {tp_rank=}") + expected_tp_rank = self.rank % 2 + self.assertEqual(tp_rank, expected_tp_rank) + + # pp_rank_0: [0, 1, 4, 5], pp_rank_1: [2, 3, 6, 7] + pp_rank = mesh_3d.get_local_rank("pp") + expected_pp_rank = 0 if self.rank % 4 <= 1 else 1 + self.assertEqual(pp_rank, expected_pp_rank) + + # dp_rank_0: [0, 1, 2, 3], dp_rank_1: [4, 5, 6, 7] + dp_rank = mesh_3d.get_local_rank("dp") + expected_dp_rank = self.rank // 4 + self.assertEqual(dp_rank, expected_dp_rank) + class InitDeviceMeshTest(DTensorTestBase): @property
2.41.0
ec05c769b7e1c6ab5ba75f86b4ae6d43d77ac96
Wed, 17 Apr 2024 21:32:18 +0000
[PATCH 0264/1000] all_gather and reduce_scatter autograd (#123989)
This adds `all_gather_tensor_autograd` and `reduce_scatter_tensor_autograd` to the functional_collectives library. Only the `sum` reduce op is supported for `reduce_scatter` for now, but this should be easy to extend in the future. The backward implementations match the behavior in https://github.com/pytorch/torchrec/blob/main/torchrec/distributed/comm_ops.py. This follows the pattern of #123599.

Test plan:
```sh
pytest test/distributed/test_functional_api.py -k Autograd
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123989 Approved by: https://github.com/wanchaol
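For illustration, here is a minimal usage sketch that is not taken from the PR itself; it assumes an already-initialized default process group (e.g. launched via torchrun) and imports the module as `ft_c`, as the test file below does.

```python
# Hedged sketch (not part of the PR): assumes dist.init_process_group() has already
# been called on every rank, and mirrors the new tests added in this diff.
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as ft_c

def demo() -> None:
    group = dist.group.WORLD.group_name
    world_size = dist.get_world_size()

    # Differentiable all_gather: its backward is a reduce_scatter of the gradient.
    t = torch.ones(3, 3, requires_grad=True)
    gathered = ft_c.all_gather_tensor_autograd(t * 1.0, gather_dim=0, group=group) * 1.0
    gathered.sum().backward()  # t.grad is filled with world_size on every rank

    # Differentiable reduce_scatter: only the "sum" reduce op is supported for now;
    # its backward is an all_gather of the gradient.
    x = torch.ones(3 * world_size, 3, requires_grad=True)
    rs = ft_c.reduce_scatter_tensor_autograd(x * 1.0, "sum", scatter_dim=0, group=group) * 1.0
    rs.sum().backward()  # x.grad is filled with ones
```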
diff --git a/test/distributed/test_functional_api.py b/test/distributed/test_functional_api.py index 90f750d400..d26dcf970a 100644 --- a/test/distributed/test_functional_api.py +++ b/test/distributed/test_functional_api.py @@ -294,7 +294,7 @@ class TestTraceableCollectives(MultiThreadedTestCase): for dim in dims_to_gather: output_size = [3, 3, 3] output_size[dim] *= mesh.size(0) - # each rank have its own tensor, all_gather gives a list + # each rank have its own tensor, all_gather gives a bigger tensor local_tensor = torch.ones([3, 3, 3], device=device) gathered_tensor = ft_c.all_gather_tensor( local_tensor, gather_dim=dim, group=(mesh, 0) @@ -685,6 +685,72 @@ class TestFunctionalAutograd(MultiThreadedTestCase): self.assertIsNotNone(t.grad) + @parametrize("compile", [True, False]) + def test_all_gather_tensor(self, compile: bool) -> None: + group = dist.group.WORLD.group_name + + def my_func(t: torch.Tensor, dim: int) -> torch.Tensor: + assert t.requires_grad + out = ft_c.all_gather_tensor_autograd( + t * 1.0, + gather_dim=dim, + group=group, + ) + out = out * 1.0 + return out + + if compile: + compiled = torch.compile(my_func, fullgraph=True, backend="aot_eager") + else: + compiled = my_func + + dims_to_gather = [0, 1, 2] + for dim in dims_to_gather: + output_size = [3, 3, 3] + output_size[dim] *= self.world_size + # each rank have its own tensor, all_gather gives a bigger tensor + local_tensor = torch.ones([3, 3, 3], requires_grad=True) + gathered_tensor = compiled(local_tensor, dim) + self.assertEqual(gathered_tensor, torch.ones(output_size)) + + gathered_tensor.sum().backward() + self.assertEqual( + local_tensor.grad, + torch.full((3, 3, 3), fill_value=float(self.world_size)), + ) + + @parametrize("compile", [True, False]) + def test_reduce_scatter_tensor(self, compile: bool) -> None: + group = dist.group.WORLD.group_name + + def my_func(t: torch.Tensor, dim: int) -> torch.Tensor: + assert t.requires_grad + rs_tensor = ( + ft_c.reduce_scatter_tensor_autograd( + input_tensor * 1.0, "sum", scatter_dim=dim, group=group + ) + * 1.0 + ) + return rs_tensor + + if compile: + compiled = torch.compile(my_func, fullgraph=True, backend="aot_eager") + else: + compiled = my_func + + dims_to_scatter = [0, 1] + for dim in dims_to_scatter: + group_size = self.world_size + input_size = [3, 3] + output_size = [3, 3] + output_size[dim] *= group_size + input_tensor = torch.ones(output_size, requires_grad=True) + rs_tensor = compiled(input_tensor, dim) + res_num = 1 * group_size + self.assertEqual(rs_tensor, torch.ones(input_size) * res_num) + rs_tensor.sum().backward() + self.assertEqual(input_tensor.grad, torch.full(output_size, fill_value=1.0)) + if __name__ == "__main__": run_tests() diff --git a/torch/csrc/distributed/c10d/Functional.cpp b/torch/csrc/distributed/c10d/Functional.cpp index 63d30d51d6..942ae7358d 100644 --- a/torch/csrc/distributed/c10d/Functional.cpp +++ b/torch/csrc/distributed/c10d/Functional.cpp @@ -437,6 +437,119 @@ at::Tensor all_to_all_single_autograd( input, output_split_sizes, input_split_sizes, group_name)[0]; } +class ReduceScatterTensor + : public torch::autograd::Function<ReduceScatterTensor> { + public: + static torch::autograd::Variable forward( + torch::autograd::AutogradContext* ctx, + const at::Tensor& input, + std::string reduce_op, + int64_t group_size, + std::string group_name) { + TORCH_CHECK(reduce_op == "sum", "Only sum reduce op is supported"); + + ctx->saved_data["group_size"] = group_size; + ctx->saved_data["group_name"] = group_name; + + return 
c10::Dispatcher::singleton() + .findSchemaOrThrow("_c10d_functional::reduce_scatter_tensor", "") + .typed<decltype(reduce_scatter_tensor)>() + .call(input, reduce_op, group_size, group_name); + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + torch::autograd::variable_list grad_out_list) { + const int64_t group_size = ctx->saved_data["group_size"].toInt(); + const std::string& group_name = ctx->saved_data["group_name"].toStringRef(); + + DCHECK(grad_out_list.size() == 1); + auto grad_out = grad_out_list[0]; + + auto out = + c10::Dispatcher::singleton() + .findSchemaOrThrow("_c10d_functional::all_gather_into_tensor", "") + .typed<decltype(all_gather_into_tensor)>() + .call(grad_out, group_size, group_name); + + // do an explicit wait to avoid cuda stream issues + // TODO: track active cuda stream in wait + out = c10::Dispatcher::singleton() + .findSchemaOrThrow("_c10d_functional::wait_tensor", "") + .typed<decltype(wait_tensor)>() + .call(out); + + return { + out, + at::Tensor(), + at::Tensor(), + at::Tensor(), + }; + } +}; + +at::Tensor reduce_scatter_tensor_autograd( + const at::Tensor& input, + std::string reduce_op, + int64_t group_size, + std::string group_name) { + return ReduceScatterTensor::apply(input, reduce_op, group_size, group_name); +} + +class AllGatherIntoTensor + : public torch::autograd::Function<AllGatherIntoTensor> { + public: + static torch::autograd::Variable forward( + torch::autograd::AutogradContext* ctx, + const at::Tensor& input, + int64_t group_size, + std::string group_name) { + ctx->saved_data["group_size"] = group_size; + ctx->saved_data["group_name"] = group_name; + + return c10::Dispatcher::singleton() + .findSchemaOrThrow("_c10d_functional::all_gather_into_tensor", "") + .typed<decltype(all_gather_into_tensor)>() + .call(input, group_size, group_name); + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + torch::autograd::variable_list grad_out_list) { + const int64_t group_size = ctx->saved_data["group_size"].toInt(); + const std::string& group_name = ctx->saved_data["group_name"].toStringRef(); + + DCHECK(grad_out_list.size() == 1); + auto grad_out = grad_out_list[0]; + + auto out = + c10::Dispatcher::singleton() + .findSchemaOrThrow("_c10d_functional::reduce_scatter_tensor", "") + .typed<decltype(reduce_scatter_tensor)>() + .call(grad_out, "sum", group_size, group_name); + + // do an explicit wait to avoid cuda stream issues + // TODO: track active cuda stream in wait + out = c10::Dispatcher::singleton() + .findSchemaOrThrow("_c10d_functional::wait_tensor", "") + .typed<decltype(wait_tensor)>() + .call(out); + + return { + out, + at::Tensor(), + at::Tensor(), + }; + } +}; + +at::Tensor all_gather_into_tensor_autograd( + const at::Tensor& input, + int64_t group_size, + std::string group_name) { + return AllGatherIntoTensor::apply(input, group_size, group_name); +} + } // namespace TORCH_LIBRARY(_c10d_functional_autograd, m) { @@ -448,4 +561,21 @@ TORCH_LIBRARY(_c10d_functional_autograd, m) { "str group_name) -> Tensor", torch::dispatch(c10::DispatchKey::Autograd, ::all_to_all_single_autograd), {at::Tag::pt2_compliant_tag}); + m.def( + "reduce_scatter_tensor(" + "Tensor input, " + "str reduce_op, " + "int group_size, " + "str group_name) -> Tensor", + torch::dispatch( + c10::DispatchKey::Autograd, ::reduce_scatter_tensor_autograd), + {at::Tag::pt2_compliant_tag}); + m.def( + "all_gather_into_tensor(" + "Tensor input, " + "int group_size, " + "str group_name) -> 
Tensor", + torch::dispatch( + c10::DispatchKey::Autograd, ::all_gather_into_tensor_autograd), + {at::Tag::pt2_compliant_tag}); } diff --git a/torch/distributed/_functional_collectives.py b/torch/distributed/_functional_collectives.py index 175e74464a..afd2b382f9 100644 --- a/torch/distributed/_functional_collectives.py +++ b/torch/distributed/_functional_collectives.py @@ -215,6 +215,39 @@ def all_gather_tensor( return res +def all_gather_tensor_autograd( + self: torch.Tensor, + gather_dim: int, + group: RANK_TYPES, + tag: str = "", +): + """ + Gather tensor data across from all machines and concatenate over ``gather_dim``. + + Note that it currently only supports gather_dim = 0. + + This function is the same as all_gather_tensor but will propagate the + backwards gradient across workers. + + See all_gather_tensor for more details on usage. + """ + group_name = _resolve_group_name(group, tag) + group_size = c10d._get_group_size_by_name(group_name) + + tensor = torch.ops._c10d_functional_autograd.all_gather_into_tensor( + self, group_size, group_name + ) + res = _FromTorchTensor.apply(tensor) + # TODO this should be done inside AsyncCollectiveTensor to delay the wait() call + if gather_dim != 0: + # torch.cat access the data so we already need to wait here, first do wait + # and then chunk + cat avoid us going through ACT dispatching logic again + if isinstance(res, AsyncCollectiveTensor): + res = res.wait() # type: ignore[attr-defined] + res = torch.cat(torch.chunk(res, group_size, dim=0), dim=gather_dim) + return res + + def reduce_scatter_tensor( self: torch.Tensor, reduceOp: str, @@ -257,6 +290,45 @@ def reduce_scatter_tensor( return res +def reduce_scatter_tensor_autograd( + self: torch.Tensor, + reduceOp: str, + scatter_dim: int, + group: RANK_TYPES, + tag: str = "", +): + """ + Reduces the tensor data across all machines in such a way that all get + the final result, then scatter the results to corresponding ranks. + + This function is the same as reduce_scatter_tensor but will propagate the + backwards gradient across workers. + + Currently only the "sum" reduceOp is supported. + + See reduce_scatter_tensor for more details on usage. + """ + + group_name = _resolve_group_name(group, tag) + group_size = c10d._get_group_size_by_name(group_name) + + assert ( + self.size(scatter_dim) % group_size == 0 + ), f"input dimension 0 ({self.size(0)} must be a multiple of group_size {group_size}" + if scatter_dim != 0: + tensor_list = torch.chunk(self, group_size, dim=scatter_dim) + self = torch.cat(tensor_list) + + tensor = torch.ops._c10d_functional_autograd.reduce_scatter_tensor( + self, + reduceOp.lower(), + group_size, + group_name, # type: ignore[possibly-undefined] + ) + res = _FromTorchTensor.apply(tensor) + return res + + def all_reduce_coalesced( self: List[torch.Tensor], reduceOp: str, group: RANK_TYPES, tag: str = "" ) -> List[torch.Tensor]:
2.41.0
44d0466451be755f226c379201220d989f45d7e
Wed, 17 Apr 2024 22:31:30 +0000
[PATCH 0265/1000] Revert "[DeviceMesh][Test] Add 3d unit test for `get_local_rank()` (#124142)"
This reverts commit a403757913689d200683a4158c565bc3dbade74b. Reverted https://github.com/pytorch/pytorch/pull/124142 on behalf of https://github.com/malfet due to Broke lint ([comment](https://github.com/pytorch/pytorch/pull/124142#issuecomment-2062587289))
diff --git a/test/distributed/test_device_mesh.py b/test/distributed/test_device_mesh.py index 697cc7fec0..a98916a922 100644 --- a/test/distributed/test_device_mesh.py +++ b/test/distributed/test_device_mesh.py @@ -195,45 +195,6 @@ class DeviceMeshTestNDim(DTensorTestBase): self.assertNotEqual(hash(mesh), hash(mesh3)) self.assertNotEqual(hash(mesh2), hash(mesh3)) - @with_comms - @run_with_both_funcol_impls - def test_get_local_rank_3d(self): - """ - If we have a 3D mesh and we want to apply dp, pp, tp to it, - mesh_dim_names = ["dp", "pp", "tp", and the mesh tensor would be: - mesh_3d_tensor = [ - [ - [0, 1], - [2, 3], - ], - [ - [4, 5], - [6, 7], - ] - - ] - """ - mesh_shape = (2, 2, 2) - mesh_3d = init_device_mesh( - self.device_type, mesh_shape, mesh_dim_names=("dp", "pp", "tp") - ) - - # tp_rank_0: [0, 2, 4, 6], tp_rank_1: [1, 3, 5, 7] - tp_rank = mesh_3d.get_local_rank("tp") - print(f"{self.rank=}, {tp_rank=}") - expected_tp_rank = self.rank % 2 - self.assertEqual(tp_rank, expected_tp_rank) - - # pp_rank_0: [0, 1, 4, 5], pp_rank_1: [2, 3, 6, 7] - pp_rank = mesh_3d.get_local_rank("pp") - expected_pp_rank = 0 if self.rank % 4 <= 1 else 1 - self.assertEqual(pp_rank, expected_pp_rank) - - # dp_rank_0: [0, 1, 2, 3], dp_rank_1: [4, 5, 6, 7] - dp_rank = mesh_3d.get_local_rank("dp") - expected_dp_rank = self.rank // 4 - self.assertEqual(dp_rank, expected_dp_rank) - class InitDeviceMeshTest(DTensorTestBase): @property
2.41.0
c18afa25f81aeeb6e817254e06cf537a20c656c
Tue, 16 Apr 2024 06:58:14 +0000
[PATCH 0266/1000] Intel GPU oneDNN upstreaming for primitive integration (#117112)
# Motivation

As proposed in https://github.com/pytorch/pytorch/issues/114848 and https://github.com/pytorch/pytorch/issues/114723, the oneDNN library is an important component of the Intel GPU software ecosystem. This PR is based on #117098, where the oneDNN library for Intel GPU should already be available. This PR contains the integration code from ATen to oneDNN; the GEMM integration code is its core part. Alongside GEMM, more basic support such as runtime handling (device, stream) and primitive attributes is also included. We put the oneDNN integration code in the directory `aten/src/ATen/native/mkldnn/xpu/detail` and add a namespace `at::native::xpu::onednn` for the oneDNN integration. The code in this PR will be used in following PRs, where ATen operators will call into these integration functions. We separate the PRs because the oneDNN integration is logically separable from the ATen operator implementation; this also eases the review burden by avoiding too much code in a single PR.

Co-authored-by: xiaolil1 <xiaoli.liu@intel.com>
Co-authored-by: lei,zhenyuan <zhenyuan.lei@intel.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/117112
Approved by: https://github.com/EikanWang, https://github.com/jgong5, https://github.com/albanD
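To make the intended call pattern concrete, a small hypothetical sketch follows of how a later operator-level PR might build fused post-op attributes with the `Attr` helper introduced below. Only the `Attr` API and its `at::native::onednn` namespace are taken from the added header; the fused "ReLU + residual add" scenario, the example namespace, and the helper name are assumptions made purely for illustration.

```cpp
// Hypothetical sketch, not part of this PR: shows one way a follow-up ATen-level
// change might assemble post-op attributes before creating a oneDNN primitive.
#include <ATen/native/mkldnn/xpu/detail/Attr.h>

namespace xpu_integration_example {

using at::native::onednn::Attr;

// Build post-op attributes that fuse a ReLU and a residual add into one primitive.
inline Attr make_fused_attr(const at::Tensor& residual) {
  Attr attr;  // fp32 path; an int8 path would pass the output quantization scale instead
  // Eltwise post op: ReLU (alpha = 0.f gives a plain ReLU; beta is unused here).
  attr.append_post_eltwise(/*scale=*/1.f, /*alpha=*/0.f, /*beta=*/0.f,
                           attr.kind_with_relu);
  // Binary post op: add the residual tensor to the primitive's output.
  attr.append_post_binary(attr.kind_with_binary_add, residual);
  return attr;
}

}  // namespace xpu_integration_example
```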
diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/Attr.h b/aten/src/ATen/native/mkldnn/xpu/detail/Attr.h new file mode 100644 index 0000000000..56e5870849 --- /dev/null +++ b/aten/src/ATen/native/mkldnn/xpu/detail/Attr.h @@ -0,0 +1,365 @@ +#pragma once + +#include <ATen/ATen.h> +#include <oneapi/dnnl/dnnl.hpp> +#include <oneapi/dnnl/dnnl_types.h> +#include <ATen/native/mkldnn/xpu/detail/Utils.h> +#include <ATen/native/mkldnn/xpu/detail/oneDNNContext.h> + +namespace at::native::onednn { +/* oneDNN quantization usage: + https://oneapi-src.github.io/oneDNN/dev_guide_attributes_quantization.html# + + src_fp32 = scale_src * (src_int8 - zero_point) + wei_fp32 = scale_wei * (wei_int8 - zero_point) + dst_fp32 = scale_dst * (dst_int8 - zero_point) + fp32 Convolution: dst_fp32 = src_fp32 * wei_fp32 + Int8 Convolution: dst_fp32 = (src_int8 * wei_int8) * (scale_src * scale_wei) + Int8 Convolution: dst_int8 = 1 / scale_dst * dst_fp32; + + Considering zero-point (asymmetric): + dst_fp32 = (src_int8 - src_zp) * src_sc * wei_int8 * wei_sc + dst_sc * (dst_int8 - dst_zp) = (src_int8 - src_zp) * wei_int8 * src_sc * + wei_sc + dst_int8 = (src_int8 - src_zp) * wei_int8 * src_sc * wei_sc / dst_sc + + dst_zp + + considering bias: + fp32 Convolution: dst_fp32 = src_fp32 * wei_fp32 + bias + Int8 Convolution: dst_fp32 = (src_int8 * wei_int8) * (scale_src * scale_wei) + + bias Int8 Convolution: dst_fp32 = (src_int8 * wei_int8 + bias/(scale_src * + scale_wei)) * (scale_src * scale_wei) Int8 Convolution: dst_int8 = 1 / + scale_dst * dst_fp32; +*/ + +/* + oneDNN postops usage: + Currently, oneDNN supports 5 kinds of post ops. More details can be refered +to oneDNN doc. + https://oneapi-src.github.io/oneDNN/dev_guide_attributes_post_ops.html#doxid-dev-guide-attributes-post-ops-1dev-guide-attributes-post-ops-eltwise + +0. without post ops + dst = Conv(src, wei) + bias; + dst_int8 = 1/q_scale * dst; q_scale is the op output quantization scale + fp32 API: Attr attr; + int8 API: Attr attr(q_scale); + +1. append eltwise post op + dst = elt_scale * Eltwise{conv_scale * [Conv(src, wei) + bias], alpha, beta} + dst_int8 = 1/q_scale * dst; + fp32 API: + Attr attr; + attr.append_post_eltwise(1.f, conv_scale, 0.f, kind_with_linear) + attr.append_post_eltwise(elt_scale, alpha, beta, eltwise_algorithm) + int8 API: + Attr attr(q_scale); + attr.append_post_eltwise(1.f, conv_scale, 0.f, kind_with_linear) + attr.append_post_eltwise(elt_scale, alpha, beta, eltwise_algorithm) + +2. append sum post op + dst = conv_scale * Conv(src, wei) + sum_scale * (dst - zp) + dst_int8 = 1/q_scale * dst; + fp32 API: + Attr attr; + attr.append_post_eltwise(1.f, conv_scale, 0.f, kind_with_linear) + attr.append_post_sum(sum_scale) + int8 API: + Attr attr(q_scale); + attr.append_post_eltwise(1.f, conv_scale, 0.f, kind_with_linear) + attr.append_post_sum(sum_scale) + +3. 
append binary post op + dst = Binary[Conv(src, wei)] + +*/ +using kind_t = dnnl::primitive::kind; +struct PostOpParam { + // eltwise post op constructor + PostOpParam(float scale, float alpha, float beta, dnnl::algorithm algo, kind_t kind) + : scale_(scale), alpha_(alpha), beta_(beta), algo_(algo), kind_(kind) {} + // sum post op constructor + PostOpParam(float scale, kind_t kind) : scale_(scale), kind_(kind) {} + // binary post op constructor + PostOpParam( + at::Tensor& binary, + dnnl::memory::desc& binary_md, + dnnl::memory::desc& expected_md, + dnnl::algorithm algo, + kind_t kind) + : binary_(binary), + meta_(binary_md), + expected_meta_(expected_md), + algo_(algo), + kind_(kind) {} + // prelu post op constructor + PostOpParam(int mask, kind_t kind) : mask_(mask), kind_(kind) {} + + // post sum or binary with scale post op constructor + PostOpParam(at::Tensor& binary, float scale, dnnl::algorithm algo, kind_t kind) + : scale_(scale), binary_(binary), algo_(algo), kind_(kind) {} + + // for int8 sum/eltwise + float scale_ = 1.0; + // for eltwise + float alpha_ = 0.0; + float beta_ = 0.0; + // for binary + at::Tensor binary_ = at::Tensor(); + at::Tensor expected_binary_ = at::Tensor(); + void* binary_ptr_ = nullptr; + dnnl::memory::desc meta_ = dnnl::memory::desc(); + dnnl::memory::desc expected_meta_ = dnnl::memory::desc(); + // for prelu + int mask_ = 0; + // common + dnnl::algorithm algo_ = dnnl::algorithm::eltwise_relu; + kind_t kind_ = kind_t::eltwise; +}; + +class Attr { + public: + Attr() : q_scale_(1.f), q_zero_point_(0) {} + Attr(float q_scale, int64_t zp = 0) : q_scale_(q_scale), q_zero_point_(zp) {} + + /***** eltwise *****/ + dnnl::algorithm kind_with_relu = dnnl::algorithm::eltwise_relu; + dnnl::algorithm kind_with_sigmoid = dnnl::algorithm::eltwise_logistic; + dnnl::algorithm kind_with_gelu_tanh = dnnl::algorithm::eltwise_gelu_tanh; + dnnl::algorithm kind_with_gelu_erf = dnnl::algorithm::eltwise_gelu_erf; + dnnl::algorithm kind_with_mish = dnnl::algorithm::eltwise_mish; + dnnl::algorithm kind_with_linear = dnnl::algorithm::eltwise_linear; + dnnl::algorithm kind_with_swish = dnnl::algorithm::eltwise_swish; + dnnl::algorithm kind_with_sqrt = dnnl::algorithm::eltwise_sqrt; + dnnl::algorithm kind_with_tanh = dnnl::algorithm::eltwise_tanh; + dnnl::algorithm kind_with_square = dnnl::algorithm::eltwise_square; + dnnl::algorithm kind_with_abs = dnnl::algorithm::eltwise_abs; + dnnl::algorithm kind_with_exp = dnnl::algorithm::eltwise_exp; + dnnl::algorithm kind_with_log = dnnl::algorithm::eltwise_log; + dnnl::algorithm kind_with_round = dnnl::algorithm::eltwise_round; + dnnl::algorithm kind_with_hardswish = dnnl::algorithm::eltwise_hardswish; + dnnl::algorithm kind_with_soft_relu = dnnl::algorithm::eltwise_soft_relu; + dnnl::algorithm kind_with_elu = dnnl::algorithm::eltwise_elu; + dnnl::algorithm kind_with_pow = dnnl::algorithm::eltwise_pow; + dnnl::algorithm kind_with_clip = dnnl::algorithm::eltwise_clip; + // note: hardsigmoid seems oneDNN still not support + dnnl::algorithm kind_with_hardsigmoid = dnnl::algorithm::eltwise_hardsigmoid; + + /***** binary *****/ + dnnl::algorithm kind_with_binary_mul = dnnl::algorithm::binary_mul; + dnnl::algorithm kind_with_binary_add = dnnl::algorithm::binary_add; + dnnl::algorithm kind_with_binary_sub = dnnl::algorithm::binary_sub; + dnnl::algorithm kind_with_binary_div = dnnl::algorithm::binary_div; + dnnl::algorithm kind_with_binary_eq = dnnl::algorithm::binary_eq; + dnnl::algorithm kind_with_binary_ne = dnnl::algorithm::binary_ne; + 
dnnl::algorithm kind_with_binary_ge = dnnl::algorithm::binary_ge; + dnnl::algorithm kind_with_binary_gt = dnnl::algorithm::binary_gt; + dnnl::algorithm kind_with_binary_le = dnnl::algorithm::binary_le; + dnnl::algorithm kind_with_binary_lt = dnnl::algorithm::binary_lt; + dnnl::algorithm kind_with_binary_max = dnnl::algorithm::binary_max; + dnnl::algorithm kind_with_binary_min = dnnl::algorithm::binary_min; + + // append sum post op + Attr& append_post_sum( + float sum_scale, + float sum_q_scale = 1.f, + int64_t zp = 0) { + ops_params_.push_back( + PostOpParam(/*scale_sum*/ sum_scale * sum_q_scale, kind_t::sum)); + return *this; + } + + // append eltwise post op + Attr& append_post_eltwise( + float scale, + float alpha, + float beta, + dnnl::algorithm algo) { + ops_params_.push_back( + PostOpParam(scale, alpha, beta, algo, kind_t::eltwise)); + return *this; + } + + // append binary post op + Attr& append_post_binary(dnnl::algorithm algo, const at::Tensor& binary) { + auto binary_ = binary.is_quantized() ? at::dequantize(binary) : binary; + bool binary_is_channels_last = (binary_.suggest_memory_format() == at::MemoryFormat::ChannelsLast || + binary_.suggest_memory_format() == at::MemoryFormat::ChannelsLast3d); + + binary_ = binary_is_channels_last ? binary_ : binary_.contiguous(); + dnnl::memory::desc md = get_onednn_md(binary_); + auto expected_md = dnnl::memory::desc( + md.get_dims(), md.get_data_type(), dnnl::memory::format_tag::any); + ops_params_.push_back( + PostOpParam(binary_, md, expected_md, algo, kind_t::binary)); + return *this; + } + + Attr& append_scale_binary( + dnnl::algorithm algo, + at::Tensor binary, + float scale, + float sum_q_scale = 1.f, + int64_t zp = 0) { + ops_params_.push_back(PostOpParam( + binary, /*scale_sum*/ scale * sum_q_scale, algo, kind_t::binary)); + return *this; + } + + // append bias with binary_add method (only used for QConv now) + template <int N> + Attr& append_bias(const at::Tensor& binary) { + // In PyTorch, bias are in shape of [OC], + // we expand its shape according to Conv dimension + // Conv1d [OC, 1, 1], Conv2d [1, OC, 1, ,1], Conv3d [1, OC, 1, 1, 1] + at::Tensor binary_ = binary.contiguous(); + dnnl::memory::desc binary_md; + switch (N) { + case 1: + binary_md = dnnl::memory::desc( + {binary.size(0), 1, 1}, + dnnl::memory::data_type::f32, + dnnl::memory::format_tag::abc); + break; + case 2: + binary_md = dnnl::memory::desc( + {1, binary.size(0), 1, 1}, + dnnl::memory::data_type::f32, + dnnl::memory::format_tag::abcd); + break; + case 3: + binary_md = dnnl::memory::desc( + {1, binary.size(0), 1, 1, 1}, + dnnl::memory::data_type::f32, + dnnl::memory::format_tag::abcde); + break; + default: + TORCH_INTERNAL_ASSERT(0, + "XPU only supports append_bias for Conv1d, Conv2d and Conv3d."); + } + // In this case, expected_md = binary_md + ops_params_.push_back(PostOpParam( + binary_, binary_md, binary_md, kind_with_binary_add, kind_t::binary)); + return *this; + } + + // append prelu post op + Attr& append_post_prelu(int mask) { + ops_params_.push_back(PostOpParam(mask, kind_t::prelu)); + return *this; + } + + dnnl::post_ops extract_post_ops(const at::Tensor& dst){ + // this function is used to extract post ops params from the ops_params_ + // and put them into onednn post ops + for (size_t i = 0; i < ops_params_.size(); ++i) { + kind_t kind = ops_params_[i].kind_; + switch (kind) { + case kind_t::eltwise: { + dnnl::algorithm algo = ops_params_[i].algo_; + float alpha = ops_params_[i].alpha_; + float beta = ops_params_[i].beta_; + 
dnnl_post_ops_.append_eltwise(algo, alpha, beta); + break; + } + case kind_t::sum: { + float scale = ops_params_[i].scale_; + // TODO [Asymmetric]: + // Post-sum zp for gpu is not supported currently + dnnl_post_ops_.append_sum(scale); + break; + } + case kind_t::binary: { + dnnl::algorithm algo = ops_params_[i].algo_; + auto expected_md = ops_params_[i].expected_meta_; + // In this case user may create src1 memory descriptor with + // format_tag::any or set a specific tag. However, in later case if + // tags mismatch with dst, it would result in suboptimal performance. + // So here we use format_tag::any to make sure the fast can be + // selected. + // Thus we use expected_md (with format_any) here to create pd instead + // of original md + dnnl_post_ops_.append_binary(algo, expected_md); + break; + } + default: + break; + } + } + + // if output is quantized, then append the eltwise linear to adjust the + // output scale/zero_point + if (dst.is_quantized()) { + // [Note: Gap of u8 qtensor scale between oneDNN and PyTorch] + // The /2 here is for output_scale collected by observer is different + // from quantization requirements in oneDNN. + // For Observer, the conv_scale (activation scale in other case) is + // computed through 2max_v/(qmax - qmin). The max_v is collected + // from the tensor to be observerd. + // (https://pytorch.org/docs/stable/generated/torch.quantization.observer.MinMaxObserver.html#torch.quantization.observer.MinMaxObserver) + // On the other hand, for u8 in oneDNN, the scale for quantization is + // defined as max_v/(qmax-qmin). Hence, we need to divide by 2 here. + // (https://oneapi-src.github.io/oneDNN/dev_guide_inference_int8.html) + dnnl_post_ops_.append_eltwise( + kind_with_linear, 1.f / q_scale_, q_zero_point_); + } + return dnnl_post_ops_; + } + + bool with_sum() { + for (size_t i = 0; i < ops_params_.size(); ++i) { + if (ops_params_[i].kind_ == kind_t::sum) { + return true; + } + } + return false; + } + + bool with_binary() { + for (size_t i = 0; i < ops_params_.size(); ++i) { + if (ops_params_[i].kind_ == kind_t::binary) { + return true; + } + } + return false; + } + + void construct_post_binary( + dnnl::primitive_desc& pd, + std::unordered_map<int, dnnl::memory>& args) { + // This function is used to construct binary memory desc in binary post ops. 
+ // According to oneDNN doc, the binary tensor can be in shape of + // [1, 1, 1, 1], tensor broadcast + // [1, C, 1, 1], channel broadcast + // [dst.shape], no broadcast and eltwise-wise binary operations on dst + + auto engine = + GpuEngineManager::Instance().get_engine({c10::kXPU, c10::xpu::current_device()}); + for (size_t i = 0; i < ops_params_.size(); ++i) { + kind_t kind = ops_params_[i].kind_; + if (kind == kind_t::binary) { + dnnl::memory binary_m; + auto binary = ops_params_[i].binary_; + auto md = ops_params_[i].meta_; + // qeury expected_md to achieve peak performance + auto expected_md = pd.query_md( + dnnl::query::exec_arg_md, + DNNL_ARG_ATTR_MULTIPLE_POST_OP(i) | DNNL_ARG_SRC_1); + + binary_m = at::native::onednn::make_onednn_memory( + md, engine, binary.data_ptr() + ); + + args.insert( + {DNNL_ARG_ATTR_MULTIPLE_POST_OP(i) | DNNL_ARG_SRC_1, binary_m}); + } + } + } + + float q_scale_ = 1.0; // the scale used to quantize the fused result from fp32 + // to int8, only works for int8 case + int64_t q_zero_point_ = 0; + std::vector<PostOpParam> ops_params_; // series of post ops + dnnl::post_ops dnnl_post_ops_; +}; + +} // namespace at::native::onednn diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/Matmul.cpp b/aten/src/ATen/native/mkldnn/xpu/detail/Matmul.cpp new file mode 100644 index 0000000000..7dfd31b93b --- /dev/null +++ b/aten/src/ATen/native/mkldnn/xpu/detail/Matmul.cpp @@ -0,0 +1,244 @@ + +#include <c10/xpu/XPUFunctions.h> + +#include <ATen/ATen.h> +#include <ATen/record_function.h> + +#include <Attr.h> +#include <Utils.h> + +#include <oneapi/dnnl/dnnl.hpp> + +namespace at::native::onednn { + +sycl::event matmul( + at::Tensor& result, + const at::Tensor& mat1, + const at::Tensor& mat2, + const at::Tensor& b_raw, + bool m2_trans, + Attr attr, + const std::vector<sycl::event>& deps) { + int64_t dims = result.dim(); + TORCH_CHECK( + dims == 2 || dims == 3, + "oneDNN matmul only works with 2D or 3D, got ", + dims); + TORCH_CHECK( + dims == mat1.dim() && dims == mat2.dim(), + "oneDNN input matrixes must have the same ranks"); + TORCH_CHECK(result.defined(), "oneDNN matmul result should be defined"); + + at::Device cur_device = at::Device(at::kXPU, c10::xpu::current_device()); + auto engine = GpuEngineManager::Instance().get_engine(cur_device); + auto stream = GpuStreamManager::Instance().get_stream(); + + at::Tensor m1 = is_onednn_matmul_strides(mat1) ? mat1 : mat1.contiguous(); + at::Tensor m2 = is_onednn_matmul_strides(mat2) ? mat2 : mat2.contiguous(); + at::Tensor dst = is_onednn_matmul_strides(result, true) ? 
result : result.contiguous(); + + int64_t m = dst.size(-2); + int64_t n = dst.size(-1); + int64_t k = m1.size(-1); + int64_t mb = 1; + + if (dims == 3) { + mb = dst.size(0); + TORCH_CHECK( + mb == m1.size(0) && mb == m2.size(0), + "batch size mismatch, dst mb: ", + mb, + "m1 mb", + m1.size(0), + " m2 mb: ", + m2.size(0)); + } + + // validate bias and make it compatible with oneDNN implementation + bool with_bias = false; + at::Tensor b = b_raw; + if (b.defined()) { + with_bias = true; + if (b.dim() == 1) { + TORCH_CHECK( + b.size(0) == n || b.size(0) == 1, + "matmul supports [n] or [1] when bias dim is 1 ..."); + if (b.size(0) == 0) { + with_bias = false; + } else if (m1.dim() == 3) { + b = b.expand({mb, m, n}).contiguous(); + } else if (m1.dim() == 2) { + b = b.expand({1, n}).contiguous(); + } + } else if (b.dim() == 2) { + TORCH_CHECK( + (b.size(0) == m && b.size(1) == n) || + (b.size(0) == 1 && b.size(1) == n) || + (b.size(0) == m && b.size(1) == 1) || + (b.size(0) == 1 && b.size(1) == 1), + "matmul supports [m, n] or [1, n] or [m, 1] or [1, 1] when bias dim is 2 ..."); + if (b.size(0) == 1 && b.size(1) == 1) + b = b.expand({1, n}).contiguous(); + } else if (b.dim() == 3) { + TORCH_CHECK( + at::are_expandable({mb, m, n}, b.sizes()), + "matmul bias must be expandable to:", + dst.sizes(), + " but got:", + b.sizes()); + b = b.expand({mb, m, n}).contiguous(); + } else if (b.dim() == 0) { + TORCH_CHECK( + b.numel() == 1, "matmul supports 1 numel when bias dim is [] ..."); + if (m1.dim() == 3) { + b = b.expand({mb, m, n}).contiguous(); + } else { + b = b.expand({1, n}).contiguous(); + } + } else { + TORCH_CHECK(0, "unsupported bias dim in matmul ..."); + } + } + + b = b.contiguous(); // avoid reorder 2 times + + // xpu matmul support both ab/ba shape for m2 tensor, we don't check any more + auto m1_usr_dt = get_onednn_dtype(m1); + auto m2_usr_dt = get_onednn_dtype(m2); + auto dst_usr_dt = get_onednn_dtype(dst); + + auto m1_dt = m1_usr_dt; + auto m2_dt = m2_usr_dt; + auto dst_dt = dst_usr_dt; + dnnl::memory::data_type bias_dt; + + dnnl::memory::desc m1_md, m1_usr_md, m1_any_md; + dnnl::memory::desc m2_md, m2_usr_md, m2_any_md; + dnnl::memory::desc dst_md, dst_usr_md, dst_any_md; + dnnl::memory::desc bias_md; + + // Naive Master weight + if (m1_dt == dnnl::memory::data_type::bf16 && m2_dt == dnnl::memory::data_type::f32) { + m2_dt = dnnl::memory::data_type::bf16; + dst_dt = dnnl::memory::data_type::bf16; + } else if ( + m1_dt == dnnl::memory::data_type::f32 && m2_dt == dnnl::memory::data_type::bf16) { + m1_dt = dnnl::memory::data_type::bf16; + dst_dt = dnnl::memory::data_type::bf16; + } + + dnnl::memory::dims m1_dims, m2_dims, dst_dims, bias_dims; + dnnl::memory::dims m1_strides, m2_strides, dst_strides, bias_strides; + if (dims == 2) { + m1_dims = {m, k}; + m2_dims = {k, n}; + dst_dims = {m, n}; + + m1_strides = {m1.stride(0), m1.stride(1)}; + if (m2_trans) { + m2_strides = {m2.stride(0), m2.stride(1)}; + } else { + m2_strides = {m2.stride(1), m2.stride(0)}; + } + dst_strides = {dst.stride(0), dst.stride(1)}; + } else { + m1_dims = {mb, m, k}; + m2_dims = {mb, k, n}; + dst_dims = {mb, m, n}; + + m1_strides = {m1.stride(0), m1.stride(1), m1.stride(2)}; + if (m2_trans) { + m2_strides = {m2.stride(0), m2.stride(1), m2.stride(2)}; + } else { + m2_strides = {m2.stride(0), m2.stride(2), m2.stride(1)}; + } + dst_strides = {dst.stride(0), dst.stride(1), dst.stride(2)}; + } + + if (with_bias) { + bias_dims = get_onednn_dims(b); + bias_dt = get_onednn_dtype(b); + bias_strides = get_onednn_strides(b); + 
} + + dnnl::post_ops po = attr.extract_post_ops(dst); + + std::unordered_map<int, dnnl::memory> args; + dnnl::matmul matmul_p; + dnnl::matmul::primitive_desc matmul_pd; + + // STEP1: create memory desc + m1_md = dnnl::memory::desc(m1_dims, m1_dt, m1_strides); + m2_md = dnnl::memory::desc(m2_dims, m2_dt, m2_strides); + dst_md = dnnl::memory::desc(dst_dims, dst_dt, dst_strides); + + // STEP2: creat attribute + dnnl::primitive_attr pattr; + pattr.set_post_ops(po); + + #if ONEDNN_SUPPORT_DETERMINISTIC + if(at::globalContext().deterministicAlgorithms()) + pattr.set_deterministic(true); + #endif + + // scratchpad + pattr.set_scratchpad_mode(dnnl::scratchpad_mode::user); + + if (m1_dt == dnnl::memory::data_type::f32) { + pattr.set_fpmath_mode(dnnl::fpmath_mode::strict); + } + + // STEP3: create primitive + if (with_bias) { + bias_md = dnnl::memory::desc(bias_dims, bias_dt, bias_strides); + matmul_pd = + dnnl::matmul::primitive_desc(engine, m1_md, m2_md, bias_md, dst_md, pattr); + } else { + matmul_pd = dnnl::matmul::primitive_desc(engine, m1_md, m2_md, dst_md, pattr); + } + + matmul_p = dnnl::matmul(matmul_pd); + + m1_usr_md = dnnl::memory::desc(m1_dims, m1_usr_dt, m1_strides); + m2_usr_md = dnnl::memory::desc(m2_dims, m2_usr_dt, m2_strides); + dst_usr_md = dnnl::memory::desc(dst_dims, dst_usr_dt, dst_strides); + + // STEP4: create memory + auto m1_usr_m = make_onednn_memory(m1_usr_md, engine, m1.data_ptr()); + auto m2_usr_m = make_onednn_memory(m2_usr_md, engine, m2.data_ptr()); + auto dst_usr_m = make_onednn_memory(dst_usr_md, engine, dst.data_ptr()); + + auto expected_m1_md = matmul_pd.src_desc(); + auto expected_m2_md = matmul_pd.weights_desc(); + auto expected_dst_md = matmul_pd.dst_desc(); + + dnnl::memory m1_m = m1_usr_m, m2_m = m2_usr_m, dst_m = dst_usr_m; + at::Tensor m1_, m2_, dst_; + + if (attr.with_binary()) + attr.construct_post_binary(matmul_pd, args); + + size_t scratchpad_size = matmul_pd.scratchpad_desc().get_size(); + at::Tensor scratchpad_tensor = at::empty( + {static_cast<int64_t>(scratchpad_size)}, m1.options().dtype(at::kByte), c10::nullopt); + auto scratchpad_memory = make_onednn_memory( + matmul_pd.scratchpad_desc(), engine, scratchpad_tensor.data_ptr()); + args.insert({DNNL_ARG_SCRATCHPAD, scratchpad_memory}); + + args.insert({DNNL_ARG_SRC, m1_m}); + args.insert({DNNL_ARG_WEIGHTS, m2_m}); + args.insert({DNNL_ARG_DST, dst_m}); + if (with_bias) { + auto bias_m = make_onednn_memory(bias_md, engine, b.data_ptr()); + args.insert({DNNL_ARG_BIAS, bias_m}); + } + + sycl::event matmul_event = dnnl::sycl_interop::execute(matmul_p, stream, args, deps); + + if (!dst.is_same(result)) + result.copy_(dst); + + return matmul_event; +} + +} // namespace at::native::onednn diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp b/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp new file mode 100644 index 0000000000..73a37d275b --- /dev/null +++ b/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp @@ -0,0 +1,352 @@ +#include <ATen/native/mkldnn/xpu/detail/Utils.h> + +namespace at::native::onednn { + +dnnl::memory make_onednn_memory( + dnnl::memory::desc md, + dnnl::engine& engine, + void* ptr){ + return dnnl::sycl_interop::make_memory( + md, + engine, + dnnl::sycl_interop::memory_kind::usm, + ptr == nullptr ? 
DNNL_MEMORY_ALLOCATE : ptr); +} + +dnnl::memory::format_tag get_dnnl_default_format( + int ndims, + bool is_channels_last, + bool allow_undef) { + switch (ndims) { + case 1: + return dnnl::memory::format_tag::a; + case 2: + return dnnl::memory::format_tag::ab; + case 3: + return is_channels_last ? dnnl::memory::format_tag::acb + : dnnl::memory::format_tag::abc; + case 4: + return is_channels_last ? dnnl::memory::format_tag::acdb + : dnnl::memory::format_tag::abcd; + case 5: + return is_channels_last ? dnnl::memory::format_tag::acdeb + : dnnl::memory::format_tag::abcde; + case 6: + return dnnl::memory::format_tag::abcdef; + case 7: + return dnnl::memory::format_tag::abcdefg; + case 8: + return dnnl::memory::format_tag::abcdefgh; + case 9: + return dnnl::memory::format_tag::abcdefghi; + case 10: + return dnnl::memory::format_tag::abcdefghij; + case 11: + return dnnl::memory::format_tag::abcdefghijk; + case 12: + return dnnl::memory::format_tag::abcdefghijkl; + default: + if (!allow_undef) { + TORCH_CHECK(false, "oneDNN doesn't support tensor dimension > 12"); + } + return dnnl::memory::format_tag::undef; + } +} + +dnnl::memory::data_type get_onednn_dtype( + const at::Tensor& tensor, + bool allow_undef) { + switch (tensor.scalar_type()) { + case at::ScalarType::Byte: + return dnnl::memory::data_type::u8; + case at::ScalarType::Char: + return dnnl::memory::data_type::s8; + case at::ScalarType::QInt8: + return dnnl::memory::data_type::s8; + case at::ScalarType::QUInt8: + return dnnl::memory::data_type::u8; + case at::ScalarType::Int: + return dnnl::memory::data_type::s32; + case at::ScalarType::Half: + return dnnl::memory::data_type::f16; + case at::ScalarType::Float: + return dnnl::memory::data_type::f32; + case at::ScalarType::BFloat16: + return dnnl::memory::data_type::bf16; + default: + if (!allow_undef) { + TORCH_CHECK( + false, + c10::toString(tensor.scalar_type()), + " is not supported in oneDNN!"); + } + return dnnl::memory::data_type::undef; + }; +} + +dnnl::memory::data_type get_onednn_dtype_include_double( + const at::Tensor& tensor, + bool allow_undef) { + if (tensor.scalar_type() == at::ScalarType::Double) + return dnnl::memory::data_type::f64; + return get_onednn_dtype(tensor, allow_undef); +} + +bool is_supported_onednn_dtype(const at::Tensor& tensor) { + return get_onednn_dtype(tensor, /*allow_undef*/ true) == + dnnl::memory::data_type::undef + ? false + : true; +} + +dnnl::memory::dims get_onednn_dims(const at::Tensor& tensor) { + dnnl::memory::dims dims; + for (size_t i = 0; i < tensor.sizes().size(); i++) + dims.push_back(tensor.size(i)); + return dims; +} + +dnnl::memory::dims get_onednn_strides(const at::Tensor& tensor) { + dnnl::memory::dims strides; + for (size_t i = 0; i < tensor.strides().size(); i++) + strides.push_back(tensor.stride(i)); + return strides; +} + +dnnl::memory::desc get_onednn_md(const at::Tensor& tensor) { + return { + get_onednn_dims(tensor), + get_onednn_dtype(tensor), + get_onednn_strides(tensor)}; +} + +bool onednn_strides_check(const at::Tensor& src) { + auto adims = get_onednn_dims(src); + int ndims = (int)adims.size(); + auto dims = adims.data(); + auto data_type = static_cast<dnnl_data_type_t>( + get_onednn_dtype(src, /*allow_undef*/ true)); + auto strides_info = get_onednn_strides(src); + auto strides = strides_info.empty() ? 
nullptr : &strides_info[0]; + + dnnl_memory_desc_t md; + dnnl_memory_desc_create_with_strides(&md, ndims, dims, data_type, strides); + dnnl_format_kind_t md_fmt_kind; + int md_ndims; + int md_inner_nblks; + dnnl_dims_t* md_padded_dims = nullptr; + + dnnl_memory_desc_query(md, dnnl_query_inner_nblks_s32, &md_inner_nblks); + dnnl_memory_desc_query(md, dnnl_query_format_kind, &md_fmt_kind); + dnnl_memory_desc_query(md, dnnl_query_ndims_s32, &md_ndims); + dnnl_memory_desc_query(md, dnnl_query_padded_dims, &md_padded_dims); + if (strides == nullptr || md_ndims == 0 || + md_fmt_kind != dnnl_format_kind_t::dnnl_blocked) + return true; + + dnnl_dims_t blocks = {0}; + int perm[DNNL_MAX_NDIMS] = {0}; + for (int d = 0; d < md_ndims; ++d) { + // no strides check needed for empty tensor + if (md_padded_dims[d] == 0) + return true; + + // no strides verification for runtime dims + if (strides[d] == DNNL_RUNTIME_DIM_VAL) + return true; + + perm[d] = d; + blocks[d] = 1; + } + + auto block_size = 1; + dnnl_dims_t md_inner_blks; + dnnl_dims_t md_blk_inner_idxs; + dnnl_memory_desc_query(md, dnnl_query_inner_idxs, &md_blk_inner_idxs); + dnnl_memory_desc_query(md, dnnl_query_inner_blks, &md_inner_blks); + for (int iblk = 0; iblk < md_inner_nblks; ++iblk) { + blocks[md_blk_inner_idxs[iblk]] *= md_inner_blks[iblk]; + block_size *= md_inner_blks[iblk]; + } + + // A custom comparator to yield linear order on perm + auto idx_sorter = [&](const int a, const int b) -> bool { + if (strides[a] == strides[b] && md_padded_dims[a] == md_padded_dims[b]) + return a < b; + else if (strides[a] == strides[b]) + return md_padded_dims[a] < md_padded_dims[b]; + else + return strides[a] < strides[b]; + }; + std::sort(perm, perm + md_ndims, idx_sorter); + + auto min_stride = block_size; + for (int idx = 0; idx < md_ndims; ++idx) { + const int d = perm[idx]; + + // Make an exception for strides[d] == 0 as it has broadcast semantics + // Note: owing to being sorted, these are the initial strides + if (strides[d] == 0) + continue; + else if (strides[d] < min_stride) + return false; + + // update min_stride for next iteration + const auto padded_dim = *md_padded_dims[d]; + min_stride = block_size * strides[d] * (padded_dim / blocks[d]); + } + return true; +} + +bool is_broadcast(const at::Tensor& t) { + for (int i = 0; i < t.dim(); i++) { + if (t.stride(i) == 0) + return true; + } + return false; +} + +bool is_onednn_matmul_strides( + const at::Tensor& tensor, + bool is_dst) { + // https://oneapi-src.github.io/oneDNN/dev_guide_matmul.html + // oneDNN matmul only support 2-dim and 3-dim + // 2D src(Mxk), wei(KxN), dst(MxN) + // 3D src(SxMxK), wei(WxKxN), dst(DxMxN) + auto sizes = tensor.sizes(); + auto tensor_dim = sizes.size(); + if (tensor_dim != 2 && tensor_dim != 3) + return false; + + if (tensor.is_contiguous()) + return true; + + // the overlaped cases are not supported + dnnl::memory::dims strides = get_onednn_strides(tensor); + int64_t storage_size = 1; + for (size_t dim = 0; dim < tensor_dim; ++dim) + storage_size += (sizes[dim] - 1) * strides[dim]; + if (storage_size < tensor.numel()) + return false; + + // the broadcast cases are not supported + if (is_broadcast(tensor)) { + return false; + } + + if (is_dst) { + // The memory format of the destination tensor should always + // be plain with n axis contiguous + if (strides[-1] != 1) + return false; + } else { + // the src and weight must have at least one of the axes + // m or k and n or k contiguous (i.e., stride=1) respectively. 
+ if (strides[tensor_dim - 1] != 1 && strides[tensor_dim - 2] != 1) + return false; + } + + if (!onednn_strides_check(tensor)) + return false; + return true; +} + +bool is_broadcast_from_other_to_self( + const at::Tensor& self, + const at::Tensor& other) { + return ( + self.sizes() != other.sizes() && + at::is_expandable_to(other.sizes(), self.sizes())); +} + +at::MemoryFormat get_cl_tag_by_ndim(const int64_t ndim) { + TORCH_CHECK( + 3 == ndim || 4 == ndim || 5 == ndim, + "ndim must be 3, 4 or 5 when get cl tag"); + if (3 == ndim) { + return at::MemoryFormat::Contiguous; + } else if (5 == ndim) { + return at::MemoryFormat::ChannelsLast3d; + } else { + return at::MemoryFormat::ChannelsLast; + } +} + +bool binary_valid( + const at::Tensor& self, + const at::Tensor& other, + bool is_fusion) { + if (self.sizes() != other.sizes() && + !is_broadcast_from_other_to_self(self, other)) + return false; + + /* If the following conditions are satisfied, then oneDNN path will be + selected: + * 1. self and other should be xpu tensor and be defined. + * 2. self or other should not be scalar (wrapped tensor). + * 3. dim of self and other should be equal and must be larger than 0 and + smaller than 7. + * 4. the datatype should be supported by oneDNN primitive. + * 5. self and other should be in the same datatype. + * 6. self and other should be contiguous or channel-last contiguous.*/ + + + // 1. self and other should be xpu tensor and be defined. + if ((!self.defined()) || (!other.defined()) || (!self.is_xpu()) || + (!other.is_xpu())) + return false; + + // 2. self or other should not be scalar (wrapped tensor). + if (self.unsafeGetTensorImpl()->is_wrapped_number() || other.unsafeGetTensorImpl()->is_wrapped_number()) + return false; + + // 3. dim of self and other should be equal and must be larger than 0 and + // smaller than 7. + if ((self.dim() <= 0) || (other.dim() <= 0) || (self.dim() != other.dim()) || + (self.dim() > 6) || (other.dim() > 6)) + return false; + + // 4. the datatype should be supported by oneDNN primitive. + switch (self.scalar_type()) { + case at::ScalarType::Char: + break; + case at::ScalarType::Byte: + break; + case at::ScalarType::Half: + break; + case at::ScalarType::Float: + break; + case at::ScalarType::BFloat16: + break; + default: + return false; + }; + + // 5. datatype check + if (is_fusion) { + // for fusion case, the fusion can be performed on scalar_type or Float + // datatype. + if (self.scalar_type() != other.scalar_type() && + other.scalar_type() != at::ScalarType::Float) { + return false; + } + } else { + if (self.scalar_type() != other.scalar_type()) { + // for non-fusion case: self and other should be in the same datatype. + return false; + } + } + + // 6. self and other should be contiguous or channel-last contiguous. 
+ const auto ndim = self.ndimension(); + auto cl_tag = at::MemoryFormat::ChannelsLast; + if (3 == ndim || 4 == ndim || 5 == ndim) { + cl_tag = get_cl_tag_by_ndim(ndim); + } + if ((self.is_contiguous() && other.is_contiguous()) || + (self.is_contiguous(cl_tag) && other.is_contiguous(cl_tag))) + return true; + return false; +} + +} diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/Utils.h b/aten/src/ATen/native/mkldnn/xpu/detail/Utils.h new file mode 100644 index 0000000000..1fcb669d53 --- /dev/null +++ b/aten/src/ATen/native/mkldnn/xpu/detail/Utils.h @@ -0,0 +1,56 @@ +#pragma once +#include <iostream> +#include <ATen/ATen.h> +#include <ATen/Tensor.h> +#include <ATen/core/Tensor.h> + +#include <ATen/core/grad_mode.h> +#include <c10/core/MemoryFormat.h> +#include <oneapi/dnnl/dnnl.hpp> +#include <oneapi/dnnl/dnnl_sycl.hpp> +#include <oneapi/dnnl/dnnl_version.h> + + +#define ONEDNN_SUPPORT_DETERMINISTIC (DNNL_VERSION_MAJOR >=3 && DNNL_VERSION_MINOR >=4) + +namespace at::native::onednn { + +dnnl::memory::format_tag get_dnnl_default_format( + int ndims, + bool is_channels_last = false, + bool allow_undef = false); + +dnnl::memory::data_type get_onednn_dtype( + const at::Tensor& tensor, + bool allow_undef = false); + +dnnl::memory::data_type get_onednn_dtype_include_double( + const at::Tensor& tensor, + bool allow_undef = false); + +bool is_supported_onednn_dtype(const at::Tensor& tensor); + +dnnl::memory::dims get_onednn_dims(const at::Tensor& tensor); + +dnnl::memory::dims get_onednn_strides(const at::Tensor& tensor); +dnnl::memory::desc get_onednn_md(const at::Tensor& tensor); + +bool onednn_strides_check(const at::Tensor& src); +bool is_broadcast(const at::Tensor& t); + +bool is_onednn_matmul_strides( + const at::Tensor& tensor, + bool is_dst = false); + +bool is_broadcast_from_other_to_self( + const at::Tensor& self, + const at::Tensor& other); + +at::MemoryFormat get_cl_tag_by_ndim(const int64_t ndim); + +bool binary_valid( + const at::Tensor& self, + const at::Tensor& other, + bool is_fusion = false); + +} // namespace at::native::onednn diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h new file mode 100644 index 0000000000..a34edfff36 --- /dev/null +++ b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h @@ -0,0 +1,20 @@ +#pragma once + +#include <ATen/ATen.h> +#include <ATen/native/mkldnn/xpu/detail/oneDNNContext.h> +#include <ATen/native/mkldnn/xpu/detail/Attr.h> +#include <ATen/native/mkldnn/xpu/detail/Utils.h> + + +namespace at::native::onednn{ + +TORCH_API sycl::event matmul( + at::Tensor& result, + const at::Tensor& mat1, + const at::Tensor& mat2, + const at::Tensor& b_raw, + bool m2_trans, + Attr attr, + const std::vector<sycl::event>& deps = {}); + +} // namespace at::native::onednn diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/oneDNNContext.cpp b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNNContext.cpp new file mode 100644 index 0000000000..9bec64c8c0 --- /dev/null +++ b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNNContext.cpp @@ -0,0 +1,27 @@ +#include <ATen/native/mkldnn/xpu/detail/oneDNNContext.h> +#include <ATen/native/mkldnn/xpu/detail/Utils.h> + +/* * + * Do NOT put any kernels or call any device binaries here! + * Only maintain oneDNN runtime states in this file. 
+ * */ +namespace at::native::onednn { + +using namespace dnnl; + +GpuEngineManager& GpuEngineManager::Instance() { + static GpuEngineManager myInstance; + return myInstance; +} + +GpuStreamManager& GpuStreamManager::Instance() { + static thread_local GpuStreamManager myInstance; + return myInstance; +} + +bool set_onednn_verbose(int level) { + dnnl::status rs = dnnl::set_verbose(level); + return rs == dnnl::status::success; +} + +} // namespace at::native::onednn diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/oneDNNContext.h b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNNContext.h new file mode 100644 index 0000000000..c7e7a5e94b --- /dev/null +++ b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNNContext.h @@ -0,0 +1,75 @@ +#pragma once + +#include <ATen/Config.h> + +#include <c10/core/Device.h> +#include <c10/xpu/XPUFunctions.h> +#include <c10/xpu/XPUStream.h> + +#include <oneapi/dnnl/dnnl.hpp> +#include <oneapi/dnnl/dnnl_sycl.hpp> +#include <vector> + +namespace at::native::onednn { + +TORCH_API dnnl::memory make_onednn_memory( + dnnl::memory::desc md, + dnnl::engine& engine, + void* ptr); + +// Keep non-static and non-inline +bool set_onednn_verbose(int level); + +// GpuEngineManager singleton +struct TORCH_API GpuEngineManager { + static GpuEngineManager& Instance(); // Singleton + + dnnl::engine& get_engine(const Device& device) { + TORCH_INTERNAL_ASSERT(device.type() == kXPU); + TORCH_INTERNAL_ASSERT(device.index() < c10::xpu::device_count()); + return *engine_pool[device.index()]; + } + + GpuEngineManager(GpuEngineManager const&) = delete; + GpuEngineManager& operator=(GpuEngineManager const&) = delete; + + protected: + GpuEngineManager() { + int device_count = (int)c10::xpu::device_count(); + TORCH_INTERNAL_ASSERT(device_count > 0); + for (int i = 0; i < device_count; i++) { + engine_pool.push_back( + std::make_shared<dnnl::engine>(dnnl::sycl_interop::make_engine( + c10::xpu::get_raw_device(i), c10::xpu::get_device_context() + ))); + } + } + ~GpuEngineManager() {} + + private: + std::vector<std::shared_ptr<dnnl::engine>> engine_pool; +}; + +// GpuStreamManager singleton +struct TORCH_API GpuStreamManager { + static GpuStreamManager& Instance(); // Singleton + + dnnl::stream get_stream() { + c10::DeviceIndex device_index = c10::xpu::current_device(); + TORCH_INTERNAL_ASSERT(device_index < c10::xpu::device_count()); + return dnnl::sycl_interop::make_stream( + GpuEngineManager::Instance().get_engine({c10::kXPU, device_index}), + c10::xpu::getCurrentXPUStream(device_index).queue()); + } + + GpuStreamManager(GpuStreamManager const&) = delete; + GpuStreamManager& operator=(GpuStreamManager const&) = delete; + + protected: + GpuStreamManager() { + } + ~GpuStreamManager() {} + +}; + +} // namespace at::native::onednn
2.41.0
23bf9cef0ed582bd17801c9e36faba499adc993
Wed, 17 Apr 2024 11:59:20 -0700
[PATCH 0267/1000] Add fake impl for aten.unique2 (#124306)
Reapply of: https://github.com/pytorch/pytorch/pull/121571 Differential Revision: [D56258431](https://our.internmc.facebook.com/intern/diff/D56258431) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124306 Approved by: https://github.com/gmagogsfm
diff --git a/test/test_ops.py b/test/test_ops.py index 3be010f83f..3aa36bae6d 100644 --- a/test/test_ops.py +++ b/test/test_ops.py @@ -2374,6 +2374,15 @@ dynamic_output_op_tests = ( "linalg.lstsq.grad_oriented", ) +# Ops that have dynamic output shapes that we can handle when +# allow_dynamic_shape_ops is True in fake tensor shape environment. +supported_dynamic_output_op_tests = ( + "nonzero", + "unique", + "repeat_interleave", + "masked_select", +) + # some inputs invoke dynamic output shape operators, some do not sometimes_dynamic_output_op_test = ( "__getitem__", @@ -2442,12 +2451,28 @@ class TestFakeTensor(TestCase): samples = op.sample_inputs(device, dtype, requires_grad=False) for sample in samples: + mode = FakeTensorMode() + + from torch.fx.experimental.symbolic_shapes import ShapeEnv + + allow_dynamic_output_shape_shape_env = ShapeEnv( + allow_dynamic_output_shape_ops=True + ) + + allow_dynamic_output_shape_mode = FakeTensorMode( + shape_env=allow_dynamic_output_shape_shape_env + ) + try: - mode = FakeTensorMode() + with context(): + res = op(sample.input, *sample.args, **sample.kwargs) + except Exception: + continue + def run_with_fake_mode_and_verify(fake_mode, match_results=True): def map_to_fake(e): if isinstance(e, torch.Tensor): - return mode.from_tensor(e) + return fake_mode.from_tensor(e) else: return e @@ -2457,55 +2482,64 @@ class TestFakeTensor(TestCase): try: with context(): - res = op(sample.input, *sample.args, **sample.kwargs) - except Exception as e: - continue + with fake_mode: + res_fake = op(input, *args, **kwargs) - with context(): - with mode: - res_fake = op(input, *args, **kwargs) + if not match_results: + return - for fake_out, real_out in zip( - pytree.tree_leaves(res_fake), pytree.tree_leaves(res) - ): - if not isinstance(fake_out, torch.Tensor): - self.assertTrue(not isinstance(real_out, torch.Tensor)) - self.assertEqual(fake_out, real_out) - continue + for fake_out, real_out in zip( + pytree.tree_leaves(res_fake), pytree.tree_leaves(res) + ): + if not isinstance(fake_out, torch.Tensor): + self.assertTrue(not isinstance(real_out, torch.Tensor)) + self.assertEqual(fake_out, real_out) + continue - self.assertTrue(isinstance(fake_out, FakeTensor)) - # if you see a shape exception here, you may need to add - # a `dynamic_output_shape` tag to an operator + self.assertTrue(isinstance(fake_out, FakeTensor)) + # if you see a shape exception here, you may need to add + # a `dynamic_output_shape` tag to an operator - # prims/decomps must correctly model strides, - # see https://github.com/pytorch/pytorch/issues/78050#issuecomment-1253950325 - prims.utils.compare_tensor_meta(fake_out, real_out, True) + # prims/decomps must correctly model strides, + # see https://github.com/pytorch/pytorch/issues/78050#issuecomment-1253950325 + prims.utils.compare_tensor_meta(fake_out, real_out, True) - if name not in aliasing_failures: - fake_aliasing = outputs_alias_inputs( - (input, args, kwargs), res_fake - ) - real_aliasing = outputs_alias_inputs( - (sample.input, sample, args, sample.kwargs), res - ) - self.assertEqual(fake_aliasing, real_aliasing) + if name not in aliasing_failures: + fake_aliasing = outputs_alias_inputs( + (input, args, kwargs), res_fake + ) + real_aliasing = outputs_alias_inputs( + (sample.input, sample, args, sample.kwargs), res + ) + self.assertEqual(fake_aliasing, real_aliasing) - self.assertTrue( - name not in dynamic_output_op_tests - and name not in data_dependent_op_tests - ) + self.assertTrue( + name not in dynamic_output_op_tests + and name not 
in data_dependent_op_tests + ) - except torch._subclasses.fake_tensor.UnsupportedFakeTensorException: - pass - except torch._subclasses.fake_tensor.UnsupportedOperatorException: - pass - except torch._subclasses.fake_tensor.DynamicOutputShapeException: - self.assertTrue( - name in dynamic_output_op_tests - or name in sometimes_dynamic_output_op_test + except torch._subclasses.fake_tensor.UnsupportedFakeTensorException: + pass + except torch._subclasses.fake_tensor.UnsupportedOperatorException: + pass + except torch._subclasses.fake_tensor.DynamicOutputShapeException: + self.assertTrue( + name in dynamic_output_op_tests + or name in sometimes_dynamic_output_op_test + ) + self.assertTrue( + mode.shape_env is None + or not mode.shape_env.allow_dynamic_output_shape_ops + or name not in supported_dynamic_output_op_tests + ) + except torch._subclasses.fake_tensor.DataDependentOutputException: + self.assertTrue(name in data_dependent_op_tests) + + run_with_fake_mode_and_verify(mode) + if name in supported_dynamic_output_op_tests: + run_with_fake_mode_and_verify( + allow_dynamic_output_shape_mode, match_results=False ) - except torch._subclasses.fake_tensor.DataDependentOutputException: - self.assertTrue(name in data_dependent_op_tests) @ops(op_db, dtypes=OpDTypes.any_one) def test_pointwise_ops(self, device, dtype, op): diff --git a/torch/_subclasses/fake_impls.py b/torch/_subclasses/fake_impls.py index 1a6e59d073..244e90adcc 100644 --- a/torch/_subclasses/fake_impls.py +++ b/torch/_subclasses/fake_impls.py @@ -258,6 +258,62 @@ def dyn_shape(fake_mode, func, *args, **kwargs): raise DynamicOutputShapeException(func) +@register_op_impl(aten._unique2.default) +def unique2( + fake_mode, func, arg, sorted=True, return_inverse=False, return_counts=False +): + if ( + fake_mode.shape_env is None + or not fake_mode.shape_env.allow_dynamic_output_shape_ops + ): + # Without symints/symfloats, cannot handle this + raise DynamicOutputShapeException(func) + + if arg.unique_memo is None: + # Avoid importing sympy at a module level + from torch.fx.experimental.symbolic_shapes import ( + _constrain_range_for_size, + has_free_symbols, + ) + + if not has_free_symbols(arg.numel()) and arg.numel() == 0: + # If numel is zero, then the output size must be zero. + # In this case, we must not allocate an unbacked SymInt, + # because if we do, it will immediately get refined to + # zero, but this will be inconsistent with size oblivious + # tests (which will continue to claim that the unbacked + # symint cannot equal zero). We could also unconditionally + # allocate an unbacked SymInt and not refine its range, + # but this seems more precise. 
+ nnz = arg._nonzero_memo = 0 + arg._nonzero_memo_vc = arg._version + else: + nnz = fake_mode.shape_env.create_unbacked_symint() + + maxval = sys.maxsize - 1 + + if not has_free_symbols(arg.numel()): + maxval = int(arg.numel()) + + _constrain_range_for_size(nnz, max=maxval) + + arg.unique_memo = nnz + + ret = [arg.new_empty((arg.unique_memo,))] + + if return_inverse: + ret.append(torch.empty_like(arg)) + else: + ret.append(arg.new_empty(0)) + + if return_counts: + ret.append(torch.empty_like(arg)) + else: + ret.append(arg.new_empty(0)) + + return tuple(ret) + + @register_op_impl(aten.repeat_interleave.Tensor) def repeat_interleave_tensor(fake_mode, func, repeats, output_size=None): if output_size is None: diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py index 4cf1485a6f..ef6ab4c637 100644 --- a/torch/_subclasses/fake_tensor.py +++ b/torch/_subclasses/fake_tensor.py @@ -397,6 +397,31 @@ class FakeTensor(torch.Tensor): return None return self._nonzero_memo + # This memorizes the unbacked SymInt representing the number of unique + # elements in this tensor. This is helpful if you do something like + # calling torch.unique(x) multiple times and should + # give a consistent unbacked SymInt. It needs to be invalidated in the + # same way constant is. + # TODO: Generalize this as needed, e.g., into a trie of memos + _unique_memo: Optional[torch.SymInt] + _unique_memo_vc: Optional[int] + + @property + def unique_memo(self): + if self._unique_memo is None: + return None + # Version counter based tracking isn't 100% sound but it's close + # enough + if self._unique_memo_vc != self._version: + self._unique_memo = None + return None + return self._unique_memo + + @unique_memo.setter + def unique_memo(self, value): + self._unique_memo = value + self._unique_memo_vc = self._version + @property def device(self): if self.fake_mode.in_kernel_invocation: @@ -471,6 +496,9 @@ class FakeTensor(torch.Tensor): self.constant = constant # type: ignore[attr-defined] self._nonzero_memo = None # type: ignore[attr-defined] self._nonzero_memo_vc = None # type: ignore[attr-defined] + self._unique_memo = None # type: ignore[attr-defined] + self._unique_memo_vc = None # type: ignore[attr-defined] + if FakeTensorConfig.debug: self._debug_trace = CapturedTraceback.extract() # type: ignore[attr-defined] return self
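A minimal sketch of how the new fake impl behaves, adapted from the test added in this diff; the printed symbol name is illustrative.

```python
import torch
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.fx.experimental.symbolic_shapes import ShapeEnv

# With allow_dynamic_output_shape_ops=True, the new fake impl for aten._unique2
# allocates an unbacked SymInt for the number of unique elements (memoized on
# the input via unique_memo), instead of raising DynamicOutputShapeException.
shape_env = ShapeEnv(allow_dynamic_output_shape_ops=True)
with FakeTensorMode(shape_env=shape_env):
    x = torch.randint(0, 10, (32,))  # a FakeTensor inside the mode
    u = torch.unique(x)              # dispatches to aten._unique2.default
    print(u.shape)  # something like torch.Size([u0]); the size is constrained to <= 32
```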
2.41.0
ebdbb63ceaea0bba3643307cd3328c470c9d28c
Wed, 17 Apr 2024 12:44:54 -0700
[PATCH 0268/1000] Introduce set_example_value and use it throughout Dynamo (#124176)
I'm going to set up some extra behavior when we set example value, so I need a convenient place to interpose. I cannot easily do it on meta itself because it's a generic dict with no interposition point. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124176 Approved by: https://github.com/oulgen ghstack dependencies: #124105, #124059
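A stripped-down sketch of the pattern this commit introduces; the helper below mirrors the one added to `torch/_dynamo/utils.py`, and the in-function comment about validation/logging is a hypothetical example of the "extra behavior" the message refers to.

```python
# Sketch only: a single setter wrapping the raw dict write gives one place to
# hang extra behavior later, which scattered direct writes cannot offer.
def set_example_value(node, example_value):
    # extra behavior could be interposed here (e.g. validation or logging)
    node.meta["example_value"] = example_value

# Call sites change from direct dict writes
#     proxy.node.meta["example_value"] = example_value
# to the single choke point
#     set_example_value(proxy.node, example_value)
```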
diff --git a/torch/_dynamo/eval_frame.py b/torch/_dynamo/eval_frame.py index 20f8675c75..17249d8cc8 100644 --- a/torch/_dynamo/eval_frame.py +++ b/torch/_dynamo/eval_frame.py @@ -58,7 +58,7 @@ from .code_context import code_context from .exc import CondOpArgsMismatchError, UserError, UserErrorType from .mutation_guard import install_generation_tagging_init from .types import CacheEntry, DynamoCallback -from .utils import common_constant_types, compile_times +from .utils import common_constant_types, compile_times, set_example_value log = logging.getLogger(__name__) @@ -750,7 +750,7 @@ class FlattenInputOutputSignature(torch.fx.interpreter.Transformer): if "tensor_dict" in self.current_node.meta: arg.node.meta["tensor_dict"] = self.current_node.meta["tensor_dict"] if "example_value" in self.current_node.meta: - arg.node.meta["example_value"] = self.current_node.meta["example_value"] + set_example_value(arg.node, self.current_node.meta["example_value"]) return arg def output(self, target, args, kwargs): @@ -774,9 +774,9 @@ class FlattenInputOutputSignature(torch.fx.interpreter.Transformer): if "val" in self.current_node.meta: result_proxy.node.meta["val"] = self.current_node.meta["val"] if "example_value" in self.current_node.meta: - result_proxy.node.meta["example_value"] = self.current_node.meta[ - "example_value" - ] + set_example_value( + result_proxy.node, self.current_node.meta["example_value"] + ) if self.current_node.op != "output": result_proxy.node._rename( getattr(self.current_node, "name", result_proxy.node.name) diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py index 5d9a238c18..c72be35ea8 100644 --- a/torch/_dynamo/output_graph.py +++ b/torch/_dynamo/output_graph.py @@ -84,6 +84,7 @@ from .utils import ( LazyString, nn_module_proxy, same, + set_example_value, ) from .variables.base import VariableTracker from .variables.builder import ( @@ -434,7 +435,7 @@ class OutputGraph: "dynamo_backward_state", BackwardState, source=BackwardStateSource() ) self.backward_state_proxy.node.meta["grapharg"] = BackwardStateGraphArg() - self.backward_state_proxy.node.meta["example_value"] = BackwardState() + set_example_value(self.backward_state_proxy.node, BackwardState()) self.backward_state_var = self.new_var() return self.backward_state_proxy @@ -655,7 +656,7 @@ class OutputGraph: before=True, source=prop, ) - proxy.node.meta["example_value"] = s + set_example_value(proxy.node, s) proxy.node.meta["grapharg"] = GraphArg( prop, s, @@ -2139,7 +2140,7 @@ class SubgraphTracer(fx.Tracer): if proxy in self.lifted_freevars: return self.lifted_freevars[proxy] new_proxy = self.create_graph_input(proxy.node.name) - new_proxy.node.meta["example_value"] = proxy.node.meta["example_value"] + set_example_value(new_proxy.node, proxy.node.meta["example_value"]) self.lifted_freevars[proxy] = new_proxy if self.parent is not None and proxy.tracer != self.parent: self.parent.lift_tracked_freevar_to_input(proxy) diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py index 9f1f3aa3a7..ee1c777eed 100644 --- a/torch/_dynamo/utils.py +++ b/torch/_dynamo/utils.py @@ -1143,6 +1143,16 @@ def enum_repr(value, local): return local_name +def set_example_value(node, example_value): + # NB: example_value is a bit of a misnomer, because this is always a fake + # tensor of some sort. 
Furthermore, these example values serve as the + # runtime state of Dynamo tracing, which means if metadata mutation + # occurs, the example_value gets directly updated (so you can't rely on + # this to accurately reflect what the state of the value was at the time + # the program was traced). + node.meta["example_value"] = example_value + + def _get_fake_tensor(vt): fake_tensor = vt.as_proxy().node.meta.get("example_value") if not is_fake(fake_tensor): diff --git a/torch/_dynamo/variables/builder.py b/torch/_dynamo/variables/builder.py index 79320002a6..34abf3b8da 100644 --- a/torch/_dynamo/variables/builder.py +++ b/torch/_dynamo/variables/builder.py @@ -79,6 +79,7 @@ from ..utils import ( is_utils_checkpoint, istype, odict_values, + set_example_value, tensor_always_has_static_shape, tuple_iterator, tuple_iterator_getitem, @@ -658,7 +659,7 @@ class VariableBuilder: "device_type": value.device_type, }, ) - stream_proxy.node.meta["example_value"] = value + set_example_value(stream_proxy.node, value) return StreamVariable( stream_proxy, value, @@ -1554,7 +1555,7 @@ def wrap_fx_proxy_cls( # (WARNING: this means that if we mutate metadata on the fake # tensor, the stored example value will update too!) example_value = _clone_input(example_value) - proxy.node.meta["example_value"] = example_value + set_example_value(proxy.node, example_value) specialized_props = target_cls.specialize(example_value) # TODO: not sure about this fake mode test if ( @@ -1586,7 +1587,7 @@ def wrap_fx_proxy_cls( sizes = [ConstantVariable.create(x) for x in example_value] return SizeVariable(sizes, **options) elif isinstance(example_value, (tuple, list)): - proxy.node.meta["example_value"] = example_value + set_example_value(proxy.node, example_value) unpacked = [] for i, val in enumerate(example_value): if val is None: @@ -1638,7 +1639,7 @@ def wrap_fx_proxy_cls( elif example_value is None or proxy.node.target is torch.manual_seed: return ConstantVariable.create(None, **options) elif isinstance(example_value, (torch.SymInt, torch.SymFloat, torch.SymBool)): - proxy.node.meta["example_value"] = example_value + set_example_value(proxy.node, example_value) return SymNodeVariable(proxy, example_value, **options) elif ( inspect.isclass(proxy.node.target) @@ -1647,7 +1648,7 @@ def wrap_fx_proxy_cls( device_interface.current_stream for _, device_interface in get_registered_device_interfaces() ]: - proxy.node.meta["example_value"] = example_value + set_example_value(proxy.node, example_value) return StreamVariable(proxy, example_value, example_value.device, **options) elif ( inspect.isclass(proxy.node.target) and issubclass(proxy.node.target, _EventBase) @@ -1655,10 +1656,10 @@ def wrap_fx_proxy_cls( device_interface.Event for _, device_interface in get_registered_device_interfaces() ]: - proxy.node.meta["example_value"] = example_value + set_example_value(proxy.node, example_value) return EventVariable(proxy, example_value, **options) elif proxy.node.target == "query" and proxy.node.op == "call_method": - proxy.node.meta["example_value"] = example_value + set_example_value(proxy.node, example_value) return ConstantVariable(example_value, **options) elif ( example_value is not None @@ -1666,7 +1667,7 @@ def wrap_fx_proxy_cls( and proxy.node.target == "record_event" and proxy.node.op == "call_method" ): - proxy.node.meta["example_value"] = example_value + set_example_value(proxy.node, example_value) return EventVariable(proxy, example_value, **options) elif isinstance(example_value, int) and proxy.node.target in [ 
torch.sym_int, @@ -1684,18 +1685,18 @@ def wrap_fx_proxy_cls( torch._constrain_as_value, torch._constrain_as_size, ]: - proxy.node.meta["example_value"] = example_value + set_example_value(proxy.node, example_value) return ConstantVariable.create(example_value, **options) elif isinstance(example_value, torch.backends.cuda.SDPAParams): from .sdpa import SDPAParamsVariable - proxy.node.meta["example_value"] = example_value + set_example_value(proxy.node, example_value) return SDPAParamsVariable(proxy, **options) elif isinstance(example_value, bool) and proxy.node.target in [ torch.backends.cuda.can_use_flash_attention, torch.backends.cuda.can_use_efficient_attention, ]: - proxy.node.meta["example_value"] = example_value + set_example_value(proxy.node, example_value) return ConstantVariable.create(example_value, **options) else: unimplemented( diff --git a/torch/_dynamo/variables/lists.py b/torch/_dynamo/variables/lists.py index cb5c641c67..1311bcae1f 100644 --- a/torch/_dynamo/variables/lists.py +++ b/torch/_dynamo/variables/lists.py @@ -23,6 +23,7 @@ from ..utils import ( iter_contains, namedtuple_fields, odict_values, + set_example_value, ) from .base import MutableLocal, VariableTracker from .constant import ConstantVariable @@ -442,11 +443,14 @@ class SizeVariable(TupleVariable): return torch.Size(proxies) proxy = tracer.create_proxy("call_function", torch.Size, (proxies,), {}) - proxy.node.meta["example_value"] = torch.Size( - [ - p.node.meta["example_value"] if not isinstance(p, int) else p - for p in proxies - ] + set_example_value( + proxy.node, + torch.Size( + [ + p.node.meta["example_value"] if not isinstance(p, int) else p + for p in proxies + ] + ), ) return proxy diff --git a/torch/_dynamo/variables/misc.py b/torch/_dynamo/variables/misc.py index b0ef237edd..83ddc372bd 100644 --- a/torch/_dynamo/variables/misc.py +++ b/torch/_dynamo/variables/misc.py @@ -22,6 +22,7 @@ from ..utils import ( identity, is_tensor_base_attr_getter, proxy_args_kwargs, + set_example_value, ) from .base import VariableTracker from .functions import NestedUserFunctionVariable, UserFunctionVariable @@ -501,7 +502,7 @@ class AutogradFunctionContextVariable(UserDefinedObjectVariable): ), {}, ) - proxy.node.meta["example_value"] = out.value + set_example_value(proxy.node, out.value) return out diff --git a/torch/_dynamo/variables/nn_module.py b/torch/_dynamo/variables/nn_module.py index 5fbd502a3c..32ef830577 100644 --- a/torch/_dynamo/variables/nn_module.py +++ b/torch/_dynamo/variables/nn_module.py @@ -31,6 +31,7 @@ from ..utils import ( nnmodule_has_hooks, object_has_getattribute, proxy_args_kwargs, + set_example_value, ) from .base import MutableLocal, typestr, VariableTracker from .functions import invoke_and_store_as_constant @@ -376,7 +377,7 @@ class NNModuleVariable(VariableTracker): tuple(), {}, ) - mod_proxy.node.meta["example_value"] = module + set_example_value(mod_proxy.node, module) proxy_args, proxy_kwargs = proxy_args_kwargs(args, kwargs) diff --git a/torch/_dynamo/variables/tensor.py b/torch/_dynamo/variables/tensor.py index 480aa91469..e4cc623184 100644 --- a/torch/_dynamo/variables/tensor.py +++ b/torch/_dynamo/variables/tensor.py @@ -40,6 +40,7 @@ from ..utils import ( object_has_getattribute, product, proxy_args_kwargs, + set_example_value, tensortype_to_dtype, ) from .base import _is_top_level_scope, VariableTracker @@ -963,7 +964,7 @@ class SymNodeVariable(VariableTracker): assert proxy.node.meta["example_value"] == sym_num if sym_num is None: sym_num = get_fake_value(proxy.node, tx) - 
proxy.node.meta["example_value"] = sym_num + set_example_value(proxy.node, sym_num) if isinstance(sym_num, (sympy.Integer, int, bool)): sym_num = int(sym_num) if isinstance(sym_num, sympy.Integer) else sym_num
2.41.0
330acae768273bae28e2df38046992da5d49937
Tue, 16 Apr 2024 16:08:36 +0000
[PATCH 0269/1000] Refactored implementation for upsample_nearest decompositions (#122783)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/122783 Approved by: https://github.com/peterbell10
diff --git a/torch/_decomp/decompositions.py b/torch/_decomp/decompositions.py index 56f63952f4..3ef43ad4b1 100644 --- a/torch/_decomp/decompositions.py +++ b/torch/_decomp/decompositions.py @@ -2647,71 +2647,45 @@ def get_scale_value(scales, idx): @register_decomposition(aten.upsample_nearest1d.vec) +@register_decomposition(aten.upsample_nearest2d.vec) +@register_decomposition(aten.upsample_nearest3d.vec) @aten.upsample_nearest1d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) @aten.upsample_nearest1d.vec.py_impl(DispatchKey.Autograd) -def upsample_nearest1d_vec(input, output_size, scale_factors): - osize = upsample_compute_output_size(input.size(), output_size, scale_factors) - scale = get_scale_value(scale_factors, 0) - - return aten.upsample_nearest1d.default(input, osize, scale) - - -@register_decomposition(aten._upsample_nearest_exact1d.vec) -@aten._upsample_nearest_exact1d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) -@aten._upsample_nearest_exact1d.vec.py_impl(DispatchKey.Autograd) -def _upsample_nearest_exact1d_vec(input, output_size, scale_factors): - osize = upsample_compute_output_size(input.size(), output_size, scale_factors) - scale = get_scale_value(scale_factors, 0) - - return aten._upsample_nearest_exact1d.default(input, osize, scale) - - -@register_decomposition(aten.upsample_nearest2d.vec) @aten.upsample_nearest2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) @aten.upsample_nearest2d.vec.py_impl(DispatchKey.Autograd) -def upsample_nearest2d_vec(input, output_size, scale_factors): - osize = upsample_compute_output_size(input.size(), output_size, scale_factors) - scale_h = get_scale_value(scale_factors, 0) - scale_w = get_scale_value(scale_factors, 1) - - return aten.upsample_nearest2d.default(input, osize, scale_h, scale_w) - - -@register_decomposition(aten._upsample_nearest_exact2d.vec) -@aten._upsample_nearest_exact2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) -@aten._upsample_nearest_exact2d.vec.py_impl(DispatchKey.Autograd) -def _upsample_nearest_exact2d_vec(input, output_size, scale_factors): - osize = upsample_compute_output_size(input.size(), output_size, scale_factors) - scale_h = get_scale_value(scale_factors, 0) - scale_w = get_scale_value(scale_factors, 1) - - return aten._upsample_nearest_exact2d.default(input, osize, scale_h, scale_w) - - -@register_decomposition(aten.upsample_nearest3d.vec) @aten.upsample_nearest3d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) @aten.upsample_nearest3d.vec.py_impl(DispatchKey.Autograd) -def upsample_nearest3d_vec(input, output_size, scale_factors): +def _upsample_nearest_vec( + input: Tensor, + output_size: Optional[List[int]], + scale_factors: Optional[List[float]], +) -> Tensor: osize = upsample_compute_output_size(input.size(), output_size, scale_factors) - scale_d = get_scale_value(scale_factors, 0) - scale_h = get_scale_value(scale_factors, 1) - scale_w = get_scale_value(scale_factors, 2) - - return aten.upsample_nearest3d.default(input, osize, scale_d, scale_h, scale_w) + scales = ( + scale_factors if scale_factors else [None] * len(osize) # type: ignore[list-item] + ) + return _upsample_nearest(input, osize, scales) +@register_decomposition(aten._upsample_nearest_exact1d.vec) +@register_decomposition(aten._upsample_nearest_exact2d.vec) @register_decomposition(aten._upsample_nearest_exact3d.vec) +@aten._upsample_nearest_exact1d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten._upsample_nearest_exact1d.vec.py_impl(DispatchKey.Autograd) 
+@aten._upsample_nearest_exact2d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten._upsample_nearest_exact2d.vec.py_impl(DispatchKey.Autograd) @aten._upsample_nearest_exact3d.vec.py_impl(DispatchKey.CompositeImplicitAutograd) @aten._upsample_nearest_exact3d.vec.py_impl(DispatchKey.Autograd) -def _upsample_nearest_exact3d_vec(input, output_size, scale_factors): +def _upsample_nearest_exact_vec( + input: Tensor, + output_size: Optional[List[int]], + scale_factors: Optional[List[float]], +) -> Tensor: osize = upsample_compute_output_size(input.size(), output_size, scale_factors) - scale_d = get_scale_value(scale_factors, 0) - scale_h = get_scale_value(scale_factors, 1) - scale_w = get_scale_value(scale_factors, 2) - - return aten._upsample_nearest_exact3d.default( - input, osize, scale_d, scale_h, scale_w + scales = ( + scale_factors if scale_factors else [None] * len(osize) # type: ignore[list-item] ) + return _upsample_nearest(input, osize, scales, exact=True) def _compute_upsample_nearest_indices(input, output_size, scales, exact=False): @@ -2743,88 +2717,58 @@ def _compute_upsample_nearest_indices(input, output_size, scales, exact=False): for _ in range(num_spatial_dims - 1 - d): input_indices = input_indices.unsqueeze(-1) indices.append(input_indices) - return tuple(indices) + return indices @register_decomposition(aten.upsample_nearest1d.default) @aten.upsample_nearest1d.default.py_impl(DispatchKey.CompositeImplicitAutograd) @aten.upsample_nearest1d.default.py_impl(DispatchKey.Autograd) -@pw_cast_for_opmath def upsample_nearest1d( input: Tensor, output_size: List[int], scales: Optional[float] = None, ) -> Tensor: - (l_indices,) = _compute_upsample_nearest_indices(input, output_size, (scales,)) - return aten._unsafe_index(input, (None, None, l_indices)) + return _upsample_nearest(input, output_size, [scales]) @register_decomposition(aten._upsample_nearest_exact1d.default) @aten._upsample_nearest_exact1d.default.py_impl(DispatchKey.CompositeImplicitAutograd) @aten._upsample_nearest_exact1d.default.py_impl(DispatchKey.Autograd) -@pw_cast_for_opmath -def _upsample_nearest_exact1d( +def upsample_nearest_exact1d( input: Tensor, output_size: List[int], scales: Optional[float] = None, ) -> Tensor: - (l_indices,) = _compute_upsample_nearest_indices( - input, output_size, (scales,), exact=True - ) - return aten._unsafe_index(input, (None, None, l_indices)) - - -def _upsample_nearest2d_common(input, h_indices, w_indices): - result = aten._unsafe_index(input, (None, None, h_indices, w_indices)) - - # convert output to correct memory format, if necessary - memory_format = utils.suggest_memory_format(input) - - # following "heuristic: only use channels_last path when it's faster than the contiguous path" - _, n_channels, _, _ = input.shape - if input.device.type == "cuda" and n_channels < 4: - memory_format = torch.contiguous_format - - result = result.contiguous(memory_format=memory_format) - return result + return _upsample_nearest(input, output_size, [scales], exact=True) @register_decomposition(aten.upsample_nearest2d.default) @aten.upsample_nearest2d.default.py_impl(DispatchKey.CompositeImplicitAutograd) @aten.upsample_nearest2d.default.py_impl(DispatchKey.Autograd) -@pw_cast_for_opmath def upsample_nearest2d( input: Tensor, output_size: List[int], scales_h: Optional[float] = None, scales_w: Optional[float] = None, ) -> Tensor: - h_indices, w_indices = _compute_upsample_nearest_indices( - input, output_size, (scales_h, scales_w) - ) - return _upsample_nearest2d_common(input, h_indices, 
w_indices) + return _upsample_nearest(input, output_size, [scales_h, scales_w]) @register_decomposition(aten._upsample_nearest_exact2d.default) @aten._upsample_nearest_exact2d.default.py_impl(DispatchKey.CompositeImplicitAutograd) @aten._upsample_nearest_exact2d.default.py_impl(DispatchKey.Autograd) -@pw_cast_for_opmath def _upsample_nearest_exact2d( input: Tensor, output_size: List[int], scales_h: Optional[float] = None, scales_w: Optional[float] = None, ) -> Tensor: - h_indices, w_indices = _compute_upsample_nearest_indices( - input, output_size, (scales_h, scales_w), exact=True - ) - return _upsample_nearest2d_common(input, h_indices, w_indices) + return _upsample_nearest(input, output_size, [scales_h, scales_w], exact=True) @register_decomposition(aten.upsample_nearest3d.default) @aten.upsample_nearest3d.default.py_impl(DispatchKey.CompositeImplicitAutograd) @aten.upsample_nearest3d.default.py_impl(DispatchKey.Autograd) -@pw_cast_for_opmath def upsample_nearest3d( input: Tensor, output_size: List[int], @@ -2832,18 +2776,12 @@ def upsample_nearest3d( scales_h: Optional[float] = None, scales_w: Optional[float] = None, ) -> Tensor: - d_indices, h_indices, w_indices = _compute_upsample_nearest_indices( - input, output_size, (scales_d, scales_h, scales_w) - ) - result = aten._unsafe_index(input, (None, None, d_indices, h_indices, w_indices)) - - return result + return _upsample_nearest(input, output_size, [scales_d, scales_h, scales_w]) @register_decomposition(aten._upsample_nearest_exact3d.default) @aten._upsample_nearest_exact3d.default.py_impl(DispatchKey.CompositeImplicitAutograd) @aten._upsample_nearest_exact3d.default.py_impl(DispatchKey.Autograd) -@pw_cast_for_opmath def _upsample_nearest_exact3d( input: Tensor, output_size: List[int], @@ -2851,11 +2789,35 @@ def _upsample_nearest_exact3d( scales_h: Optional[float] = None, scales_w: Optional[float] = None, ) -> Tensor: - d_indices, h_indices, w_indices = _compute_upsample_nearest_indices( - input, output_size, (scales_d, scales_h, scales_w), exact=True + return _upsample_nearest( + input, output_size, [scales_d, scales_h, scales_w], exact=True ) - result = aten._unsafe_index(input, (None, None, d_indices, h_indices, w_indices)) + +@pw_cast_for_opmath +def _upsample_nearest( + input: Tensor, + output_size: List[int], + scales: List[Optional[float]], + exact: bool = False, +) -> Tensor: + spatial_indices = _compute_upsample_nearest_indices( + input, output_size, scales, exact=exact + ) + + indices = [None, None] + spatial_indices + result = aten._unsafe_index(input, indices) + + if result.ndim == 4: + # convert output to correct memory format, if necessary + memory_format = utils.suggest_memory_format(input) + + # following "heuristic: only use channels_last path when it's faster than the contiguous path" + n_channels = input.shape[1] + if input.device.type == "cuda" and n_channels < 4: + memory_format = torch.contiguous_format + + result = result.contiguous(memory_format=memory_format) return result
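For context on the consolidation above: every nearest / nearest-exact variant reduces to computing per-dimension source indices and gathering with them. Below is a standalone sketch of that idea for the 1d case — an illustrative helper, not the ATen decomposition itself, and the nearest-exact branch assumes the rule floor((i + 0.5) * scale).

```python
import torch
import torch.nn.functional as F

def nearest_indices(in_size: int, out_size: int, exact: bool = False) -> torch.Tensor:
    # scale maps output coordinates back to input coordinates
    scale = in_size / out_size
    out = torch.arange(out_size, dtype=torch.float64)
    src = (out + 0.5) * scale if exact else out * scale  # exact rule is an assumption here
    return src.floor().clamp_(max=in_size - 1).to(torch.int64)

x = torch.arange(6.0).reshape(1, 1, 6)          # (N, C, L)
idx = nearest_indices(x.size(-1), 10)
up = x[:, :, idx]                               # gather along the spatial dim
print(torch.equal(up, F.interpolate(x, size=10, mode="nearest")))
```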
2.41.0
4162eecfcb5cc11139260c034c653e972a9073a
Wed, 17 Apr 2024 23:08:45 +0000
[PATCH 0271/1000] Update Security Policy to provide Security Guidance for users (#120531)
Fixes #120530 Co-authored-by: albanD <desmaison.alban@gmail.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/120531 Approved by: https://github.com/malfet, https://github.com/albanD
diff --git a/SECURITY.md b/SECURITY.md index 0651f82b70..e8e0249fc8 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,9 +1,56 @@ -# Reporting Security Issues +# Security Policy -If you believe you have found a security vulnerability in PyTorch, we encourage you to let us know right away. We will investigate all legitimate reports and do our best to quickly fix the problem. + - [**Reporting a Vulnerability**](#reporting-a-vulnerability) + - [**Using Pytorch Securely**](#using-pytorch-securely) + - [Untrusted models](#untrusted-models) + - [Untrusted inputs](#untrusted-inputs) + - [Data privacy](#data-privacy) + +## Reporting Security Issues + +Beware that none of the topics under [Using Pytorch Securely](#using-pytorch-securely) are considered vulnerabilities of Pytorch. + +However, if you believe you have found a security vulnerability in PyTorch, we encourage you to let us know right away. We will investigate all legitimate reports and do our best to quickly fix the problem. Please report security issues using https://github.com/pytorch/pytorch/security/advisories/new Please refer to the following page for our responsible disclosure policy, reward guidelines, and those things that should not be reported: https://www.facebook.com/whitehat + + +## Using Pytorch Securely +**Pytorch models are programs**, so treat its security seriously -- running untrusted models is equivalent to running untrusted code. In general we recommend that model weights and the python code for the model are distributed independently. That said, be careful about where you get the python code from and who wrote it (preferentially check for a provenance or checksums, do not run any pip installed package). + +### Untrusted models +Be careful when running untrusted models. This classification includes models created by unknown developers or utilizing data obtained from unknown sources[^data-poisoning-sources]. + +**Prefer to execute untrusted models within a secure, isolated environment such as a sandbox** (e.g., containers, virtual machines). This helps protect your system from potentially malicious code. You can find further details and instructions in [this page](https://developers.google.com/code-sandboxing). + +**Be mindful of risky model formats**. Give preference to share and load weights with the appropriate format for your use case. [safetensors](https://huggingface.co/docs/safetensors/en/index) gives the most safety but is the most restricted in what it supports. [`torch.load`](https://pytorch.org/docs/stable/generated/torch.load.html#torch.load) with `weights_only=True` is also secure to our knowledge even though it offers significantly larger surface of attack. Loading un-trusted checkpoint with `weights_only=False` MUST never be done. + + + +Important Note: The trustworthiness of a model is not binary. You must always determine the proper level of caution depending on the specific model and how it matches your use case and risk tolerance. + +[^data-poisoning-sources]: To understand risks of utilization of data from unknown sources, read the following Cornell papers on Data poisoning: + https://arxiv.org/abs/2312.04748 + https://arxiv.org/abs/2401.05566 + +### Untrusted inputs during training and prediction + +If you plan to open your model to untrusted inputs, be aware that inputs can also be used as vectors by malicious agents. To minimize risks, make sure to give your model only the permisisons strictly required, and keep your libraries updated with the lates security patches. 
+ +If applicable, prepare your model against bad inputs and prompt injections. Some recommendations: +- Pre-analysis: check how the model performs by default when exposed to prompt injection (e.g. using fuzzing for prompt injection). +- Input Sanitation: Before feeding data to the model, sanitize inputs rigorously. This involves techniques such as: + - Validation: Enforce strict rules on allowed characters and data types. + - Filtering: Remove potentially malicious scripts or code fragments. + - Encoding: Convert special characters into safe representations. + - Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)). + +### Data privacy + +**Take special security measures if your model if you train models with sensitive data**. Prioritize [sandboxing](https://developers.google.com/code-sandboxing) your models and: +- Do not feed sensitive data to untrusted model (even if runs in a sandboxed environment) +- If you consider publishing a model that was partially trained with sensitive data, be aware that data can potentially be recovered from the trained weights (especially if model overfits).
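The checkpoint-loading guidance above is easy to follow in practice. A minimal sketch (the checkpoint path is hypothetical):

```python
import torch

# Prefer weights_only=True: unpickling is restricted to tensors and plain
# containers, so an untrusted checkpoint cannot execute arbitrary code on load.
state_dict = torch.load("checkpoint.pt", weights_only=True)

# Never do this with files you do not fully trust:
# state_dict = torch.load("checkpoint.pt", weights_only=False)
```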
2.41.0
d7aeedb72f8a96d0f168308292e0d41c095f01b
Wed, 17 Apr 2024 23:26:55 +0000
[PATCH 0273/1000] [Dynamo] Check for __bool__ attribute before accessing it (#120943)
This PR checks whether the __bool__ attribute is available before accessing it when handling a UserDefinedObjectVariable. Fixes #119782 Pull Request resolved: https://github.com/pytorch/pytorch/pull/120943 Approved by: https://github.com/zou3519
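A minimal sketch of the kind of user code that exercises this path (assumed for illustration, not necessarily the original repro from #119782): branching on a plain user-defined object that defines neither `__bool__` nor `__len__`.

```python
import torch

class Config:
    def __init__(self, scale):
        self.scale = scale  # defines neither __bool__ nor __len__

def f(x, cfg):
    if cfg:                 # Dynamo must decide truthiness of a UserDefinedObjectVariable
        return x * cfg.scale
    return x

compiled = torch.compile(f, backend="eager")
print(compiled(torch.ones(3), Config(2.0)))  # tensor([2., 2., 2.])
```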
diff --git a/test/dynamo_expected_failures/TestComposability.test_convert_without_squash_mask b/test/dynamo_expected_failures/TestComposability.test_convert_without_squash_mask deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestComposability.test_fusion_before_s_prep b/test/dynamo_expected_failures/TestComposability.test_fusion_before_s_prep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestComposability.test_q_prep_before_s_prep b/test/dynamo_expected_failures/TestComposability.test_q_prep_before_s_prep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestComposability.test_qat_prep_before_s_prep b/test/dynamo_expected_failures/TestComposability.test_qat_prep_before_s_prep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestComposability.test_s_prep_before_fusion b/test/dynamo_expected_failures/TestComposability.test_s_prep_before_fusion deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestComposability.test_s_prep_before_q_prep b/test/dynamo_expected_failures/TestComposability.test_s_prep_before_q_prep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestComposability.test_s_prep_before_qat_prep b/test/dynamo_expected_failures/TestComposability.test_s_prep_before_qat_prep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestGenerateNumericDebugHandle.test_quantize_pt2e_preserve_handle b/test/dynamo_expected_failures/TestGenerateNumericDebugHandle.test_quantize_pt2e_preserve_handle deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_add b/test/dynamo_expected_failures/TestPT2ERepresentation.test_add deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_add_relu b/test/dynamo_expected_failures/TestPT2ERepresentation.test_add_relu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_conv2d b/test/dynamo_expected_failures/TestPT2ERepresentation.test_conv2d deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_dynamic_linear b/test/dynamo_expected_failures/TestPT2ERepresentation.test_dynamic_linear deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_maxpool2d b/test/dynamo_expected_failures/TestPT2ERepresentation.test_maxpool2d deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_qdq b/test/dynamo_expected_failures/TestPT2ERepresentation.test_qdq deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_qdq_per_channel b/test/dynamo_expected_failures/TestPT2ERepresentation.test_qdq_per_channel deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_static_linear b/test/dynamo_expected_failures/TestPT2ERepresentation.test_static_linear deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_composable_quantizer_linear_conv 
b/test/dynamo_expected_failures/TestQuantizePT2E.test_composable_quantizer_linear_conv deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_constant_prop_preserve_metadata b/test/dynamo_expected_failures/TestQuantizePT2E.test_constant_prop_preserve_metadata deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_embedding_conv_linear_quantization b/test/dynamo_expected_failures/TestQuantizePT2E.test_embedding_conv_linear_quantization deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_all_ops_before_quantize b/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_all_ops_before_quantize deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_quantize b/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_quantize deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_quantize_per_channel b/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_quantize_per_channel deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_groupwise_per_channel_quant b/test/dynamo_expected_failures/TestQuantizePT2E.test_groupwise_per_channel_quant deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_reentrant b/test/dynamo_expected_failures/TestQuantizePT2E.test_reentrant deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_save_load b/test/dynamo_expected_failures/TestQuantizePT2E.test_save_load deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_speed b/test/dynamo_expected_failures/TestQuantizePT2E.test_speed deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQATModels.test_qat_mobilenet_v2 b/test/dynamo_expected_failures/TestQuantizePT2EQATModels.test_qat_mobilenet_v2 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQATModels.test_qat_resnet18 b/test/dynamo_expected_failures/TestQuantizePT2EQATModels.test_qat_resnet18 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_prepare_qat_conv_bn_fusion_getitem_placeholder b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_prepare_qat_conv_bn_fusion_getitem_placeholder deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion_literal_args b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion_literal_args deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion_no_conv_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion_no_conv_bias deleted file mode 100644 index 
e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_relu_fusion b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_relu_fusion deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_relu_fusion_no_conv_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_relu_fusion_no_conv_bias deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_no_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_no_bias deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_inplace_add_relu b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_inplace_add_relu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_preserve_source_fn_stack b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_preserve_source_fn_stack deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_update_shared_qspec b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_update_shared_qspec deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_prepare_qat_conv_bn_fusion_getitem_placeholder b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_prepare_qat_conv_bn_fusion_getitem_placeholder deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion_literal_args b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion_literal_args deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion_no_conv_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion_no_conv_bias deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_relu_fusion b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_relu_fusion deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_relu_fusion_no_conv_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_relu_fusion_no_conv_bias deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_no_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_no_bias deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_inplace_add_relu b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_inplace_add_relu deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_preserve_source_fn_stack b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_preserve_source_fn_stack deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_update_shared_qspec b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_update_shared_qspec deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_and_inplace_add b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_and_inplace_add deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_mul_long b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_mul_long deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_mul_scalar b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_mul_scalar deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv1d b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv1d deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv1d_with_conv2d b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv1d_with_conv2d deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv2d b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv2d deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv_linear b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv_linear deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv_linear_no_permute b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv_linear_no_permute deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear_int4_weight b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear_int4_weight deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear_with_conv b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear_with_conv deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_gru b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_gru deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_gru b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_gru deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_relu 
b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_relu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_with_dynamic_shape b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_with_dynamic_shape deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_mul_and_inplace_mul b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_mul_and_inplace_mul deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_mul_float32_max b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_mul_float32_max deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_obs_sharing_ops b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_obs_sharing_ops deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_propagate_annotation b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_propagate_annotation deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_qat_dynamic_linear b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_qat_dynamic_linear deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizerModels.test_resnet18 b/test/dynamo_expected_failures/TestXNNPACKQuantizerModels.test_resnet18 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py index 0c8e5898e6..621b09670c 100644 --- a/torch/_dynamo/symbolic_convert.py +++ b/torch/_dynamo/symbolic_convert.py @@ -409,10 +409,14 @@ def generic_jump(truth_fn: typing.Callable[[object], bool], push: bool): self.push(value) self.jump(inst) elif isinstance(value, UserDefinedObjectVariable): - x = value.var_getattr(self, "__bool__") - # if __bool__ is missing, trying __len__ to infer a truth value. - if isinstance(x, GetAttrVariable): - x = value.var_getattr(self, "__len__") + x = None + has_bool = value.call_hasattr(self, "__bool__") + if has_bool.is_python_constant() and has_bool.as_python_constant(): + x = value.var_getattr(self, "__bool__") + else: + has_len = value.call_hasattr(self, "__len__") + if has_len.is_python_constant() and has_len.as_python_constant(): + x = value.var_getattr(self, "__len__") # __bool__ or __len__ is function if isinstance(x, UserMethodVariable):
2.41.0
4f42bfd528ccb8cf1ac5c4debc6fa101ca5edb7
Tue, 16 Apr 2024 20:26:48 +0000
[PATCH 0274/1000] [dynamo] Support list.reverse (#124210)
fixes #123974 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124210 Approved by: https://github.com/peterbell10
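A smaller sketch of the newly supported pattern (the diff below adds a fuller ladder-shaped test for the same thing): calling `reverse()` on a Python list inside a compiled function.

```python
import torch

@torch.compile(backend="eager", fullgraph=True)
def f(x):
    weights = [1.0, 2.0, 3.0]
    weights.reverse()       # in-place list mutation, now traced without a graph break
    return x * weights[0]

print(f(torch.ones(2)))     # tensor([3., 3.])
```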
diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py index 50b42b6557..754aef6d34 100644 --- a/test/dynamo/test_repros.py +++ b/test/dynamo/test_repros.py @@ -4777,6 +4777,29 @@ def forward(self, s0 : torch.SymInt, s1 : torch.SymInt, L_x_ : torch.Tensor): global_fn = new_fn self.assertEqual(opt(x, y), foo(x, y)) + # ref https://github.com/pytorch/pytorch/issues/123974 + def test_list_reverse(self): + def ladder(x): + trail = x.size(-1) + assert trail > 2 + weights = [] + for s in [trail, trail - 1, trail - 2]: + weights.append(torch.ones(s, s - 1)) + + for w in weights: + x = x @ w + + weights.reverse() + + for w in weights: + x = x @ w.t() + + return x + + data = torch.randn(3, 4) + opt_ladder = torch.compile(ladder, fullgraph=True, backend="eager") + self.assertEqual(opt_ladder(data), ladder(data)) + instantiate_parametrized_tests(ReproTests) diff --git a/torch/_dynamo/variables/lists.py b/torch/_dynamo/variables/lists.py index 1311bcae1f..46c7df6796 100644 --- a/torch/_dynamo/variables/lists.py +++ b/torch/_dynamo/variables/lists.py @@ -257,6 +257,12 @@ class CommonListMethodsVariable(BaseListVariable): assert not args items = list(self.items) return self.modified(items, mutable_local=MutableLocal()) + elif name == "reverse" and self.mutable_local: + assert not kwargs + assert not args + self.items.reverse() + tx.output.side_effects.mutation(self) + return ConstantVariable.create(None) else: return super().call_method(tx, name, args, kwargs)
2.41.0
5235694f467e75003164a5078f5ec67406146b0
Wed, 17 Apr 2024 08:53:03 -0700
[PATCH 0275/1000] [FSDP2] Made `unshard` return type consistent (#124293)
We can always return an `UnshardHandle` if `async_op=True`, even if the FSDP module does not manage any parameters and hence does not have an `FSDPParamGroup`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124293 Approved by: https://github.com/weifengpy ghstack dependencies: #120952
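A sketch of the calling convention this makes consistent, modeled on the new multi-thread test in the diff below (it assumes an initialized process group and CUDA devices, as in that test):

```python
import torch.nn as nn
from torch.distributed._composable.fsdp import fully_shard

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4))
for lin in model:
    fully_shard(lin)
fully_shard(model)  # the root module manages no parameters of its own

handle = model.unshard(async_op=True)  # explicit prefetch of the all-gather
handle.wait()                          # now safe even when the unshard is a no-op
```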
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_comm.py b/test/distributed/_composable/fsdp/test_fully_shard_comm.py index f41f4f2009..d5e3393f82 100644 --- a/test/distributed/_composable/fsdp/test_fully_shard_comm.py +++ b/test/distributed/_composable/fsdp/test_fully_shard_comm.py @@ -577,7 +577,7 @@ class TestFullyShardBackwardPrefetch(FSDPTest): return post_backward_with_record -class TestFullyShardUnshard(FSDPTest): +class TestFullyShardUnshardMultiProcess(FSDPTest): @property def world_size(self) -> int: return min(torch.cuda.device_count(), 2) @@ -660,5 +660,22 @@ class TestFullyShardUnshard(FSDPTest): self.assertEqual(losses[0], losses[1]) +class TestFullyShardUnshardMultiThread(FSDPTestMultiThread): + @property + def world_size(self) -> int: + return 2 + + @unittest.skipIf(not TEST_CUDA, "no cuda") + def test_unshard_no_param_group(self): + # Check that we can call `unshard()` on a module with no parameter + # group / no managed parameters without erroring + model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4)) + for lin in model: + fully_shard(lin) + fully_shard(model) + handle = model.unshard(async_op=True) + handle.wait() + + if __name__ == "__main__": run_tests() diff --git a/torch/distributed/_composable/fsdp/fully_shard.py b/torch/distributed/_composable/fsdp/fully_shard.py index cc6038548f..2cf4e4aabb 100644 --- a/torch/distributed/_composable/fsdp/fully_shard.py +++ b/torch/distributed/_composable/fsdp/fully_shard.py @@ -178,10 +178,10 @@ class FSDP: pending unshard op in the pre-forward automatically. """ state = self._get_fsdp_state() - if (fsdp_param_group := state._fsdp_param_group) is None: - return None - fsdp_param_group.lazy_init() - fsdp_param_group.unshard(async_op=async_op) + fsdp_param_group = state._fsdp_param_group + if fsdp_param_group is not None: + fsdp_param_group.lazy_init() + fsdp_param_group.unshard(async_op=async_op) handle = UnshardHandle(fsdp_param_group) if async_op: return handle @@ -281,10 +281,12 @@ class UnshardHandle: A handle to wait on the unshard op. Args: - fsdp_param_group (FSDPParamGroup): FSDP parameter group to unshard. + fsdp_param_group (FSDPParamGroup, optional): FSDP parameter group to + unshard. This should be ``None`` iff the FSDP module does not + manage any parameters, meaning the unshard is a no-op. """ - def __init__(self, fsdp_param_group: FSDPParamGroup): + def __init__(self, fsdp_param_group: Optional[FSDPParamGroup]): self._fsdp_param_group = fsdp_param_group def wait(self): @@ -294,7 +296,7 @@ class UnshardHandle: This ensures that the current stream can use the unsharded parameters, which are now registered to the module. """ - if hasattr(self, "_fsdp_param_group"): + if self._fsdp_param_group is not None: self._fsdp_param_group.wait_for_unshard() # Avoid keeping a reference - delattr(self, "_fsdp_param_group") + self._fsdp_param_group = None
2.41.0
5049de24298a229fc133f8ed272e558ca81c73a
Wed, 17 Apr 2024 23:44:00 +0000
[PATCH 0277/1000] Revert "[Environment Variable][1/N] Use thread-safe env variable API in c10 (#119449)"
This reverts commit 5bef127c2ea49280e7fda4f9fa7cad6fa4078e7d. Reverted https://github.com/pytorch/pytorch/pull/119449 on behalf of https://github.com/PaliC due to your using TORCH_INTERNAL_ASSERT incorrectly ([comment](https://github.com/pytorch/pytorch/pull/119449#issuecomment-2062696010))
diff --git a/c10/core/impl/alloc_cpu.cpp b/c10/core/impl/alloc_cpu.cpp index def4c3a3a9..9b7ae22f9f 100644 --- a/c10/core/impl/alloc_cpu.cpp +++ b/c10/core/impl/alloc_cpu.cpp @@ -3,7 +3,6 @@ #include <c10/core/alignment.h> #include <c10/util/Flags.h> #include <c10/util/Logging.h> -#include <c10/util/env.h> #include <c10/util/irange.h> #include <c10/util/numa.h> @@ -54,8 +53,8 @@ void memset_junk(void* data, size_t num) { #if defined(__linux__) && !defined(__ANDROID__) static inline bool is_thp_alloc_enabled() { static bool value = [&] { - auto env = c10::utils::check_env("THP_MEM_ALLOC_ENABLE"); - return env.has_value() ? env.value() : 0; + const char* ptr = std::getenv("THP_MEM_ALLOC_ENABLE"); + return ptr != nullptr ? std::atoi(ptr) : 0; }(); return value; } diff --git a/c10/cuda/CUDAAllocatorConfig.cpp b/c10/cuda/CUDAAllocatorConfig.cpp index ca38dfd6a4..1f81ed47b6 100644 --- a/c10/cuda/CUDAAllocatorConfig.cpp +++ b/c10/cuda/CUDAAllocatorConfig.cpp @@ -234,7 +234,7 @@ size_t CUDAAllocatorConfig::parseAllocatorConfig( return i; } -void CUDAAllocatorConfig::parseArgs(const std::optional<std::string>& env) { +void CUDAAllocatorConfig::parseArgs(const char* env) { // If empty, set the default values m_max_split_size = std::numeric_limits<size_t>::max(); m_roundup_power2_divisions.assign(kRoundUpPowerOfTwoIntervals, 0); @@ -242,16 +242,16 @@ void CUDAAllocatorConfig::parseArgs(const std::optional<std::string>& env) { bool used_cudaMallocAsync = false; bool used_native_specific_option = false; - if (!env.has_value()) { + if (env == nullptr) { return; } { std::lock_guard<std::mutex> lock(m_last_allocator_settings_mutex); - m_last_allocator_settings = env.value(); + m_last_allocator_settings = env; } std::vector<std::string> config; - lexArgs(env.value().c_str(), config); + lexArgs(env, config); for (size_t i = 0; i < config.size(); i++) { std::string_view config_item_view(config[i]); diff --git a/c10/cuda/CUDAAllocatorConfig.h b/c10/cuda/CUDAAllocatorConfig.h index db5c9e1c8f..3106fc1b46 100644 --- a/c10/cuda/CUDAAllocatorConfig.h +++ b/c10/cuda/CUDAAllocatorConfig.h @@ -2,7 +2,6 @@ #include <c10/cuda/CUDAMacros.h> #include <c10/util/Exception.h> -#include <c10/util/env.h> #include <atomic> #include <cstddef> @@ -73,13 +72,14 @@ class C10_CUDA_API CUDAAllocatorConfig { static CUDAAllocatorConfig& instance() { static CUDAAllocatorConfig* s_instance = ([]() { auto inst = new CUDAAllocatorConfig(); - inst->parseArgs(c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF")); + const char* env = getenv("PYTORCH_CUDA_ALLOC_CONF"); + inst->parseArgs(env); return inst; })(); return *s_instance; } - void parseArgs(const std::optional<std::string>& env); + void parseArgs(const char* env); private: CUDAAllocatorConfig(); diff --git a/c10/cuda/CUDACachingAllocator.cpp b/c10/cuda/CUDACachingAllocator.cpp index afac5272b6..c472e82ce2 100644 --- a/c10/cuda/CUDACachingAllocator.cpp +++ b/c10/cuda/CUDACachingAllocator.cpp @@ -8,7 +8,6 @@ #include <c10/util/CallOnce.h> #include <c10/util/ScopeExit.h> #include <c10/util/UniqueVoidPtr.h> -#include <c10/util/env.h> #include <c10/util/flat_hash_map.h> #include <c10/util/hash.h> #include <c10/util/irange.h> @@ -2832,7 +2831,7 @@ class DeviceCachingAllocator { // errors, since the caching allocator foils cuda-memcheck. 
bool forceUncachedAllocator() { static bool force_uncached = - c10::utils::has_env("PYTORCH_NO_CUDA_MEMORY_CACHING"); + getenv("PYTORCH_NO_CUDA_MEMORY_CACHING") != nullptr; return force_uncached; } @@ -3364,9 +3363,9 @@ struct BackendStaticInitializer { // version checks, to CUDAAllocatorConfig's runtime doublecheck. If this // works, maybe we should move all of CUDAAllocatorConfig here? CUDAAllocator* parseEnvForBackend() { - const auto val = c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF"); - if (val.has_value()) { - const std::string& config = val.value(); + const char* val = getenv("PYTORCH_CUDA_ALLOC_CONF"); + if (val != nullptr) { + const std::string config(val); std::regex exp("[\\s,]+"); std::sregex_token_iterator it(config.begin(), config.end(), exp, -1); diff --git a/c10/cuda/CUDADeviceAssertionHost.cpp b/c10/cuda/CUDADeviceAssertionHost.cpp index ec41e6230f..1d52af7812 100644 --- a/c10/cuda/CUDADeviceAssertionHost.cpp +++ b/c10/cuda/CUDADeviceAssertionHost.cpp @@ -3,7 +3,6 @@ #include <c10/cuda/CUDAFunctions.h> #include <c10/util/Backtrace.h> #include <c10/util/Exception.h> -#include <c10/util/env.h> #include <c10/util/irange.h> #include <cuda_runtime.h> @@ -81,8 +80,8 @@ bool dsa_check_if_all_devices_support_managed_memory() { } bool env_flag_set(const char* env_var_name) { - const auto env_flag = c10::utils::check_env(env_var_name); - return env_flag.has_value() && env_flag.value(); + const char* const env_string = std::getenv(env_var_name); + return (env_string == nullptr) ? false : std::strcmp(env_string, "0"); } /// Deleter for UVM/managed memory pointers diff --git a/c10/cuda/CUDAMiscFunctions.cpp b/c10/cuda/CUDAMiscFunctions.cpp index 9ef724813e..11ea775366 100644 --- a/c10/cuda/CUDAMiscFunctions.cpp +++ b/c10/cuda/CUDAMiscFunctions.cpp @@ -1,14 +1,12 @@ #include <c10/cuda/CUDAMiscFunctions.h> -#include <c10/util/env.h> +#include <cstdlib> namespace c10::cuda { -// NOLINTNEXTLINE(bugprone-exception-escape,-warnings-as-errors) const char* get_cuda_check_suffix() noexcept { - static auto device_blocking_flag = - c10::utils::check_env("CUDA_LAUNCH_BLOCKING"); + static char* device_blocking_flag = getenv("CUDA_LAUNCH_BLOCKING"); static bool blocking_enabled = - (device_blocking_flag.has_value() && device_blocking_flag.value()); + (device_blocking_flag && atoi(device_blocking_flag)); if (blocking_enabled) { return ""; } else { diff --git a/c10/test/util/DeadlockDetection_test.cpp b/c10/test/util/DeadlockDetection_test.cpp index 05ae154e22..35c4953f6d 100644 --- a/c10/test/util/DeadlockDetection_test.cpp +++ b/c10/test/util/DeadlockDetection_test.cpp @@ -1,8 +1,9 @@ #include <c10/util/DeadlockDetection.h> -#include <c10/util/env.h> #include <gtest/gtest.h> +#include <cstdlib> + using namespace ::testing; using namespace c10::impl; @@ -22,7 +23,7 @@ TEST(DeadlockDetection, basic) { #ifndef _WIN32 TEST(DeadlockDetection, disable) { - c10::utils::set_env("TORCH_DISABLE_DEADLOCK_DETECTION", "1"); + setenv("TORCH_DISABLE_DEADLOCK_DETECTION", "1", 1); DummyPythonGILHooks hooks; SetPythonGILHooks(&hooks); SetPythonGILHooks(&hooks); diff --git a/c10/util/DeadlockDetection.cpp b/c10/util/DeadlockDetection.cpp index 4b00d24534..320fa7873c 100644 --- a/c10/util/DeadlockDetection.cpp +++ b/c10/util/DeadlockDetection.cpp @@ -1,5 +1,6 @@ #include <c10/util/DeadlockDetection.h> -#include <c10/util/env.h> + +#include <cstdlib> namespace c10::impl { @@ -7,7 +8,7 @@ namespace { PythonGILHooks* python_gil_hooks = nullptr; bool disable_detection() { - return 
c10::utils::has_env("TORCH_DISABLE_DEADLOCK_DETECTION"); + return std::getenv("TORCH_DISABLE_DEADLOCK_DETECTION") != nullptr; } } // namespace diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp index 17459f69fa..e9c9e9c2f3 100644 --- a/c10/util/Logging.cpp +++ b/c10/util/Logging.cpp @@ -1,7 +1,6 @@ #include <c10/util/Backtrace.h> #include <c10/util/Flags.h> #include <c10/util/Logging.h> -#include <c10/util/env.h> #ifdef FBCODE_CAFFE2 #include <folly/synchronization/SanitizeThread.h> #endif @@ -11,6 +10,7 @@ #endif #include <algorithm> +#include <cstdlib> #include <iostream> // Common code that we use regardless of whether we use glog or not. @@ -94,8 +94,8 @@ using DDPUsageLoggerType = std::function<void(const DDPLoggingData&)>; namespace { bool IsAPIUsageDebugMode() { - auto val = c10::utils::get_env("PYTORCH_API_USAGE_STDERR"); - return val.has_value() && !val.value().empty(); // any non-empty value + const char* val = getenv("PYTORCH_API_USAGE_STDERR"); + return val && *val; // any non-empty value } void APIUsageDebug(const string& event) { @@ -438,10 +438,10 @@ namespace c10::detail { namespace { void setLogLevelFlagFromEnv() { - auto level_env = c10::utils::get_env("TORCH_CPP_LOG_LEVEL"); + const char* level_str = std::getenv("TORCH_CPP_LOG_LEVEL"); // Not set, fallback to the default level (i.e. WARNING). - std::string level{level_env.has_value() ? level_env.value() : ""}; + std::string level{level_str != nullptr ? level_str : ""}; if (level.empty()) { return; } diff --git a/c10/util/env.cpp b/c10/util/env.cpp deleted file mode 100644 index 9d9193a552..0000000000 --- a/c10/util/env.cpp +++ /dev/null @@ -1,104 +0,0 @@ -#include <c10/util/Exception.h> -#include <c10/util/env.h> -#include <fmt/format.h> -#include <cstdlib> -#include <shared_mutex> - -namespace c10::utils { - -static std::shared_mutex env_mutex; - -// Set an environment variable. -void set_env(const char* name, const char* value, bool overwrite) { - std::lock_guard lk(env_mutex); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif -#ifdef _MSC_VER - if (!overwrite) { - // NOLINTNEXTLINE(concurrency-mt-unsafe) - if (std::getenv(name) != nullptr) { - return; - } - } - auto full_env_variable = fmt::format("{}={}", name, value); - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto err = putenv(full_env_variable.c_str()); - if (err != 0) { - TORCH_INTERNAL_ASSERT( - "putenv failed for environment \"", name, "\", the error is: ", err); - } -#else - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto err = setenv(name, value, static_cast<int>(overwrite)); - if (err != 0) { - TORCH_INTERNAL_ASSERT( - "setenv failed for environment \"", name, "\", the error is: ", err); - } -#endif -#ifdef _MSC_VER -#pragma warning(pop) -#endif - return; -} - -// Checks an environment variable is set. 
-bool has_env(const char* name) noexcept { - std::shared_lock lk(env_mutex); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto envar = std::getenv(name); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - return envar != nullptr; -} - -// Reads an environment variable and returns the content if it is set -std::optional<std::string> get_env(const char* name) noexcept { - std::shared_lock lk(env_mutex); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto envar = std::getenv(name); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - if (envar != nullptr) { - return std::string(envar); - } - return std::nullopt; -} - -// Reads an environment variable and returns -// - optional<true>, if set equal to "1" -// - optional<false>, if set equal to "0" -// - nullopt, otherwise -// -// NB: -// Issues a warning if the value of the environment variable is not 0 or 1. -std::optional<bool> check_env(const char* name) { - auto env_opt = get_env(name); - if (env_opt.has_value()) { - if (*env_opt == "0") { - return false; - } - if (*env_opt == "1") { - return true; - } - TORCH_WARN( - "Ignoring invalid value for boolean flag ", - name, - ": ", - *env_opt, - "valid values are 0 or 1."); - } - return std::nullopt; -} -} // namespace c10::utils diff --git a/c10/util/env.h b/c10/util/env.h index 04b7585861..3db116c7db 100644 --- a/c10/util/env.h +++ b/c10/util/env.h @@ -1,20 +1,11 @@ #pragma once -#include <c10/macros/Export.h> +#include <c10/util/Exception.h> +#include <cstdlib> +#include <cstring> #include <optional> -#include <string> namespace c10::utils { - -// Set an environment variable. -C10_API void set_env( - const char* name, - const char* value, - bool overwrite = true); - -// Checks an environment variable is set. -C10_API bool has_env(const char* name) noexcept; - // Reads an environment variable and returns // - optional<true>, if set equal to "1" // - optional<false>, if set equal to "0" @@ -22,10 +13,29 @@ C10_API bool has_env(const char* name) noexcept; // // NB: // Issues a warning if the value of the environment variable is not 0 or 1. -C10_API std::optional<bool> check_env(const char* name); - -// Reads the value of an environment variable if it is set. -// However, check_env should be used if the value is assumed to be a flag. -C10_API std::optional<std::string> get_env(const char* name) noexcept; - +inline std::optional<bool> check_env(const char* name) { +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + auto envar = std::getenv(name); +#ifdef _MSC_VER +#pragma warning(pop) +#endif + if (envar) { + if (strcmp(envar, "0") == 0) { + return false; + } + if (strcmp(envar, "1") == 0) { + return true; + } + TORCH_WARN( + "Ignoring invalid value for boolean flag ", + name, + ": ", + envar, + "valid values are 0 or 1."); + } + return std::nullopt; +} } // namespace c10::utils diff --git a/c10/util/tempfile.cpp b/c10/util/tempfile.cpp index f106885a88..28c3c7f14f 100644 --- a/c10/util/tempfile.cpp +++ b/c10/util/tempfile.cpp @@ -1,5 +1,4 @@ #include <c10/util/Exception.h> -#include <c10/util/env.h> #include <c10/util/tempfile.h> #include <fmt/format.h> @@ -23,11 +22,10 @@ static std::string make_filename(std::string_view name_prefix) { // We see if any of these environment variables is set and use their value, or // else default the temporary directory to `/tmp`. 
- std::string tmp_directory = "/tmp"; + const char* tmp_directory = "/tmp"; for (const char* variable : {"TMPDIR", "TMP", "TEMP", "TEMPDIR"}) { - auto path_opt = c10::utils::get_env(variable); - if (path_opt.has_value()) { - tmp_directory = path_opt.value(); + if (const char* path = getenv(variable)) { + tmp_directory = path; break; } }
2.41.0
1e1d671ef893070c7a527fc729fa8757385d062
Wed, 17 Apr 2024 05:48:37 -0700
[PATCH 0278/1000] Stop requiring a pystub for register_fake by default (#124064)
Previously, if someone used `register_fake` to add a fake impl for an operator defined in C++, we would require them to add a `m.set_python_module(<module>)` call to C++. This was to avoid situations where a user imported the C++ operator without importing the fake impl. This "breaks" open registration: there's no way to add a fake impl outside of a repository that defines an operator, so we want to turn this behavior off by default in open source. Test Plan: - existing tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/124064 Approved by: https://github.com/albanD ghstack dependencies: #123937
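A self-contained sketch of the registration flow this affects. For brevity the operator is defined from Python here; for a real C++ extension the define/impl would live in the extension library, and with this change the Python-side `register_fake` no longer requires a matching `m.set_python_module` call by default. The `mylib::my_tan` name is made up for the example.

```python
import torch

torch.library.define("mylib::my_tan", "(Tensor x) -> Tensor")

@torch.library.impl("mylib::my_tan", "cpu")
def _(x):
    return torch.tan(x)

@torch.library.register_fake("mylib::my_tan")
def _(x):
    # fake impl: describe output metadata only, no real computation
    return torch.empty_like(x)

# The fake impl drives meta/fake tensor propagation, e.g. shape inference:
print(torch.ops.mylib.my_tan(torch.randn(3, device="meta")).shape)  # torch.Size([3])
```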
diff --git a/test/custom_operator/test_custom_ops.py b/test/custom_operator/test_custom_ops.py index 5badf01d07..1b490c49e9 100644 --- a/test/custom_operator/test_custom_ops.py +++ b/test/custom_operator/test_custom_ops.py @@ -6,6 +6,7 @@ import tempfile import unittest import torch +import torch._library.utils as utils from model import get_custom_op_library_path, Model from torch import ops @@ -24,7 +25,11 @@ class TestCustomOperators(TestCase): def test_op_with_no_abstract_impl_pystub(self): x = torch.randn(3, device="meta") - with self.assertRaisesRegex(RuntimeError, "pointwise"): + if utils.requires_set_python_module(): + with self.assertRaisesRegex(RuntimeError, "pointwise"): + torch.ops.custom.tan(x) + else: + # Smoketest torch.ops.custom.tan(x) def test_op_with_incorrect_abstract_impl_pystub(self): diff --git a/torch/_library/utils.py b/torch/_library/utils.py index cdf8b6d6ec..e2d0110409 100644 --- a/torch/_library/utils.py +++ b/torch/_library/utils.py @@ -4,6 +4,7 @@ import sys from typing import Any, Callable, Dict, Iterable, Tuple import torch +import torch._utils_internal as _utils_internal from torch import _C @@ -189,3 +190,12 @@ def can_generate_trivial_fake_impl(op: torch._ops.OpOverload) -> bool: return False # If the op returns nothing, then it has a trivial fake impl. return True + + +def requires_set_python_module() -> bool: + """If an op was defined in C++ and extended from Python using the + torch.library APIs, returns if we require that there have been a + m.set_python_module("mylib.ops") call from C++ that associates + the C++ op with a python module. + """ + return getattr(_utils_internal, "REQUIRES_SET_PYTHON_MODULE", True) diff --git a/torch/_utils_internal.py b/torch/_utils_internal.py index d12a499193..670f14457e 100644 --- a/torch/_utils_internal.py +++ b/torch/_utils_internal.py @@ -164,3 +164,8 @@ USE_GLOBAL_DEPS = True # USE_RTLD_GLOBAL_WITH_LIBTORCH controls whether __init__.py tries to load # _C.so with RTLD_GLOBAL during the call to dlopen. USE_RTLD_GLOBAL_WITH_LIBTORCH = False +# If an op was defined in C++ and extended from Python using the +# torch.library.register_fake, returns if we require that there be a +# m.set_python_module("mylib.ops") call from C++ that associates +# the C++ op with a python module. +REQUIRES_SET_PYTHON_MODULE = False diff --git a/torch/library.py b/torch/library.py index bf53fa874b..f289110a7a 100644 --- a/torch/library.py +++ b/torch/library.py @@ -539,24 +539,26 @@ def _check_pystubs_once(func, qualname, actual_module_name): maybe_pystub = torch._C._dispatch_pystub( op._schema.name, op._schema.overload_name) - if not maybe_pystub: - namespace = op.namespace - cpp_filename = op._handle().debug() - raise RuntimeError( - f"Operator '{qualname}' was defined in C++ and has a Python " - f"fake impl. In this situation, we require there to also be a " - f"companion C++ `m.set_python_module(\"{actual_module_name}\")` " - f"call, but we could not find one. Please add that to " - f"to the top of the C++ TORCH_LIBRARY({namespace}, ...) block the " - f"operator was registered in ({cpp_filename})") - pystub_module = maybe_pystub[0] - if actual_module_name != pystub_module: - cpp_filename = op._handle().debug() - raise RuntimeError( - f"Operator '{qualname}' specified that its python fake impl " - f"is in the Python module '{pystub_module}' but it was actually found " - f"in '{actual_module_name}'. 
Please either move the fake impl " - f"or correct the m.set_python_module call ({cpp_filename})") + if maybe_pystub is None: + if torch._library.utils.requires_set_python_module(): + namespace = op.namespace + cpp_filename = op._handle().debug() + raise RuntimeError( + f"Operator '{qualname}' was defined in C++ and has a Python " + f"fake impl. In this situation, we require there to also be a " + f"companion C++ `m.set_python_module(\"{actual_module_name}\")` " + f"call, but we could not find one. Please add that to " + f"to the top of the C++ TORCH_LIBRARY({namespace}, ...) block the " + f"operator was registered in ({cpp_filename})") + else: + pystub_module = maybe_pystub[0] + if actual_module_name != pystub_module: + cpp_filename = op._handle().debug() + raise RuntimeError( + f"Operator '{qualname}' specified that its python fake impl " + f"is in the Python module '{pystub_module}' but it was actually found " + f"in '{actual_module_name}'. Please either move the fake impl " + f"or correct the m.set_python_module call ({cpp_filename})") checked = True return func(*args, **kwargs) return inner
2.41.0
25387f4dd00de337f53d897bd0d30983f1a8f49
Thu, 18 Apr 2024 00:13:32 +0000
[PATCH 0281/1000] [ez][CI] Reduce CI_SERIAL_LIST pt2 (#124298)
#124085. Add @serialTest() to some tests; slow gradcheck already runs serially. Doing this slowly so it's easier to check flaky issues that might get created. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124298 Approved by: https://github.com/kit1980
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py index a4269d84d3..8e4e31718d 100644 --- a/test/profiler/test_profiler.py +++ b/test/profiler/test_profiler.py @@ -60,6 +60,7 @@ from torch.testing._internal.common_utils import ( IS_WINDOWS, parametrize, run_tests, + serialTest, skipIfTorchDynamo, TemporaryDirectoryName, TemporaryFileName, @@ -771,6 +772,7 @@ class TestProfiler(TestCase): }.items(), name_fn=lambda name, thread_spec: name, ) + @serialTest() @parametrize("work_in_main_thread", [True, False]) def test_source_multithreaded(self, name, thread_spec, work_in_main_thread): """Test various threading configurations. diff --git a/test/run_test.py b/test/run_test.py index cafa60bd1c..c029a96566 100755 --- a/test/run_test.py +++ b/test/run_test.py @@ -216,7 +216,6 @@ CI_SERIAL_LIST = [ "test_reductions", "test_cuda", "test_cuda_expandable_segments", - "test_indexing", "test_fx_backends", "test_linalg", "test_cpp_extensions_jit", @@ -229,13 +228,10 @@ CI_SERIAL_LIST = [ "nn/test_pooling", "nn/test_convolution", # Doesn't respect set_per_process_memory_fraction, results in OOM for other tests in slow gradcheck "distributions/test_distributions", - "test_autograd", # slow gradcheck runs a test that checks the cuda memory allocator - "test_prims", # slow gradcheck runs a test that checks the cuda memory allocator "test_modules", # failed test due to mismatched elements "functorch/test_vmap", # OOM "test_fx", # gets SIGKILL "test_dataloader", # frequently hangs for ROCm - "test_serialization", # test_serialization_2gb_file allocates a tensor of 2GB, and could cause OOM "test_schema_check", # Cause CUDA illegal memory access https://github.com/pytorch/pytorch/issues/95749 "functorch/test_memory_efficient_fusion", # Cause CUDA OOM on ROCm "test_utils", # OOM @@ -246,7 +242,6 @@ CI_SERIAL_LIST = [ "test_module_hooks", # OOM "inductor/test_max_autotune", "inductor/test_cutlass_backend", # slow due to many nvcc compilation steps - "test_profiler", # test_source_multithreaded is probably not compatible with parallelism ] # A subset of onnx tests that cannot run in parallel due to high memory usage. ONNX_SERIAL_LIST = [ diff --git a/test/test_indexing.py b/test/test_indexing.py index a6fbc6e8bb..8d0eeb2025 100644 --- a/test/test_indexing.py +++ b/test/test_indexing.py @@ -12,7 +12,7 @@ import numpy as np from torch.testing import make_tensor from torch.testing._internal.common_utils import ( - TestCase, run_tests, skipIfTorchDynamo, DeterministicGuard) + TestCase, run_tests, skipIfTorchDynamo, DeterministicGuard, serialTest, TEST_CUDA) from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, dtypes, dtypesIfCPU, dtypesIfCUDA, onlyNativeDeviceTypes, skipXLA) @@ -740,6 +740,7 @@ class TestIndexing(TestCase): self.assertEqual(len(w), 2) @skipIfTorchDynamo("This test causes SIGKILL when running with dynamo, https://github.com/pytorch/pytorch/issues/88472") + @serialTest(TEST_CUDA) def test_index_put_accumulate_large_tensor(self, device): # This test is for tensors with number of elements >= INT_MAX (2^31 - 1). 
N = (1 << 31) + 5 diff --git a/test/test_serialization.py b/test/test_serialization.py index 47b81d29cb..059ed70cc8 100644 --- a/test/test_serialization.py +++ b/test/test_serialization.py @@ -27,7 +27,7 @@ from torch.serialization import check_module_version_greater_or_equal, get_defau from torch.testing._internal.common_utils import ( IS_FILESYSTEM_UTF8_ENCODING, TemporaryDirectoryName, TestCase, IS_WINDOWS, TEST_DILL, run_tests, download_file, BytesIOContext, TemporaryFileName, - parametrize, instantiate_parametrized_tests, AlwaysWarnTypedStorageRemoval) + parametrize, instantiate_parametrized_tests, AlwaysWarnTypedStorageRemoval, serialTest) from torch.testing._internal.common_device_type import instantiate_device_type_tests from torch.testing._internal.common_dtype import all_types_and_complex_and @@ -937,6 +937,7 @@ class TestSerialization(TestCase, SerializationMixin): torch.load(f) # Ensure large zip64 serialization works properly + @serialTest() def test_serialization_2gb_file(self): # Run GC to clear up as much memory as possible before running this test gc.collect()
2.41.0
7a3dc56d4cd33c670c629aa94174e1f31225eb7
Wed, 17 Apr 2024 13:54:21 -0700
[PATCH 0283/1000] Small Adamax fix (#123498)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123498 Approved by: https://github.com/janeyx99
diff --git a/torch/optim/adamax.py b/torch/optim/adamax.py index 2269c8484d..443fbd2458 100644 --- a/torch/optim/adamax.py +++ b/torch/optim/adamax.py @@ -240,7 +240,9 @@ def adamax( See :class:`~torch.optim.Adamax` for details. """ - if not all(isinstance(t, torch.Tensor) for t in state_steps): + if not torch._utils.is_compiling() and not all( + isinstance(t, torch.Tensor) for t in state_steps + ): raise RuntimeError( "API has changed, `state_steps` argument must contain a list of singleton tensors" )
2.41.0
c66c43d51e5cba234e1f84a1155c12ac4c86a3f
Tue, 16 Apr 2024 15:08:44 +0000
[PATCH 0286/1000] Make macro with AMP more generic (#124050)
# Motivation According to [[RFC] Intel GPU Upstreaming](https://github.com/pytorch/pytorch/issues/114723), we would like to upstream the amp autocast policy to facilitate the functionality and accuracy of `torch.compile` on e2e benchmarks. # Solution The first PR aims to make the `KERNEL` macro generic. It accepts two kinds of inputs: `(DISPATCH, OP, POLICY)` and `(DISPATCH, OP, OVERLOAD, POLICY)`. The second PR intends to refactor CUDA's autocast policy so that it can be shared with the `XPU` backend. The final PR would like to support an XPU autocast policy that shares the same recipe as the `CUDA` backend. # Additional Context Another motivation is that we would like to unify the autocast API and provide generic APIs, like: - `torch.get_autocast_dtype(device_type)` - `torch.set_autocast_dtype(device_type)` - `torch.is_autocast_enabled(device_type)` - `torch.set_autocast_enabled(device_type)` Pull Request resolved: https://github.com/pytorch/pytorch/pull/124050 Approved by: https://github.com/jgong5, https://github.com/EikanWang, https://github.com/gujinghui, https://github.com/albanD
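To make the arity-based dispatch easier to follow, here is a small Python analogue of what the reworked `KERNEL` macro does. The real mechanism is the `_KERNEL_OVERLOAD_NARG` argument-counting preprocessor trick in the diff below; everything in this sketch is illustrative only.

```python
# Illustration only (not PyTorch code): a Python analogue of the reworked
# KERNEL macro, which expands to KERNEL1 or KERNEL2 depending on how many
# arguments follow the dispatch key.

def kernel1(dispatch_key, op, policy):
    return f"register aten::{op} for {dispatch_key} with policy {policy}"

def kernel2(dispatch_key, op, overload, policy):
    return f"register aten::{op}.{overload} for {dispatch_key} with policy {policy}"

def kernel(dispatch_key, *args):
    # Arity-based dispatch, mirroring (OP, POLICY) vs. (OP, OVERLOAD, POLICY).
    if len(args) == 2:
        return kernel1(dispatch_key, *args)
    if len(args) == 3:
        return kernel2(dispatch_key, *args)
    raise TypeError(f"expected 2 or 3 arguments after the dispatch key, got {len(args)}")

print(kernel("CUDA", "conv2d", "lower_precision_fp"))
print(kernel("CUDA", "conv_transpose2d", "input", "lower_precision_fp"))
```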
diff --git a/aten/src/ATen/autocast_mode.cpp b/aten/src/ATen/autocast_mode.cpp index 7282bba9e6..d7c5a9b768 100644 --- a/aten/src/ATen/autocast_mode.cpp +++ b/aten/src/ATen/autocast_mode.cpp @@ -247,15 +247,15 @@ TORCH_LIBRARY_IMPL(_, Autocast, m) { TORCH_LIBRARY_IMPL(aten, Autocast, m) { // lower_precision_fp - KERNEL_CUDA2(_convolution, deprecated, lower_precision_fp) + KERNEL_CUDA(_convolution, deprecated, lower_precision_fp) KERNEL_CUDA(_convolution, lower_precision_fp) KERNEL_CUDA(conv1d, lower_precision_fp) KERNEL_CUDA(conv2d, lower_precision_fp) KERNEL_CUDA(conv3d, lower_precision_fp) KERNEL_CUDA(conv_tbc, lower_precision_fp) KERNEL_CUDA(conv_transpose1d, lower_precision_fp) - KERNEL_CUDA2(conv_transpose2d, input, lower_precision_fp) - KERNEL_CUDA2(conv_transpose3d, input, lower_precision_fp) + KERNEL_CUDA(conv_transpose2d, input, lower_precision_fp) + KERNEL_CUDA(conv_transpose3d, input, lower_precision_fp) KERNEL_CUDA(convolution, lower_precision_fp) KERNEL_CUDA(cudnn_convolution, lower_precision_fp) KERNEL_CUDA(cudnn_convolution_transpose, lower_precision_fp) @@ -298,16 +298,16 @@ TORCH_LIBRARY_IMPL(aten, Autocast, m) { KERNEL_CUDA(rsqrt, fp32) KERNEL_CUDA(sinh, fp32) KERNEL_CUDA(tan, fp32) - KERNEL_CUDA2(pow, Tensor_Scalar, fp32) - KERNEL_CUDA2(pow, Tensor_Tensor, fp32) - KERNEL_CUDA2(pow, Scalar, fp32) + KERNEL_CUDA(pow, Tensor_Scalar, fp32) + KERNEL_CUDA(pow, Tensor_Tensor, fp32) + KERNEL_CUDA(pow, Scalar, fp32) KERNEL_CUDA(softplus, fp32) KERNEL_CUDA(layer_norm, fp32) KERNEL_CUDA(native_layer_norm, fp32) KERNEL_CUDA(group_norm, fp32) - KERNEL_CUDA2(frobenius_norm, dim, fp32) + KERNEL_CUDA(frobenius_norm, dim, fp32) KERNEL_CUDA(nuclear_norm, fp32) - KERNEL_CUDA2(nuclear_norm, dim, fp32) + KERNEL_CUDA(nuclear_norm, dim, fp32) KERNEL_CUDA(cosine_similarity, fp32) KERNEL_CUDA(poisson_nll_loss, fp32) KERNEL_CUDA(cosine_embedding_loss, fp32) @@ -332,27 +332,27 @@ TORCH_LIBRARY_IMPL(aten, Autocast, m) { KERNEL_CUDA(logsumexp, fp32) // fp32_set_opt_dtype KERNEL_CUDA(prod, fp32_set_opt_dtype) - KERNEL_CUDA2(prod, dim_int, fp32_set_opt_dtype) - KERNEL_CUDA2(prod, dim_Dimname, fp32_set_opt_dtype) - KERNEL_CUDA2(softmax, int, fp32_set_opt_dtype) - KERNEL_CUDA2(softmax, Dimname, fp32_set_opt_dtype) - KERNEL_CUDA2(log_softmax, int, fp32_set_opt_dtype) - KERNEL_CUDA2(log_softmax, Dimname, fp32_set_opt_dtype) + KERNEL_CUDA(prod, dim_int, fp32_set_opt_dtype) + KERNEL_CUDA(prod, dim_Dimname, fp32_set_opt_dtype) + KERNEL_CUDA(softmax, int, fp32_set_opt_dtype) + KERNEL_CUDA(softmax, Dimname, fp32_set_opt_dtype) + KERNEL_CUDA(log_softmax, int, fp32_set_opt_dtype) + KERNEL_CUDA(log_softmax, Dimname, fp32_set_opt_dtype) KERNEL_CUDA(cumprod, fp32_set_opt_dtype) - KERNEL_CUDA2(cumprod, dimname, fp32_set_opt_dtype) + KERNEL_CUDA(cumprod, dimname, fp32_set_opt_dtype) KERNEL_CUDA(cumsum, fp32_set_opt_dtype) - KERNEL_CUDA2(cumsum, dimname, fp32_set_opt_dtype) + KERNEL_CUDA(cumsum, dimname, fp32_set_opt_dtype) KERNEL_CUDA(linalg_vector_norm, fp32_set_opt_dtype) KERNEL_CUDA(linalg_matrix_norm, fp32_set_opt_dtype) - KERNEL_CUDA2(linalg_matrix_norm, str_ord, fp32_set_opt_dtype) + KERNEL_CUDA(linalg_matrix_norm, str_ord, fp32_set_opt_dtype) // commenting these out because they accept an explicit (not-optional) dtype, and we shouldn't try to flip that even // when autocasting. 
- // KERNEL_CUDA2(norm, ScalarOpt_dtype, fp32_set_opt_dtype) - // KERNEL_CUDA2(norm, ScalarOpt_dim_dtype, fp32_set_opt_dtype) - // KERNEL_CUDA2(norm, names_ScalarOpt_dim_dtype, fp32_set_opt_dtype) + // KERNEL_CUDA(norm, ScalarOpt_dtype, fp32_set_opt_dtype) + // KERNEL_CUDA(norm, ScalarOpt_dim_dtype, fp32_set_opt_dtype) + // KERNEL_CUDA(norm, names_ScalarOpt_dim_dtype, fp32_set_opt_dtype) KERNEL_CUDA(sum, fp32_set_opt_dtype) - KERNEL_CUDA2(sum, dim_IntList, fp32_set_opt_dtype) - KERNEL_CUDA2(sum, dim_DimnameList, fp32_set_opt_dtype) + KERNEL_CUDA(sum, dim_IntList, fp32_set_opt_dtype) + KERNEL_CUDA(sum, dim_DimnameList, fp32_set_opt_dtype) // fp32_append_dtype // The fp32_append_dtype wrapper overrides implicit promotion behavior. // norm does not implicitly promote, but be aware when adding new ops to this policy. @@ -383,11 +383,11 @@ TORCH_LIBRARY_IMPL(_, AutocastCPU, m) { TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) { // lower_precision_fp cast policy KERNEL_CPU(conv1d, lower_precision_fp) - KERNEL_CPU2(conv1d, padding, lower_precision_fp) + KERNEL_CPU(conv1d, padding, lower_precision_fp) KERNEL_CPU(conv2d, lower_precision_fp) - KERNEL_CPU2(conv2d, padding, lower_precision_fp) + KERNEL_CPU(conv2d, padding, lower_precision_fp) KERNEL_CPU(conv3d, lower_precision_fp) - KERNEL_CPU2(conv3d, padding, lower_precision_fp) + KERNEL_CPU(conv3d, padding, lower_precision_fp) KERNEL_CPU(bmm, lower_precision_fp) KERNEL_CPU(mm, lower_precision_fp) KERNEL_CPU(linalg_vecdot, lower_precision_fp) @@ -395,13 +395,13 @@ TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) { KERNEL_CPU(addmm, lower_precision_fp) KERNEL_CPU(addbmm, lower_precision_fp) KERNEL_CPU(linear, lower_precision_fp) - KERNEL_CPU2(_convolution, deprecated, lower_precision_fp) + KERNEL_CPU(_convolution, deprecated, lower_precision_fp) KERNEL_CPU(matmul, lower_precision_fp) KERNEL_CPU(conv_tbc, lower_precision_fp) KERNEL_CPU(mkldnn_rnn_layer, lower_precision_fp) KERNEL_CPU(conv_transpose1d, lower_precision_fp) - KERNEL_CPU2(conv_transpose2d, input, lower_precision_fp) - KERNEL_CPU2(conv_transpose3d, input, lower_precision_fp) + KERNEL_CPU(conv_transpose2d, input, lower_precision_fp) + KERNEL_CPU(conv_transpose3d, input, lower_precision_fp) KERNEL_CPU(prelu, lower_precision_fp) KERNEL_CPU(scaled_dot_product_attention, lower_precision_fp) KERNEL_CPU(_native_multi_head_attention, lower_precision_fp) @@ -412,14 +412,14 @@ TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) { KERNEL_CPU(grid_sampler, fp32) KERNEL_CPU(polar, fp32) KERNEL_CPU(prod, fp32) - KERNEL_CPU2(prod, dim_int, fp32) - KERNEL_CPU2(prod, dim_Dimname, fp32) + KERNEL_CPU(prod, dim_int, fp32) + KERNEL_CPU(prod, dim_Dimname, fp32) KERNEL_CPU(quantile, fp32) - KERNEL_CPU2(quantile, scalar, fp32) + KERNEL_CPU(quantile, scalar, fp32) KERNEL_CPU(nanquantile, fp32) - KERNEL_CPU2(nanquantile, scalar, fp32) + KERNEL_CPU(nanquantile, scalar, fp32) KERNEL_CPU(stft, fp32) - KERNEL_CPU2(stft, center, fp32) + KERNEL_CPU(stft, center, fp32) KERNEL_CPU(cdist, fp32) KERNEL_CPU(grid_sampler_2d, fp32) KERNEL_CPU(_grid_sampler_2d_cpu_fallback, fp32) @@ -457,8 +457,8 @@ TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) { KERNEL_CPU(soft_margin_loss, fp32) KERNEL_CPU(triplet_margin_loss, fp32) KERNEL_CPU(multi_margin_loss, fp32) - KERNEL_CPU2(ctc_loss, IntList, fp32) - KERNEL_CPU2(ctc_loss, Tensor, fp32) + KERNEL_CPU(ctc_loss, IntList, fp32) + KERNEL_CPU(ctc_loss, Tensor, fp32) KERNEL_CPU(kl_div, fp32) KERNEL_CPU(multilabel_margin_loss, fp32) KERNEL_CPU(binary_cross_entropy_with_logits, fp32) @@ -477,11 +477,11 @@ 
TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) { KERNEL_CPU(fft_hfft, fp32) KERNEL_CPU(fft_ihfft, fp32) KERNEL_CPU(linalg_cond, fp32) - KERNEL_CPU2(linalg_cond, p_str, fp32) + KERNEL_CPU(linalg_cond, p_str, fp32) KERNEL_CPU(linalg_matrix_rank, fp32) - KERNEL_CPU2(linalg_matrix_rank, tol_tensor, fp32) - KERNEL_CPU2(linalg_matrix_rank, atol_rtol_tensor, fp32) - KERNEL_CPU2(linalg_matrix_rank, atol_rtol_float, fp32) + KERNEL_CPU(linalg_matrix_rank, tol_tensor, fp32) + KERNEL_CPU(linalg_matrix_rank, atol_rtol_tensor, fp32) + KERNEL_CPU(linalg_matrix_rank, atol_rtol_float, fp32) KERNEL_CPU(linalg_solve, fp32) KERNEL_CPU(linalg_cholesky, fp32) KERNEL_CPU(linalg_svdvals, fp32) @@ -513,7 +513,7 @@ TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) { KERNEL_CPU(stack, promote) KERNEL_CPU(cat, promote) KERNEL_CPU(index_copy, promote) - KERNEL_CPU2(index_copy, dimname, promote) + KERNEL_CPU(index_copy, dimname, promote) } diff --git a/aten/src/ATen/autocast_mode.h b/aten/src/ATen/autocast_mode.h index b3f2fcd511..9d400db03e 100644 --- a/aten/src/ATen/autocast_mode.h +++ b/aten/src/ATen/autocast_mode.h @@ -541,9 +541,13 @@ copy pasted in from VariableTypeEverything.cpp with appropriate substitutions. #define ADD_NS(RAW_OP) at::RAW_OP +#define _KERNEL_OVERLOAD_NARG_IMPL(_0, _1, _2, N, ...) N +#define _KERNEL_OVERLOAD_NARG(...) \ + C10_EXPAND_MSVC_WORKAROUND(_KERNEL_OVERLOAD_NARG_IMPL(__VA_ARGS__, 2, 1)) + // Common cases where registration signature matches redispatch signature // (that's why SIGNATURE is repeated in the WrapFunction instantiation) -#define KERNEL(DISPATCHKEY, OP, POLICY) \ +#define KERNEL1(DISPATCHKEY, OP, POLICY) \ m.impl( \ TORCH_SELECTIVE_NAME("aten::" #OP), \ &::at::autocast::WrapFunction< \ @@ -563,6 +567,15 @@ copy pasted in from VariableTypeEverything.cpp with appropriate substitutions. decltype(ATEN_FN2(OP, OVERLOAD)), \ &ATEN_FN2(OP, OVERLOAD)>::type::call); +#define _KERNEL_DISPATCH(DISPATCHKEY, NARG, ...) \ + C10_CONCATENATE(KERNEL, NARG)(DISPATCHKEY, __VA_ARGS__) + +#define _KERNEL_IMPL(DISPATCHKEY, ...) \ + _KERNEL_DISPATCH(DISPATCHKEY, _KERNEL_OVERLOAD_NARG(__VA_ARGS__), __VA_ARGS__) + +// It will dispatch to KERNEL1 or KERNEL2 based on its inputs. +#define KERNEL(DISPATCHKEY, ...) _KERNEL_IMPL(DISPATCHKEY, __VA_ARGS__) + // Less-common but still useful case: redispatching to a function // with a new signature (e.g. appending a dtype) #define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE( \ @@ -581,12 +594,9 @@ copy pasted in from VariableTypeEverything.cpp with appropriate substitutions. REDISPATCH_SIGNATURE, \ &REDISPATCH_FUNC>::type::call); -// KERNEL_CPU/KERNEL_CPU2/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CPU -// registration for AutocastCPU -#define KERNEL_CPU(OP, POLICY) KERNEL(c10::DeviceType::CPU, OP, POLICY) - -#define KERNEL_CPU2(OP, OVERLOAD, POLICY) \ - KERNEL2(c10::DeviceType::CPU, OP, OVERLOAD, POLICY) +// KERNEL_CPU/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CPU +// registration (OP, POLICY) or (OP, OVERLOAD, POLICY) for AutocastCPU +#define KERNEL_CPU(...) KERNEL(c10::DeviceType::CPU, __VA_ARGS__) #define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CPU( \ REDISPATCH_FUNC, \ @@ -602,12 +612,9 @@ copy pasted in from VariableTypeEverything.cpp with appropriate substitutions. 
REDISPATCH_SIGNATURE, \ POLICY) -// KERNEL_CUDA/KERNEL_CUDA2/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CUDA -// registration for AutocastCUDA -#define KERNEL_CUDA(OP, POLICY) KERNEL(c10::DeviceType::CUDA, OP, POLICY) - -#define KERNEL_CUDA2(OP, OVERLOAD, POLICY) \ - KERNEL2(c10::DeviceType::CUDA, OP, OVERLOAD, POLICY) +// KERNEL_CUDA/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CUDA +// registration (OP, POLICY) or (OP, OVERLOAD, POLICY) for AutocastCUDA +#define KERNEL_CUDA(...) KERNEL(c10::DeviceType::CUDA, __VA_ARGS__) #define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CUDA( \ REDISPATCH_FUNC, \ @@ -623,14 +630,10 @@ copy pasted in from VariableTypeEverything.cpp with appropriate substitutions. REDISPATCH_SIGNATURE, \ POLICY) -// KERNEL_PRIVATEUSEONE/KERNEL_PRIVATEUSEONE2/ -// KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_PRIVATEUSEONE -// registration for AutocastPrivateUse1 -#define KERNEL_PRIVATEUSEONE(OP, POLICY) \ - KERNEL(c10::DeviceType::PrivateUse1, OP, POLICY) - -#define KERNEL_PRIVATEUSEONE2(OP, OVERLOAD, POLICY) \ - KERNEL2(c10::DeviceType::PrivateUse1, OP, OVERLOAD, POLICY) +// KERNEL_PRIVATEUSEONE/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_PRIVATEUSEONE +// registration (OP, POLICY) or (OP, OVERLOAD, POLICY) for AutocastPrivateUse1 +#define KERNEL_PRIVATEUSEONE(OP, ...) \ + KERNEL(c10::DeviceType::PrivateUse1, __VA_ARGS__) #define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_PRIVATEUSEONE( \ REDISPATCH_FUNC, \
2.41.0
ae31495ff36b587fbde6eb509efd8584d238aff
Thu, 18 Apr 2024 01:17:44 +0000
[PATCH 0287/1000] Try to speed up lintrunner in CI (#124311)
Before this change: clang is 19min and noclang is 16min. After this change: clang is 17min and noclang is 15min. This is still very slow, so more could most likely be done, but I didn't check the logs in detail. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124311 Approved by: https://github.com/ezyang, https://github.com/Skylion007
diff --git a/.github/scripts/lintrunner.sh b/.github/scripts/lintrunner.sh index e1403d13d6..82f472b0f1 100755 --- a/.github/scripts/lintrunner.sh +++ b/.github/scripts/lintrunner.sh @@ -6,6 +6,9 @@ CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]") eval "$(command conda 'shell.bash' 'hook' 2> /dev/null)" conda activate "${CONDA_ENV}" +# Use uv to speed up lintrunner init +python3 -m pip install uv + CACHE_DIRECTORY="/tmp/.lintbin" # Try to recover the cached binaries if [[ -d "${CACHE_DIRECTORY}" ]]; then
2.41.0
30bb13fe84c88ab5c988351543362b60fefb556
Wed, 17 Apr 2024 09:19:38 -0700
[PATCH 0288/1000] Re-land precompile triton templates (#124030)
Re-land precompile triton templates. This got reverted because we were precompiling templates without checking the cache. I have since added logic and a test to ensure we do not precompile if there is a cache hit. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124030 Approved by: https://github.com/shunting314, https://github.com/nmacchioni, https://github.com/yoyoyocmu
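A minimal sketch of the cache-check-before-precompile behavior described above. The names `lookup_timings` and `precompile_choice` are placeholders, not the actual inductor APIs; the real logic lives in `AlgorithmSelectorCache` in the diff below.

```python
# Hypothetical sketch: only precompile autotuning choices when the
# persistent cache has no timings for them.

def maybe_precompile(choices, lookup_timings, precompile_choice):
    def no_op():
        return None

    # benchmark=None means "consult the cache only, never benchmark".
    timings = lookup_timings(choices, benchmark=None)
    if timings:
        return no_op  # cache hit: skip precompilation entirely

    def do_precompile():
        for choice in choices:
            precompile_choice(choice)

    return do_precompile

# Usage with dummy callables:
run = maybe_precompile(
    choices=["triton_mm_1", "triton_mm_2"],
    lookup_timings=lambda choices, benchmark: {},   # simulate a cache miss
    precompile_choice=lambda c: print(f"precompiling {c}"),
)
run()
```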
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py index d1f074de51..af87aba112 100644 --- a/test/inductor/test_max_autotune.py +++ b/test/inductor/test_max_autotune.py @@ -328,7 +328,8 @@ class TestMaxAutotune(TestCase): inputs: str, benchmark: Callable[[Any], Dict[ChoiceCaller, float]], ) -> Dict[ChoiceCaller, float]: - return benchmark(choices) + if benchmark is not None: + return benchmark(choices) asc = AlgorithmSelectorCache() @@ -426,6 +427,25 @@ class TestMaxAutotune(TestCase): FileCheck().check_not("extern_kernels.convolution").run(code[0]) self.assertEqual(conv1x1(input_tensor), out, atol=1e-2, rtol=0) + @skipIfRocm + def test_filled_cache_precompile(self): + def fn(a, b, c): + a = (a @ b) @ c + a, b, c = (t.to(torch.float16) for t in [a, b, c]) + return (a @ b) @ c + + fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn) + inputs = [torch.rand([256, 256], device="cuda") for _ in range(3)] + from torch._dynamo.utils import counters + + self.assertEqual(fn(*inputs), fn_c(*inputs), atol=1e-2, rtol=1e-2) + + torch._dynamo.reset() + counters.clear() + + fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn) + self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 0) + def test_cat_addmm(self): def fn(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor): return torch.cat( diff --git a/test/inductor/test_select_algorithm.py b/test/inductor/test_select_algorithm.py index 3b76651fcc..48713bb63e 100644 --- a/test/inductor/test_select_algorithm.py +++ b/test/inductor/test_select_algorithm.py @@ -19,8 +19,10 @@ aten = torch.ops.aten def patches(fn): - def skip_cache(self, choices, name, key, generate): - return generate(choices) + def skip_cache(self, choices, name, key, benchmark): + if benchmark is None: + return {} + return benchmark(choices) for patcher in [ dynamo_config.patch(verbose=True), diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py index ba1976745c..35beb6fb06 100644 --- a/torch/_inductor/autotune_process.py +++ b/torch/_inductor/autotune_process.py @@ -502,7 +502,6 @@ class TestBenchmarkRequest(BenchmarkRequest): class TritonBenchmarkRequest(BenchmarkRequest): # Important: Instances of this class have to be serializable # across process boundaries. Do not put CUDA Tensors in here! 
- def __init__( self, kernel_name: str, @@ -545,6 +544,8 @@ class TritonBenchmarkRequest(BenchmarkRequest): if "warmup" in inspect.signature(run_method).parameters: warmup_arg["warmup"] = False + from torch._C import _cuda_getCurrentRawStream as get_raw_stream + if torch.version.hip and self.matrix_instr_nonkdim != 0: return functools.partial( run_method, @@ -553,9 +554,7 @@ class TritonBenchmarkRequest(BenchmarkRequest): *self.extra_args, grid=self.grid, **warmup_arg, - num_stages=self.num_stages, - num_warps=self.num_warps, - matrix_instr_nonkdim=self.matrix_instr_nonkdim, + stream=get_raw_stream(self.output_tensor_meta.device.index), ) else: return functools.partial( @@ -565,10 +564,13 @@ class TritonBenchmarkRequest(BenchmarkRequest): *self.extra_args, grid=self.grid, **warmup_arg, - num_stages=self.num_stages, - num_warps=self.num_warps, + stream=get_raw_stream(self.output_tensor_meta.device.index), ) + def precompile(self): + mod = PyCodeCache.load_by_key_path(self.module_cache_key, self.module_path) + getattr(mod, self.kernel_name).precompile() + def __str__(self) -> str: return f"{self.kernel_name=}, {self.module_path=}, {self.module_cache_key=}" diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index 07c6f93730..d4f2805773 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -156,7 +156,7 @@ class CacheBase: try: import triton - triton_version = triton.__version__ + triton_version = triton.__version__ # type: ignore[attr-defined] except ModuleNotFoundError: triton_version = None @@ -262,7 +262,7 @@ class PersistentCache(CacheBase): choices: List[ChoiceCaller], op: str, inputs: str, - benchmark: Callable[[Any], Dict[ChoiceCaller, float]], + benchmark: Optional[Callable[[Any], Dict[ChoiceCaller, float]]], ) -> Dict[ChoiceCaller, float]: """ Check to see if we have benchmarked the given choice callers. For each @@ -270,7 +270,7 @@ class PersistentCache(CacheBase): 1. Check global_cache[op][inputs][choice][precision], return benchmark if cached. 2. Check local_cache[op][inputs][choice][precision], return benchmark if cached. - 3. + 3. If benchmark is not None: a. `max_autotune_gemm=True`: benchmark the choice, update local_cache[op][inputs][choice], and return the benchmark. b. `max_autotune_gemm=False`: don't benchmark the choice, return nothing. 
@@ -303,9 +303,13 @@ class PersistentCache(CacheBase): if config.max_autotune or config.max_autotune_gemm: local_cache = self.get_local_cache() # check local cache first since it is data specific to the current machine - if not check_cache(local_cache) and not ( - use_global_cache() - and check_cache(self.get_global_cache(), callback=log_stats) + if ( + not check_cache(local_cache) + and not ( + use_global_cache() + and check_cache(self.get_global_cache(), callback=log_stats) + ) + and benchmark is not None ): try: # re-benchmark everything to try to get consistent numbers from the same machine diff --git a/torch/_inductor/codegen/triton_utils.py b/torch/_inductor/codegen/triton_utils.py index c95e699bcd..c8a7d92e3c 100644 --- a/torch/_inductor/codegen/triton_utils.py +++ b/torch/_inductor/codegen/triton_utils.py @@ -65,6 +65,32 @@ def signature_to_meta( } +def is_unaligned_buffer(arg: TensorArg): + buf_name = arg.buffer + if buf_name in V.graph.graph_inputs: + return not config.assume_aligned_inputs + + if buf_name in V.graph.constants: + # all constants are assumed to be aligned + return False + + if V.graph.scheduler: + layout = V.graph.scheduler.get_buffer_layout(buf_name) + else: + buffer = V.graph.get_buffer(buf_name) + # output arg + if not buffer: + assert buf_name == V.kernel.output_node.name + layout = V.kernel.output_node.layout + else: + layout = buffer.get_layout() + + if isinstance(layout, torch._inductor.ir.NonOwningLayout): + return not layout.maybe_guard_aligned() + else: + return False + + def config_of( args: List[KernelArgType], *, @@ -83,9 +109,7 @@ def config_of( offset_aligned = V.graph.sizevars.statically_known_multiple_of( x.offset * x.dtype.itemsize, alignment # type: ignore[arg-type] ) - return offset_aligned and not V.graph.scheduler.is_unaligned_buffer( - x.buffer - ) + return offset_aligned and not is_unaligned_buffer(x) else: return False if isinstance(x, SizeArg): diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py index 573e79acd6..049a77a4ef 100644 --- a/torch/_inductor/scheduler.py +++ b/torch/_inductor/scheduler.py @@ -2495,18 +2495,9 @@ class Scheduler: self.flush() - def is_unaligned_buffer(self, buf_name): - if buf_name in V.graph.graph_inputs: - return not config.assume_aligned_inputs - if buf_name in V.graph.constants: - # all constants are assumed to be aligned - return False + def get_buffer_layout(self, buf_name: str) -> ir.Layout: node = self.name_to_node[buf_name] - layout = node.node.get_layout() - if isinstance(layout, ir.NonOwningLayout): - return not layout.maybe_guard_aligned() - else: - return False + return node.node.get_layout() class BaseScheduling: diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index 75deeaf5e3..3261909d2b 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -94,7 +94,7 @@ class TritonTemplateKernel(TritonKernel): grid_fn, meta, call_sizes, - use_jit=True, + use_jit=False, prefix_args=0, suffix_args=0, epilogue_fn=identity, @@ -153,8 +153,8 @@ class TritonTemplateKernel(TritonKernel): argdefs, _, signature = self.args.python_argdefs() triton_meta = { "signature": signature_to_meta(signature, size_dtype=self.index_dtype), - "device": V.graph.scheduler.current_device.index, - "device_type": V.graph.scheduler.current_device.type, + "device": self.output_node.get_device().index, + "device_type": self.output_node.get_device().type, "constants": {}, } triton_meta["configs"] = [config_of(signature)] @@ -554,7 +554,7 @@ class 
TritonTemplate(KernelTemplate): ), TritonTemplateKernel( kernel_name=kernel_name, output_node=fake_out, - use_jit=True, + use_jit=False, **kernel_options, ) as kernel: try: @@ -740,6 +740,10 @@ class TritonTemplateCaller(ir.TritonTemplateCallerBase): assert self.bmreq is not None return self.bmreq.benchmark(*args, output_tensor=out) + def precompile(self): + assert self.bmreq is not None + self.bmreq.precompile() + def __str__(self): return f"TritonTemplateCaller({self.bmreq.module_path}, {self.debug_extra})" @@ -881,6 +885,7 @@ class AlgorithmSelectorCache(PersistentCache): # TODO(nmacchioni): remove once CI tests are fixed choices = [choice for choice in choices if choice is not None] + if len(choices) == 0: raise RuntimeError( "No choices to select, please consider adding ATEN into max_autotune_gemm_backends " @@ -897,19 +902,38 @@ class AlgorithmSelectorCache(PersistentCache): def make_benchmark_fn(): return self.make_benchmark_fn(choices, input_nodes, layout, input_gen_fns) - def precompile(choices): + def precompile(choices) -> Callable[[], None]: + def no_op(*args, **kwargs): + return + if ( precompilation_timeout_seconds is None or precompilation_timeout_seconds <= 0 ): - return + return no_op num_workers = min( config.compile_threads, torch.get_num_threads(), len(choices), ) if num_workers <= 0: - return + return no_op + + # TODO - debug issue + if torch.version.hip: + return no_op + + # check local and global cache before precompiling + timings = self.lookup( + choices, + name, + repr([self.key_of(x) for x in input_nodes]), + benchmark=None, + ) + + if timings: + return no_op + log.info( "Multithreaded precompilation for %d choices using %d worker threads", len(choices),
2.41.0
3b4ac956eabb34c9f7e52b8c7fa50b085f667e5
Wed, 17 Apr 2024 20:20:18 +0300
[PATCH 0289/1000] Add index_reduce decomposition (#122579)
As in the title. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122579 Approved by: https://github.com/peterbell10 ghstack dependencies: #123375
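For context on the op being decomposed, here is a small eager-mode example of `Tensor.index_reduce_` with the `mean` reduction. It only illustrates the operator's semantics, not the new decomposition, and the expected values in the comments are my own working.

```python
import torch

self_t = torch.zeros(3, 2)
src = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]])
index = torch.tensor([0, 0, 2, 2])

# Rows of `src` selected by `index` are mean-reduced into `self_t` along dim 0.
out = self_t.index_reduce_(0, index, src, "mean", include_self=False)
print(out)
# expected:
# tensor([[2., 3.],   # mean of src rows 0 and 1
#         [0., 0.],   # untouched
#         [6., 7.]])  # mean of src rows 2 and 3
```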
diff --git a/test/inductor/test_torchinductor_opinfo.py b/test/inductor/test_torchinductor_opinfo.py index 75c4f61ca3..7319656905 100644 --- a/test/inductor/test_torchinductor_opinfo.py +++ b/test/inductor/test_torchinductor_opinfo.py @@ -168,7 +168,7 @@ inductor_skips["cpu"] = { "linalg.ldl_factor": {f32, f64}, # flaky "nn.functional.cosine_embedding_loss": {b8}, # flaky ("index_reduce", "prod"): {f16}, # flaky - ("index_reduce", "mean"): {f16, f64}, # flaky + ("index_reduce", "mean"): {f16}, # flaky } if IS_MACOS and IS_X86: diff --git a/torch/_inductor/decomposition.py b/torch/_inductor/decomposition.py index 00640f62fd..47370c2e54 100644 --- a/torch/_inductor/decomposition.py +++ b/torch/_inductor/decomposition.py @@ -27,6 +27,7 @@ from torch._prims_common import ( ) from . import config, inductor_prims +from .utils import needs_fallback_due_to_atomic_add_limitations, use_scatter_fallback log = logging.getLogger(__name__) aten = torch.ops.aten @@ -705,3 +706,50 @@ def _softmax_backward_data(grad_output, output, dim, input_dtype): if grad_output.dtype != input_dtype: grad_input = grad_input.to(input_dtype) return grad_input.contiguous() + + +@register_decomposition(aten.index_reduce) +def index_reduce( + self, dim: int, index, src, reduction_type: str, *, include_self: bool = True +): + if reduction_type == "mean" and not needs_fallback_due_to_atomic_add_limitations( + self.dtype + ): + true_division = self.dtype.is_floating_point or self.dtype.is_complex + ones = torch.ones_like(src) + if include_self: + out = self + counts = torch.ones_like(self).index_add(dim, index, ones) + else: + out = self.index_fill(dim, index, 0) + counts = torch.zeros_like(self).index_add(dim, index, ones) + counts = counts.masked_fill(counts < 1, 1) + out = out.index_add(dim, index, src) + return out / counts if true_division else out // counts + + if use_scatter_fallback( + "aten.scatter_reduce_", + reduction_type, + self.dtype, + src.dtype, + src.device.type, + True, + ): + return NotImplemented + + repeats = self.shape[dim + 1 :].numel() * self.shape[:dim].numel() + index_shape = (index.numel(), *self.shape[dim + 1 :], *self.shape[:dim]) + perm = (*range(self.ndim - dim, self.ndim), 0, *range(1, self.ndim - dim)) + scatter_index = ( + index.to(torch.int64) + .repeat_interleave(repeats) + .reshape(index_shape) + .permute(perm) + ) + return self.scatter_reduce( + dim, + scatter_index, + src, + reduction_type, + include_self=include_self, + ) diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py index 89e6931217..b44ee2805b 100644 --- a/torch/_inductor/lowering.py +++ b/torch/_inductor/lowering.py @@ -58,9 +58,10 @@ from .utils import ( is_dynamic, is_gpu, is_pointwise_use, + needs_fallback_due_to_atomic_add_limitations, pad_listlike, - parallel_num_threads, sympy_product, + use_scatter_fallback, ) from .virtualized import ops, V @@ -1904,7 +1905,6 @@ def sdpa_constraint(fx_node, *args, **kwargs): # WIP -make_fallback(aten.index_reduce) # @pearu make_fallback(aten._adaptive_avg_pool3d) # @isuruf make_fallback(aten.adaptive_max_pool3d) # @isuruf make_fallback(aten.fractional_max_pool3d) # @isuruf @@ -2088,6 +2088,9 @@ make_fallback(aten._efficient_attention_forward.default, sdpa_constraint) make_fallback(aten._efficient_attention_backward.default, sdpa_constraint) make_fallback(aten._scaled_mm.default, constrain_to_fx_strides) +# index_reduce requires fallback when use_scatter_fallback(...) returns True +make_fallback(aten.index_reduce) + # Register with type_promotion_kind None. 
# For example, fp16.copy_(fp32) should **not** promote the first input's dtype. @@ -2819,11 +2822,6 @@ def _unsafe_index_put_(self, indices, values, accumulate=False): return index_put_impl_(self, indices, values, accumulate, check=False) -def needs_fallback_due_to_atomic_add_limitations(dtype): - # tl.atomic_add does NOT support the following types - return dtype in {torch.int64, torch.bool, torch.bfloat16} - - def index_put_impl_(self, indices, values, accumulate, check): # Dispatch to masked fill for single boolean index with single value if ( @@ -2973,24 +2971,14 @@ def scatter_fallback( reduce: Optional[str] = None, include_self: bool = True, ): - reduce_ty = "add" if fn == "aten.scatter_" else "sum" - if ( - reduce not in {None, reduce_ty} - or ( - isinstance(src, TensorBox) - and is_gpu(src.get_device().type) - and needs_fallback_due_to_atomic_add_limitations(src.get_dtype()) - ) - or ( - fn == "aten.scatter_reduce_" - and reduce == "sum" - and isinstance(src, TensorBox) - and src.get_device() == torch.device("cpu") - and config.cpp.fallback_scatter_reduce_sum - and (config.cpp.dynamic_threads or parallel_num_threads() != 1) - ) - or (reduce == reduce_ty and self.get_dtype() in {torch.bool, torch.int64}) - or torch.are_deterministic_algorithms_enabled() + src_is_tensor = isinstance(src, TensorBox) + if use_scatter_fallback( + fn, + reduce, + self.get_dtype(), + src.get_dtype() if src_is_tensor else type(src), + src.get_device().type if src_is_tensor else "not impl", + src_is_tensor, ): ir.ScatterFallback( V.graph.current_node.target, diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index 030649df45..53319bd2dd 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -1528,3 +1528,33 @@ def is_gpu(device: str): def device_need_guard(device: str): assert isinstance(device, str) return is_gpu(device) + + +def needs_fallback_due_to_atomic_add_limitations(dtype): + # tl.atomic_add does NOT support the following types + return dtype in {torch.int64, torch.bool, torch.bfloat16} + + +def use_scatter_fallback( + fn, reduction_type, self_dtype, src_dtype, src_device_type, src_is_tensor +): + reduce_ty = "add" if fn == "aten.scatter_" else "sum" + + return ( + reduction_type not in {None, reduce_ty} + or ( + src_is_tensor + and is_gpu(src_device_type) + and needs_fallback_due_to_atomic_add_limitations(src_dtype) + ) + or ( + fn == "aten.scatter_reduce_" + and reduction_type == "sum" + and src_is_tensor + and src_device_type == "cpu" + and config.cpp.fallback_scatter_reduce_sum + and (config.cpp.dynamic_threads or parallel_num_threads() != 1) + ) + or (reduction_type == reduce_ty and self_dtype in {torch.bool, torch.int64}) + or torch.are_deterministic_algorithms_enabled() + )
2.41.0
71423c2e4881bb8dbe4dc302e77030ef53058fb
Wed, 17 Apr 2024 14:52:47 -0700
[PATCH 0290/1000] [inductor] let coordesc tuner respect max RBLOCK (#124325)
Fix https://github.com/pytorch/pytorch/issues/124251. The coordinate descent tuner needs to respect the max RBLOCK. When rnumel is a multiple of the max RBLOCK, inductor codegen will skip the rmask. If the coordinate descent tuner does not consider the max RBLOCK and picks an RBLOCK larger than that, we would get a CUDA IMA (illegal memory access) error. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124325 Approved by: https://github.com/Chillee, https://github.com/jansel
diff --git a/test/inductor/test_coordinate_descent_tuner.py b/test/inductor/test_coordinate_descent_tuner.py index 86212e891f..5b9f35fa9c 100644 --- a/test/inductor/test_coordinate_descent_tuner.py +++ b/test/inductor/test_coordinate_descent_tuner.py @@ -98,6 +98,18 @@ class TestCoordinateDescentTuner(TestCase): f"Expected:\n{expected}\nActual:\n{actual}", ) + def test_value_too_large(self): + # Simulate a reduction + size_hints = [2**20, 2**20] + + tuner = CoordescTuner(size_hints=size_hints) + + max_block = config.triton.max_block + self.assertFalse(tuner.value_too_large("XBLOCK", max_block["X"])) + self.assertTrue(tuner.value_too_large("XBLOCK", max_block["X"] * 2)) + self.assertFalse(tuner.value_too_large("RBLOCK", max_block["R"])) + self.assertTrue(tuner.value_too_large("RBLOCK", max_block["R"] * 2)) + if __name__ == "__main__": if IS_LINUX and HAS_CUDA: diff --git a/torch/_inductor/coordinate_descent_tuner.py b/torch/_inductor/coordinate_descent_tuner.py index d0e1efd1f3..baf293d9f5 100644 --- a/torch/_inductor/coordinate_descent_tuner.py +++ b/torch/_inductor/coordinate_descent_tuner.py @@ -70,11 +70,10 @@ class CoordescTuner: return zmax def get_rmax(self): + rmax = inductor_config.triton.max_block["R"] if self.size_hints and len(self.size_hints) > 0: - return self.size_hints[-1] # the last one is for reduction - else: - # large enough. We should not pick this large RBLOCK anyway - return 2**30 + rmax = min(rmax, self.size_hints[-1]) # the last one is for reduction + return rmax def get_warpsmax(self): # Currently, CUDA has a maximum of 1024 threads, so 32 is the max
2.41.0
8f3b99a94659d1d14e28f6271e123cc48dcef71
Wed, 17 Apr 2024 09:42:57 +0800
[PATCH 0291/1000] [inductor] Modify the rules for freezing the layout of x.unwrap_view() in convert_to_reinterpret_view (#122760)
Fix https://github.com/pytorch/pytorch/issues/121607. Modify the rules for freezing the layout of `x.unwrap_view()` in `convert_to_reinterpret_view`: if any read of `x.unwrap_view()` is in channels_last format, freeze the layout of `x.unwrap_view()` to channels_last format. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122760 Approved by: https://github.com/leslie-fang-intel, https://github.com/jgong5, https://github.com/jansel
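For reference, the channels_last property this rule keys on can be checked directly in eager mode; a small illustration, not the inductor code path itself:

```python
import torch

x = torch.randn(2, 3, 4, 5)                  # default contiguous (NCHW) strides
y = x.to(memory_format=torch.channels_last)  # NHWC-style strides

print(x.is_contiguous(memory_format=torch.channels_last))  # False
print(y.is_contiguous(memory_format=torch.channels_last))  # True
print(y.stride())  # (60, 1, 15, 3): the channel dimension has stride 1
```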
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py index 2a66ea69d3..ea80192422 100644 --- a/torch/_inductor/ir.py +++ b/torch/_inductor/ir.py @@ -4138,7 +4138,30 @@ class ExternKernel(InputsKernel): # NOTE: Don't use extract_read_writes here as it fails when # make_loader() inlines the computation - x.unwrap_view().freeze_layout() + x_unwrap_view = x.unwrap_view() + x_unwrap_view_fx_node = V.graph.get_buffer( + x_unwrap_view.get_name() + ).get_origin_node() + # Prefer channels last format according to how the format is set from eager. + if ( + x_unwrap_view_fx_node is not None + and "val" in x_unwrap_view_fx_node.meta + and isinstance(x_unwrap_view.layout, FlexibleLayout) + and ( + x_unwrap_view_fx_node.meta["val"].is_contiguous( + memory_format=torch.channels_last + ) + or x_unwrap_view_fx_node.meta["val"].is_contiguous( + memory_format=torch.channels_last_3d + ) + ) + ): + x_unwrap_view.freeze_layout_with_same_order( + make_channels_last_strides_for(x_unwrap_view.get_size()) + ) + else: + x_unwrap_view.freeze_layout() + index_args, var_ranges = dependencies.index_vars_squeeze( x.get_size(), prefix="r" )
2.41.0
1a56efbb91377237eb1b81d72c7d598ad61b14e
Mon, 15 Apr 2024 20:48:42 +0800
[PATCH 0292/1000] [inductor] modify the output_stride of ConcatKernel (#122761)
Fix https://github.com/pytorch/pytorch/issues/121613. Modify the `output_stride` of `ConcatKernel`: if any input to `Concat` is `Pointwise`, check the layout of all inputs to the `Pointwise`; if any of those inputs is in channels_last format, set channels_last strides for the `output_stride`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122761 Approved by: https://github.com/jgong5, https://github.com/leslie-fang-intel, https://github.com/jansel
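The stride helper used in this change can be exercised on its own; a tiny illustration of what channels_last strides look like for a 4D shape. The printed values are my own reading of the helper, not output quoted from the PR.

```python
import torch
from torch._prims_common import make_channels_last_strides_for

shape = (2, 3, 4, 5)  # N, C, H, W
print(make_channels_last_strides_for(shape))  # expected (60, 1, 15, 3)

# The same strides as an eager channels_last tensor of that shape:
t = torch.empty(shape).to(memory_format=torch.channels_last)
print(t.stride())  # (60, 1, 15, 3)
```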
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py index ea80192422..4cfb582e22 100644 --- a/torch/_inductor/ir.py +++ b/torch/_inductor/ir.py @@ -3856,6 +3856,20 @@ class ConcatKernel(NopKernel): # use CL stride for the output output_stride = make_channels_last_strides_for(new_size) break + any_input_is_storage_and_layout = any(is_storage_and_layout(x) for x in inputs) + fx_node_args = V.graph.current_node.args[0] + assert V.graph.current_node.target in [aten.cat, aten.cat.default] + assert isinstance(fx_node_args, list) + # If any of the inputs has meta tensor and the meta tensor is in CL format, use CL format for the output + if any_input_is_storage_and_layout is False and any( + "val" in arg.meta + and ( + arg.meta["val"].is_contiguous(memory_format=torch.channels_last) + or arg.meta["val"].is_contiguous(memory_format=torch.channels_last_3d) + ) + for arg in fx_node_args + ): + output_stride = make_channels_last_strides_for(new_size) concat_kernel = ConcatKernel( name=None,
2.41.0
f04c29be5bcd5670fb9c05e0cf8f4650559052f
Mon, 15 Apr 2024 13:58:03 +0800
[PATCH 0293/1000] [inductor] Freeze the layout of the conv input to channels_last (#122765)
Fix https://github.com/pytorch/pytorch/issues/118082. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122765 Approved by: https://github.com/jgong5, https://github.com/leslie-fang-intel, https://github.com/jansel
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py index 404c49edde..aaaee95887 100644 --- a/torch/_inductor/graph.py +++ b/torch/_inductor/graph.py @@ -18,6 +18,7 @@ from torch._decomp import get_decompositions from torch._dynamo.utils import defake, dynamo_timed from torch._higher_order_ops.effects import _EffectType from torch._logging import LazyString, trace_structured +from torch._prims_common import make_channels_last_strides_for from torch._subclasses.fake_tensor import FakeTensor from torch.fx.experimental._backward_state import BackwardState from torch.fx.experimental.sym_node import magic_methods, method_to_operator @@ -1204,21 +1205,24 @@ class GraphLowering(torch.fx.Interpreter): torch.ops.aten.mm.default, torch.ops.aten._int_mm.default, ] + need_fixed_channels_last_layout = [] if not self.layout_opt: need_fixed_layout.append(torch.ops.aten.convolution.default) if torch._C._has_mkldnn: need_fixed_layout += [ + torch.ops.mkldnn._linear_pointwise.default, + torch.ops.mkldnn._linear_pointwise.binary, + torch.ops.aten.mkldnn_rnn_layer.default, + torch.ops.onednn.qlinear_pointwise.default, + torch.ops.onednn.qlinear_pointwise.tensor, + ] + need_fixed_channels_last_layout += [ torch.ops.mkldnn._convolution_pointwise.default, torch.ops.mkldnn._convolution_pointwise.binary, torch.ops.mkldnn._convolution_pointwise_.binary, torch.ops.mkldnn._convolution_transpose_pointwise.default, - torch.ops.mkldnn._linear_pointwise.default, - torch.ops.mkldnn._linear_pointwise.binary, - torch.ops.aten.mkldnn_rnn_layer.default, torch.ops.onednn.qconv2d_pointwise.default, torch.ops.onednn.qconv2d_pointwise.binary, - torch.ops.onednn.qlinear_pointwise.default, - torch.ops.onednn.qlinear_pointwise.tensor, ] if torch._C.has_mkl: need_fixed_layout += [torch.ops.mkl._mkl_linear.default] @@ -1228,6 +1232,13 @@ class GraphLowering(torch.fx.Interpreter): ir.get_stride_order(n.meta["val"].stride()), allow_padding=True, ) + if user.target in need_fixed_channels_last_layout: + result = ir.ExternKernel.require_stride_order( + result, + ir.get_stride_order( + make_channels_last_strides_for(n.meta["val"].shape) + ), + ) if user.op == "output": if isinstance(result.data.data, (Pointwise, Reduction)): result.realize()
2.41.0
6b757701e6564621463ab9da0b25778d9326043
Tue, 16 Apr 2024 00:12:23 -0700
[PATCH 0294/1000] [aot] trim refcount for subclass runtime wrapper (#124155)
On torchtrain, before: <img width="1218" alt="image" src="https://github.com/pytorch/pytorch/assets/9547562/b340c114-071a-440c-904c-c042de4d92c5"> after: ![image](https://github.com/pytorch/pytorch/assets/9547562/ee3b6e6f-6e46-46bc-a93d-d4603673ee63) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124155 Approved by: https://github.com/jansel, https://github.com/bdhirsh ghstack dependencies: #124127
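A standalone illustration of why `args.clear()` in the runtime wrapper matters for activation memory: once the wrapper steals and clears the caller's list, it no longer pins the input tensors, so they can be freed as soon as the wrapper's own temporaries go away. The wrapper below is hypothetical, not the actual aot_autograd code.

```python
import weakref
import torch

def boxed_wrapper(args):
    # Mirror the pattern from the diff: take the inputs, then clear the
    # caller-owned list so this call no longer keeps them alive.
    unwrapped = list(args)
    args.clear()
    return [t * 2 for t in unwrapped]

x = torch.ones(4)
ref = weakref.ref(x)
inputs = [x]
del x                        # the list now holds the only strong reference

out = boxed_wrapper(inputs)
print(inputs)                # [] -- the wrapper stole the references
print(ref() is None)         # True -- the original input tensor has been freed
```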
diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py index 27c1ccd3e7..5f9dd9b84d 100644 --- a/test/inductor/test_compiled_autograd.py +++ b/test/inductor/test_compiled_autograd.py @@ -1287,6 +1287,61 @@ TORCH_LIBRARY(test_autograd_cpp_node_data_dependent, m) { out = compiled_fn(activations) self.assertTrue(len(activations) == 0) + @unittest.skipIf(not HAS_CUDA, "requires cuda") + def test_free_activation_memory_subclass(self): + # cover the case when aot inputs have subclasses, resulting in a different runtime wrapper + self.assertTrue(torch.cuda.memory_allocated() == 0) + + # Use an op to check that the memory is freed by the time the op is executed + def assertion_impl(to_clone): + mem_allocated = torch.cuda.memory_allocated() + self.assertTrue( + mem_allocated < 1200000, "some activations should have been freed" + ) + self.assertTrue( + mem_allocated > 800000, + "currently subclasses don't seem to be freed in inductor", + ) + return to_clone.clone() + + with torch.library._scoped_library("test_compiled_autograd", "FRAGMENT") as lib: + lib.define( + "assertion_op(Tensor x) -> Tensor", tags=(torch.Tag.pt2_compliant_tag,) + ) + lib.impl("assertion_op", assertion_impl, "CPU") + lib.impl("assertion_op", lambda x: x.clone(), "Meta") + lib.impl("assertion_op", lambda x: x.clone(), "NestedTensor") + + def fn(inputs): + _, y = inputs + out = y.cpu() + cloned_out = torch.ops.test_compiled_autograd.assertion_op(out) + return cloned_out + + gm = torch.fx.symbolic_trace(fn) + torch._dynamo.utils.set_locals_to_steal(gm, ["inputs"]) + compiled_fn = torch.compile(gm) + + from torch.nested._internal.nested_tensor import jagged_from_list + + activations = [ + jagged_from_list( + [ + torch.ones((1, 100000), device="cuda"), # 400,000 bytes + torch.ones((1, 100000), device="cuda"), # 400,000 bytes + ], + None, + )[ + 0 + ], # NestedTensor + torch.ones((1, 100000), device="cuda"), # 400,000 bytes + ] + # 1,200,000 bytes (3 * 4 * 100,000 bytes) + self.assertTrue(torch.cuda.memory_allocated() > 1200000) + + out = compiled_fn(activations) + self.assertTrue(len(activations) == 0) + def load_test_module(name): testdir = Path(__file__).absolute().parent.parent diff --git a/torch/_functorch/_aot_autograd/runtime_wrappers.py b/torch/_functorch/_aot_autograd/runtime_wrappers.py index 2ac3158873..90ba8a4c74 100644 --- a/torch/_functorch/_aot_autograd/runtime_wrappers.py +++ b/torch/_functorch/_aot_autograd/runtime_wrappers.py @@ -335,8 +335,9 @@ def aot_dispatch_subclass_wrapper( subclass_metas: List[Union[int, SubclassCreationMeta]], num_fw_outs_saved_for_bw: Optional[int], ) -> Callable: - def inner_fn(args): + def inner_fn(args: List[Any]): unwrapped_args = unwrap_tensor_subclasses(args, is_joint_structure=False) + args.clear() # expectation: runtime_fn is a boxed fn unwrapped_outs = runtime_fn(unwrapped_args) wrapped_outs = wrap_tensor_subclasses(
2.41.0
b4b857a60150205fb53e08f7b09e791b0723ace
Wed, 17 Apr 2024 15:31:35 -0700
[PATCH 0295/1000] [dynamo][nn_module] Enable torch.compile/disable as decorators on the class (#124187)
Support something like the following. This is a UI change, so please review carefully. ~~~ @torch._dynamo.disable class SimpleLinear(torch.nn.Module): def __init__(self): super().__init__() self.layer0 = torch.nn.Linear(4, 4) def forward(self, inp): return self.layer0(torch.sigmoid(inp)) @torch.compile(backend=cnts) class SimpleModel(torch.nn.Module): def __init__(self): super().__init__() self.layer0 = SimpleLinear() self.layer1 = torch.nn.Linear(4, 4) def forward(self, inp): z = self.layer0(torch.sin(inp)) return self.layer1(z) ~~~ Pull Request resolved: https://github.com/pytorch/pytorch/pull/124187 Approved by: https://github.com/yanboliang, https://github.com/jansel
diff --git a/test/dynamo/test_decorators.py b/test/dynamo/test_decorators.py index 3bff8b7177..890edca40c 100644 --- a/test/dynamo/test_decorators.py +++ b/test/dynamo/test_decorators.py @@ -134,6 +134,55 @@ class DecoratorTests(torch._dynamo.test_case.TestCase): all(node.target is not torch.sigmoid for node in gm1.graph.nodes) ) + def test_disable_nn_module_with_class_decorator(self): + cnts = torch._dynamo.testing.CompileCounterWithBackend("eager") + + @torch._dynamo.disable + class SimpleLinear(torch.nn.Module): + def __init__(self): + super().__init__() + self.layer0 = torch.nn.Linear(4, 4) + + def forward(self, inp): + return self.layer0(torch.sigmoid(inp)) + + @torch.compile(backend=cnts) + class SimpleModel(torch.nn.Module): + def __init__(self): + super().__init__() + self.layer0 = SimpleLinear() + self.layer1 = torch.nn.Linear(4, 4) + + def forward(self, inp): + z = self.layer0(torch.sin(inp)) + return self.layer1(z) + + def hook(module, args): + inp = args[0].sigmoid() + return (inp,) + + model = SimpleModel() + model.layer0.register_forward_pre_hook(hook) + + model(torch.randn(4)) + + # check for no graph break + self.assertEqual(cnts.frame_count, 2) + + gm0 = cnts.graphs[0] + # Check that the first graph has sin node, and no sigmoid + self.assertTrue(any(node.target is torch.sin for node in gm0.graph.nodes)) + self.assertTrue( + all(node.target is not torch.sigmoid for node in gm0.graph.nodes) + ) + + gm1 = cnts.graphs[1] + # Check that the first graph does not have sigmoid. sigmoid is used in + # both hook and disabled module. + self.assertTrue( + all(node.target is not torch.sigmoid for node in gm1.graph.nodes) + ) + def test_allow_in_graph(self): cnts = torch._dynamo.testing.CompileCounter() diff --git a/torch/_dynamo/eval_frame.py b/torch/_dynamo/eval_frame.py index 17249d8cc8..99a466523a 100644 --- a/torch/_dynamo/eval_frame.py +++ b/torch/_dynamo/eval_frame.py @@ -335,6 +335,22 @@ class _TorchDynamoContext: new_mod.get_compiler_config = get_compiler_config return new_mod + + if inspect.isclass(fn): + # User has wrapped the class with compile/disable decorator. Apply + # disable to init/call method. + cls_obj = fn + if isinstance(self, DisableContext): + # Disable on init is useful for reconstruction of bytecodes where we + # want to prevent Dynamo from tracing into the init function. Check + # test_reconstruction in test_model_output.py. + cls_obj.__init__ = self(cls_obj.__init__) + cls_obj.__call__ = self(cls_obj.__call__) + if issubclass(cls_obj, torch.nn.Module): + # NN module variable tracker directly inlines the _call_impl. Disable it. + cls_obj._call_impl = self(cls_obj._call_impl) + return cls_obj + assert callable(fn) try:
2.41.0
ddd17bdc6c69a5765c39ab91b9acae74b58e8d3
Tue, 16 Apr 2024 15:02:26 -0700
[PATCH 0296/1000] [benchmarks] Add --snapshot-memory to get memory pickles for eager vs compiled (#119411)
Creates memory snapshot pickles, e.g. ``` inductor_no_cudagraphs_torchbench_amp_training_cuda_performance_compiled_pytorch_stargan.pickle inductor_no_cudagraphs_torchbench_amp_training_cuda_performance_eager_pytorch_stargan.pickle ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/119411 Approved by: https://github.com/jansel
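Outside the benchmark harness, the same Memory Snapshot APIs that the new flag wires up can be used directly; a minimal usage sketch (file name and entry count are arbitrary):

```python
import torch

if torch.cuda.is_available():
    # Start recording allocator events.
    torch.cuda.memory._record_memory_history(max_entries=100000)

    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x

    # Dump a pickle that can be loaded into the PyTorch memory visualizer.
    torch.cuda.memory._dump_snapshot("snapshot.pickle")

    # Stop recording.
    torch.cuda.memory._record_memory_history(enabled=None)
```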
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py index 0d6965c148..9908b44508 100644 --- a/benchmarks/dynamo/common.py +++ b/benchmarks/dynamo/common.py @@ -1994,6 +1994,29 @@ def maybe_init_distributed(should_init_distributed, rank, world_size, port="6789 torch.distributed.destroy_process_group() +@contextmanager +def maybe_snapshot_memory(should_snapshot_memory, suffix): + # Enables Memory Snapshot tool for memory deep dives: + # https://pytorch.org/blog/understanding-gpu-memory-1/ + try: + if should_snapshot_memory: + torch.cuda.memory._record_memory_history(max_entries=100000) + yield + finally: + if should_snapshot_memory: + try: + torch.cuda.memory._dump_snapshot( + os.path.join( + torch._dynamo.config.base_dir, + f"{output_filename.rstrip('.csv')}_{suffix}.pickle", + ) + ) + except Exception as e: + logging.error("Failed to save memory snapshot, %s", e) + + torch.cuda.memory._record_memory_history(enabled=None) + + class BenchmarkRunner: def __init__(self): self.model_iter_fn = None @@ -2695,9 +2718,12 @@ class BenchmarkRunner: if tag is not None: experiment_kwargs["tag"] = tag results = [] - eager_latency, eager_peak_mem, _ = warmup( - self.model_iter_fn, model, example_inputs, "eager" - ) + with maybe_snapshot_memory( + self.args.snapshot_memory, f"eager_{self.args.only}" + ): + eager_latency, eager_peak_mem, _ = warmup( + self.model_iter_fn, model, example_inputs, "eager" + ) if self.args.export_aot_inductor: t_0 = time.perf_counter() @@ -2708,7 +2734,11 @@ class BenchmarkRunner: optimized_model_iter_fn = optimize_ctx(self.model_iter_fn) aot_compilation_time = 0 - with maybe_enable_compiled_autograd(self.args.compiled_autograd): + with maybe_enable_compiled_autograd( + self.args.compiled_autograd + ), maybe_snapshot_memory( + self.args.snapshot_memory, f"compiled_{self.args.only}" + ): dynamo_latency, dynamo_peak_mem, dynamo_stats = warmup( optimized_model_iter_fn, model, example_inputs, "dynamo" ) @@ -3270,6 +3300,13 @@ def parse_args(args=None): help="profiles TorchDynamo cache lookup", ) + parser.add_argument( + "--snapshot-memory", + "--snapshot_memory", + action="store_true", + help="Enables Memory Snapshot tool for memory deep dives: https://pytorch.org/blog/understanding-gpu-memory-1/", + ) + group_fuser = parser.add_mutually_exclusive_group() # --nvfuser is now the default, keep the option to not break scripts group_fuser.add_argument("--nvfuser", action="store_true", help=argparse.SUPPRESS)
2.41.0
c94652d7d3a465fd17125d4f07eb7ce33da0b4b
Wed, 17 Apr 2024 15:03:22 -0700
[PATCH 0297/1000] [benchmarks] Add --use-warm-peak-memory (#124326)
Measuring peak memory on the first run can capture cases where compiled artifacts leak into runtime, but it also introduces a lot of noise from cudnn/triton autotuning which generally uses as much memory as it can. Setting this flag as a default will need some discussion, so I will only add it to unblock compiled backward benchmarking (where all autotuning memory use is exposed) ``` e.g. resnet50 # without --warm-peak-memory memory: eager: 1.95 GB, dynamo: 6.68 GB, ratio: 0.29 # with --warm-peak-memory memory: eager: 1.96 GB, dynamo: 2.06 GB, ratio: 0.95 ``` ![image](https://github.com/pytorch/pytorch/assets/9547562/36cd8687-a7f7-4ec6-b989-7e1263aa7d37) This issue may also affect large models. Here's an example case of cudnn_convolution_backward autotuning allocating 30GB to tune a model otherwise using 5GB memory: ![image](https://github.com/pytorch/pytorch/assets/9547562/4e544b11-3579-4c69-811a-91d896f1ba66) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124326 Approved by: https://github.com/jansel ghstack dependencies: #119411
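A sketch of the measurement idea behind the flag: run a warm-up first, reset the peak-memory counter, and only then measure, so one-off autotuning allocations do not dominate the reported peak. This is a standalone illustration, not the harness code.

```python
import torch

def warm_peak_memory_gb(fn, *args, warmup_iters=1):
    # Warm-up iterations absorb one-time costs such as autotuning allocations.
    for _ in range(warmup_iters):
        fn(*args)
    torch.cuda.synchronize()
    torch.cuda.reset_peak_memory_stats()

    # Peak memory of a single warm iteration.
    fn(*args)
    torch.cuda.synchronize()
    return torch.cuda.max_memory_allocated() / 1e9
```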
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py index 9908b44508..9e946df174 100644 --- a/benchmarks/dynamo/common.py +++ b/benchmarks/dynamo/common.py @@ -2724,6 +2724,10 @@ class BenchmarkRunner: eager_latency, eager_peak_mem, _ = warmup( self.model_iter_fn, model, example_inputs, "eager" ) + if self.args.use_warm_peak_memory: + _, eager_peak_mem, _ = warmup( + self.model_iter_fn, model, example_inputs, "eager", niters=1 + ) if self.args.export_aot_inductor: t_0 = time.perf_counter() @@ -2742,6 +2746,14 @@ class BenchmarkRunner: dynamo_latency, dynamo_peak_mem, dynamo_stats = warmup( optimized_model_iter_fn, model, example_inputs, "dynamo" ) + if self.args.use_warm_peak_memory: + _, dynamo_peak_mem, _ = warmup( + optimized_model_iter_fn, + model, + example_inputs, + "dynamo", + niters=1, + ) if self.args.profile_dynamo_cache_lookup: with torch.profiler.profile( @@ -3174,6 +3186,12 @@ def parse_args(args=None): action="store_true", help="print graph counter stats", ) + parser.add_argument( + "--use-warm-peak-memory", + "--use_warm_peak_memory", + action="store_true", + help="Measure peak memory using a warm run to reduce autotuning noise", + ) parser.add_argument( "--print-memory", action="store_true",
2.41.0
12bae09bec34a724bf03c37a73d466c4a9d7e38
Wed, 17 Apr 2024 11:55:22 -0700
[PATCH 0298/1000] [dynamo] fix 3.11+ refleak (#124238)
Fixes https://github.com/pytorch/pytorch/issues/119607 for 3.11+. In 3.11+, `_PyFrame_FastToLocalsWithError` could implicitly run `COPY_FREE_VARS` on the original frame, leading to double increfs since the dynamo shadow frame can rerun `COPY_FREE_VARS`. So the solution is to skip the first `COPY_FREE_VARS` instruction in the shadow frame if it was already executed in the original frame. Also move the location for clearing the original frame in 3.12 to handle error cases more thoroughly. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124238 Approved by: https://github.com/jansel
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py index 8ee9037e4c..36e5a32a6f 100644 --- a/test/dynamo/test_misc.py +++ b/test/dynamo/test_misc.py @@ -44,7 +44,6 @@ from torch._dynamo.testing import ( same, skipIfNotPy311, unsupported, - xfailIfPy311, ) from torch._dynamo.utils import CompileProfiler, counters, ifdynstaticdefault from torch._inductor.utils import run_and_get_code @@ -10100,7 +10099,6 @@ fn lambda mod: mod.fc, ) - @xfailIfPy311 def test_sequential_module_free(self): self._test_compile_model_free( lambda: ( @@ -10113,14 +10111,12 @@ fn lambda mod: mod[0], ) - @xfailIfPy311 def test_linear_module_free(self): self._test_compile_model_free( lambda: (torch.nn.Linear(100, 100), torch.randn(100, 100)), lambda mod: mod, ) - @xfailIfPy311 def test_outside_linear_module_free(self): # Compared to test_linear_module_free, the linear # layer is not the code object that is directly compiled. diff --git a/torch/_dynamo/testing.py b/torch/_dynamo/testing.py index e621b9abaa..3b1b725fca 100644 --- a/torch/_dynamo/testing.py +++ b/torch/_dynamo/testing.py @@ -341,12 +341,6 @@ def skipIfNotPy311(fn): return unittest.skip(fn) -def xfailIfPy311(fn): - if sys.version_info >= (3, 11): - return unittest.expectedFailure(fn) - return fn - - # Controls tests generated in test/inductor/test_torchinductor_dynamic_shapes.py # and test/dynamo/test_dynamic_shapes.py def expectedFailureDynamic(fn): diff --git a/torch/csrc/dynamo/cpython_defs.c b/torch/csrc/dynamo/cpython_defs.c index b3dde42439..bf710b9ff7 100644 --- a/torch/csrc/dynamo/cpython_defs.c +++ b/torch/csrc/dynamo/cpython_defs.c @@ -68,8 +68,10 @@ THP_PyFrame_OpAlreadyRan(_PyInterpreterFrame *frame, int opcode, int oparg) // https://github.com/python/cpython/blob/0325a8a8cdba6c091bcbbb3c995f3bf1d1217012/Objects/frameobject.c#L1136 // Initialize frame free variables if needed +// free_vars_copied argument added in order to let caller know that the COPY_FREE_VARS +// codepath occurred. 
static void -frame_init_get_vars(_PyInterpreterFrame *frame) +frame_init_get_vars(_PyInterpreterFrame *frame, int *free_vars_copied) { // COPY_FREE_VARS has no quickened forms, so no need to use _PyOpcode_Deopt // here: @@ -91,6 +93,8 @@ frame_init_get_vars(_PyInterpreterFrame *frame) } // COPY_FREE_VARS doesn't have inline CACHEs, either: frame->prev_instr = _PyCode_CODE(frame->f_code); + + *free_vars_copied = 1; } // https://github.com/python/cpython/blob/0325a8a8cdba6c091bcbbb3c995f3bf1d1217012/Objects/frameobject.c#L1162 @@ -146,7 +150,7 @@ frame_get_var(_PyInterpreterFrame *frame, PyCodeObject *co, int i, // https://github.com/python/cpython/blob/0325a8a8cdba6c091bcbbb3c995f3bf1d1217012/Objects/frameobject.c#L1213 static PyObject * -THP_PyFrame_GetLocals(_PyInterpreterFrame *frame, int include_hidden) +THP_PyFrame_GetLocals(_PyInterpreterFrame *frame, int include_hidden, int *free_vars_copied) { /* Merge fast locals into f->f_locals */ PyObject *locals = frame->f_locals; @@ -169,7 +173,7 @@ THP_PyFrame_GetLocals(_PyInterpreterFrame *frame, int include_hidden) } } - frame_init_get_vars(frame); + frame_init_get_vars(frame, free_vars_copied); PyCodeObject *co = frame->f_code; for (int i = 0; i < co->co_nlocalsplus; i++) { @@ -234,9 +238,9 @@ THP_PyFrame_GetLocals(_PyInterpreterFrame *frame, int include_hidden) // https://github.com/python/cpython/blob/0325a8a8cdba6c091bcbbb3c995f3bf1d1217012/Objects/frameobject.c#L1301 int -THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame *frame) +THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame *frame, int *free_vars_copied) { - PyObject *locals = THP_PyFrame_GetLocals(frame, 0); + PyObject *locals = THP_PyFrame_GetLocals(frame, 0, free_vars_copied); if (locals == NULL) { return -1; } @@ -247,8 +251,10 @@ THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame *frame) #else // https://github.com/python/cpython/blob/a7715ccfba5b86ab09f86ec56ac3755c93b46b48/Objects/frameobject.c#L1182 +// free_vars_copied argument added in order to let caller know that the COPY_FREE_VARS +// codepath occurred. 
int -THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame *frame) { +THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame *frame, int *free_vars_copied) { /* Merge fast locals into f->f_locals */ PyObject *locals = NULL; PyObject **fast = NULL; @@ -267,13 +273,8 @@ THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame *frame) { if (lasti < 0 && _Py_OPCODE(_PyCode_CODE(co)[0]) == COPY_FREE_VARS) { /* Free vars have not been initialized -- Do that */ PyCodeObject *co = frame->f_code; - #if IS_PYTHON_3_12_PLUS - PyObject *closure = ((PyFunctionObject *)frame->f_funcobj)->func_closure; - int offset = co->co_nlocals + co->co_ncellvars; - #else PyObject *closure = frame->f_func->func_closure; int offset = co->co_nlocals + co->co_nplaincellvars; - #endif for (int i = 0; i < co->co_nfreevars; ++i) { PyObject *o = PyTuple_GET_ITEM(closure, i); Py_INCREF(o); @@ -281,6 +282,8 @@ THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame *frame) { } // COPY_FREE_VARS doesn't have inline CACHEs, either: frame->prev_instr = _PyCode_CODE(frame->f_code); + + *free_vars_copied = 1; } for (int i = 0; i < co->co_nlocalsplus; i++) { _PyLocals_Kind kind = _PyLocals_GetKind(co->co_localspluskinds, i); diff --git a/torch/csrc/dynamo/cpython_defs.h b/torch/csrc/dynamo/cpython_defs.h index bb1c7e2c8c..b762f87d69 100644 --- a/torch/csrc/dynamo/cpython_defs.h +++ b/torch/csrc/dynamo/cpython_defs.h @@ -10,7 +10,9 @@ #include <internal/pycore_frame.h> -int THP_PyFrame_FastToLocalsWithError(_PyInterpreterFrame* frame); +int THP_PyFrame_FastToLocalsWithError( + _PyInterpreterFrame* frame, + int* free_vars_copied); PyFunctionObject* _PyFunction_CopyWithNewCode( PyFunctionObject* o, diff --git a/torch/csrc/dynamo/eval_frame.c b/torch/csrc/dynamo/eval_frame.c index 72817d4fad..b6a26f635e 100644 --- a/torch/csrc/dynamo/eval_frame.c +++ b/torch/csrc/dynamo/eval_frame.c @@ -132,7 +132,10 @@ THPPyInterpreterFrame* THPPyInterpreterFrame_New(_PyInterpreterFrame* frame) { #else #define THP_EVAL_API_FRAME_OBJECT PyFrameObject -#define THP_PyFrame_FastToLocalsWithError PyFrame_FastToLocalsWithError +static int +THP_PyFrame_FastToLocalsWithError(THP_EVAL_API_FRAME_OBJECT *frame, int *free_vars_copied) { + return PyFrame_FastToLocalsWithError(frame); +} #endif PyObject* guard_error_hook = NULL; @@ -161,7 +164,8 @@ static PyObject* _custom_eval_frame( PyThreadState* tstate, THP_EVAL_API_FRAME_OBJECT* frame, int throw_flag, - PyObject* callback); + PyObject* callback, + int* should_clear_frame); static PyObject *(*previous_eval_frame)(PyThreadState *tstate, THP_EVAL_API_FRAME_OBJECT* frame, int throw_flag) = NULL; @@ -283,7 +287,8 @@ inline static PyObject* eval_custom_code_impl( PyThreadState* tstate, THP_EVAL_API_FRAME_OBJECT* frame, PyCodeObject* code, - int throw_flag) { + int throw_flag, + int free_vars_copied) { DEBUG_NULL_CHECK(tstate); DEBUG_NULL_CHECK(frame); @@ -333,6 +338,13 @@ inline static PyObject* eval_custom_code_impl( } #endif + // for 3.11+, if free_vars_copied is true, we do not need to + // run the first COPY_FREE_VARS since THP_PyFrame_FastToLocalsWithError + // already did the equivalent action. 
+ if (free_vars_copied && _Py_OPCODE(_PyCode_CODE(shadow->f_code)[0]) == COPY_FREE_VARS) { + shadow->prev_instr = _PyCode_CODE(shadow->f_code); + } + #else THP_EVAL_API_FRAME_OBJECT* shadow = PyFrame_New(tstate, code, frame->f_globals, NULL); @@ -428,14 +440,16 @@ inline static PyObject* eval_custom_code_impl( fastlocals_new[j] = fastlocals_old[i]; } + // NOTE: if you want to evaluate frame instead of shadow in 3.12+, + // you need to clear_old_frame_if_python_312_plus the shadow frame BEFORE + // calling eval_frame_default (i.e. here) and comment out the + // clear_old_frame_if_python_312_plus call on the original frame. + PyObject* result = eval_frame_default(tstate, shadow, throw_flag); #if IS_PYTHON_3_12_PLUS - // In 3.12, the frame evaluation function is responsible for - // clearing and popping the frame, so we manually do that on the - // old frame. - clear_old_frame_if_python_312_plus(tstate, frame); + // frame is cleared by caller Py_DECREF(func); #elif IS_PYTHON_3_11_PLUS @@ -460,13 +474,15 @@ inline static PyObject* eval_custom_code( PyThreadState* tstate, THP_EVAL_API_FRAME_OBJECT* frame, PyCodeObject* code, - int throw_flag) { + int throw_flag, + int free_vars_copied) { _PytorchRecordFunctionState* rf = _pytorch_record_function_enter("Torch-Compiled Region"); PyObject* result = eval_custom_code_impl( tstate, frame, code, - throw_flag + throw_flag, + free_vars_copied ); _pytorch_record_function_exit(rf); return result; @@ -487,18 +503,25 @@ static PyObject* _custom_eval_frame_shim( return eval_frame_default(tstate, frame, throw_flag); } - return _custom_eval_frame(tstate, frame, throw_flag, callback); + int should_clear_frame = 0; + PyObject* result = _custom_eval_frame(tstate, frame, throw_flag, callback, &should_clear_frame); + if (should_clear_frame) { + clear_old_frame_if_python_312_plus(tstate, frame); + } + return result; } -// NOTE: In 3.12+, any return NULL; statements must be preceded by -// clear_old_frame_if_python_312_plus(tstate, frame); since the eval frame function -// is now responsible for clearing/popping the frame. -// eval_frame_default/eval_custom_code will clear/pop the frame. +// NOTE: In 3.12+, the frame evaluation function (callee) is responsible for clearing/popping +// the frame, meaning that unless we default evaluate the original frame, +// we are responsible for clearing it - via clear_old_frame_if_python_312_plus. +// The should_clear_frame flag is used to indicate whether the frame should be +// cleared by _custom_eval_frame's caller. 
static PyObject* _custom_eval_frame( PyThreadState* tstate, THP_EVAL_API_FRAME_OBJECT* frame, int throw_flag, - PyObject* callback) { + PyObject* callback, + int* should_clear_frame) { #if IS_PYTHON_3_11_PLUS DEBUG_TRACE( "begin %s %s %i %i", @@ -552,9 +575,10 @@ static PyObject* _custom_eval_frame( } // TODO(jansel): investigate directly using the "fast" representation - if (THP_PyFrame_FastToLocalsWithError(frame) < 0) { + int free_vars_copied = 0; + if (THP_PyFrame_FastToLocalsWithError(frame, &free_vars_copied) < 0) { DEBUG_TRACE("error %s", get_frame_name(frame)); - clear_old_frame_if_python_312_plus(tstate, frame); + *should_clear_frame = 1; return NULL; } @@ -570,7 +594,7 @@ static PyObject* _custom_eval_frame( if (maybe_cached_code == NULL) { // guard eval failed, keep propagating - clear_old_frame_if_python_312_plus(tstate, frame); + *should_clear_frame = 1; return NULL; } else if (maybe_cached_code == Py_None) { DEBUG_TRACE("cache miss %s", get_frame_name(frame)); @@ -579,7 +603,8 @@ static PyObject* _custom_eval_frame( PyCodeObject* cached_code = (PyCodeObject*)maybe_cached_code; // used cached version DEBUG_TRACE("cache hit %s", get_frame_name(frame)); - return eval_custom_code(tstate, frame, cached_code, throw_flag); + *should_clear_frame = 1; + return eval_custom_code(tstate, frame, cached_code, throw_flag, free_vars_copied); } DEBUG_CHECK(PyDict_CheckExact(frame->f_locals)); DEBUG_CHECK(PyDict_CheckExact(frame->f_globals)); @@ -595,7 +620,7 @@ static PyObject* _custom_eval_frame( _pytorch_record_function_exit(rf); if (maybe_cached_code == NULL) { // Python error - clear_old_frame_if_python_312_plus(tstate, frame); + *should_clear_frame = 1; return NULL; } else if (maybe_cached_code != Py_None) { PyCodeObject* cached_code = (PyCodeObject*)maybe_cached_code; @@ -603,7 +628,8 @@ static PyObject* _custom_eval_frame( DEBUG_TRACE("cache hit %s", get_frame_name(frame)); // Re-enable custom behavior eval_frame_callback_set(callback); - return eval_custom_code(tstate, frame, cached_code, throw_flag); + *should_clear_frame = 1; + return eval_custom_code(tstate, frame, cached_code, throw_flag, free_vars_copied); } // cache miss CacheEntry* cache_entry = extract_cache_entry(extra); @@ -618,7 +644,7 @@ static PyObject* _custom_eval_frame( // cascading failure from internal exceptions. The upshot is if // Dynamo barfs, that's it for Dynamo, even if you catch the exception // inside the torch.compile block we won't try to Dynamo anything else. - clear_old_frame_if_python_312_plus(tstate, frame); + *should_clear_frame = 1; return NULL; } else if (result != Py_None) { DEBUG_TRACE("create cache %s", get_frame_name(frame)); @@ -636,7 +662,8 @@ static PyObject* _custom_eval_frame( // will be cleaned up when set_extra_state is called. // Re-enable custom behavior eval_frame_callback_set(callback); - return eval_custom_code(tstate, frame, CacheEntry_get_code(new_cache_entry), throw_flag); + *should_clear_frame = 1; + return eval_custom_code(tstate, frame, CacheEntry_get_code(new_cache_entry), throw_flag, free_vars_copied); } else { DEBUG_TRACE("create skip %s", get_frame_name(frame)); Py_DECREF(result);
2.41.0
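The free_vars_copied plumbing in the patch above only comes into play for frames whose code object begins with COPY_FREE_VARS, i.e. functions that close over free variables (CPython 3.11+). Below is a minimal sketch of such a closure under torch.compile; the function names and the eager backend choice are illustrative, not taken from the patch.

import torch

def make_scaler(scale):
    @torch.compile(backend="eager")
    def scaled_sin(x):
        # "scale" is a free variable captured from the enclosing frame,
        # so this code object starts with COPY_FREE_VARS on 3.11+.
        return torch.sin(x) * scale
    return scaled_sin

fn = make_scaler(2.0)
print(fn(torch.randn(4)))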
fed2e826beef1498aecd56ee2260046fabd08ab
Thu, 18 Apr 2024 03:29:16 +0000
[PATCH 0300/1000] [DTensor][Test] Add unit tests to keep track of DTensor sharding for 2D (#123687)
Fixes #ISSUE_NUMBER Pull Request resolved: https://github.com/pytorch/pytorch/pull/123687 Approved by: https://github.com/wanchaol
diff --git a/test/distributed/_tensor/test_utils.py b/test/distributed/_tensor/test_utils.py index a19e6e4753..7ba49ae520 100644 --- a/test/distributed/_tensor/test_utils.py +++ b/test/distributed/_tensor/test_utils.py @@ -3,13 +3,15 @@ import itertools import torch -from torch.distributed._tensor import distribute_tensor +from torch.distributed._tensor import distribute_tensor, DTensor from torch.distributed._tensor._utils import ( compute_local_shape, compute_local_shape_and_global_offset, ) + +from torch.distributed._tensor.debug import CommDebugMode from torch.distributed._tensor.placement_types import Replicate, Shard -from torch.distributed.device_mesh import DeviceMesh +from torch.distributed.device_mesh import DeviceMesh, init_device_mesh from torch.testing._internal.common_utils import run_tests from torch.testing._internal.distributed._tensor.common_dtensor import ( @@ -17,6 +19,8 @@ from torch.testing._internal.distributed._tensor.common_dtensor import ( with_comms, ) +c10d_functional = torch.ops.c10d_functional + class UtilTest(DTensorTestBase): @property @@ -119,5 +123,84 @@ class UtilTest(DTensorTestBase): ) +class Test2DStridedLocalShard(DTensorTestBase): + @property + def world_size(self): + return 4 + + @with_comms + def test_fsdp1_tp_2d_dtensor_local_shards_and_offsets(self): + # We are mimicking the behavior of FSDP1 + TP. + # Currently, the 2D DTensor's local shard is correct, since from_local + redistribute incurs a all_gather behind the scene. + # When we have a global_tensor of [0, 1, 2, 3, 4, 5, 6, 7], the local shard of 2D DTensor would be: + # rank0: [0, 1], rank1: [2, 3], rank2: [4, 5], rank3: [6, 7] + with CommDebugMode() as comm_mode: + global_tensor = torch.arange(8).view(4, 2) + mesh_2d = init_device_mesh( + self.device_type, (2, 2), mesh_dim_names=("DP", "TP") + ) + tp_mesh = mesh_2d["TP"] + dtensor_tp = distribute_tensor( + global_tensor, tp_mesh, placements=[Shard(0)] + ) + dtensor_2d = DTensor.from_local( + dtensor_tp.to_local(), mesh_2d, [Replicate(), Shard(0)] + ).redistribute(mesh_2d, [Shard(0), Shard(0)]) + self.assertEqual(len(comm_mode.get_comm_counts()), 1) + self.assertEqual( + comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1 + ) + + self.assertEqual( + dtensor_2d.to_local(), global_tensor[self.rank : self.rank + 1] + ) + # compute_local_shape_and_global_offset currently does take into consideration of strided sharding, + # which should after strided sharding is added. + local_size, global_offset = compute_local_shape_and_global_offset( + global_tensor.shape, mesh_2d, [Shard(0), Shard(0)] + ) + self.assertEqual(local_size, torch.Size([1, 2])) + self.assertEqual(global_offset, torch.Size([self.rank, 0])) + + @with_comms + def test_fsdp2_tp_2d_dtensor_local_shards_and_offsets(self): + # We are mimicking the behavior of FSDP2 + TP. + # Currently, the 2D DTensor's local shard is incorrect for resharding, since we want to avoid extra communication. + # It's incorrect for resharding, since `compute_local_shape_and_global_offset` + # doesn't know the correct offsets for resharding. 
+ # When we have a global_tensor of [0, 1, 2, 3, 4, 5, 6, 7], the local shard of 2D DTensor would be: + # local tensor -- rank0: [0, 1], rank1: [4, 5], rank2: [2, 3], rank3: [6, 7] + # current offsets -- rank0: [0, 0], rank1: [1, 0], rank2: [2, 0], rank3: [3, 0] + # Ideally, with strided sharding, the offsets should be rank0: [0, 0], rank1: [2, 0], rank2: [1, 0], rank3: [3, 0] + # TODO: to make the local shard of FSDP2 + TP correct for resharding, it would require strided_sharding + # as well as let compute_local_shape_and_global_offset takes into consideration of strided_sharding. + with CommDebugMode() as comm_mode: + global_tensor = torch.arange(8).view(4, 2) + mesh_2d = init_device_mesh( + self.device_type, (2, 2), mesh_dim_names=("DP", "TP") + ) + tp_mesh = mesh_2d["TP"] + dtensor_tp = distribute_tensor( + global_tensor, tp_mesh, placements=[Shard(0)] + ) + chunks = list(torch.chunk(dtensor_tp.to_local(), 2, dim=0)) + shard_rank = 0 if self.rank // 2 == 0 else 1 + sharded_param = chunks[shard_rank] + dtensor_2d = DTensor( + sharded_param, + mesh_2d, + [Shard(0), Shard(0)], + shape=global_tensor.size(), + dtype=global_tensor.dtype, + requires_grad=False, + stride=global_tensor.stride(), + ) + + self.assertEqual(len(comm_mode.get_comm_counts()), 0) + self.assertEqual( + comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 0 + ) + + if __name__ == "__main__": run_tests()
2.41.0
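A rough sketch of the 2D (DP x TP) layout the tests above exercise. It assumes 4 ranks launched with torchrun (e.g. torchrun --nproc-per-node=4 script.py) and uses a CPU/gloo setup purely for illustration; the mesh dimension names mirror the test, everything else is an assumption, not code from the patch.

import torch
import torch.distributed as dist
from torch.distributed._tensor import distribute_tensor
from torch.distributed._tensor.placement_types import Shard
from torch.distributed.device_mesh import init_device_mesh

dist.init_process_group("gloo")
mesh_2d = init_device_mesh("cpu", (2, 2), mesh_dim_names=("DP", "TP"))

# 4x2 global tensor, dim 0 sharded across both mesh dims: each of the
# 4 ranks ends up holding a single 1x2 row as its local shard.
global_tensor = torch.arange(8, dtype=torch.float32).view(4, 2)
dtensor_2d = distribute_tensor(global_tensor, mesh_2d, placements=[Shard(0), Shard(0)])
print(dist.get_rank(), dtensor_2d.to_local())

dist.destroy_process_group()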
213f262af77d4e2b8fc2333cb4391996296eca4
Wed, 17 Apr 2024 10:09:27 -0700
[PATCH 0301/1000] [dynamo][cpp-guards] Improve when to use Dict vs DictSubclassGuardManager (#124237)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124237 Approved by: https://github.com/jansel, https://github.com/mlazos ghstack dependencies: #124230
diff --git a/test/dynamo/test_guard_manager.py b/test/dynamo/test_guard_manager.py index 7f161ec008..88cb5e5968 100644 --- a/test/dynamo/test_guard_manager.py +++ b/test/dynamo/test_guard_manager.py @@ -11,6 +11,7 @@ from torch.testing._internal.common_utils import set_default_dtype RootGuardManager = guards.RootGuardManager DictGuardManager = guards.DictGuardManager +DictSubclassGuardManager = guards.DictSubclassGuardManager GetAttrGuardAccessor = guards.GetAttrGuardAccessor GetItemGuardAccessor = guards.GetItemGuardAccessor TypeGuardAccessor = guards.TypeGuardAccessor @@ -687,6 +688,69 @@ num_guards_executed=0) # fails because of len check self.assertFalse(root.check(f_locals)) + def test_dict_guard_manager2(self): + root = RootGuardManager() + + f_locals = { + "d": {"a": 1, 100: torch.randn(4)}, + } + dict_mgr = root.getitem_manager("d", "", f_locals["d"]) + self.assertTrue(type(dict_mgr) is DictGuardManager) + self.assertTrue(root.check(f_locals)) + + # defaultdict + root = RootGuardManager() + from collections import defaultdict + + f_locals = {} + f_locals["d"] = defaultdict() + f_locals["d"]["a"] = 1 + f_locals["d"][100] = torch.randn(4) + dict_mgr = root.getitem_manager("d", "", f_locals["d"]) + self.assertTrue(type(dict_mgr) is DictGuardManager) + self.assertTrue(root.check(f_locals)) + + # ordereddict + root = RootGuardManager() + from collections import OrderedDict + + f_locals = {} + f_locals["d"] = OrderedDict() + f_locals["d"]["a"] = 1 + f_locals["d"][100] = torch.randn(4) + dict_mgr = root.getitem_manager("d", "", f_locals["d"]) + self.assertTrue(type(dict_mgr) is DictSubclassGuardManager) + self.assertTrue(root.check(f_locals)) + + # dict subclass - should be treated as a dict + root = RootGuardManager() + + class MyDict(dict): + pass + + f_locals = {} + f_locals["d"] = MyDict() + f_locals["d"]["a"] = 1 + f_locals["d"][100] = torch.randn(4) + dict_mgr = root.getitem_manager("d", "", f_locals["d"]) + self.assertTrue(type(dict_mgr) is DictGuardManager) + self.assertTrue(root.check(f_locals)) + + # dict subclass - with modified keys + root = RootGuardManager() + + class ReversedDict(dict): + def keys(self): + return [10, 100] + + f_locals = {} + f_locals["d"] = ReversedDict() + f_locals["d"][100] = torch.randn(4) + f_locals["d"][10] = torch.randn(4) + dict_mgr = root.getitem_manager("d", "", f_locals["d"]) + self.assertTrue(type(dict_mgr) is DictSubclassGuardManager) + self.assertTrue(root.check(f_locals)) + if __name__ == "__main__": from torch._dynamo.test_case import run_tests diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py index 8e5ddf58f0..c4c2c9dcbb 100644 --- a/test/dynamo/test_misc.py +++ b/test/dynamo/test_misc.py @@ -10432,6 +10432,78 @@ fn with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True): fn(torch.randn(4), d) + def test_defaultdict(self): + d = collections.defaultdict() + d["foo"] = 1 + d["bar"] = 2 + + @torch.compile(backend="eager") + def fn(x, d): + return x * d["foo"] * d["bar"] + + fn(torch.randn(4), d) + with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True): + fn(torch.randn(4), d) + + def test_custom_dict(self): + class MyDict(dict): + pass + + d = { + "foo": 1, + "bar": 2, + } + + d = MyDict(d) + + @torch.compile(backend="eager") + def fn(x, d): + return x * d["foo"] * d["bar"] + + fn(torch.randn(4), d) + with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True): + fn(torch.randn(4), d) + + def test_custom_iter_dict(self): + class ReversedDict(dict): + def __iter__(self): + return 
reversed(list(self.keys())) + + d = { + "foo": 1, + "bar": 2, + } + + d = ReversedDict(d) + + @torch.compile(backend="eager") + def fn(x, d): + return x * d["foo"] * d["bar"] + + fn(torch.randn(4), d) + with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True): + fn(torch.randn(4), d) + + def test_custom_keys_iter_dict(self): + class ReversedDict(dict): + def keys(self): + return ["bar", "foo"] + + d = { + "foo": 1, + "bar": 2, + } + + d = ReversedDict(d) + + @torch.compile(backend="eager") + def fn(x, d): + return x * d["foo"] * d["bar"] + + fn(torch.randn(4), d) + with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True): + fn(torch.randn(4), d) + class TestTracer(JitTestCase): def test_jit_save(self): diff --git a/torch/csrc/dynamo/guards.cpp b/torch/csrc/dynamo/guards.cpp index f1d8020fb3..ac8fddfbd3 100644 --- a/torch/csrc/dynamo/guards.cpp +++ b/torch/csrc/dynamo/guards.cpp @@ -2174,8 +2174,10 @@ class DictSubclassGuardManager : public DictGuardManager { // Points to the key index in the dict Py_ssize_t dict_pointer = 0; - // Use iter(obj) to iterate over the keys - PyObject* iterator = PyObject_GetIter(obj); // new reference + // Use iter(dict.keys()) to iterate over the keys + py::object keys = + py::handle(obj).attr("keys")(); // py::object handles the references + PyObject* iterator = PyObject_GetIter(keys.ptr()); // new reference PyObject* key = nullptr; while (index_pointer < _indices.size() && @@ -2236,8 +2238,10 @@ class DictSubclassGuardManager : public DictGuardManager { int num_guards_executed = 0; - // Use iter(obj) to iterate over the keys - PyObject* iterator = PyObject_GetIter(obj); // new reference + // Use iter(dict.keys()) to iterate over the keys + py::object keys = + py::handle(obj).attr("keys")(); // py::object handles the references + PyObject* iterator = PyObject_GetIter(keys.ptr()); // new reference PyObject* key = nullptr; while (index_pointer < _indices.size() && @@ -2280,13 +2284,46 @@ class DictSubclassGuardManager : public DictGuardManager { } }; +bool has_base_dict_keys_iter(py::handle& obj) { + // Implements `type(obj).keys is type(dict()).keys` + py::object obj_type = py::type::of(obj); + py::object dict_type = py::type::of(py::dict()); + + // Fetch keys for both types + py::object obj_keys = obj_type.attr("keys"); + py::object dict_keys = dict_type.attr("keys"); + + return obj_keys.ptr() == dict_keys.ptr(); +} + std::unique_ptr<GuardManager> make_guard_manager( RootGuardManager* root, std::string source, py::handle example_value) { - // Check if example_value is a dict if (py::isinstance<py::dict>(example_value)) { - if (PyDict_CheckExact(example_value.ptr())) { + // The purpose of having both DictGuardManager and DictSubclassGuardManager + // is to handle the variability in how dictionaries and their subclasses + // manage key ordering. + + // While inserting dictionary guards (check guards.py), we rely on the + // list(d.keys()) ordering. Therefore, the cpp guard equivalent must have + // the same keys ordering. For standard dictionaries, .keys() API internally + // uses PyDict_Next. So, DictGuardManager directly uses PyDict_Next to + // speedup the key fetches. + + // But PyDict_Next might not give correct ordering for subclasses of dict. + // For example, OrderedDict override the .keys() API without changing the + // underlying datastructure. This leads to different keys ordering than the + // one given by PyDict_Next. We use DictSubclassGuardManager to account for + // this discrepancy. 
DictSubclassGuardManager directly calls the .keys() API + // to accurately capture key ordering. This approach is less efficient than + // using PyDict_Next (handled by DictGuardManager), but it ensures + // correctness. + + // Since regular dicts are more common than subclasses of dicts with + // overridden keys method, we still optimize for the common case with + // DictGuardManager by relying on PyDict_Next. + if (has_base_dict_keys_iter(example_value)) { return std::make_unique<DictGuardManager>( root, std::move(source), example_value); }
2.41.0
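A pure-Python illustration (not part of the patch) of the discrepancy the guard-manager comment above describes: insertion-order iteration, which is what PyDict_Next sees, can disagree with an overridden keys().

# A dict subclass that overrides keys() without changing the underlying
# storage: insertion-order iteration (what PyDict_Next sees) and keys()
# (what the guards rely on) now disagree.
class ReversedKeysDict(dict):
    def keys(self):
        return list(reversed(list(super().keys())))

d = ReversedKeysDict()
d["foo"] = 1
d["bar"] = 2

print(list(iter(d)))   # ['foo', 'bar']  -- insertion order (PyDict_Next)
print(list(d.keys()))  # ['bar', 'foo']  -- order reported by keys()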
7daa110c8629398ebe6d6c3ccc1d9d692eb9872
Thu, 18 Apr 2024 03:33:51 +0000
[PATCH 0302/1000] Back out "Refresh OpOverloadPacket if a new OpOverload gets added (#123578)" (#124324)
Summary: Original commit changeset: 528276bc8a92 Original Phabricator Diff: D56057952 Differential Revision: D56271240 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124324 Approved by: https://github.com/davidberard98
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index 6e04e8890d..69a83eddde 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -2458,30 +2458,6 @@ Please use `add.register_fake` to add an fake impl.""", y = f(x) self.assertEqual(y, x.sin()) - @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") - def test_overloading(self): - called_f = 0 - called_f1 = 0 - - @torch.library.custom_op("_torch_testing::f", mutates_args=()) - def f(x: Tensor) -> Tensor: - nonlocal called_f - called_f += 1 - return x.clone() - - x = torch.randn(2, 3) - torch.ops._torch_testing.f(x) - self.assertEqual(called_f, 1) - - @torch.library.custom_op("_torch_testing::f.overload", mutates_args=()) - def f1(x: Tensor, y: Tensor) -> Tensor: - nonlocal called_f1 - called_f1 += 1 - return x.clone() - - torch.ops._torch_testing.f(x, x) - self.assertEqual(called_f1, 1) - def test_disallows_output_aliasing(self): @torch.library.custom_op("_torch_testing::f", mutates_args=()) def f(x: Tensor) -> Tensor: diff --git a/torch/_ops.py b/torch/_ops.py index 03ff25a688..9610662876 100644 --- a/torch/_ops.py +++ b/torch/_ops.py @@ -931,10 +931,8 @@ class _OpNamespace(types.ModuleType): # for overloads and raise an exception if there are more than one. namespace_name = self.name qualified_op_name = f"{namespace_name}::{op_name}" - op_module = self.__module__ + "." + namespace_name - try: - op, overload_names = _get_packet(qualified_op_name, op_module) + op, overload_names = torch._C._jit_get_operation(qualified_op_name) if op is None: raise AttributeError( f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'" @@ -946,6 +944,10 @@ class _OpNamespace(types.ModuleType): f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'" ) from e + # let the script frontend know that op is identical to the builtin op + # with qualified_op_name + torch.jit._builtins._register_builtin(op, qualified_op_name) + op.__module__ = self.__module__ + "." + namespace_name opoverloadpacket = OpOverloadPacket( qualified_op_name, op_name, op, overload_names ) @@ -957,22 +959,6 @@ class _OpNamespace(types.ModuleType): return opoverloadpacket -def _get_packet(qualname, op_module): - op, overload_names = torch._C._jit_get_operation(qualname) - if op is not None: - op.__module__ = op_module - # let the script frontend know that op is identical to the builtin op - # with qualified_op_name - torch.jit._builtins._register_builtin(op, qualname) - return op, overload_names - - -def _refresh_packet(packet): - op, overload_names = _get_packet(packet._qualified_op_name, packet._op.__module__) - packet._op = op - packet._overload_names = overload_names - - class _PyOpNamespace(_OpNamespace): def __init__(self, name, ops): super().__init__(name) diff --git a/torch/library.py b/torch/library.py index f4779d7b93..4255b7d4d7 100644 --- a/torch/library.py +++ b/torch/library.py @@ -109,18 +109,7 @@ class Library: if isinstance(tags, torch.Tag): tags = (tags,) result = self.m.define(schema, alias_analysis, tuple(tags)) - name = schema.split("(")[0] - qualname = self.ns + "::" + name - - # If the OpOverloadPacket exists already, then this means we're adding a - # new OpOverload for it. Refresh the packet to include the new OpOverload. - packet_name = name.split(".")[0] if "." 
in name else name - if hasattr(torch.ops, self.ns): - ns = getattr(torch.ops, self.ns) - if hasattr(ns, packet_name): - packet = getattr(ns, packet_name) - torch._ops._refresh_packet(packet) - + qualname = self.ns + "::" + schema.split("(")[0] self._op_defs.add(qualname) _defs.add(qualname) return result
2.41.0
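For context on the packet/overload resolution that _OpNamespace.__getattr__ performs in the patch above, here is a small sketch using a builtin aten op; the choice of op is illustrative only.

import torch

packet = torch.ops.aten.add            # OpOverloadPacket for "aten::add"
overload = torch.ops.aten.add.Tensor   # OpOverload for "aten::add.Tensor"

print(type(packet).__name__)    # OpOverloadPacket
print(type(overload).__name__)  # OpOverload
print(packet(torch.ones(2), torch.ones(2)))
print(overload(torch.ones(2), torch.ones(2)))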
9ab9248ce0391c233e7189c0ee71f2aca6a786c
Tue, 16 Apr 2024 19:35:36 -0700
[PATCH 0303/1000] [Inductor Intel GPU backend Upstream] Generalize device-bias code in (#124249)
Generalize device-bias code in triton_utils.py Pull Request resolved: https://github.com/pytorch/pytorch/pull/124249 Approved by: https://github.com/EikanWang, https://github.com/guangyey, https://github.com/jansel
diff --git a/torch/testing/_internal/triton_utils.py b/torch/testing/_internal/triton_utils.py index dd1ab9e6d6..301c3cd472 100644 --- a/torch/testing/_internal/triton_utils.py +++ b/torch/testing/_internal/triton_utils.py @@ -3,11 +3,11 @@ import unittest from torch.testing._internal.inductor_utils import HAS_CUDA - +from torch.utils._triton import has_triton requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda") -if HAS_CUDA: +if has_triton(): import triton from triton import language as tl
2.41.0
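A minimal sketch of the device-agnostic guard the change above switches to; only has_triton() comes from the patch, the surrounding prints are illustrative.

from torch.utils._triton import has_triton

if has_triton():
    import triton
    from triton import language as tl  # noqa: F401
    print("triton available:", triton.__version__)
else:
    print("triton not available; skipping triton-dependent code")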
e1c0d2497811bdb1855202747c7708154ab4d7e
Thu, 18 Apr 2024 04:17:38 +0000
[PATCH 0304/1000] [cublas] Keep explicit workspace creation to avoid OOM (#124250)
Summary: We explicitly set the cublas workspace even though CUDA 12.2+ fixed the issue where memory usage increased during graph capture. Original issue: https://github.com/pytorch/pytorch/pull/83461 This is because in CUDA 12.2+, cublas's use of cudaMallocAsync allocates memory dynamically (even if the allocations are cheap) outside PyTorch's CUDA caching allocator. It's possible that the CUDA caching allocator has used up all the memory, in which case cublas's cudaMallocAsync will return OOM. Test Plan: CI Differential Revision: D56226746 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124250 Approved by: https://github.com/houseroad, https://github.com/eqy
diff --git a/aten/src/ATen/cuda/CublasHandlePool.cpp b/aten/src/ATen/cuda/CublasHandlePool.cpp index ba10cc01c3..95d1ba2fb4 100644 --- a/aten/src/ATen/cuda/CublasHandlePool.cpp +++ b/aten/src/ATen/cuda/CublasHandlePool.cpp @@ -77,7 +77,7 @@ using CuBlasPoolType = DeviceThreadHandlePool<cublasHandle_t, createCublasHandle } // namespace void clearCublasWorkspaces() { - #if !defined(USE_ROCM) && defined(CUDA_VERSION) && CUDA_VERSION < 12020 + #if !defined(USE_ROCM) cublas_handle_stream_to_workspace().clear(); #endif } @@ -156,10 +156,14 @@ cublasHandle_t getCurrentCUDABlasHandle() { auto handle = myPoolWindow->reserve(device); auto stream = c10::cuda::getCurrentCUDAStream(); TORCH_CUDABLAS_CHECK(cublasSetStream(handle, stream)); -#if !defined(USE_ROCM) && defined(CUDA_VERSION) && CUDA_VERSION < 12020 - // cuBLAS should not need an explicitly allocated workspace after CUDA 12.2 - // to avoid increasing memory usage during graph captures +#if !defined(USE_ROCM) + // We explicitly set the cublas workspace even though CUDA 12.2+ fixed the + // issue where memory usage increased during graph capture. // original issue: https://github.com/pytorch/pytorch/pull/83461 + // This is because in CUDA 12.2+, the use of cudaMallocAsync in cublas + // will allocate memory dynamically (even if they're cheap) outside + // PyTorch's CUDA caching allocator. It's possible that CCA used up + // all the memory and cublas's cudaMallocAsync will return OOM cudaStream_t _stream = stream; auto key = std::make_tuple(static_cast<void *>(handle), static_cast<void *>(_stream)); auto workspace_it = cublas_handle_stream_to_workspace().find(key); @@ -167,8 +171,6 @@ cublasHandle_t getCurrentCUDABlasHandle() { workspace_it = cublas_handle_stream_to_workspace().insert(workspace_it, {key, getNewWorkspace()}); } TORCH_CUDABLAS_CHECK(cublasSetWorkspace(handle, workspace_it->second.get(), getChosenWorkspaceSize())); -#endif -#if !defined(USE_ROCM) // On CUDA >= 11, and architecture >= Ampere, cuBLAS can use TF32 to speedup // FP32 data type calculations based on the value of the allow_tf32 flag. // To enable TF32, set the math mode of the handle to CUBLAS_TF32_TENSOR_OP_MATH. @@ -177,8 +179,7 @@ cublasHandle_t getCurrentCUDABlasHandle() { } else { TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); } -#endif -#if defined(USE_ROCM) +#else hipblasAtomicsMode_t hipblas_mode; if (at::globalContext().deterministicAlgorithms()) { hipblas_mode = HIPBLAS_ATOMICS_NOT_ALLOWED;
2.41.0
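A rough way to observe that the explicitly created workspace lives inside PyTorch's caching allocator rather than being allocated behind its back: reserved memory grows after the first matmul, which accounts for both the output tensor and the per-(handle, stream) workspace. This sketch assumes a CUDA build and an attached GPU; exact numbers depend on the configured workspace size.

import torch

if torch.cuda.is_available():
    a = torch.randn(1024, 1024, device="cuda")
    b = torch.randn(1024, 1024, device="cuda")
    before = torch.cuda.memory_reserved()
    torch.mm(a, b)          # first GEMM on this (handle, stream) creates the workspace
    torch.cuda.synchronize()
    after = torch.cuda.memory_reserved()
    print(f"reserved bytes before: {before}, after first matmul: {after}")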
ad66e05d2c7618e0fb752455cdc6144aeb4c3d4
Thu, 18 Apr 2024 04:19:37 +0000
[PATCH 0305/1000] [4/x][AMD][Lowering Enablement] Enabling meta internal AOTInductor compilation on ROCM (#124123)
Summary: as title Test Plan: CI & unit test Differential Revision: D56163334 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124123 Approved by: https://github.com/chenyang78, https://github.com/jansel
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index d4f2805773..4e3c24cf96 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -962,7 +962,7 @@ class CompiledFxGraph: def cpp_compiler() -> str: if config.is_fbcode(): - return build_paths.cc() + return build_paths.cc() if torch.version.hip is None else build_paths.clang() if isinstance(config.cpp.cxx, (list, tuple)): search = tuple(config.cpp.cxx) else: @@ -1373,18 +1373,23 @@ def homebrew_libomp() -> Tuple[bool, str]: return False, "" -def get_include_and_linking_paths( - include_pytorch: bool = False, - vec_isa: VecISA = invalid_vec_isa, - cuda: bool = False, - aot_mode: bool = False, -) -> Tuple[List[str], str, str, str, str]: +def _set_gpu_runtime_env() -> None: if ( config.is_fbcode() + and torch.version.hip is None and "CUDA_HOME" not in os.environ and "CUDA_PATH" not in os.environ ): os.environ["CUDA_HOME"] = os.path.dirname(build_paths.cuda()) + + +def get_include_and_linking_paths( + include_pytorch: bool = False, + vec_isa: VecISA = invalid_vec_isa, + cuda: bool = False, + aot_mode: bool = False, +) -> Tuple[List[str], str, str, str, str]: + _set_gpu_runtime_env() from torch.utils import cpp_extension macros = vec_isa.build_macro() if vec_isa != invalid_vec_isa else "" @@ -1416,7 +1421,7 @@ def get_include_and_linking_paths( libs += ["omp"] if aot_mode: ipaths += [os.path.dirname(cpp_prefix_path())] - if cuda: + if cuda and torch.version.hip is None: # This is a special treatment for Meta internal cuda-12 where all libs # are in lib/cuda-12 and lib/cuda-12/stubs for i, path in enumerate(lpaths): @@ -1447,7 +1452,10 @@ def get_include_and_linking_paths( if cuda: if torch.version.hip is not None: - libs += ["c10_hip", "torch_hip"] + if config.is_fbcode(): + libs += ["amdhip64"] + else: + libs += ["c10_hip", "torch_hip"] macros += " -D __HIP_PLATFORM_AMD__" else: if config.is_fbcode(): @@ -1513,16 +1521,27 @@ def get_include_and_linking_paths( # third party libs if config.is_fbcode(): - ipaths.append(build_paths.sleef()) + # Note that the order of include paths do matter, as a result + # we need to have several branches interleaved here + if torch.version.hip is None: + ipaths.append(build_paths.sleef()) ipaths.append(build_paths.openmp()) ipaths.append(build_paths.python()) - ipaths.append(build_paths.cc_include()) - ipaths.append(build_paths.libgcc()) - ipaths.append(build_paths.libgcc_arch()) + if torch.version.hip is not None: + ipaths.append(build_paths.clang_include()) + ipaths.append(build_paths.gcc_include()) + ipaths.append(build_paths.gcc_install_tools_include()) + else: + ipaths.append(build_paths.cc_include()) + ipaths.append(build_paths.libgcc()) + ipaths.append(build_paths.libgcc_arch()) ipaths.append(build_paths.libgcc_backward()) ipaths.append(build_paths.glibc()) ipaths.append(build_paths.linux_kernel()) - ipaths.append(build_paths.cuda()) + if torch.version.hip is not None: + ipaths.append(build_paths.rocm()) + else: + ipaths.append(build_paths.cuda()) # We also need to bundle includes with absolute paths into a remote directory # (later on, we copy the include paths from cpp_extensions into our remote dir) ipaths.append("include") @@ -1530,7 +1549,8 @@ def get_include_and_linking_paths( static_link_libs = [] if aot_mode and cuda and config.is_fbcode(): # For Meta internal cuda-12, it is recommended to static link cudart - static_link_libs = ["-Wl,-Bstatic", "-lcudart_static", "-Wl,-Bdynamic"] + if torch.version.hip is None: + static_link_libs = ["-Wl,-Bstatic", 
"-lcudart_static", "-Wl,-Bdynamic"] lpaths_str = " ".join(["-L" + p for p in lpaths]) libs_str = " ".join(static_link_libs + ["-l" + p for p in libs])
2.41.0
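The branch condition used throughout the patch above is torch.version.hip, which is None on CUDA/CPU builds and a version string on ROCm builds. A toy sketch of keying toolchain choices off it; the hint strings are illustrative, not the actual fbcode paths.

import torch

if torch.version.hip is not None:
    toolchain_hint = "clang / ROCm libraries"
elif torch.version.cuda is not None:
    toolchain_hint = "default C++ compiler / CUDA libraries"
else:
    toolchain_hint = "default C++ compiler, CPU only"

print("torch.version.hip =", torch.version.hip)
print("toolchain hint:", toolchain_hint)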
ff85b42f9c6a2bf3b7ae597a0abb9fd56204a98
Thu, 18 Apr 2024 06:12:54 +0000
[PATCH 0307/1000] Revert "Add swap_tensors path to nn parametrizations (#124130)"
This reverts commit 64f6ddf12c11738c3f4b1ed01cf4f699541496bf. Reverted https://github.com/pytorch/pytorch/pull/124130 on behalf of https://github.com/DanilBaibak due to Broken trunk ([comment](https://github.com/pytorch/pytorch/pull/124130#issuecomment-2063074856))
diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_deepcopy_after_parametrization_swap_False b/test/dynamo_expected_failures/TestNNParametrization.test_deepcopy_after_parametrization similarity index 100% rename from test/dynamo_expected_failures/TestNNParametrization.test_deepcopy_after_parametrization_swap_False rename to test/dynamo_expected_failures/TestNNParametrization.test_deepcopy_after_parametrization diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_deepcopy_after_parametrization_swap_True b/test/dynamo_expected_failures/TestNNParametrization.test_errors_unparametrized_tensor_parametrization similarity index 100% rename from test/dynamo_expected_failures/TestNNParametrization.test_deepcopy_after_parametrization_swap_True rename to test/dynamo_expected_failures/TestNNParametrization.test_errors_unparametrized_tensor_parametrization diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_errors_unparametrized_tensor_parametrization_swap_False b/test/dynamo_expected_failures/TestNNParametrization.test_errors_unparametrized_tensor_parametrization_swap_False deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_errors_unparametrized_tensor_parametrization_swap_True b/test/dynamo_expected_failures/TestNNParametrization.test_errors_unparametrized_tensor_parametrization_swap_True deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_initialization_parametrization_swap_True b/test/dynamo_expected_failures/TestNNParametrization.test_initialization_parametrization_swap_True deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_register_and_remove_buffer_parametrization_swap_True b/test/dynamo_expected_failures/TestNNParametrization.test_register_and_remove_buffer_parametrization_swap_True deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_register_and_remove_nested_parametrization_swap_True b/test/dynamo_expected_failures/TestNNParametrization.test_register_and_remove_nested_parametrization_swap_True deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_register_and_remove_parametrization_swap_True b/test/dynamo_expected_failures/TestNNParametrization.test_register_and_remove_parametrization_swap_True deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_serialization_parametrization_swap_True b/test/dynamo_expected_failures/TestNNParametrization.test_serialization_parametrization_swap_True deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_transfer_parametrizations_and_params_right_inverse_swap_True b/test/dynamo_expected_failures/TestNNParametrization.test_transfer_parametrizations_and_params_right_inverse_swap_True deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_transfer_parametrizations_and_params_swap_True b/test/dynamo_expected_failures/TestNNParametrization.test_transfer_parametrizations_and_params_swap_True deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/test/dynamo_expected_failures/TestNNParametrization.test_wrapper_subclass_parametrization_swap_True b/test/dynamo_expected_failures/TestNNParametrization.test_wrapper_subclass_parametrization_swap_True deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_skips/TestNNParametrization.test_new_spectral_norm_dim_swap_True b/test/dynamo_skips/TestNNParametrization.test_new_spectral_norm_dim_swap_True deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/nn/test_parametrization.py b/test/nn/test_parametrization.py index 19daadc34a..8203c2e275 100644 --- a/test/nn/test_parametrization.py +++ b/test/nn/test_parametrization.py @@ -9,8 +9,6 @@ import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init import torch.nn.utils.parametrize as parametrize -from torch import Tensor -from torch.__future__ import get_swap_module_params_on_conversion from torch.nn import Parameter from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import instantiate_device_type_tests @@ -22,10 +20,8 @@ from torch.testing._internal.common_utils import ( set_default_dtype, skipIfNoLapack, skipIfTorchDynamo, - swap, TemporaryFileName, ) -from torch.testing._internal.two_tensor import TwoTensor class TestNNParametrization(NNTestCase): @@ -36,7 +32,6 @@ class TestNNParametrization(NNTestCase): # and remove the `@skipIfNoLapack` (see #70995) # torch/nn/utils/parametrize @skipIfNoLapack - @swap([True, False]) def test_register_and_remove_parametrization(self): r"""Test that it is possible to add a few parametrizations on a parameter or a buffer and that removing them restores the initial state @@ -99,7 +94,8 @@ class TestNNParametrization(NNTestCase): self.assertTrue(parametrize.is_parametrized(model, "weight")) self.assertFalse(parametrize.is_parametrized(model, "bias")) self.assertNotIn("weight", model._parameters) - self.assertTrue(model.weight.shape[0] == 1) + A = model.weight + self.assertTrue(A.shape[0] == 1) parametrize.remove_parametrizations(model, "weight", leave_parametrized=False) self.assertFalse(hasattr(model, "parametrizations")) self.assertEqual(model.weight, initial_model.weight) @@ -114,7 +110,8 @@ class TestNNParametrization(NNTestCase): self.assertTrue(parametrize.is_parametrized(model, "weight")) self.assertFalse(parametrize.is_parametrized(model, "bias")) self.assertNotIn("weight", model._parameters) - self.assertTrue(model.weight.shape[0] == 1) + A = model.weight + self.assertTrue(A.shape[0] == 1) parametrize.remove_parametrizations(model, "weight", leave_parametrized=False) self.assertFalse(hasattr(model, "parametrizations")) self.assertEqual(model.weight, initial_model.weight) @@ -131,10 +128,6 @@ class TestNNParametrization(NNTestCase): # Result should be skew-symmetric A = model.weight self.assertEqual(A, -A.T) - if get_swap_module_params_on_conversion(): - # When using the swap_tensors path, this is needed so that the autograd - # graph is not alive anymore. - del A # Remove and check consistency parametrize.remove_parametrizations(model, "weight", leave_parametrized=False) self.assertFalse(hasattr(model, "parametrizations")) @@ -152,10 +145,6 @@ class TestNNParametrization(NNTestCase): # Result should be skew-symmetric A = model.weight self.assertEqual(A, -A.T) - if get_swap_module_params_on_conversion(): - # When using the swap_tensors path, this is needed so that the autograd - # graph is not alive anymore. 
- del A # Remove and check consistency parametrize.remove_parametrizations(model, "weight", leave_parametrized=False) self.assertFalse(hasattr(model, "parametrizations")) @@ -170,10 +159,6 @@ class TestNNParametrization(NNTestCase): X = model.weight Id = torch.eye(X.size(0), device=X.device) self.assertEqual(X.T @ X, Id) - if get_swap_module_params_on_conversion(): - # When using the swap_tensors path, this is needed so that the autograd - # graph is not alive anymore. - del X # Structure tests self.assertTrue(hasattr(model, "parametrizations")) self.assertTrue(parametrize.is_parametrized(model)) @@ -261,10 +246,6 @@ class TestNNParametrization(NNTestCase): sgd.step() self.assertNotEqual(model.weight, weight_copy) self.assertNotEqual(model.bias, bias_copy) - if get_swap_module_params_on_conversion(): - # When using the swap_tensors path, this is needed so that the autograd - # graph is not alive anymore. - del weight_copy, bias_copy # Test leave_parametrized=True for _ in range(2): @@ -285,12 +266,7 @@ class TestNNParametrization(NNTestCase): sgd.step() self.assertNotEqual(model.weight, weight_copy) self.assertNotEqual(model.bias, bias_copy) - if get_swap_module_params_on_conversion(): - # When using the swap_tensors path, this is needed so that the autograd - # graph is not alive anymore. - del weight_copy, bias_copy - @swap([True, False]) def test_register_and_remove_nested_parametrization(self): r"""Test that it is possible to nest the parametrizations meaning that the original param is parametrized again @@ -312,10 +288,6 @@ class TestNNParametrization(NNTestCase): # Result should be skew-symmetric A = model.weight self.assertEqual(A, -A.T) - if get_swap_module_params_on_conversion(): - # When using the swap_tensors path, this is needed so that the autograd - # graph is not alive anymore. 
- del A # Add nested parametrization param_mod = model.parametrizations.weight @@ -344,7 +316,6 @@ class TestNNParametrization(NNTestCase): self.assertFalse(hasattr(model, "parametrizations")) self.assertEqual(model.__class__, nn.Linear) - @swap([True, False]) def test_register_and_remove_buffer_parametrization(self): r"""Test that it is possible to add and remove parametrizations on buffers""" @@ -383,7 +354,6 @@ class TestNNParametrization(NNTestCase): # FIXME: Rewrite this test using functions not depending on LAPACK # and remove the `@skipIfNoLapack` (see #70995) @skipIfNoLapack - @swap([True, False]) def test_serialization_parametrization(self): r"""Test that it is possible to serialize a parametrized model via state_dict""" @@ -433,7 +403,6 @@ class TestNNParametrization(NNTestCase): # FIXME: Rewrite this test using functions not depending on LAPACK # and remove the `@skipIfNoLapack` (see #70995) @skipIfNoLapack - @swap([True, False]) def test_initialization_parametrization(self): r"""Test that it is possible to initialize a parametrization when it implements a `right_inverse` method @@ -503,7 +472,6 @@ class TestNNParametrization(NNTestCase): self.assertEqual(model.weight, X) self.assertEqual(model.parametrizations.weight.original, torch.zeros_like(X)) - @swap([True, False]) def test_errors_unparametrized_tensor_parametrization(self): # Test errors when registering a parametrization on an unparametrized tensor module = nn.Linear(3, 4) @@ -653,7 +621,6 @@ class TestNNParametrization(NNTestCase): self.assertFalse(parametrize.is_parametrized(module)) self.assertEqual(module.weight, weight_init) - @swap([True, False]) def test_errors_parametrized_tensor_parametrization(self): # Test errors when registering a parametrization on a parametrized tensor @@ -735,7 +702,6 @@ class TestNNParametrization(NNTestCase): # FIXME: Rewrite this test using functions not depending on LAPACK # and remove the `@skipIfNoLapack` (see #70995) @skipIfNoLapack - @swap([True, False]) def test_multiple_inputs_parametrization(self): # A parametrization with several outputs class RankOne(nn.Module): @@ -837,7 +803,6 @@ class TestNNParametrization(NNTestCase): # FIXME: Rewrite this test using functions not depending on LAPACK # and remove the `@skipIfNoLapack` (see #70995) @skipIfNoLapack - @swap([True, False]) def test_caching_parametrization(self): r"""Test the caching system of a parametrization""" @@ -865,7 +830,6 @@ class TestNNParametrization(NNTestCase): # FIXME: Rewrite this test using functions not depending on LAPACK # and remove the `@skipIfNoLapack` (see #70995) @skipIfNoLapack - @swap([True, False]) def test_caching_parametrization_with_transfer_parametrizations_and_params(self): r"""Test that transferring parametrizations doesn't cause issues with caching""" @@ -898,7 +862,6 @@ class TestNNParametrization(NNTestCase): # test that the results are distinct objects for each module self.assertNotEqual(id(A), id(X)) - @swap([True, False]) def test_parametrization_same_training_mode(self): r"""Test training mode updated on parametrization registration""" @@ -915,7 +878,6 @@ class TestNNParametrization(NNTestCase): self.assertTrue(module.parametrizations.weight[0].training) self.assertTrue(module.parametrizations.weight[1].training) - @swap([True, False]) def test_type_before_parametrizations(self): r"""Test that type_before_parametrizations always retrieves original type""" @@ -933,7 +895,6 @@ class TestNNParametrization(NNTestCase): parametrize.type_before_parametrizations(model) == original_type ) - 
@swap([True, False]) def test_deepcopy_after_parametrization(self): r"""Test that we are able to create a deepcopy of the module when it's parametrized.""" @@ -994,7 +955,6 @@ class TestNNParametrization(NNTestCase): parametrize.register_parametrization(model, "weight", AddOne()) check_deepcopy(model, deepcopy(model)) - @swap([True, False]) def test_transfer_parametrizations_and_params(self): r"""Test that all parametrizations and their associated parameters are transferred.""" @@ -1034,10 +994,6 @@ class TestNNParametrization(NNTestCase): # check that the transfer didn't affect the original value self.assertEqual(hold_weight, model.weight) - if get_swap_module_params_on_conversion(): - # When using the swap_tensors path, this is needed so that the autograd - # graph is not alive anymore. - del hold_weight # testing that changes to one set of parametrizations do not affect the other parametrize.remove_parametrizations(to_model, "weight") @@ -1062,7 +1018,6 @@ class TestNNParametrization(NNTestCase): # check that the new transfer didn't change the value for the from_module self.assertEqual(hold_test_param, model.test_param) - @swap([True, False]) def test_transfer_parametrizations_and_params_right_inverse(self): r"""Test that all parametrizations and their associated parameters are transferred.""" @@ -1092,7 +1047,6 @@ class TestNNParametrization(NNTestCase): # check that transfer doesn't affect the from_model weight self.assertEqual(hold_weight, model.weight) - @swap([True, False]) def test_transfer_parametrizations_and_params_single_param(self): r"""Test that all parametrizations and their associated parameters are transferred.""" @@ -1132,7 +1086,6 @@ class TestNNParametrization(NNTestCase): # FIXME: Rewrite this test using functions not depending on LAPACK # and remove the `@skipIfNoLapack` (see #70995) @skipIfNoLapack - @swap([True, False]) def test_transfer_parametrizations_and_params_many_to_one(self): # A parametrization with several outputs class RankOne(nn.Module): @@ -1199,7 +1152,6 @@ class TestNNParametrization(NNTestCase): # check that the new transfer didn't change the value for the from_module self.assertEqual(hold_test_param, model.test_param) - @swap([True, False]) def test_new_spectral_norm(self): with set_default_dtype(torch.double): input = torch.randn(3, 5) @@ -1337,30 +1289,16 @@ class TestNNParametrization(NNTestCase): # avoid doing another power iteration m, wrapped_m, _ = get_modules() pre_remove_out = wrapped_m(input) - if get_swap_module_params_on_conversion(): - # When using the swap_tensors path, this is needed so that the autograd - # graph is not alive anymore. - pre_remove_out_ref = pre_remove_out.detach() - del pre_remove_out - else: - pre_remove_out_ref = pre_remove_out m.eval() - m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight') - self.assertEqual(wrapped_m(input), pre_remove_out_ref) + m = torch.nn.utils.parametrize.remove_parametrizations(m, "weight") + self.assertEqual(wrapped_m(input), pre_remove_out) torch.nn.utils.parametrizations.spectral_norm(m) for _ in range(3): pre_remove_out = wrapped_m(input) - if get_swap_module_params_on_conversion(): - # When using the swap_tensors path, this is needed so that the autograd - # graph is not alive anymore. 
- pre_remove_out_ref = pre_remove_out.detach() - del pre_remove_out - else: - pre_remove_out_ref = pre_remove_out m.eval() - m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight') - self.assertEqual(wrapped_m(input), pre_remove_out_ref) + m = torch.nn.utils.parametrize.remove_parametrizations(m, "weight") + self.assertEqual(wrapped_m(input), pre_remove_out) # TEST EVAL BEHAVIOR m, wrapped_m, spectral_norm_m = get_modules() @@ -1414,7 +1352,6 @@ class TestNNParametrization(NNTestCase): gradcheck(fn, (m.parametrizations.weight.original,)) - @swap([True, False]) def test_new_spectral_norm_load_state_dict(self): for activate_times in (0, 3): inp = torch.randn(2, 3) @@ -1494,7 +1431,6 @@ class TestNNParametrization(NNTestCase): snm.eval() self.assertEqual(out3_eval, snm(inp)) - @swap([True, False]) def test_new_spectral_norm_dim(self): inp = torch.randn(2, 3, 10, 12) m = nn.ConvTranspose2d(3, 4, (5, 6)) @@ -1507,7 +1443,6 @@ class TestNNParametrization(NNTestCase): snm._u.shape, m.parametrizations.weight.original[0, :, 0, 0].shape ) - @swap([True, False]) def test_new_spectral_norm_forward(self): input = torch.randn(3, 5) m = nn.Linear(5, 7) @@ -1526,7 +1461,6 @@ class TestNNParametrization(NNTestCase): expect_out = m(input) self.assertEqual(expect_out, out_hat) - @swap([True, False]) @skipIfTorchDynamo("Test does not work with TorchDynamo") def test_new_spectral_norm_value(self): # a test that the spectral norm (= top singular value) @@ -1543,7 +1477,6 @@ class TestNNParametrization(NNTestCase): self.assertEqual(m.weight.data, expected) @skipIfNoLapack - @swap([True, False]) def test_orthogonal_parametrization(self): # Orthogonal implements 6 algorithms (3x parametrizations times 2 options of use_trivialization) @@ -1599,13 +1532,7 @@ class TestNNParametrization(NNTestCase): # We do not support householder for complex inputs # See Note [Householder complex] - - # When using the swap_tensors path, this is needed so that the autograd - # graph is not alive anymore. 
- if get_swap_module_params_on_conversion(): - w_init = m.weight.clone().detach() - else: - w_init = m.weight.clone() + w_init = m.weight.clone() if parametrization == "householder" and m.weight.is_complex(): msg = "householder parametrization does not support complex tensors" with self.assertRaisesRegex(ValueError, msg): @@ -1678,7 +1605,6 @@ class TestNNParametrization(NNTestCase): assert_is_orthogonal(m.weight) @skipIfNoLapack - @swap([True, False]) def test_orthogonal_errors(self): m = nn.Linear(3, 4) with self.assertRaisesRegex(ValueError, "has to be one of"): @@ -1692,7 +1618,6 @@ class TestNNParametrization(NNTestCase): m.weight = torch.randn(5, 5) torch.nn.utils.parametrize.remove_parametrizations(m, "weight") - @swap([True, False]) def test_weight_norm_state_dict_compat(self): m = nn.Linear(4, 5) m = torch.nn.utils.weight_norm(m) @@ -1705,14 +1630,12 @@ class TestNNParametrization(NNTestCase): input = torch.randn(3, 4) self.assertEqual(m(input), m2(input)) - @swap([True, False]) def test_weight_norm_pickle(self): m = nn.Linear(4, 5) m = torch.nn.utils.parametrizations.weight_norm(m) with self.assertRaisesRegex(RuntimeError, "state_dict"): pickle.dumps(m) - @swap([True, False]) def test_weight_norm_deepcopy(self): m = nn.Linear(4, 5) m = torch.nn.utils.parametrizations.weight_norm(m) @@ -1720,93 +1643,8 @@ class TestNNParametrization(NNTestCase): input = torch.randn(3, 4) self.assertEqual(m(input), m2(input)) - @swap([True]) - def test_wrapper_subclass_parametrization(self): - class Subclassify(nn.Module): - def forward(self, X): - return TwoTensor(X, X) - - class UnSubclassify(nn.Module): - def forward(self, X): - return X.a - - class IdentityWithRightInverse(nn.Module): - def forward(self, X): - return X - - def right_inverse(self, X): - return TwoTensor(X, X) - - def _check_parametrization(parametrization, - type_before_registration, - type_after_registration, - leave_parametrized=False, - type_after_right_inverse=None): - model = nn.Linear(2, 2) - buf = torch.randn(2, 2) - model.register_buffer('buf', buf) - if type_before_registration == TwoTensor and type_after_registration == Tensor: - model._apply(lambda t: TwoTensor(t, t)) - initial_weight = model.weight.clone().detach() - initial_weight_id = id(model.weight) - initial_buf = model.buf.clone().detach() - initial_buf_id = id(model.buf) - type_original_weight = type_before_registration if type_after_right_inverse is None else type_after_right_inverse - type_original_buf = Tensor if type_original_weight is nn.Parameter else type_original_weight - type_after_removal_buf = type_after_registration if leave_parametrized else type_original_buf - if leave_parametrized: - if type_after_registration is Tensor: - type_after_removal_weight = nn.Parameter - else: - type_after_removal_weight = type_after_registration - else: - type_after_removal_weight = type_original_weight - - parametrize.register_parametrization(model, "weight", parametrization()) - parametrize.register_parametrization(model, "buf", parametrization()) - self.assertTrue(hasattr(model, "parametrizations")) - self.assertTrue(parametrize.is_parametrized(model)) - self.assertFalse(parametrize.is_parametrized(model, "bias")) - # checks for weight - self.assertTrue(parametrize.is_parametrized(model, "weight")) - self.assertTrue(isinstance(model.parametrizations.weight.original, nn.Parameter)) - self.assertTrue(type(model.parametrizations.weight.original) is type_original_weight) - self.assertNotIn("weight", model._parameters) - self.assertTrue(type(model.weight) is 
type_after_registration) - # checks for buf - self.assertTrue(parametrize.is_parametrized(model, "buf")) - self.assertFalse(isinstance(model.parametrizations.buf.original, nn.Parameter)) - self.assertTrue(type(model.parametrizations.buf.original) is type_original_buf) - self.assertTrue(type(model.buf) is type_after_registration) - parametrize.remove_parametrizations(model, "weight", leave_parametrized=leave_parametrized) - parametrize.remove_parametrizations(model, "buf", leave_parametrized=leave_parametrized) - self.assertFalse(hasattr(model, "parametrizations")) - self.assertEqual(model.__class__, nn.Linear) - # checks for weight - self.assertTrue(type(model.weight) is type_after_removal_weight) - self.assertTrue(isinstance(model.weight, nn.Parameter)) - self.assertEqual(id(model.weight), initial_weight_id) - # checks for buf - self.assertTrue(type(model.buf) is type_after_removal_buf) - self.assertFalse(isinstance(model.buf, nn.Parameter)) - self.assertEqual(id(model.buf), initial_buf_id) - if not leave_parametrized and type_after_right_inverse is None: - self.assertEqual(model.weight, initial_weight) - self.assertEqual(model.buf, initial_buf) - - - _check_parametrization(Subclassify, nn.Parameter, TwoTensor) - _check_parametrization(UnSubclassify, TwoTensor, Tensor) - _check_parametrization(IdentityWithRightInverse, nn.Parameter, TwoTensor, - type_after_right_inverse=TwoTensor) - _check_parametrization(Subclassify, nn.Parameter, TwoTensor, leave_parametrized=True) - _check_parametrization(UnSubclassify, TwoTensor, Tensor, leave_parametrized=True) - _check_parametrization(IdentityWithRightInverse, nn.Parameter, TwoTensor, - leave_parametrized=True, type_after_right_inverse=TwoTensor) - class TestNNParametrizationDevice(NNTestCase): - @swap([True, False]) def test_weight_norm_parametrization(self, device): for dtype in [torch.float, torch.bfloat16]: input = torch.randn(3, 4, dtype=dtype, device=device) diff --git a/torch/nn/utils/parametrize.py b/torch/nn/utils/parametrize.py index aa4f9656d5..e73aada232 100644 --- a/torch/nn/utils/parametrize.py +++ b/torch/nn/utils/parametrize.py @@ -1,8 +1,6 @@ import torch -from torch.__future__ import get_swap_module_params_on_conversion from torch.nn.modules.container import ModuleList, ModuleDict, Module from torch.nn.parameter import Parameter -from torch.utils._python_dispatch import is_traceable_wrapper_subclass from torch import Tensor import collections @@ -66,14 +64,6 @@ def _register_parameter_or_buffer(module, name, X): else: module.register_buffer(name, X) -def _maybe_set(dest: Tensor, src: Tensor) -> None: - should_swap = get_swap_module_params_on_conversion() or is_traceable_wrapper_subclass(dest) - if should_swap: - if isinstance(dest, Parameter) and not isinstance(src, Parameter): - src = Parameter(src, requires_grad=dest.requires_grad) - torch.utils.swap_tensors(dest, src) - else: - dest.set_(src) # type: ignore[call-overload] class ParametrizationList(ModuleList): r"""A sequential container that holds and manages the original parameters or buffers of a parametrized :class:`torch.nn.Module`. 
@@ -167,7 +157,7 @@ class ParametrizationList(ModuleList): # Set the original to original so that the user does not need to re-register the parameter # manually in the optimiser with torch.no_grad(): - _maybe_set(original, new) + original.set_(new) # type: ignore[call-overload] _register_parameter_or_buffer(self, "original", original) else: for i, originali in enumerate(new): @@ -241,7 +231,7 @@ class ParametrizationList(ModuleList): f"while `original` has dtype {self.original.dtype}" ) # We know that the result is going to have the same dtype - _maybe_set(self.original, value) + self.original.set_(value) # type: ignore[call-overload] else: if not isinstance(value, collections.abc.Sequence): raise ValueError( @@ -265,7 +255,7 @@ class ParametrizationList(ModuleList): f"Tensor {i} returned by `right_inverse` has dtype {tensor.dtype} " f"while `original{i}` has dtype {original_i.dtype}" ) - _maybe_set(original_i, tensor) + original_i.set_(tensor) def forward(self) -> Tensor: if torch.jit.is_scripting(): @@ -655,20 +645,18 @@ def remove_parametrizations( # This way the user does not need to update the optimizer with torch.no_grad(): if type(original) is torch.Tensor: - _maybe_set(original, t) + original.set_(t) else: try: - _maybe_set(original, t) + original.set_(t) except RuntimeError as e: # TODO: Fix this for tensor subclasses that are parameters: # RuntimeError: set_storage is not allowed on a Tensor created from .data or .detach(). raise RuntimeError("Calling remove_parametrizations() with leave_parametrized=True " "for a parameter that is an instance of a tensor subclass requires " - "set_() to be implemented correctly for the tensor subclass." - "Alternatively, one can opt into the swap_tensors path" - "Either set leave_parametrized=False or provide a working implementation" - "for set_() in the tensor subclass or set " - "torch.__future__.set_swap_module_params_on_conversion(True).") from e + "set_() to be implemented correctly for the tensor subclass. Either " + "set leave_parametrized=False or provide a working implementation for " + "set_() in the tensor subclass.") from e else: if leave_parametrized: # We cannot use no_grad because we need to know whether one or more
2.41.0
e86a40694f03c66680201c01c13347eff38a951
Thu, 18 Apr 2024 06:34:32 +0000
[PATCH 0308/1000] Revert "[Dynamo] Check for __bool__ attribute before accessing it (#120943)"
This reverts commit dd7aeedb72f8a96d0f168308292e0d41c095f01b. Reverted https://github.com/pytorch/pytorch/pull/120943 on behalf of https://github.com/DanilBaibak due to Broken trunk ([comment](https://github.com/pytorch/pytorch/pull/120943#issuecomment-2063098295))
diff --git a/test/dynamo_expected_failures/TestComposability.test_convert_without_squash_mask b/test/dynamo_expected_failures/TestComposability.test_convert_without_squash_mask new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestComposability.test_fusion_before_s_prep b/test/dynamo_expected_failures/TestComposability.test_fusion_before_s_prep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestComposability.test_q_prep_before_s_prep b/test/dynamo_expected_failures/TestComposability.test_q_prep_before_s_prep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestComposability.test_qat_prep_before_s_prep b/test/dynamo_expected_failures/TestComposability.test_qat_prep_before_s_prep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestComposability.test_s_prep_before_fusion b/test/dynamo_expected_failures/TestComposability.test_s_prep_before_fusion new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestComposability.test_s_prep_before_q_prep b/test/dynamo_expected_failures/TestComposability.test_s_prep_before_q_prep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestComposability.test_s_prep_before_qat_prep b/test/dynamo_expected_failures/TestComposability.test_s_prep_before_qat_prep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestGenerateNumericDebugHandle.test_quantize_pt2e_preserve_handle b/test/dynamo_expected_failures/TestGenerateNumericDebugHandle.test_quantize_pt2e_preserve_handle new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_add b/test/dynamo_expected_failures/TestPT2ERepresentation.test_add new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_add_relu b/test/dynamo_expected_failures/TestPT2ERepresentation.test_add_relu new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_conv2d b/test/dynamo_expected_failures/TestPT2ERepresentation.test_conv2d new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_dynamic_linear b/test/dynamo_expected_failures/TestPT2ERepresentation.test_dynamic_linear new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_maxpool2d b/test/dynamo_expected_failures/TestPT2ERepresentation.test_maxpool2d new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_qdq b/test/dynamo_expected_failures/TestPT2ERepresentation.test_qdq new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_qdq_per_channel b/test/dynamo_expected_failures/TestPT2ERepresentation.test_qdq_per_channel new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestPT2ERepresentation.test_static_linear b/test/dynamo_expected_failures/TestPT2ERepresentation.test_static_linear new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_composable_quantizer_linear_conv 
b/test/dynamo_expected_failures/TestQuantizePT2E.test_composable_quantizer_linear_conv new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_constant_prop_preserve_metadata b/test/dynamo_expected_failures/TestQuantizePT2E.test_constant_prop_preserve_metadata new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_embedding_conv_linear_quantization b/test/dynamo_expected_failures/TestQuantizePT2E.test_embedding_conv_linear_quantization new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_all_ops_before_quantize b/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_all_ops_before_quantize new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_quantize b/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_quantize new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_quantize_per_channel b/test/dynamo_expected_failures/TestQuantizePT2E.test_fold_quantize_per_channel new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_groupwise_per_channel_quant b/test/dynamo_expected_failures/TestQuantizePT2E.test_groupwise_per_channel_quant new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_reentrant b/test/dynamo_expected_failures/TestQuantizePT2E.test_reentrant new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_save_load b/test/dynamo_expected_failures/TestQuantizePT2E.test_save_load new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2E.test_speed b/test/dynamo_expected_failures/TestQuantizePT2E.test_speed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQATModels.test_qat_mobilenet_v2 b/test/dynamo_expected_failures/TestQuantizePT2EQATModels.test_qat_mobilenet_v2 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQATModels.test_qat_resnet18 b/test/dynamo_expected_failures/TestQuantizePT2EQATModels.test_qat_resnet18 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_prepare_qat_conv_bn_fusion_getitem_placeholder b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_prepare_qat_conv_bn_fusion_getitem_placeholder new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion_literal_args b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion_literal_args new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion_no_conv_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_fusion_no_conv_bias new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_relu_fusion b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_relu_fusion new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_relu_fusion_no_conv_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_relu_fusion_no_conv_bias new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_no_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_no_bias new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_inplace_add_relu b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_inplace_add_relu new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_preserve_source_fn_stack b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_preserve_source_fn_stack new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_update_shared_qspec b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn1d.test_qat_update_shared_qspec new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_prepare_qat_conv_bn_fusion_getitem_placeholder b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_prepare_qat_conv_bn_fusion_getitem_placeholder new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion_literal_args b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion_literal_args new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion_no_conv_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_fusion_no_conv_bias new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_relu_fusion b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_relu_fusion new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_relu_fusion_no_conv_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_relu_fusion_no_conv_bias new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_no_bias b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_no_bias new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_inplace_add_relu b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_inplace_add_relu new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_preserve_source_fn_stack b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_preserve_source_fn_stack new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_update_shared_qspec b/test/dynamo_expected_failures/TestQuantizePT2EQAT_ConvBn2d.test_qat_update_shared_qspec new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_and_inplace_add b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_and_inplace_add new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_mul_long b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_mul_long new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_mul_scalar b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_add_mul_scalar new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv1d b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv1d new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv1d_with_conv2d b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv1d_with_conv2d new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv2d b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv2d new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv_linear b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv_linear new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv_linear_no_permute b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_conv_linear_no_permute new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear_int4_weight b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear_int4_weight new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear_with_conv b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_dynamic_linear_with_conv new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_gru b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_gru new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_gru b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_gru new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_relu b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_relu new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_with_dynamic_shape b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_linear_with_dynamic_shape new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_mul_and_inplace_mul b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_mul_and_inplace_mul new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_mul_float32_max b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_mul_float32_max new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_obs_sharing_ops b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_obs_sharing_ops new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_propagate_annotation b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_propagate_annotation new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_qat_dynamic_linear b/test/dynamo_expected_failures/TestXNNPACKQuantizer.test_qat_dynamic_linear new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/dynamo_expected_failures/TestXNNPACKQuantizerModels.test_resnet18 b/test/dynamo_expected_failures/TestXNNPACKQuantizerModels.test_resnet18 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py index 621b09670c..0c8e5898e6 100644 --- a/torch/_dynamo/symbolic_convert.py +++ b/torch/_dynamo/symbolic_convert.py @@ -409,14 +409,10 @@ def generic_jump(truth_fn: typing.Callable[[object], bool], push: bool): self.push(value) self.jump(inst) elif isinstance(value, UserDefinedObjectVariable): - x = None - has_bool = value.call_hasattr(self, "__bool__") - if has_bool.is_python_constant() and has_bool.as_python_constant(): - x = value.var_getattr(self, "__bool__") - else: - has_len = value.call_hasattr(self, "__len__") - if has_len.is_python_constant() and has_len.as_python_constant(): - x = value.var_getattr(self, "__len__") + x = value.var_getattr(self, "__bool__") + # if __bool__ is missing, trying __len__ to infer a truth value. + if isinstance(x, GetAttrVariable): + x = value.var_getattr(self, "__len__") # __bool__ or __len__ is function if isinstance(x, UserMethodVariable):
2.41.0
04fac5618dbefcf6b29832c0215ae5f5e0fa2c3
Wed, 17 Apr 2024 10:09:27 -0700
[PATCH 0309/1000] [dynamo][cpp-guard] Reland Attempt 1 - Enable cpp guard manager (#124231)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124231 Approved by: https://github.com/jansel ghstack dependencies: #124230, #124237
diff --git a/torch/_dynamo/config.py b/torch/_dynamo/config.py index d95e7986b4..9482cfabcc 100644 --- a/torch/_dynamo/config.py +++ b/torch/_dynamo/config.py @@ -341,7 +341,7 @@ numpy_default_int = "int64" use_numpy_random_stream = False # Use C++ guard manager -enable_cpp_guard_manager = os.environ.get("TORCHDYNAMO_CPP_GUARD_MANAGER", "0") == "1" +enable_cpp_guard_manager = os.environ.get("TORCHDYNAMO_CPP_GUARD_MANAGER", "1") == "1" # Inline inbuilt nn modules inline_inbuilt_nn_modules = (
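For context, both knobs shown in the diff above remain usable for opting back out after the default flip; a hedged sketch, not a prescription:

```python
# Either export TORCHDYNAMO_CPP_GUARD_MANAGER=0 before importing torch, or
# override the config flag in-process (attribute name taken from the diff above).
import torch._dynamo.config as dynamo_config

dynamo_config.enable_cpp_guard_manager = False  # fall back to the Python guard path
```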
2.41.0
b82345e487a7f1a6ba3a663201e0e0235d87b23
Thu, 18 Apr 2024 07:21:41 +0000
[PATCH 0310/1000] Revert "Re-land precompile triton templates (#124030)"
This reverts commit 030bb13fe84c88ab5c988351543362b60fefb556. Reverted https://github.com/pytorch/pytorch/pull/124030 on behalf of https://github.com/DanilBaibak due to Broken trunk ([comment](https://github.com/pytorch/pytorch/pull/124030#issuecomment-2063191117))
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py index af87aba112..d1f074de51 100644 --- a/test/inductor/test_max_autotune.py +++ b/test/inductor/test_max_autotune.py @@ -328,8 +328,7 @@ class TestMaxAutotune(TestCase): inputs: str, benchmark: Callable[[Any], Dict[ChoiceCaller, float]], ) -> Dict[ChoiceCaller, float]: - if benchmark is not None: - return benchmark(choices) + return benchmark(choices) asc = AlgorithmSelectorCache() @@ -427,25 +426,6 @@ class TestMaxAutotune(TestCase): FileCheck().check_not("extern_kernels.convolution").run(code[0]) self.assertEqual(conv1x1(input_tensor), out, atol=1e-2, rtol=0) - @skipIfRocm - def test_filled_cache_precompile(self): - def fn(a, b, c): - a = (a @ b) @ c - a, b, c = (t.to(torch.float16) for t in [a, b, c]) - return (a @ b) @ c - - fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn) - inputs = [torch.rand([256, 256], device="cuda") for _ in range(3)] - from torch._dynamo.utils import counters - - self.assertEqual(fn(*inputs), fn_c(*inputs), atol=1e-2, rtol=1e-2) - - torch._dynamo.reset() - counters.clear() - - fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn) - self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 0) - def test_cat_addmm(self): def fn(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor): return torch.cat( diff --git a/test/inductor/test_select_algorithm.py b/test/inductor/test_select_algorithm.py index 48713bb63e..3b76651fcc 100644 --- a/test/inductor/test_select_algorithm.py +++ b/test/inductor/test_select_algorithm.py @@ -19,10 +19,8 @@ aten = torch.ops.aten def patches(fn): - def skip_cache(self, choices, name, key, benchmark): - if benchmark is None: - return {} - return benchmark(choices) + def skip_cache(self, choices, name, key, generate): + return generate(choices) for patcher in [ dynamo_config.patch(verbose=True), diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py index 35beb6fb06..ba1976745c 100644 --- a/torch/_inductor/autotune_process.py +++ b/torch/_inductor/autotune_process.py @@ -502,6 +502,7 @@ class TestBenchmarkRequest(BenchmarkRequest): class TritonBenchmarkRequest(BenchmarkRequest): # Important: Instances of this class have to be serializable # across process boundaries. Do not put CUDA Tensors in here! 
+ def __init__( self, kernel_name: str, @@ -544,8 +545,6 @@ class TritonBenchmarkRequest(BenchmarkRequest): if "warmup" in inspect.signature(run_method).parameters: warmup_arg["warmup"] = False - from torch._C import _cuda_getCurrentRawStream as get_raw_stream - if torch.version.hip and self.matrix_instr_nonkdim != 0: return functools.partial( run_method, @@ -554,7 +553,9 @@ class TritonBenchmarkRequest(BenchmarkRequest): *self.extra_args, grid=self.grid, **warmup_arg, - stream=get_raw_stream(self.output_tensor_meta.device.index), + num_stages=self.num_stages, + num_warps=self.num_warps, + matrix_instr_nonkdim=self.matrix_instr_nonkdim, ) else: return functools.partial( @@ -564,13 +565,10 @@ class TritonBenchmarkRequest(BenchmarkRequest): *self.extra_args, grid=self.grid, **warmup_arg, - stream=get_raw_stream(self.output_tensor_meta.device.index), + num_stages=self.num_stages, + num_warps=self.num_warps, ) - def precompile(self): - mod = PyCodeCache.load_by_key_path(self.module_cache_key, self.module_path) - getattr(mod, self.kernel_name).precompile() - def __str__(self) -> str: return f"{self.kernel_name=}, {self.module_path=}, {self.module_cache_key=}" diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index 4e3c24cf96..7c0ee56e19 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -156,7 +156,7 @@ class CacheBase: try: import triton - triton_version = triton.__version__ # type: ignore[attr-defined] + triton_version = triton.__version__ except ModuleNotFoundError: triton_version = None @@ -262,7 +262,7 @@ class PersistentCache(CacheBase): choices: List[ChoiceCaller], op: str, inputs: str, - benchmark: Optional[Callable[[Any], Dict[ChoiceCaller, float]]], + benchmark: Callable[[Any], Dict[ChoiceCaller, float]], ) -> Dict[ChoiceCaller, float]: """ Check to see if we have benchmarked the given choice callers. For each @@ -270,7 +270,7 @@ class PersistentCache(CacheBase): 1. Check global_cache[op][inputs][choice][precision], return benchmark if cached. 2. Check local_cache[op][inputs][choice][precision], return benchmark if cached. - 3. If benchmark is not None: + 3. a. `max_autotune_gemm=True`: benchmark the choice, update local_cache[op][inputs][choice], and return the benchmark. b. `max_autotune_gemm=False`: don't benchmark the choice, return nothing. 
@@ -303,13 +303,9 @@ class PersistentCache(CacheBase): if config.max_autotune or config.max_autotune_gemm: local_cache = self.get_local_cache() # check local cache first since it is data specific to the current machine - if ( - not check_cache(local_cache) - and not ( - use_global_cache() - and check_cache(self.get_global_cache(), callback=log_stats) - ) - and benchmark is not None + if not check_cache(local_cache) and not ( + use_global_cache() + and check_cache(self.get_global_cache(), callback=log_stats) ): try: # re-benchmark everything to try to get consistent numbers from the same machine diff --git a/torch/_inductor/codegen/triton_utils.py b/torch/_inductor/codegen/triton_utils.py index c8a7d92e3c..c95e699bcd 100644 --- a/torch/_inductor/codegen/triton_utils.py +++ b/torch/_inductor/codegen/triton_utils.py @@ -65,32 +65,6 @@ def signature_to_meta( } -def is_unaligned_buffer(arg: TensorArg): - buf_name = arg.buffer - if buf_name in V.graph.graph_inputs: - return not config.assume_aligned_inputs - - if buf_name in V.graph.constants: - # all constants are assumed to be aligned - return False - - if V.graph.scheduler: - layout = V.graph.scheduler.get_buffer_layout(buf_name) - else: - buffer = V.graph.get_buffer(buf_name) - # output arg - if not buffer: - assert buf_name == V.kernel.output_node.name - layout = V.kernel.output_node.layout - else: - layout = buffer.get_layout() - - if isinstance(layout, torch._inductor.ir.NonOwningLayout): - return not layout.maybe_guard_aligned() - else: - return False - - def config_of( args: List[KernelArgType], *, @@ -109,7 +83,9 @@ def config_of( offset_aligned = V.graph.sizevars.statically_known_multiple_of( x.offset * x.dtype.itemsize, alignment # type: ignore[arg-type] ) - return offset_aligned and not is_unaligned_buffer(x) + return offset_aligned and not V.graph.scheduler.is_unaligned_buffer( + x.buffer + ) else: return False if isinstance(x, SizeArg): diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py index 049a77a4ef..573e79acd6 100644 --- a/torch/_inductor/scheduler.py +++ b/torch/_inductor/scheduler.py @@ -2495,9 +2495,18 @@ class Scheduler: self.flush() - def get_buffer_layout(self, buf_name: str) -> ir.Layout: + def is_unaligned_buffer(self, buf_name): + if buf_name in V.graph.graph_inputs: + return not config.assume_aligned_inputs + if buf_name in V.graph.constants: + # all constants are assumed to be aligned + return False node = self.name_to_node[buf_name] - return node.node.get_layout() + layout = node.node.get_layout() + if isinstance(layout, ir.NonOwningLayout): + return not layout.maybe_guard_aligned() + else: + return False class BaseScheduling: diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index 3261909d2b..75deeaf5e3 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -94,7 +94,7 @@ class TritonTemplateKernel(TritonKernel): grid_fn, meta, call_sizes, - use_jit=False, + use_jit=True, prefix_args=0, suffix_args=0, epilogue_fn=identity, @@ -153,8 +153,8 @@ class TritonTemplateKernel(TritonKernel): argdefs, _, signature = self.args.python_argdefs() triton_meta = { "signature": signature_to_meta(signature, size_dtype=self.index_dtype), - "device": self.output_node.get_device().index, - "device_type": self.output_node.get_device().type, + "device": V.graph.scheduler.current_device.index, + "device_type": V.graph.scheduler.current_device.type, "constants": {}, } triton_meta["configs"] = [config_of(signature)] @@ -554,7 +554,7 @@ class 
TritonTemplate(KernelTemplate): ), TritonTemplateKernel( kernel_name=kernel_name, output_node=fake_out, - use_jit=False, + use_jit=True, **kernel_options, ) as kernel: try: @@ -740,10 +740,6 @@ class TritonTemplateCaller(ir.TritonTemplateCallerBase): assert self.bmreq is not None return self.bmreq.benchmark(*args, output_tensor=out) - def precompile(self): - assert self.bmreq is not None - self.bmreq.precompile() - def __str__(self): return f"TritonTemplateCaller({self.bmreq.module_path}, {self.debug_extra})" @@ -885,7 +881,6 @@ class AlgorithmSelectorCache(PersistentCache): # TODO(nmacchioni): remove once CI tests are fixed choices = [choice for choice in choices if choice is not None] - if len(choices) == 0: raise RuntimeError( "No choices to select, please consider adding ATEN into max_autotune_gemm_backends " @@ -902,38 +897,19 @@ class AlgorithmSelectorCache(PersistentCache): def make_benchmark_fn(): return self.make_benchmark_fn(choices, input_nodes, layout, input_gen_fns) - def precompile(choices) -> Callable[[], None]: - def no_op(*args, **kwargs): - return - + def precompile(choices): if ( precompilation_timeout_seconds is None or precompilation_timeout_seconds <= 0 ): - return no_op + return num_workers = min( config.compile_threads, torch.get_num_threads(), len(choices), ) if num_workers <= 0: - return no_op - - # TODO - debug issue - if torch.version.hip: - return no_op - - # check local and global cache before precompiling - timings = self.lookup( - choices, - name, - repr([self.key_of(x) for x in input_nodes]), - benchmark=None, - ) - - if timings: - return no_op - + return log.info( "Multithreaded precompilation for %d choices using %d worker threads", len(choices),
2.41.0
59f1da62f6a74d815dede5ac5513b37c49733a3
Wed, 17 Apr 2024 02:15:59 -0700
[PATCH 0312/1000] [sym_shapes][perf] _find: do not update unchanged replacements (#124274)
Differential Revision: [D56236380](https://our.internmc.facebook.com/intern/diff/D56236380) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124274 Approved by: https://github.com/ezyang
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py index 1a9df2466f..37fa27b44b 100644 --- a/torch/fx/experimental/symbolic_shapes.py +++ b/torch/fx/experimental/symbolic_shapes.py @@ -3976,7 +3976,9 @@ class ShapeEnv: return a res = self.replacements[a] cur_replace = {s: self._find(s) for s in res.free_symbols} - self._set_replacement(a, self.replacements[a].xreplace(cur_replace), "find") + replaced, changed = self.replacements[a]._xreplace(cur_replace) + if changed: + self._set_replacement(a, replaced, "find") return self.replacements[a] @lru_cache(256)
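The saving comes from sympy's internal `Basic._xreplace`, which returns a `(new_expr, changed)` pair, so `_set_replacement` only runs when the substitution actually changed the expression. A standalone sketch of that pattern (it leans on a sympy internal helper, so treat it as illustrative):

```python
import sympy

x, y, z = sympy.symbols("x y z")
expr = x + y

# _xreplace reports whether any substitution took place, unlike xreplace().
replaced, changed = expr._xreplace({z: sympy.Integer(1)})
assert not changed and replaced is expr   # nothing matched, no rebuild needed

replaced, changed = expr._xreplace({y: sympy.Integer(2)})
assert changed and replaced == x + 2      # only now would a new replacement be recorded
```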
2.41.0
fcbeb34894288fa10ec1b561cd0b58b5407e0a7
Thu, 18 Apr 2024 01:03:38 -0700
[PATCH 0313/1000] [ATen] Add CPU fp16 support for nll_loss and cross_entropy_loss (#123256)
Add CPU FP16 support for nll_loss and cross_entropy_loss. Resolve issue #123328. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123256 Approved by: https://github.com/jgong5, https://github.com/EikanWang, https://github.com/malfet
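As a quick illustration of what the change enables, a minimal sketch (arbitrary shapes, not taken from the PR's test suite):

```python
import torch
import torch.nn.functional as F

# Before this change, fp16 nll_loss/cross_entropy only dispatched on CUDA;
# now the CPU kernels accept torch.float16 as well.
logits = torch.randn(8, 5, dtype=torch.float16)   # CPU tensor
target = torch.randint(0, 5, (8,))

ce = F.cross_entropy(logits, target)

log_probs = logits.float().log_softmax(dim=1).half()   # fp16 log-probabilities
nll = F.nll_loss(log_probs, target)
print(ce, nll)
```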
diff --git a/aten/src/ATen/native/LossNLL.cpp b/aten/src/ATen/native/LossNLL.cpp index 0bf787b99b..0e7de9c272 100644 --- a/aten/src/ATen/native/LossNLL.cpp +++ b/aten/src/ATen/native/LossNLL.cpp @@ -304,8 +304,12 @@ void nll_loss_forward_out_cpu_template( const Tensor& weight, int64_t reduction, int64_t ignore_index) { - AT_DISPATCH_FLOATING_TYPES_AND( - ScalarType::BFloat16, input.scalar_type(), "nll_loss_out_frame", [&] { + AT_DISPATCH_FLOATING_TYPES_AND2( + ScalarType::BFloat16, + ScalarType::Half, + input.scalar_type(), + "nll_loss_out_frame", + [&] { if (target.scalar_type() == kByte) { nll_loss_out_frame<scalar_t, uint8_t>( output, @@ -415,8 +419,9 @@ void nll_loss_backward_out_cpu_template( const Tensor& total_weight) { grad_input.zero_(); - AT_DISPATCH_FLOATING_TYPES_AND( + AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::BFloat16, + ScalarType::Half, input.scalar_type(), "nll_loss_backward_out_frame", [&] { diff --git a/aten/src/ATen/native/LossNLL2d.cpp b/aten/src/ATen/native/LossNLL2d.cpp index e1700e43ba..94c667dcb1 100644 --- a/aten/src/ATen/native/LossNLL2d.cpp +++ b/aten/src/ATen/native/LossNLL2d.cpp @@ -262,8 +262,9 @@ void nll_loss2d_forward_out_cpu_template( check_inputs_nll_loss2d(input, target, weight); total_weight.resize_({}); - AT_DISPATCH_FLOATING_TYPES_AND( + AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::BFloat16, + ScalarType::Half, input.scalar_type(), "nll_loss2d_forward_out_frame", [&] { @@ -383,8 +384,9 @@ void nll_loss2d_backward_out_cpu_template( total_weight.numel(), " elements)"); - AT_DISPATCH_FLOATING_TYPES_AND( + AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::BFloat16, + ScalarType::Half, input.scalar_type(), "nll_loss2d_backward_out_frame", [&] { diff --git a/test/onnx/test_fx_op_consistency.py b/test/onnx/test_fx_op_consistency.py index 9a1160939a..004574d3e8 100644 --- a/test/onnx/test_fx_op_consistency.py +++ b/test/onnx/test_fx_op_consistency.py @@ -2001,6 +2001,7 @@ class TestOnnxModelOutputConsistency(onnx_test_common._TestONNXRuntime): "nn.functional.multilabel_soft_margin_loss": [4e-2, 5e-3], "nn.functional.local_response_norm": [1e-2, 5e-3], "nn.functional.poisson_nll_loss": [3e-2, 1e-3], + "nn.functional.nll_loss": [3e-2, 1e-3], "native_batch_norm": [3e-2, 1e-3], "dot": [3e-2, 1e-3], "logit": [3e-2, 1e-3], diff --git a/test/test_mps.py b/test/test_mps.py index 511a76a87d..3597ec8d12 100644 --- a/test/test_mps.py +++ b/test/test_mps.py @@ -171,6 +171,8 @@ def mps_ops_grad_modifier(ops): 'nn.functional.conv_transpose1d': [torch.float16], 'nn.functional.conv_transpose2d': [torch.float16], 'nn.functional.conv_transpose3d': [torch.float16], + 'nn.functional.nll_loss': [torch.float16], + 'nn.functional.cross_entropy': [torch.float16], } MACOS_13_3_XFAILLIST_GRAD = { @@ -987,7 +989,10 @@ def mps_ops_modifier(ops): 'nn.functional.avg_pool2d': [torch.float16], # input types 'tensor<f32>' and 'tensor<1xf16>' are not broadcast compatible # Refer to the issue please: https://github.com/pytorch/pytorch/issues/124252 - 'nn.functional.binary_cross_entropy': [torch.float16] + 'nn.functional.binary_cross_entropy': [torch.float16], + + 'nn.functional.nll_loss': [torch.float16], + 'nn.functional.cross_entropy': [torch.float16], } def addDecorator(op, d) -> None: diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py index 4b27ff2002..dd8915a756 100644 --- a/torch/testing/_internal/common_methods_invocations.py +++ b/torch/testing/_internal/common_methods_invocations.py @@ -13009,8 +13009,7 @@ op_db: 
List[OpInfo] = [ supports_out=False), OpInfo( "nn.functional.cross_entropy", - dtypes=floating_types_and(torch.bfloat16), - dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_cross_entropy, supports_out=False, supports_forward_ad=True, @@ -13033,6 +13032,9 @@ op_db: List[OpInfo] = [ "test_variant_consistency_jit", device_type="cuda", ), + DecorateInfo(unittest.skip("FP16 corss_entropy cases have not been enabled on MPS yet"), + dtypes=(torch.half,), device_type="mps"), + ) ), OpInfo('nn.functional.normalize', @@ -19427,8 +19429,7 @@ op_db: List[OpInfo] = [ ), OpInfo( "nn.functional.nll_loss", - dtypes=floating_types_and(torch.bfloat16), - dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), + dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_nll_loss, supports_forward_ad=True, @@ -19449,6 +19450,9 @@ op_db: List[OpInfo] = [ "test_cow_input", device_type='cuda', ), + DecorateInfo(unittest.skip("FP16 nll_loss cases have not been enabled on MPS yet"), + dtypes=(torch.half,), device_type="mps"), + ), ), OpInfo( diff --git a/torch/testing/_internal/common_modules.py b/torch/testing/_internal/common_modules.py index 9b58143b40..e111b20c08 100644 --- a/torch/testing/_internal/common_modules.py +++ b/torch/testing/_internal/common_modules.py @@ -4013,16 +4013,8 @@ module_db: List[ModuleInfo] = [ decorators=( # No channels_last support for loss functions. DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format'), - # Expect failures for tests that rely on torch.half implementation on CPU - DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", dtypes=[torch.float16], device_type='cpu'), - DecorateInfo(unittest.expectedFailure, "TestModule", "test_if_train_and_eval_modes_differ", - dtypes=[torch.float16], device_type='cpu'), - DecorateInfo(unittest.expectedFailure, "TestModule", "test_save_load", dtypes=[torch.float16], - device_type='cpu'), - DecorateInfo(unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", dtypes=[torch.float16], - device_type='cpu'), - DecorateInfo(unittest.expectedFailure, "TestModule", "test_multiple_device_transfer", dtypes=[torch.float16], - device_type='cuda'), + DecorateInfo(toleranceOverride({torch.float16: tol(atol=3e-2, rtol=1e-3)}), "TestModule", + "test_forward", dtypes=[torch.float16], device_type='cpu'), DecorateInfo(unittest.expectedFailure, "TestModule", "test_cpu_gpu_parity", dtypes=[torch.float16], device_type='cuda'),), ),
2.41.0
032a780080646828bdda15f3af0277288b2fa34
Thu, 18 Apr 2024 12:06:24 +0000
[PATCH 0314/1000] Migrate linux-focal-cuda11_8-py3_10-gcc9-build to ARC (#123721)
Migrate linux-focal-cuda11_8-py3_10-gcc9-build to ARC Pull Request resolved: https://github.com/pytorch/pytorch/pull/123721 Approved by: https://github.com/jeanschmidt
diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index 8f54248101..15c9e35587 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -232,7 +232,7 @@ jobs: linux-focal-cuda11_8-py3_10-gcc9-build: name: linux-focal-cuda11.8-py3.10-gcc9 - uses: ./.github/workflows/_linux-build-label.yml + uses: ./.github/workflows/_linux-build-rg.yml with: build-environment: linux-focal-cuda11.8-py3.10-gcc9 docker-image-name: pytorch-linux-focal-cuda11.8-cudnn8-py3-gcc9
2.41.0
5d4ebe9aeabc1fc46ca39dee2d446f9b5e9e114
Thu, 18 Apr 2024 12:06:53 +0000
[PATCH 0315/1000] Migrate linux-focal-cuda12_1-py3_10-gcc9-build to ARC (#123722)
Migrate linux-focal-cuda12_1-py3_10-gcc9-build to ARC Pull Request resolved: https://github.com/pytorch/pytorch/pull/123722 Approved by: https://github.com/jeanschmidt
diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index 15c9e35587..56ce145913 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -257,7 +257,7 @@ jobs: linux-focal-cuda12_1-py3_10-gcc9-build: name: linux-focal-cuda12.1-py3.10-gcc9 - uses: ./.github/workflows/_linux-build-label.yml + uses: ./.github/workflows/_linux-build-rg.yml with: build-environment: linux-focal-cuda12.1-py3.10-gcc9 docker-image-name: pytorch-linux-focal-cuda12.1-cudnn8-py3-gcc9
2.41.0
135c4b921dadf90dcacd68bc54cb03e66166703
Wed, 17 Apr 2024 12:28:30 -0700
[PATCH 0317/1000] torch.library.register_fake now accepts more types (#124066)
We allow it to accept:
- a string with the op name
- an opoverload
- a new-style custom op

If any of these refers to a new-style custom op (created with the custom_op decorator), then we dispatch to CustomOpDef.register_fake. Otherwise, we do what we previously did.

Test Plan:
- new tests

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124066 Approved by: https://github.com/albanD ghstack dependencies: #123937, #124064, #124065
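For reference, a minimal sketch of the three accepted forms, modeled on the new test in the diff below; the op name `mylib::scale` and its implementation are made up for illustration:

```python
import torch
from torch import Tensor

@torch.library.custom_op("mylib::scale", mutates_args=())
def scale(x: Tensor, factor: float) -> Tensor:
    return (x.cpu() * factor).to(x.device)

# Any of the three identifiers below now routes to the same registration;
# only one of them should actually be used for a given op.
@torch.library.register_fake(scale)        # 1) new-style custom op object
def _(x, factor):
    return torch.empty_like(x)

# 2) qualname string:  torch.library.register_fake("mylib::scale")
# 3) OpOverload:       torch.library.register_fake(torch.ops.mylib.scale.default)
```

With a fake impl registered, the op produces correctly shaped outputs under FakeTensorMode, which is what the added test exercises.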
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index 69a83eddde..037be095f6 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -2283,6 +2283,73 @@ class TestCustomOpAPI(TestCase): continue self.assertGreater(after, prev) + @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") + def test_library_register_fake(self): + for mode in ["function", "qualname", "opoverload"]: + + @torch.library.custom_op("_torch_testing::add", mutates_args=()) + def add(x: Tensor, y: float) -> Tensor: + x_np = x.cpu().numpy() + out_np = x_np + y + return torch.from_numpy(out_np).to(x.device) + + called = False + + if mode == "function": + dec = torch.library.register_fake(add) + elif mode == "qualname": + dec = torch.library.register_fake("_torch_testing::add") + elif mode == "opoverload": + dec = torch.library.register_fake(torch.ops._torch_testing.add.default) + + @dec + def _(x, y): + nonlocal called + called = True + return torch.empty_like(x) + + with torch._subclasses.fake_tensor.FakeTensorMode(): + x = torch.randn(3) + y = 3.14 + z = add(x, y) + self.assertEqual(z.shape, x.shape) + self.assertTrue(called) + + @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") + def test_library_register_fake_low_level(self): + for mode in ["qualname", "opoverload"]: + with torch.library._scoped_library("_torch_testing", "FRAGMENT") as lib: + lib.define("add3(Tensor x, float y) -> Tensor") + + def add(x: Tensor, y: float) -> Tensor: + x_np = x.cpu().numpy() + out_np = x_np + y + return torch.from_numpy(out_np).to(x.device) + + lib.impl("add3", add, "CPU") + + called = False + + if mode == "qualname": + dec = torch.library.register_fake("_torch_testing::add3", lib=lib) + elif mode == "opoverload": + dec = torch.library.register_fake( + torch.ops._torch_testing.add3.default, lib=lib + ) + + @dec + def _(x, y): + nonlocal called + called = True + return torch.empty_like(x) + + with torch._subclasses.fake_tensor.FakeTensorMode(): + x = torch.randn(3) + y = 3.14 + z = torch.ops._torch_testing.add3(x, y) + self.assertEqual(z.shape, x.shape) + self.assertTrue(called) + @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") def test_fake(self): @torch.library.custom_op("_torch_testing::add", mutates_args=()) diff --git a/torch/_library/custom_ops.py b/torch/_library/custom_ops.py index 802c8a2656..c7a8670c84 100644 --- a/torch/_library/custom_ops.py +++ b/torch/_library/custom_ops.py @@ -1,4 +1,5 @@ import inspect +import weakref from typing import ( Any, Callable, @@ -14,7 +15,7 @@ from typing import ( from torch.utils._exposed_in import exposed_in -from .. import _C, _library, autograd, library, Tensor +from .. 
import _C, _library, _ops, autograd, library, Tensor device_types_t = Optional[Union[str, Sequence[str]]] @@ -130,6 +131,7 @@ class CustomOpDef: self._lib = get_library_allowing_overwrite(self._namespace, self._name) self._register_to_dispatcher() + OPDEFS[self._qualname] = self @property def _qualname(self) -> str: @@ -441,6 +443,7 @@ class CustomOpDef: OPDEF_TO_LIB: Dict[str, "library.Library"] = {} +OPDEFS: weakref.WeakValueDictionary = weakref.WeakValueDictionary() def get_library_allowing_overwrite(namespace: str, name: str) -> "library.Library": @@ -468,3 +471,16 @@ def iter_tensors( yield from check(arg) for kwarg in kwargs.values(): yield from check(kwarg) + + +def _maybe_get_opdef( + op: Union[CustomOpDef, _ops.OpOverload, str] +) -> Optional[CustomOpDef]: + if isinstance(op, CustomOpDef): + return op + if isinstance(op, _ops.OpOverload): + op = op._name + assert isinstance(op, str) + if op in OPDEFS: + return OPDEFS[op] + return None diff --git a/torch/library.py b/torch/library.py index 4255b7d4d7..d7623f6b8a 100644 --- a/torch/library.py +++ b/torch/library.py @@ -1,5 +1,5 @@ from ._ops import OpOverload -from typing import Any, Optional, Set, List +from typing import Any, Optional, Set, List, Union, Callable import traceback import torch import weakref @@ -9,7 +9,7 @@ import re import contextlib import sys import warnings -from torch._library.custom_ops import custom_op +from torch._library.custom_ops import custom_op, _maybe_get_opdef __all__ = [ @@ -420,8 +420,17 @@ def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1): return register_fake(qualname, func, lib=lib, _stacklevel=_stacklevel + 1) +_op_identifier = Union[str, "torch._ops.OpOverload", "torch._library.custom_ops.CustomOpDef"] -def register_fake(qualname, func=None, /, *, lib=None, _stacklevel=1): + + +def register_fake( + op: _op_identifier, + func: Optional[Callable] = None, + /, + *, + lib: Optional[Library] = None, + _stacklevel: int = 1): r"""Register a FakeTensor implementation ("fake impl") for this operator. Also sometimes known as a "meta kernel", "abstract impl". 
@@ -451,9 +460,9 @@ def register_fake(qualname, func=None, /, *, lib=None, _stacklevel=1): >>> from torch import Tensor >>> >>> # Example 1: an operator without data-dependent output shape - >>> torch.library.define( - >>> "mylib::custom_linear", - >>> "(Tensor x, Tensor weight, Tensor bias) -> Tensor") + >>> @torch.library.custom_op("mylib::custom_linear", mutates_args=()) + >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor: + >>> raise NotImplementedError("Implementation goes here") >>> >>> @torch.library.register_fake("mylib::custom_linear") >>> def _(x, weight, bias): @@ -475,7 +484,11 @@ def register_fake(qualname, func=None, /, *, lib=None, _stacklevel=1): >>> assert y.shape == (2, 3) >>> >>> # Example 2: an operator with data-dependent output shape - >>> torch.library.define("mylib::custom_nonzero", "(Tensor x) -> Tensor") + >>> @torch.library.custom_op("mylib::custom_nonzero", mutates_args=()) + >>> def custom_nonzero(x: Tensor) -> Tensor: + >>> x_np = x.numpy(force=True) + >>> res = np.stack(np.nonzero(x_np), axis=1) + >>> return torch.tensor(res, device=x.device) >>> >>> @torch.library.register_fake("mylib::custom_nonzero") >>> def _(x): @@ -489,12 +502,6 @@ def register_fake(qualname, func=None, /, *, lib=None, _stacklevel=1): >>> result = x.new_empty(shape, dtype=torch.int64) >>> return result >>> - >>> @torch.library.impl("mylib::custom_nonzero", "cpu") - >>> def custom_nonzero_cpu(x): - >>> x_np = x.numpy() - >>> res = np.stack(np.nonzero(x_np), axis=1) - >>> return torch.tensor(res, device=x.device) - >>> >>> from torch.fx.experimental.proxy_tensor import make_fx >>> >>> x = torch.tensor([0, 1, 2, 3, 4, 0]) @@ -504,10 +511,22 @@ def register_fake(qualname, func=None, /, *, lib=None, _stacklevel=1): >>> assert torch.allclose(trace(x), torch.ops.mylib.custom_nonzero(x)) """ + if not isinstance(op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef)): + raise ValueError("register_fake(op): got unexpected type for op: {type(op)}") + if isinstance(op, torch._ops.OpOverload): + op = op._name + opdef = _maybe_get_opdef(op) + if opdef is not None: + if func is None: + return opdef.register_fake + else: + return opdef.register_fake(func) + assert isinstance(op, str) + stacklevel = _stacklevel def register(func): - namespace, op_name = torch._library.utils.parse_namespace(qualname) + namespace, op_name = torch._library.utils.parse_namespace(op) if lib is None: use_lib = Library(namespace, "FRAGMENT") _keep_alive.append(use_lib) @@ -520,7 +539,8 @@ def register_fake(qualname, func=None, /, *, lib=None, _stacklevel=1): return register else: stacklevel += 1 - register(func) + return register(func) + # If the op was defined in C++, then we want to make sure there was an # m.set_python_module(module, ...) call and that the module is the
2.41.0
45173a0b58c9fb23d862a6a4c170b78b9807718
Wed, 17 Apr 2024 16:49:34 -0700
[PATCH 0318/1000] Add torch.library.register_autograd (#124071)
Allows registering autograd for all custom op entry points:
- the new-style custom op API (custom_op)
- the old-style torch.library APIs
- C++ operator registration

Test Plan:
- tests

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124071 Approved by: https://github.com/albanD ghstack dependencies: #123937, #124064, #124065, #124066
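A condensed sketch of the new entry point, following the `numpy_sin` example from the docstring added in the diff below (argument order as of this PR: op, setup_context, backward):

```python
import numpy as np
import torch
from torch import Tensor

@torch.library.custom_op("mylib::numpy_sin", mutates_args=())
def numpy_sin(x: Tensor) -> Tensor:
    x_np = x.cpu().numpy()
    return torch.from_numpy(np.sin(x_np)).to(x.device)

def setup_context(ctx, inputs, output):
    (x,) = inputs
    ctx.save_for_backward(x)      # stash what backward will need

def backward(ctx, grad):
    (x,) = ctx.saved_tensors
    return grad * x.cos()         # d/dx sin(x) = cos(x)

# Accepts the op object, the "mylib::numpy_sin" qualname, or the OpOverload.
torch.library.register_autograd(numpy_sin, setup_context, backward)

x = torch.randn(3, requires_grad=True)
numpy_sin(x).sum().backward()
assert torch.allclose(x.grad, x.cos())
```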
diff --git a/docs/source/autograd.rst b/docs/source/autograd.rst index 046ee42717..195e96cd39 100644 --- a/docs/source/autograd.rst +++ b/docs/source/autograd.rst @@ -199,6 +199,8 @@ Tensor autograd functions Function.jvp Function.vmap +.. _context_method_mixins: + Context method mixins ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When creating a new :class:`Function`, the following methods are available to `ctx`. diff --git a/docs/source/library.rst b/docs/source/library.rst index c5f991ca8e..2b09e5c9ea 100644 --- a/docs/source/library.rst +++ b/docs/source/library.rst @@ -27,6 +27,7 @@ for any operators (they may have been created using :func:`torch.library.custom_ via PyTorch's C++ operator registration APIs). .. autofunction:: impl +.. autofunction:: register_autograd .. autofunction:: register_fake .. autofunction:: impl_abstract .. autofunction:: get_ctx diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index 037be095f6..f06fea8b0a 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -2316,39 +2316,87 @@ class TestCustomOpAPI(TestCase): self.assertTrue(called) @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") - def test_library_register_fake_low_level(self): + def test_library_register_autograd(self): + for mode in ["function", "qualname", "opoverload"]: + + @torch.library.custom_op("mylib::numpy_sin", mutates_args=()) + def numpy_sin(x: Tensor) -> Tensor: + x_np = x.cpu().numpy() + y_np = np.sin(x_np) + return torch.from_numpy(y_np).to(device=x.device) + + def setup_context(ctx, inputs, output) -> Tensor: + (x,) = inputs + ctx.save_for_backward(x) + + called = False + + def backward(ctx, grad): + nonlocal called + called = True + (x,) = ctx.saved_tensors + return grad * x.cos() + + if mode == "function": + torch.library.register_autograd(numpy_sin, setup_context, backward) + elif mode == "qualname": + torch.library.register_autograd( + "mylib::numpy_sin", setup_context, backward + ) + elif mode == "opoverload": + torch.library.register_autograd( + torch.ops.mylib.numpy_sin.default, setup_context, backward + ) + + x = torch.randn(3, requires_grad=True) + y = numpy_sin(x) + (grad_x,) = torch.autograd.grad(y, x, torch.ones_like(y)) + self.assertTrue(called) + self.assertEqual(grad_x, x.cos()) + + @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") + def test_library_register_autograd_low_level(self): for mode in ["qualname", "opoverload"]: with torch.library._scoped_library("_torch_testing", "FRAGMENT") as lib: - lib.define("add3(Tensor x, float y) -> Tensor") + lib.define("sin5(Tensor x) -> Tensor") - def add(x: Tensor, y: float) -> Tensor: - x_np = x.cpu().numpy() - out_np = x_np + y - return torch.from_numpy(out_np).to(x.device) + def numpy_sin(x: Tensor) -> Tensor: + x_np = x.cpu().detach().numpy() + y_np = np.sin(x_np) + return torch.from_numpy(y_np).to(device=x.device) - lib.impl("add3", add, "CPU") + def setup_context(ctx, inputs, output) -> Tensor: + (x,) = inputs + ctx.save_for_backward(x) + + called = False + + def backward(ctx, grad): + nonlocal called + called = True + (x,) = ctx.saved_tensors + return grad * x.cos() + + lib.impl("sin5", numpy_sin, "CPU") called = False if mode == "qualname": - dec = torch.library.register_fake("_torch_testing::add3", lib=lib) + torch.library.register_autograd( + "_torch_testing::sin5", setup_context, backward, lib=lib + ) elif mode == "opoverload": - dec = torch.library.register_fake( - torch.ops._torch_testing.add3.default, lib=lib + 
torch.library.register_autograd( + torch.ops._torch_testing.sin5.default, + setup_context, + backward, + lib=lib, ) - - @dec - def _(x, y): - nonlocal called - called = True - return torch.empty_like(x) - - with torch._subclasses.fake_tensor.FakeTensorMode(): - x = torch.randn(3) - y = 3.14 - z = torch.ops._torch_testing.add3(x, y) - self.assertEqual(z.shape, x.shape) - self.assertTrue(called) + x = torch.randn(3, requires_grad=True) + y = torch.ops._torch_testing.sin5(x) + (grad_x,) = torch.autograd.grad(y, x, torch.ones_like(y)) + self.assertTrue(called) + self.assertEqual(grad_x, x.cos()) @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") def test_fake(self): diff --git a/torch/_library/autograd.py b/torch/_library/autograd.py index ff4dc9cd7e..ab6cb001d8 100644 --- a/torch/_library/autograd.py +++ b/torch/_library/autograd.py @@ -1,26 +1,38 @@ -from typing import Any, Callable, Optional +import dataclasses +from typing import Any, Callable, Optional, Protocol -from .. import _C, autograd, Tensor +from .. import _C, _ops, autograd, Tensor from ..utils import _pytree from . import utils -def make_autograd_impl(opdef: Any) -> Callable: - name: str = f"GeneratedBackwardFor_{opdef._namespace}_{opdef._name}" +class InfoProtocol(Protocol): + _setup_context_fn: Optional[Callable] + _backward_fn: Optional[Callable] + + +@dataclasses.dataclass +class Info: + _setup_context_fn: Optional[Callable] + _backward_fn: Optional[Callable] + + +def make_autograd_impl(op: _ops.OpOverload, info: InfoProtocol) -> Callable: + name: str = f"GeneratedBackwardFor_{op._namespace}_{op._opname}_{op._overloadname}" def forward(ctx, *args): with _C._AutoDispatchBelowAutograd(): - result = opdef._opoverload(*args) - if opdef._setup_context_fn: - opdef._setup_context_fn(ctx, args, result) + result = op(*args) + if info._setup_context_fn: + info._setup_context_fn(ctx, args, result) return result def backward(ctx, *grads): - if opdef._backward_fn: - return opdef._backward_fn(ctx, *grads) + if info._backward_fn: + return info._backward_fn(ctx, *grads) raise RuntimeError( - f"Trying to backward through {opdef} but no autograd " + f"Trying to backward through {op} but no autograd " f"formula was registered. " f"Please use register_autograd to add one." ) @@ -34,7 +46,7 @@ def make_autograd_impl(opdef: Any) -> Callable: }, ) - schema = opdef._opoverload._schema + schema = op._schema if any( utils.is_tensorlist_like_type(a.type) for a in (*schema.arguments, *schema.returns) diff --git a/torch/_library/custom_ops.py b/torch/_library/custom_ops.py index c7a8670c84..489108181d 100644 --- a/torch/_library/custom_ops.py +++ b/torch/_library/custom_ops.py @@ -310,30 +310,33 @@ class CustomOpDef: return fn def register_autograd( - self, setup_context_fn: Callable, backward_fn: Callable, / + self, setup_context_fn: Optional[Callable], backward_fn: Callable, / ) -> None: r"""Register a backward formula for this custom op. In order for an operator to work with autograd, you need to register - a backward formula. There are two pieces to this: - 1. You must tell us what we need to save from the forward pass for - the backward pass. This is the "setup_context" function. - 2. You must tell us how to compute gradients during the backward pass. - This is the "backward" function. - - ``setup_context_fn(ctx, inputs, output)`` runs during the forward pass. - Please save quantities needed for backward onto the ``ctx`` object via - either :func:`ctx.save_for_backward` or assigning them as attributes of - ``ctx``. 
+ a backward formula: + 1. You must tell us how to compute gradients during the backward pass + by providing us a "backward" function. + 2. If you need any values from the forward to compute gradients, you can + use `setup_context` to save values for backward. ``backward_fn`` runs during the backward pass. It accepts ``(ctx, *grads)``: - ``grads`` is one or more gradients. The number of gradients matches - the number of outputs of the operator. + the number of outputs of the operator. + The ``ctx`` object is `the same ctx object <context_method_mixins>`_ used by + :class:`torch.autograd.Function`. The semantics of ``backward_fn`` are the + same as :meth:`torch.autograd.Function.backward`. + + ``setup_context_fn(ctx, inputs, output)`` runs during the forward pass. + Please save quantities needed for backward onto the ``ctx`` object via + either :meth:`torch.autograd.function.FunctionCtx.save_for_backward` + or assigning them as attributes of ``ctx``. Both ``setup_context_fn`` and ``backward_fn`` must be traceable. That is, - they may not directly access Tensor.data_ptr and they must not depend on - or mutate global state. If you need a non-traceable backward, you can make - it a separate custom_op that you call inside ``backward_fn``. + they may not directly access :meth:`torch.Tensor.data_ptr` and they must + not depend on or mutate global state. If you need a non-traceable backward, + you can make it a separate custom_op that you call inside ``backward_fn``. Examples: >>> import torch @@ -392,7 +395,7 @@ class CustomOpDef: lib._register_fake(self._name, fake_impl) - autograd_impl = _library.autograd.make_autograd_impl(self) + autograd_impl = _library.autograd.make_autograd_impl(self._opoverload, self) lib.impl(self._name, autograd_impl, "Autograd") schema = self._opoverload._schema diff --git a/torch/_ops.py b/torch/_ops.py index 9610662876..4080d7ad66 100644 --- a/torch/_ops.py +++ b/torch/_ops.py @@ -595,6 +595,14 @@ class OpOverload(OperatorBase): is_write = a.alias_info.is_write or is_write self.is_view = is_write is not None and not is_write + @property + def _namespace(self): + self._schema.name.split("::")[0] + + @property + def _opname(self): + self._schema.name.split("::")[1] + # it's a no-op since OpOverload object is immutable and must be unique for a given op overload. def __deepcopy__(self, memo=None): return self diff --git a/torch/library.py b/torch/library.py index d7623f6b8a..f1f72285dd 100644 --- a/torch/library.py +++ b/torch/library.py @@ -10,6 +10,7 @@ import contextlib import sys import warnings from torch._library.custom_ops import custom_op, _maybe_get_opdef +import torch._library as _library __all__ = [ @@ -542,6 +543,85 @@ def register_fake( return register(func) +def register_autograd(op: _op_identifier, setup_context_fn: Optional[Callable], backward_fn: Callable, /, *, lib=None) -> None: + r"""Register a backward formula for this custom op. + + In order for an operator to work with autograd, you need to register + a backward formula: + 1. You must tell us how to compute gradients during the backward pass + by providing us a "backward" function. + 2. If you need any values from the forward to compute gradients, you can + use `setup_context` to save values for backward. + + ``backward_fn`` runs during the backward pass. It accepts ``(ctx, *grads)``: + - ``grads`` is one or more gradients. The number of gradients matches + the number of outputs of the operator. 
+ The ``ctx`` object is `the same ctx object <context_method_mixins>`_ used by + :class:`torch.autograd.Function`. The semantics of ``backward_fn`` are the + same as :meth:`torch.autograd.Function.backward`. + + ``setup_context_fn(ctx, inputs, output)`` runs during the forward pass. + Please save quantities needed for backward onto the ``ctx`` object via + either :meth:`torch.autograd.function.FunctionCtx.save_for_backward` + or assigning them as attributes of ``ctx``. + + Both ``setup_context_fn`` and ``backward_fn`` must be traceable. That is, + they may not directly access :meth:`torch.Tensor.data_ptr` and they must + not depend on or mutate global state. If you need a non-traceable backward, + you can make it a separate custom_op that you call inside ``backward_fn``. + + Examples: + >>> import torch + >>> import numpy as np + >>> from torch import Tensor + >>> + >>> @torch.library.custom_op("mylib::numpy_sin", mutates_args=()) + >>> def numpy_sin(x: Tensor) -> Tensor: + >>> x_np = x.cpu().numpy() + >>> y_np = np.sin(x_np) + >>> return torch.from_numpy(y_np).to(device=x.device) + >>> + >>> def setup_context(ctx, inputs, output) -> Tensor: + >>> x, = inputs + >>> ctx.save_for_backward(x) + >>> + >>> def backward(ctx, grad): + >>> x, = ctx.saved_tensors + >>> return grad * x.cos() + >>> + >>> torch.library.register_autograd("mylib::numpy_sin", setup_context, backward) + >>> + >>> x = torch.randn(3, requires_grad=True) + >>> y = numpy_sin(x) + >>> grad_x, = torch.autograd.grad(y, x, torch.ones_like(y)) + >>> assert torch.allclose(grad_x, x.cos()) + + """ + if not isinstance(op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef)): + raise ValueError(f"register_autograd(op): got unexpected type for op: {type(op)}") + if isinstance(op, torch._ops.OpOverload): + op = op._name + opdef = _maybe_get_opdef(op) + if opdef is not None: + opdef.register_autograd(setup_context_fn, backward_fn) + return + + assert isinstance(op, str) + qualname = op + op = torch._library.utils.lookup_op(qualname) + schema = op._schema + if not _library.utils.is_functional_schema(schema): + raise RuntimeError( + f"Cannot register autograd formula for non-functional operator " + f"{op} with schema {schema}. Please create " + f"a functional operator and register an autograd formula for that." + ) + + info = _library.autograd.Info(setup_context_fn, backward_fn) + autograd_kernel = _library.autograd.make_autograd_impl(op, info) + impl(qualname, "Autograd", autograd_kernel, lib=lib) + + # If the op was defined in C++, then we want to make sure there was an # m.set_python_module(module, ...) call and that the module is the # same as the module that called torch.library.register_fake.
2.41.0
48c39c47d159add4f6217182812fbef31406244
Wed, 17 Apr 2024 16:49:35 -0700
[PATCH 0319/1000] Add OpOverload.redispatch; use it in new custom ops API (#124089)
A kernel has "dispatcher convention" if there is an additional keyset arg at the beginning of the argument list. This PR:
- adds a way to register kernels in dispatcher convention via Library.impl (pass with_keyset=True)
- adds OpOverload.redispatch

We use both of the above in the new custom ops API: we register the autograd kernel in dispatcher convention so that we can call redispatch the way PyTorch built-in ops do.

Test Plan:
- existing tests

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124089
Approved by: https://github.com/albanD
ghstack dependencies: #123937, #124064, #124065, #124066, #124071
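Not part of the patch: a minimal sketch of how the two pieces compose. The `mylib::my_sin` operator, its CPU kernel, and the surrounding script are made up for illustration; the keyset handling mirrors the autograd kernel in the diff below.
```
import torch
from torch._C import _after_autograd_keyset, _AutoDispatchBelowAutograd

lib = torch.library.Library("mylib", "FRAGMENT")
lib.define("my_sin(Tensor x) -> Tensor")
lib.impl("my_sin", torch.sin, "CPU")

op = torch.ops.mylib.my_sin.default

def autograd_kernel(keyset, x):
    # with_keyset=True makes the dispatcher prepend the current DispatchKeySet,
    # so the kernel can strip Autograd and everything above it and re-enter
    # dispatch for the remaining backend keys, the way built-in ops do.
    with _AutoDispatchBelowAutograd():
        return op.redispatch(keyset & _after_autograd_keyset, x)

lib.impl("my_sin", autograd_kernel, "Autograd", with_keyset=True)

print(torch.ops.mylib.my_sin(torch.randn(3)))
```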
diff --git a/c10/core/impl/PyInterpreter.cpp b/c10/core/impl/PyInterpreter.cpp index 04b95d14ba..04f9d7c972 100644 --- a/c10/core/impl/PyInterpreter.cpp +++ b/c10/core/impl/PyInterpreter.cpp @@ -34,7 +34,9 @@ struct NoopPyInterpreterVTable final : public PyInterpreterVTable { void python_op_registration_trampoline( const c10::OperatorHandle& op, c10::DispatchKey, - torch::jit::Stack* stack) const override { + c10::DispatchKeySet keyset, + torch::jit::Stack* stack, + bool with_keyset) const override { PANIC(python_op_registration_trampoline); } diff --git a/c10/core/impl/PyInterpreter.h b/c10/core/impl/PyInterpreter.h index 521a76aa0f..2685496899 100644 --- a/c10/core/impl/PyInterpreter.h +++ b/c10/core/impl/PyInterpreter.h @@ -148,7 +148,9 @@ struct C10_API PyInterpreterVTable { virtual void python_op_registration_trampoline( const c10::OperatorHandle& op, c10::DispatchKey, - torch::jit::Stack* stack) const = 0; + c10::DispatchKeySet keyset, + torch::jit::Stack* stack, + bool with_keyset) const = 0; virtual void throw_abstract_impl_not_imported_error( std::string opname, diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in index f017c16597..1c6e40b38c 100644 --- a/torch/_C/__init__.pyi.in +++ b/torch/_C/__init__.pyi.in @@ -1430,6 +1430,9 @@ class _DispatchModule: def define(self, schema: str, alias: str = "") -> _DispatchModule: ... def fallback_fallthrough(self, dispatch: str = "") -> _DispatchModule: ... +_after_ADInplaceOrView_keyset: DispatchKeySet +_after_autograd_keyset: DispatchKeySet + def _dispatch_library( kind: str, name: str, diff --git a/torch/_library/autograd.py b/torch/_library/autograd.py index ab6cb001d8..a475b3d2b9 100644 --- a/torch/_library/autograd.py +++ b/torch/_library/autograd.py @@ -21,16 +21,23 @@ class Info: def make_autograd_impl(op: _ops.OpOverload, info: InfoProtocol) -> Callable: name: str = f"GeneratedBackwardFor_{op._namespace}_{op._opname}_{op._overloadname}" + saved_keyset = None + def forward(ctx, *args): with _C._AutoDispatchBelowAutograd(): - result = op(*args) + nonlocal saved_keyset + keyset = saved_keyset + assert keyset is not None, "Should have been set by autograd_impl" + saved_keyset = None + result = op.redispatch(keyset & _C._after_autograd_keyset, *args) if info._setup_context_fn: info._setup_context_fn(ctx, args, result) return result def backward(ctx, *grads): if info._backward_fn: - return info._backward_fn(ctx, *grads) + result = info._backward_fn(ctx, *grads) + return result raise RuntimeError( f"Trying to backward through {op} but no autograd " f"formula was registered. " @@ -53,7 +60,13 @@ def make_autograd_impl(op: _ops.OpOverload, info: InfoProtocol) -> Callable: ): Generated = supports_tensorlist(Generated) - def autograd_impl(*args): + def autograd_impl(keyset, *args): + # We set a nonlocal to ferry keyset from here to the forward. + # This supports recursive calls (we implement the forward carefully so + # that it'll read saved_keyset before making a recursive call to the op). 
+ nonlocal saved_keyset + assert saved_keyset is None + saved_keyset = keyset result = Generated.apply(*args) # type: ignore[attr-defined] return result diff --git a/torch/_library/custom_ops.py b/torch/_library/custom_ops.py index 489108181d..508cca9ef9 100644 --- a/torch/_library/custom_ops.py +++ b/torch/_library/custom_ops.py @@ -396,12 +396,12 @@ class CustomOpDef: lib._register_fake(self._name, fake_impl) autograd_impl = _library.autograd.make_autograd_impl(self._opoverload, self) - lib.impl(self._name, autograd_impl, "Autograd") + lib.impl(self._name, autograd_impl, "Autograd", with_keyset=True) schema = self._opoverload._schema if schema.is_mutable: - def adinplaceorview_impl(*args, **kwargs): + def adinplaceorview_impl(keyset, *args, **kwargs): for arg, val in _library.utils.zip_schema(schema, args, kwargs): if not arg.alias_info: continue @@ -414,9 +414,16 @@ class CustomOpDef: if isinstance(v, Tensor): autograd.graph.increment_version(v) with _C._AutoDispatchBelowADInplaceOrView(): - return self._opoverload(*args, **kwargs) - - lib.impl(self._name, adinplaceorview_impl, "ADInplaceOrView") + return self._opoverload.redispatch( + keyset & _C._after_ADInplaceOrView_keyset, *args, **kwargs + ) + + lib.impl( + self._name, + adinplaceorview_impl, + "ADInplaceOrView", + with_keyset=True, + ) def __call__(self, *args, **kwargs): return self._opoverload(*args, **kwargs) diff --git a/torch/_ops.py b/torch/_ops.py index 4080d7ad66..8bfacf83c2 100644 --- a/torch/_ops.py +++ b/torch/_ops.py @@ -578,6 +578,9 @@ class OpOverload(OperatorBase): op.__module__ = overloadpacket.__module__ self.__qualname__ = self._name self.__annotations__ = {} + # Only compute the OperatorHandle when we need it. Not all OpOverloads have + # OperatorHandles (the TorchScript ones don't...) + self._lazy_handle = None # If the OpOverload was constructed from a Library.def in Python. self._defined_in_python = self.__qualname__ in torch.library._defs @@ -597,11 +600,19 @@ class OpOverload(OperatorBase): @property def _namespace(self): - self._schema.name.split("::")[0] + return self._schema.name.split("::")[0] @property def _opname(self): - self._schema.name.split("::")[1] + return self._schema.name.split("::")[1] + + @property + def _handle(self): + if self._lazy_handle is None: + self._lazy_handle = torch._C._dispatch_find_schema_or_throw( + self._schema.name, self._schema.overload_name + ) + return self._lazy_handle # it's a no-op since OpOverload object is immutable and must be unique for a given op overload. def __deepcopy__(self, memo=None): @@ -617,6 +628,11 @@ class OpOverload(OperatorBase): # are named "self". This way, all the aten ops can be called by kwargs. return self_._op(*args, **kwargs) + def redispatch(self_, keyset, *args, **kwargs): # noqa: B902 + # use `self_` to avoid naming collide with aten ops arguments that + # are named "self". This way, all the aten ops can be called by kwargs. 
+ return self_._handle.redispatch_boxed(keyset, *args, **kwargs) + def __hash__(self): return hash(self._op) @@ -638,11 +654,6 @@ class OpOverload(OperatorBase): def namespace(self): return self._schema.name.split("::")[0] - def _handle(self): - return torch._C._dispatch_find_schema_or_throw( - self._schema.name, self._schema.overload_name - ) - def decompose(self, *args, **kwargs): dk = torch._C.DispatchKey.CompositeImplicitAutograd if dk in self.py_kernels: diff --git a/torch/csrc/PyInterpreter.cpp b/torch/csrc/PyInterpreter.cpp index a64f32e5b5..f71253d400 100644 --- a/torch/csrc/PyInterpreter.cpp +++ b/torch/csrc/PyInterpreter.cpp @@ -60,9 +60,11 @@ struct ConcretePyInterpreterVTable final void python_op_registration_trampoline( const c10::OperatorHandle& op, c10::DispatchKey key, - torch::jit::Stack* stack) const override { + c10::DispatchKeySet keyset, + torch::jit::Stack* stack, + bool with_keyset) const override { torch::impl::dispatch::python_op_registration_trampoline_impl( - op, key, stack); + op, key, keyset, stack, with_keyset); } void throw_abstract_impl_not_imported_error( std::string opname, diff --git a/torch/csrc/utils/python_dispatch.cpp b/torch/csrc/utils/python_dispatch.cpp index 5af8c438b1..118528b503 100644 --- a/torch/csrc/utils/python_dispatch.cpp +++ b/torch/csrc/utils/python_dispatch.cpp @@ -108,11 +108,17 @@ struct EnableHermeticPyObject { class PythonKernelHolder : public c10::OperatorKernel { c10::SafePyObject func_; c10::DispatchKey dispatch_key_; + // If "with_keyset", then we expect a keyset as the first arg. + bool with_keyset_; public: - PythonKernelHolder(py::object func, c10::DispatchKey dispatch_key) + PythonKernelHolder( + py::object func, + c10::DispatchKey dispatch_key, + bool with_keyset = false) : func_(func.release().ptr(), getPyInterpreter()), - dispatch_key_(dispatch_key) {} + dispatch_key_(dispatch_key), + with_keyset_(with_keyset) {} void operator()( const c10::OperatorHandle& op, @@ -127,7 +133,8 @@ class PythonKernelHolder : public c10::OperatorKernel { const auto& cur_torch_dispatch_mode_state = c10::impl::TorchDispatchModeTLS::get_stack_at(mode_stack_len - 1); cur_torch_dispatch_mode_state->pyinterpreter() - ->python_op_registration_trampoline(op, dispatch_key_, stack); + ->python_op_registration_trampoline( + op, dispatch_key_, keyset, stack, with_keyset_); return; } @@ -144,7 +151,8 @@ class PythonKernelHolder : public c10::OperatorKernel { ivalue.unsafeToTensorImpl()->key_set().has( at::DispatchKey::Python)) { (*interpreter) - ->python_op_registration_trampoline(op, dispatch_key_, stack); + ->python_op_registration_trampoline( + op, dispatch_key_, keyset, stack, with_keyset_); return; } } else if (ivalue.isTensorList() || ivalue.isOptionalTensorList()) { @@ -159,7 +167,8 @@ class PythonKernelHolder : public c10::OperatorKernel { if (interpreter && nv.unsafeToTensorImpl()->key_set().has(at::DispatchKey::Python)) { (*interpreter) - ->python_op_registration_trampoline(op, dispatch_key_, stack); + ->python_op_registration_trampoline( + op, dispatch_key_, keyset, stack, with_keyset_); return; } } @@ -180,10 +189,11 @@ class PythonKernelHolder : public c10::OperatorKernel { EnableHermeticPyObject g2; #endif auto args_kwargs = parseIValuesToPyArgsKwargs(op, arguments); - auto obj = py::reinterpret_steal<py::object>(PyObject_Call( - func_.ptr(getPyInterpreter()), - args_kwargs.first.ptr(), - args_kwargs.second.ptr())); + auto func = + py::reinterpret_borrow<py::object>(func_.ptr(getPyInterpreter())); + auto obj = with_keyset_ + ? 
func(keyset, *args_kwargs.first, **args_kwargs.second) + : func(*args_kwargs.first, **args_kwargs.second); if (!obj) { throw python_error(); } @@ -242,7 +252,25 @@ void initDispatchBindings(PyObject* module) { py::class_<c10::OperatorHandle>(m, "_DispatchOperatorHandle") .def("schema", &c10::OperatorHandle::schema) - .def("debug", &c10::OperatorHandle::debug); + .def("debug", &c10::OperatorHandle::debug) + .def( + "redispatch_boxed", + [](py::object self, + c10::DispatchKeySet keyset, + py::args args, + const py::kwargs& kwargs) { + auto& handle = self.cast<c10::OperatorHandle&>(); + auto stack = torch::jit::createStackForSchema( + handle.schema(), + std::move(args), + kwargs, + /*self=*/c10::nullopt); + { + pybind11::gil_scoped_release no_gil_guard; + handle.redispatchBoxed(keyset, &stack); + } + return torch::jit::createPyObjectForStack(std::move(stack)); + }); m.def("_dispatch_call_boxed", &ophandle_call_boxed); @@ -351,7 +379,8 @@ void initDispatchBindings(PyObject* module) { const char* name, // TODO: empty string no longer works c10::DispatchKey dispatch, - py::object func) { + py::object func, + bool with_keyset) { HANDLE_TH_ERRORS auto& lib = self.cast<torch::Library&>(); if (func.is(py::module::import("torch.library") @@ -367,7 +396,7 @@ void initDispatchBindings(PyObject* module) { dispatch, CppFunction::makeFromBoxedFunctor( std::make_unique<PythonKernelHolder>( - func, dispatch))), + func, dispatch, with_keyset))), register_or_verify()); python_registrations_[lib._resolve(name)].insert_or_assign( dispatch, @@ -379,7 +408,8 @@ void initDispatchBindings(PyObject* module) { "", py::arg("name"), py::arg("dispatch"), - py::arg("func")) + py::arg("func"), + py::arg("with_keyset") = false) .def( "define", [](const py::object& self, @@ -670,6 +700,10 @@ void initDispatchBindings(PyObject* module) { m.attr("_additional_keys_to_prop_for_wrapper_tensors") = py::cast(at::functorch::kKeysToPropagateToWrapper); + m.attr("_after_autograd_keyset") = py::cast(c10::after_autograd_keyset); + m.attr("_after_ADInplaceOrView_keyset") = + py::cast(c10::after_ADInplaceOrView_keyset); + m.def("_dispatch_has_backend_fallback", [](c10::DispatchKey t) { return c10::Dispatcher::singleton().hasBackendFallbackForDispatchKey(t); }); @@ -880,7 +914,9 @@ void initDispatchBindings(PyObject* module) { void python_op_registration_trampoline_impl( const c10::OperatorHandle& op, c10::DispatchKey key, - torch::jit::Stack* stack) { + c10::DispatchKeySet keyset, + torch::jit::Stack* stack, + bool with_keyset) { auto arguments = torch::jit::pop(*stack, op.schema().arguments().size()); py::gil_scoped_acquire g; auto args_kwargs = parseIValuesToPyArgsKwargs(op, arguments); @@ -888,8 +924,10 @@ void python_op_registration_trampoline_impl( TORCH_INTERNAL_ASSERT(func != nullptr); auto* pyobj = func->ptr(getPyInterpreter()); TORCH_INTERNAL_ASSERT(pyobj != nullptr); - auto obj = py::reinterpret_steal<py::object>( - PyObject_Call(pyobj, args_kwargs.first.ptr(), args_kwargs.second.ptr())); + auto callable = py::reinterpret_borrow<py::object>(pyobj); + auto obj = with_keyset + ? 
callable(keyset, *args_kwargs.first, **args_kwargs.second) + : callable(*args_kwargs.first, **args_kwargs.second); if (!obj) { throw python_error(); } diff --git a/torch/csrc/utils/python_dispatch.h b/torch/csrc/utils/python_dispatch.h index d719de7305..9549b817ba 100644 --- a/torch/csrc/utils/python_dispatch.h +++ b/torch/csrc/utils/python_dispatch.h @@ -10,7 +10,9 @@ void initDispatchBindings(PyObject* module); void python_op_registration_trampoline_impl( const c10::OperatorHandle& op, c10::DispatchKey key, - torch::jit::Stack* stack); + c10::DispatchKeySet keyset, + torch::jit::Stack* stack, + bool with_keyset); } // namespace dispatch } // namespace impl diff --git a/torch/library.py b/torch/library.py index f1f72285dd..ce6c75c261 100644 --- a/torch/library.py +++ b/torch/library.py @@ -139,7 +139,7 @@ class Library: handle = entry.abstract_impl.register(func_to_register, source) self._registration_handles.append(handle) - def impl(self, op_name, fn, dispatch_key=''): + def impl(self, op_name, fn, dispatch_key='', *, with_keyset=False): r'''Registers the function implementation for an operator defined in the library. Args: @@ -195,7 +195,7 @@ class Library: " for the base ops that it decomposes into.") assert self.m is not None - self.m.impl(name, dispatch_key if dispatch_key != "" else "CompositeImplicitAutograd", fn) + self.m.impl(name, dispatch_key if dispatch_key != "" else "CompositeImplicitAutograd", fn, with_keyset) _impls.add(key) self._op_impls.add(key) @@ -619,7 +619,11 @@ def register_autograd(op: _op_identifier, setup_context_fn: Optional[Callable], info = _library.autograd.Info(setup_context_fn, backward_fn) autograd_kernel = _library.autograd.make_autograd_impl(op, info) - impl(qualname, "Autograd", autograd_kernel, lib=lib) + namespace, opname = torch._library.utils.parse_namespace(qualname) + if lib is None: + lib = Library(namespace, "FRAGMENT") + _keep_alive.append(lib) + lib.impl(opname, autograd_kernel, "Autograd", with_keyset=True) # If the op was defined in C++, then we want to make sure there was an @@ -644,7 +648,7 @@ def _check_pystubs_once(func, qualname, actual_module_name): if maybe_pystub is None: if torch._library.utils.requires_set_python_module(): namespace = op.namespace - cpp_filename = op._handle().debug() + cpp_filename = op._handle.debug() raise RuntimeError( f"Operator '{qualname}' was defined in C++ and has a Python " f"fake impl. In this situation, we require there to also be a " @@ -655,7 +659,7 @@ def _check_pystubs_once(func, qualname, actual_module_name): else: pystub_module = maybe_pystub[0] if actual_module_name != pystub_module: - cpp_filename = op._handle().debug() + cpp_filename = op._handle.debug() raise RuntimeError( f"Operator '{qualname}' specified that its python fake impl " f"is in the Python module '{pystub_module}' but it was actually found "
2.41.0
542874311e82787abf4cee82ca3a1ca5e582d49
Wed, 17 Apr 2024 16:49:35 -0700
[PATCH 0320/1000] Delete qualname from custom_op decorator (#124092)
I forgot to delete this in an earlier PR. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124092 Approved by: https://github.com/albanD ghstack dependencies: #123937, #124064, #124065, #124066, #124071, #124089
diff --git a/torch/_library/custom_ops.py b/torch/_library/custom_ops.py index 508cca9ef9..f36d9e3393 100644 --- a/torch/_library/custom_ops.py +++ b/torch/_library/custom_ops.py @@ -28,7 +28,6 @@ def custom_op( *, mutates_args: Iterable[str], device_types: device_types_t = None, - qualname: Optional[str] = None, ) -> Callable: """Wraps a function into custom operator.
2.41.0
51f66c1950a582dd18d1b2ee67df840a8c4dbbe
Thu, 18 Apr 2024 13:35:48 +0000
[PATCH 0321/1000] [Environment Variable][1/N] Use thread-safe env variable API in c10 (#119449)
This PR is the beginning of an effort to wrap the thread-unsafe getenv and setenv functions inside an RW mutex.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/119449
Approved by: https://github.com/albanD
diff --git a/c10/core/impl/alloc_cpu.cpp b/c10/core/impl/alloc_cpu.cpp index 9b7ae22f9f..def4c3a3a9 100644 --- a/c10/core/impl/alloc_cpu.cpp +++ b/c10/core/impl/alloc_cpu.cpp @@ -3,6 +3,7 @@ #include <c10/core/alignment.h> #include <c10/util/Flags.h> #include <c10/util/Logging.h> +#include <c10/util/env.h> #include <c10/util/irange.h> #include <c10/util/numa.h> @@ -53,8 +54,8 @@ void memset_junk(void* data, size_t num) { #if defined(__linux__) && !defined(__ANDROID__) static inline bool is_thp_alloc_enabled() { static bool value = [&] { - const char* ptr = std::getenv("THP_MEM_ALLOC_ENABLE"); - return ptr != nullptr ? std::atoi(ptr) : 0; + auto env = c10::utils::check_env("THP_MEM_ALLOC_ENABLE"); + return env.has_value() ? env.value() : 0; }(); return value; } diff --git a/c10/cuda/CUDAAllocatorConfig.cpp b/c10/cuda/CUDAAllocatorConfig.cpp index 1f81ed47b6..ca38dfd6a4 100644 --- a/c10/cuda/CUDAAllocatorConfig.cpp +++ b/c10/cuda/CUDAAllocatorConfig.cpp @@ -234,7 +234,7 @@ size_t CUDAAllocatorConfig::parseAllocatorConfig( return i; } -void CUDAAllocatorConfig::parseArgs(const char* env) { +void CUDAAllocatorConfig::parseArgs(const std::optional<std::string>& env) { // If empty, set the default values m_max_split_size = std::numeric_limits<size_t>::max(); m_roundup_power2_divisions.assign(kRoundUpPowerOfTwoIntervals, 0); @@ -242,16 +242,16 @@ void CUDAAllocatorConfig::parseArgs(const char* env) { bool used_cudaMallocAsync = false; bool used_native_specific_option = false; - if (env == nullptr) { + if (!env.has_value()) { return; } { std::lock_guard<std::mutex> lock(m_last_allocator_settings_mutex); - m_last_allocator_settings = env; + m_last_allocator_settings = env.value(); } std::vector<std::string> config; - lexArgs(env, config); + lexArgs(env.value().c_str(), config); for (size_t i = 0; i < config.size(); i++) { std::string_view config_item_view(config[i]); diff --git a/c10/cuda/CUDAAllocatorConfig.h b/c10/cuda/CUDAAllocatorConfig.h index 3106fc1b46..db5c9e1c8f 100644 --- a/c10/cuda/CUDAAllocatorConfig.h +++ b/c10/cuda/CUDAAllocatorConfig.h @@ -2,6 +2,7 @@ #include <c10/cuda/CUDAMacros.h> #include <c10/util/Exception.h> +#include <c10/util/env.h> #include <atomic> #include <cstddef> @@ -72,14 +73,13 @@ class C10_CUDA_API CUDAAllocatorConfig { static CUDAAllocatorConfig& instance() { static CUDAAllocatorConfig* s_instance = ([]() { auto inst = new CUDAAllocatorConfig(); - const char* env = getenv("PYTORCH_CUDA_ALLOC_CONF"); - inst->parseArgs(env); + inst->parseArgs(c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF")); return inst; })(); return *s_instance; } - void parseArgs(const char* env); + void parseArgs(const std::optional<std::string>& env); private: CUDAAllocatorConfig(); diff --git a/c10/cuda/CUDACachingAllocator.cpp b/c10/cuda/CUDACachingAllocator.cpp index c472e82ce2..afac5272b6 100644 --- a/c10/cuda/CUDACachingAllocator.cpp +++ b/c10/cuda/CUDACachingAllocator.cpp @@ -8,6 +8,7 @@ #include <c10/util/CallOnce.h> #include <c10/util/ScopeExit.h> #include <c10/util/UniqueVoidPtr.h> +#include <c10/util/env.h> #include <c10/util/flat_hash_map.h> #include <c10/util/hash.h> #include <c10/util/irange.h> @@ -2831,7 +2832,7 @@ class DeviceCachingAllocator { // errors, since the caching allocator foils cuda-memcheck. 
bool forceUncachedAllocator() { static bool force_uncached = - getenv("PYTORCH_NO_CUDA_MEMORY_CACHING") != nullptr; + c10::utils::has_env("PYTORCH_NO_CUDA_MEMORY_CACHING"); return force_uncached; } @@ -3363,9 +3364,9 @@ struct BackendStaticInitializer { // version checks, to CUDAAllocatorConfig's runtime doublecheck. If this // works, maybe we should move all of CUDAAllocatorConfig here? CUDAAllocator* parseEnvForBackend() { - const char* val = getenv("PYTORCH_CUDA_ALLOC_CONF"); - if (val != nullptr) { - const std::string config(val); + const auto val = c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF"); + if (val.has_value()) { + const std::string& config = val.value(); std::regex exp("[\\s,]+"); std::sregex_token_iterator it(config.begin(), config.end(), exp, -1); diff --git a/c10/cuda/CUDADeviceAssertionHost.cpp b/c10/cuda/CUDADeviceAssertionHost.cpp index 1d52af7812..ec41e6230f 100644 --- a/c10/cuda/CUDADeviceAssertionHost.cpp +++ b/c10/cuda/CUDADeviceAssertionHost.cpp @@ -3,6 +3,7 @@ #include <c10/cuda/CUDAFunctions.h> #include <c10/util/Backtrace.h> #include <c10/util/Exception.h> +#include <c10/util/env.h> #include <c10/util/irange.h> #include <cuda_runtime.h> @@ -80,8 +81,8 @@ bool dsa_check_if_all_devices_support_managed_memory() { } bool env_flag_set(const char* env_var_name) { - const char* const env_string = std::getenv(env_var_name); - return (env_string == nullptr) ? false : std::strcmp(env_string, "0"); + const auto env_flag = c10::utils::check_env(env_var_name); + return env_flag.has_value() && env_flag.value(); } /// Deleter for UVM/managed memory pointers diff --git a/c10/cuda/CUDAMiscFunctions.cpp b/c10/cuda/CUDAMiscFunctions.cpp index 11ea775366..9ef724813e 100644 --- a/c10/cuda/CUDAMiscFunctions.cpp +++ b/c10/cuda/CUDAMiscFunctions.cpp @@ -1,12 +1,14 @@ #include <c10/cuda/CUDAMiscFunctions.h> -#include <cstdlib> +#include <c10/util/env.h> namespace c10::cuda { +// NOLINTNEXTLINE(bugprone-exception-escape,-warnings-as-errors) const char* get_cuda_check_suffix() noexcept { - static char* device_blocking_flag = getenv("CUDA_LAUNCH_BLOCKING"); + static auto device_blocking_flag = + c10::utils::check_env("CUDA_LAUNCH_BLOCKING"); static bool blocking_enabled = - (device_blocking_flag && atoi(device_blocking_flag)); + (device_blocking_flag.has_value() && device_blocking_flag.value()); if (blocking_enabled) { return ""; } else { diff --git a/c10/test/util/DeadlockDetection_test.cpp b/c10/test/util/DeadlockDetection_test.cpp index 35c4953f6d..05ae154e22 100644 --- a/c10/test/util/DeadlockDetection_test.cpp +++ b/c10/test/util/DeadlockDetection_test.cpp @@ -1,9 +1,8 @@ #include <c10/util/DeadlockDetection.h> +#include <c10/util/env.h> #include <gtest/gtest.h> -#include <cstdlib> - using namespace ::testing; using namespace c10::impl; @@ -23,7 +22,7 @@ TEST(DeadlockDetection, basic) { #ifndef _WIN32 TEST(DeadlockDetection, disable) { - setenv("TORCH_DISABLE_DEADLOCK_DETECTION", "1", 1); + c10::utils::set_env("TORCH_DISABLE_DEADLOCK_DETECTION", "1"); DummyPythonGILHooks hooks; SetPythonGILHooks(&hooks); SetPythonGILHooks(&hooks); diff --git a/c10/util/DeadlockDetection.cpp b/c10/util/DeadlockDetection.cpp index 320fa7873c..4b00d24534 100644 --- a/c10/util/DeadlockDetection.cpp +++ b/c10/util/DeadlockDetection.cpp @@ -1,6 +1,5 @@ #include <c10/util/DeadlockDetection.h> - -#include <cstdlib> +#include <c10/util/env.h> namespace c10::impl { @@ -8,7 +7,7 @@ namespace { PythonGILHooks* python_gil_hooks = nullptr; bool disable_detection() { - return 
std::getenv("TORCH_DISABLE_DEADLOCK_DETECTION") != nullptr; + return c10::utils::has_env("TORCH_DISABLE_DEADLOCK_DETECTION"); } } // namespace diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp index e9c9e9c2f3..17459f69fa 100644 --- a/c10/util/Logging.cpp +++ b/c10/util/Logging.cpp @@ -1,6 +1,7 @@ #include <c10/util/Backtrace.h> #include <c10/util/Flags.h> #include <c10/util/Logging.h> +#include <c10/util/env.h> #ifdef FBCODE_CAFFE2 #include <folly/synchronization/SanitizeThread.h> #endif @@ -10,7 +11,6 @@ #endif #include <algorithm> -#include <cstdlib> #include <iostream> // Common code that we use regardless of whether we use glog or not. @@ -94,8 +94,8 @@ using DDPUsageLoggerType = std::function<void(const DDPLoggingData&)>; namespace { bool IsAPIUsageDebugMode() { - const char* val = getenv("PYTORCH_API_USAGE_STDERR"); - return val && *val; // any non-empty value + auto val = c10::utils::get_env("PYTORCH_API_USAGE_STDERR"); + return val.has_value() && !val.value().empty(); // any non-empty value } void APIUsageDebug(const string& event) { @@ -438,10 +438,10 @@ namespace c10::detail { namespace { void setLogLevelFlagFromEnv() { - const char* level_str = std::getenv("TORCH_CPP_LOG_LEVEL"); + auto level_env = c10::utils::get_env("TORCH_CPP_LOG_LEVEL"); // Not set, fallback to the default level (i.e. WARNING). - std::string level{level_str != nullptr ? level_str : ""}; + std::string level{level_env.has_value() ? level_env.value() : ""}; if (level.empty()) { return; } diff --git a/c10/util/env.cpp b/c10/util/env.cpp new file mode 100644 index 0000000000..865c6b9497 --- /dev/null +++ b/c10/util/env.cpp @@ -0,0 +1,108 @@ +#include <c10/util/Exception.h> +#include <c10/util/env.h> +#include <fmt/format.h> +#include <cstdlib> +#include <shared_mutex> + +namespace c10::utils { + +static std::shared_mutex env_mutex; + +// Set an environment variable. +void set_env(const char* name, const char* value, bool overwrite) { + std::lock_guard lk(env_mutex); +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif +#ifdef _MSC_VER + if (!overwrite) { + // NOLINTNEXTLINE(concurrency-mt-unsafe) + if (std::getenv(name) != nullptr) { + return; + } + } + auto full_env_variable = fmt::format("{}={}", name, value); + // NOLINTNEXTLINE(concurrency-mt-unsafe) + auto err = putenv(full_env_variable.c_str()); + TORCH_INTERNAL_ASSERT( + err == 0, + "putenv failed for environment \"", + name, + "\", the error is: ", + err); +#else + // NOLINTNEXTLINE(concurrency-mt-unsafe) + auto err = setenv(name, value, static_cast<int>(overwrite)); + TORCH_INTERNAL_ASSERT( + err == 0, + "setenv failed for environment \"", + name, + "\", the error is: ", + err); +#endif +#ifdef _MSC_VER +#pragma warning(pop) +#endif + return; +} + +// Checks an environment variable is set. 
+bool has_env(const char* name) noexcept { + std::shared_lock lk(env_mutex); +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + // NOLINTNEXTLINE(concurrency-mt-unsafe) + auto envar = std::getenv(name); +#ifdef _MSC_VER +#pragma warning(pop) +#endif + return envar != nullptr; +} + +// Reads an environment variable and returns the content if it is set +std::optional<std::string> get_env(const char* name) noexcept { + std::shared_lock lk(env_mutex); +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + // NOLINTNEXTLINE(concurrency-mt-unsafe) + auto envar = std::getenv(name); +#ifdef _MSC_VER +#pragma warning(pop) +#endif + if (envar != nullptr) { + return std::string(envar); + } + return std::nullopt; +} + +// Reads an environment variable and returns +// - optional<true>, if set equal to "1" +// - optional<false>, if set equal to "0" +// - nullopt, otherwise +// +// NB: +// Issues a warning if the value of the environment variable is not 0 or 1. +std::optional<bool> check_env(const char* name) { + auto env_opt = get_env(name); + if (env_opt.has_value()) { + if (*env_opt == "0") { + return false; + } + if (*env_opt == "1") { + return true; + } + TORCH_WARN( + "Ignoring invalid value for boolean flag ", + name, + ": ", + *env_opt, + "valid values are 0 or 1."); + } + return std::nullopt; +} +} // namespace c10::utils diff --git a/c10/util/env.h b/c10/util/env.h index 3db116c7db..04b7585861 100644 --- a/c10/util/env.h +++ b/c10/util/env.h @@ -1,11 +1,20 @@ #pragma once -#include <c10/util/Exception.h> -#include <cstdlib> -#include <cstring> +#include <c10/macros/Export.h> #include <optional> +#include <string> namespace c10::utils { + +// Set an environment variable. +C10_API void set_env( + const char* name, + const char* value, + bool overwrite = true); + +// Checks an environment variable is set. +C10_API bool has_env(const char* name) noexcept; + // Reads an environment variable and returns // - optional<true>, if set equal to "1" // - optional<false>, if set equal to "0" @@ -13,29 +22,10 @@ namespace c10::utils { // // NB: // Issues a warning if the value of the environment variable is not 0 or 1. -inline std::optional<bool> check_env(const char* name) { -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif - auto envar = std::getenv(name); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - if (envar) { - if (strcmp(envar, "0") == 0) { - return false; - } - if (strcmp(envar, "1") == 0) { - return true; - } - TORCH_WARN( - "Ignoring invalid value for boolean flag ", - name, - ": ", - envar, - "valid values are 0 or 1."); - } - return std::nullopt; -} +C10_API std::optional<bool> check_env(const char* name); + +// Reads the value of an environment variable if it is set. +// However, check_env should be used if the value is assumed to be a flag. +C10_API std::optional<std::string> get_env(const char* name) noexcept; + } // namespace c10::utils diff --git a/c10/util/tempfile.cpp b/c10/util/tempfile.cpp index 28c3c7f14f..f106885a88 100644 --- a/c10/util/tempfile.cpp +++ b/c10/util/tempfile.cpp @@ -1,4 +1,5 @@ #include <c10/util/Exception.h> +#include <c10/util/env.h> #include <c10/util/tempfile.h> #include <fmt/format.h> @@ -22,10 +23,11 @@ static std::string make_filename(std::string_view name_prefix) { // We see if any of these environment variables is set and use their value, or // else default the temporary directory to `/tmp`. 
- const char* tmp_directory = "/tmp"; + std::string tmp_directory = "/tmp"; for (const char* variable : {"TMPDIR", "TMP", "TEMP", "TEMPDIR"}) { - if (const char* path = getenv(variable)) { - tmp_directory = path; + auto path_opt = c10::utils::get_env(variable); + if (path_opt.has_value()) { + tmp_directory = path_opt.value(); break; } }
2.41.0
325fd94a4927af5f08dcb063711097f1b034e38
Thu, 18 Apr 2024 18:41:37 +0000
[PATCH 0322/1000] Support xpu autocast policy (#124052)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124052 Approved by: https://github.com/jgong5, https://github.com/EikanWang, https://github.com/gujinghui, https://github.com/albanD
diff --git a/aten/src/ATen/autocast_mode.cpp b/aten/src/ATen/autocast_mode.cpp index 14b8a36199..0b1dac55f3 100644 --- a/aten/src/ATen/autocast_mode.cpp +++ b/aten/src/ATen/autocast_mode.cpp @@ -569,5 +569,42 @@ TORCH_LIBRARY_IMPL(aten, AutocastCPU, m) { } +TORCH_LIBRARY_IMPL(_, AutocastXPU, m) { + m.fallback(torch::CppFunction::makeFallthrough()); +} + +TORCH_LIBRARY_IMPL(aten, AutocastXPU, m) { + // lower_precision_fp +#define _KERNEL_XPU_LOW_PRECISION_FP(...) \ + KERNEL_XPU(__VA_ARGS__, lower_precision_fp) + + AT_FORALL_LOWER_PRECISION_FP(_KERNEL_XPU_LOW_PRECISION_FP) + + // fp32 +#define _KERNEL_XPU_FP32(...) KERNEL_XPU(__VA_ARGS__, fp32) + + AT_FORALL_FP32(_KERNEL_XPU_FP32) + + // fp32_set_opt_dtype +#define _KERNEL_XPU_FP32_SET_OPT_DTYPE(...) \ + KERNEL_XPU(__VA_ARGS__, fp32_set_opt_dtype) + + AT_FORALL_FP32_SET_OPT_DTYPE(_KERNEL_XPU_FP32_SET_OPT_DTYPE) + + // fp32_append_dtype + // The fp32_append_dtype wrapper overrides implicit promotion behavior. + // norm does not implicitly promote, but be aware when adding new ops to this policy. + AT_FORALL_DIFFERENT_REDISPATCH_SIGNATURE( + KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_XPU) + + // promote +#define _KERNEL_XPU_PROMOTE(...) KERNEL_XPU(__VA_ARGS__, promote) + + AT_FORALL_PROMOTE(_KERNEL_XPU_PROMOTE) + + m.impl(TORCH_SELECTIVE_NAME("aten::binary_cross_entropy"), + TORCH_FN((&at::autocast::binary_cross_entropy_banned))); +} + } // namespace } // namespace at::autocast diff --git a/aten/src/ATen/autocast_mode.h b/aten/src/ATen/autocast_mode.h index 9d400db03e..eead4bf2c9 100644 --- a/aten/src/ATen/autocast_mode.h +++ b/aten/src/ATen/autocast_mode.h @@ -630,6 +630,24 @@ copy pasted in from VariableTypeEverything.cpp with appropriate substitutions. REDISPATCH_SIGNATURE, \ POLICY) +// KERNEL_XPU/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_XPU +// registration (OP, POLICY) or (OP, OVERLOAD, POLICY) for AutocastXPU +#define KERNEL_XPU(...) KERNEL(c10::DeviceType::XPU, __VA_ARGS__) + +#define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_XPU( \ + REDISPATCH_FUNC, \ + REGISTER_NAME, \ + REGISTER_SIGNATURE, \ + REDISPATCH_SIGNATURE, \ + POLICY) \ + KERNEL_DIFFERENT_REDISPATCH_SIGNATURE( \ + c10::DeviceType::XPU, \ + REDISPATCH_FUNC, \ + REGISTER_NAME, \ + REGISTER_SIGNATURE, \ + REDISPATCH_SIGNATURE, \ + POLICY) + // KERNEL_PRIVATEUSEONE/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_PRIVATEUSEONE // registration (OP, POLICY) or (OP, OVERLOAD, POLICY) for AutocastPrivateUse1 #define KERNEL_PRIVATEUSEONE(OP, ...) \
2.41.0
385ef2a5dbd62cb877e863c91ff29a43c340456
Thu, 18 Apr 2024 14:07:00 +0000
[PATCH 0323/1000] Revert "Skip workspace permission change for ROCm CI (#123816)"
This reverts commit 4322a0e782119f870ba1a17aec2be8a0ef1103d7. Reverted https://github.com/pytorch/pytorch/pull/123816 on behalf of https://github.com/DanilBaibak due to Broken trunk ([comment](https://github.com/pytorch/pytorch/pull/123816#issuecomment-2063949316))
diff --git a/.ci/pytorch/build.sh b/.ci/pytorch/build.sh index 13069482ae..3a51f255fe 100755 --- a/.ci/pytorch/build.sh +++ b/.ci/pytorch/build.sh @@ -223,23 +223,19 @@ if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]] export BUILD_STATIC_RUNTIME_BENCHMARK=ON fi -# Do not change workspace permissions for ROCm CI jobs -# as it can leave workspace with bad permissions for cancelled jobs -if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then - # Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96) - WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace") - cleanup_workspace() { - echo "sudo may print the following warning message that can be ignored. The chown command will still run." - echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted" - echo "For more details refer to https://github.com/sudo-project/sudo/issues/42" - sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace - } - # Disable shellcheck SC2064 as we want to parse the original owner immediately. - # shellcheck disable=SC2064 - trap_add cleanup_workspace EXIT - sudo chown -R jenkins /var/lib/jenkins/workspace - git config --global --add safe.directory /var/lib/jenkins/workspace -fi +# Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96) +WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace") +cleanup_workspace() { + echo "sudo may print the following warning message that can be ignored. The chown command will still run." + echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted" + echo "For more details refer to https://github.com/sudo-project/sudo/issues/42" + sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace +} +# Disable shellcheck SC2064 as we want to parse the original owner immediately. +# shellcheck disable=SC2064 +trap_add cleanup_workspace EXIT +sudo chown -R jenkins /var/lib/jenkins/workspace +git config --global --add safe.directory /var/lib/jenkins/workspace if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then set -e diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh index e0272d35df..5408e0f596 100755 --- a/.ci/pytorch/test.sh +++ b/.ci/pytorch/test.sh @@ -9,23 +9,19 @@ set -ex # shellcheck source=./common.sh source "$(dirname "${BASH_SOURCE[0]}")/common.sh" -# Do not change workspace permissions for ROCm CI jobs -# as it can leave workspace with bad permissions for cancelled jobs -if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then - # Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96) - WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace") - cleanup_workspace() { - echo "sudo may print the following warning message that can be ignored. The chown command will still run." - echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted" - echo "For more details refer to https://github.com/sudo-project/sudo/issues/42" - sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace - } - # Disable shellcheck SC2064 as we want to parse the original owner immediately. 
- # shellcheck disable=SC2064 - trap_add cleanup_workspace EXIT - sudo chown -R jenkins /var/lib/jenkins/workspace - git config --global --add safe.directory /var/lib/jenkins/workspace -fi +# Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96) +WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace") +cleanup_workspace() { + echo "sudo may print the following warning message that can be ignored. The chown command will still run." + echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted" + echo "For more details refer to https://github.com/sudo-project/sudo/issues/42" + sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace +} +# Disable shellcheck SC2064 as we want to parse the original owner immediately. +# shellcheck disable=SC2064 +trap_add cleanup_workspace EXIT +sudo chown -R jenkins /var/lib/jenkins/workspace +git config --global --add safe.directory /var/lib/jenkins/workspace echo "Environment variables:" env
2.41.0
f93402f619f58d651845981ccd1eba1d68da077
Wed, 17 Apr 2024 20:45:43 -0400
[PATCH 0325/1000] [NJT] Inline through torch.nested.nested_tensor_from_jagged instead of graph break (#124343)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124343 Approved by: https://github.com/jbschlosser
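For context, a minimal sketch (not part of the patch) of the pattern that now traces end to end; the shapes and the aot_eager backend are illustrative and mirror the test added in the diff below.
```
import torch

values = torch.randn(9, 5)                      # 2 + 3 + 4 jagged rows, 5 features
offsets = torch.tensor([0, 2, 5, 9], dtype=torch.int64)
nt = torch.nested.nested_tensor_from_jagged(values, offsets)

def fn(x):
    # Rebuilding an NJT from values()/offsets() used to force a graph break;
    # with this change the call is inlined and compiles with fullgraph=True.
    return torch.nested.nested_tensor_from_jagged(x.values() * 2, x.offsets())

out = torch.compile(fn, fullgraph=True, backend="aot_eager")(nt)
```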
diff --git a/test/dynamo/test_subclasses.py b/test/dynamo/test_subclasses.py index 387b6bf59b..8005d6e3a2 100644 --- a/test/dynamo/test_subclasses.py +++ b/test/dynamo/test_subclasses.py @@ -1361,6 +1361,14 @@ class TestNestedTensor(torch._dynamo.test_case.TestCase): self._check_recompiles(fn, (nt,), (nt2,), False) self._check_recompiles(fn, (nt,), (nt3,), True) + def test_inline_nested_tensor_from_jagged(self): + nt, _ = self._get_jagged_tensor(((2, 3, 4), 5), None) + + def fn(x): + return torch.nested.nested_tensor_from_jagged(x.values() * 2, x.offsets()) + + torch.compile(fn, fullgraph=True, backend="aot_eager")(nt) + def _get_views(self): # Test all cases with both an NT base and a dense base # Subclass -> Subclass diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py index 393c649133..0698b5fd8f 100644 --- a/torch/_dynamo/trace_rules.py +++ b/torch/_dynamo/trace_rules.py @@ -173,6 +173,7 @@ manual_torch_name_rule_map = { "torch.nn.Parameter": TorchInGraphFunctionVariable, "torch._nested_tensor_from_mask": SkipFunctionVariable, "torch._nested_from_padded": SkipFunctionVariable, + "torch.nested.nested_tensor_from_jagged": UserFunctionVariable, # symbol operators implemented in Python "torch.sym_not": TorchInGraphFunctionVariable, "torch.sym_float": TorchInGraphFunctionVariable,
2.41.0
677128cb892d17fe2281beae9e394fd6f89e455
Thu, 18 Apr 2024 15:21:01 +0000
[PATCH 0326/1000] [MPS] Fix crash when binary_cross_entropy is invoked for half dtypes (#124258)
Fix by creating constants using the input tensor's dtype.

One-line reproducer:
```
python -c "import torch; x=torch.arange(3, dtype=torch.float16,device='mps');print(torch.nn.functional.binary_cross_entropy(x, x))"
```

Before the change:
```
loc("mps_subtract"("(mpsFileLoc): /AppleInternal/Library/BuildRoots/ce725a5f-c761-11ee-a4ec-b6ef2fd8d87b/Library/Caches/com.apple.xbs/Sources/MetalPerformanceShadersGraph/mpsgraph/MetalPerformanceShadersGraph/Core/Files/MPSGraphUtilities.mm":233:0)): error: input types 'tensor<f32>' and 'tensor<3xf16>' are not broadcast compatible
LLVM ERROR: Failed to infer result type(s).
```

After:
```
tensor(-33.7812, device='mps:0', dtype=torch.float16)
```

Fixes https://github.com/pytorch/pytorch/issues/124252
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124258
Approved by: https://github.com/kulinseth
diff --git a/aten/src/ATen/native/mps/operations/LossOps.mm b/aten/src/ATen/native/mps/operations/LossOps.mm index 7b10476106..77727cb197 100644 --- a/aten/src/ATen/native/mps/operations/LossOps.mm +++ b/aten/src/ATen/native/mps/operations/LossOps.mm @@ -76,7 +76,7 @@ static Tensor& mse_loss_backward_out_impl(const Tensor& grad_output, newCachedGraph->targetTensor = mpsGraphRankedPlaceHolder(mpsGraph, target); newCachedGraph->gradOutputTensor = mpsGraphRankedPlaceHolder(mpsGraph, grad_output); - MPSGraphTensor* normTensor = [mpsGraph constantWithScalar:norm dataType:MPSDataTypeFloat32]; + MPSGraphTensor* normTensor = [mpsGraph constantWithScalar:norm dataType:[newCachedGraph->inputTensor dataType]]; MPSGraphTensor* diffTensor = [mpsGraph subtractionWithPrimaryTensor:newCachedGraph->inputTensor secondaryTensor:newCachedGraph->targetTensor name:nil]; @@ -116,11 +116,12 @@ struct CachedGraph : public MPSCachedGraph { static MPSGraphTensor* bce_forward_mps(CachedGraph* bceGraph) { MPSGraph* mpsGraph = bceGraph->graph(); + const auto inputType = [bceGraph->inputTensor dataType]; // Forward BCE: L = -w (y ln(x) + (1-y) ln(1-x)) - MPSGraphTensor* one = [mpsGraph constantWithScalar:1.0 dataType:MPSDataTypeFloat32]; + MPSGraphTensor* one = [mpsGraph constantWithScalar:1.0 dataType:inputType]; // -100 is the hard limit value defined in BCELoss Spec. to clamp the log - MPSGraphTensor* neg100 = [mpsGraph constantWithScalar:-100.0 dataType:MPSDataTypeFloat32]; + MPSGraphTensor* neg100 = [mpsGraph constantWithScalar:-100.0 dataType:inputType]; // 1 - x MPSGraphTensor* one_Input = [mpsGraph subtractionWithPrimaryTensor:one secondaryTensor:bceGraph->inputTensor @@ -154,11 +155,12 @@ static MPSGraphTensor* bce_forward_mps(CachedGraph* bceGraph) { static MPSGraphTensor* bce_backward_mps(CachedGraph* bceGraph) { MPSGraph* mpsGraph = bceGraph->graph(); + const auto inputType = [bceGraph->inputTensor dataType]; // Backward BCE: d(L)/d(x) = -w (y - x) / (x - x^2) - MPSGraphTensor* one = [mpsGraph constantWithScalar:1.0 dataType:MPSDataTypeFloat32]; + MPSGraphTensor* one = [mpsGraph constantWithScalar:1.0 dataType:inputType]; // epsilon used to clamp the grad input denominator - MPSGraphTensor* epsilon = [mpsGraph constantWithScalar:1e-12 dataType:MPSDataTypeFloat32]; + MPSGraphTensor* epsilon = [mpsGraph constantWithScalar:1e-12 dataType:inputType]; // 1 - x MPSGraphTensor* one_Input = [mpsGraph subtractionWithPrimaryTensor:one secondaryTensor:bceGraph->inputTensor @@ -238,7 +240,7 @@ static Tensor& bce_loss_out_impl(const Tensor& input, if (grad_output.defined()) { if (reduction == at::Reduction::Mean) { MPSGraphTensor* inputNumel = [mpsGraph constantWithScalar:static_cast<double>(input.numel()) - dataType:MPSDataTypeFloat32]; + dataType:[bceLoss dataType]]; newCachedGraph->gradInputTensor = [mpsGraph divisionWithPrimaryTensor:bceLoss secondaryTensor:inputNumel name:nil]; diff --git a/test/test_mps.py b/test/test_mps.py index 3597ec8d12..862bda96c7 100644 --- a/test/test_mps.py +++ b/test/test_mps.py @@ -67,6 +67,7 @@ def mps_ops_grad_modifier(ops): 'digamma': [torch.float32], 'special.polygammaspecial_polygamma_n_0': [torch.float16], 'polygammapolygamma_n_0': [torch.float16], + 'nn.functional.binary_cross_entropy': [torch.float16], # Unimplemented ops '__getitem__': [torch.float16], @@ -171,8 +172,6 @@ def mps_ops_grad_modifier(ops): 'nn.functional.conv_transpose1d': [torch.float16], 'nn.functional.conv_transpose2d': [torch.float16], 'nn.functional.conv_transpose3d': [torch.float16], - 
'nn.functional.nll_loss': [torch.float16], - 'nn.functional.cross_entropy': [torch.float16], } MACOS_13_3_XFAILLIST_GRAD = { @@ -987,12 +986,6 @@ def mps_ops_modifier(ops): # Unsupported # input types 'tensor<1x3x9x9xf16>' and 'tensor<1xf32>' are not broadcast compatible 'nn.functional.avg_pool2d': [torch.float16], - # input types 'tensor<f32>' and 'tensor<1xf16>' are not broadcast compatible - # Refer to the issue please: https://github.com/pytorch/pytorch/issues/124252 - 'nn.functional.binary_cross_entropy': [torch.float16], - - 'nn.functional.nll_loss': [torch.float16], - 'nn.functional.cross_entropy': [torch.float16], } def addDecorator(op, d) -> None: @@ -11419,6 +11412,9 @@ class TestConsistency(TestCaseMPS): 'nn.functional.batch_norm', 'nn.functional.instance_norm', 'round', 'xlogy', 'addcmul', + 'nn.functional.cross_entropy', + 'nn.functional.binary_cross_entropy', + 'nn.functional.nll_loss', 'nn.functional.max_pool2d', 'nn.functional.gelu', 'nn.functional.glu',
2.41.0
15a8f6398818854b782221737e288d50f4903d9
Thu, 18 Apr 2024 16:30:05 +0000
[PATCH 0328/1000] Fixed issue in affine_grid_backward when grad_grid is non-contiguous (#124370)
Description:
- replaced .view with .reshape to fix the problem when grad_grid is channels-last 2D/3D
- added a consistency test

Fixes #124154
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124370
Approved by: https://github.com/lezcano
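For context, a minimal repro sketch (not part of the patch, shapes are illustrative) of the case the consistency test below covers: the gradient flowing into affine_grid's backward can be channels-last, i.e. non-contiguous, which the previous .view call could not reinterpret, while .reshape copies when needed.
```
import torch
import torch.nn.functional as F

theta = torch.rand(6, 2, 3, requires_grad=True)
grid = F.affine_grid(theta, size=[6, 3, 4, 5], align_corners=False)  # grid: [6, 4, 5, 2]

# A channels-last gradient is non-contiguous, so the backward's view call
# used to fail on it; after the fix both layouts give the same theta.grad.
grad = torch.rand(grid.shape).contiguous(memory_format=torch.channels_last)
grid.backward(grad)
print(theta.grad.shape)  # torch.Size([6, 2, 3])
```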
diff --git a/aten/src/ATen/native/AffineGridGenerator.cpp b/aten/src/ATen/native/AffineGridGenerator.cpp index 17e45acb1b..315027d706 100644 --- a/aten/src/ATen/native/AffineGridGenerator.cpp +++ b/aten/src/ATen/native/AffineGridGenerator.cpp @@ -110,7 +110,7 @@ static Tensor affine_grid_generator_4D_backward( AT_ASSERT(grad_grid.sizes() == IntArrayRef({N, H, W, 2})); auto grad_theta = base_grid.view({N, H * W, 3}) .transpose(1, 2) - .bmm(grad_grid.view({N, H * W, 2})); + .bmm(grad_grid.reshape({N, H * W, 2})); return grad_theta.transpose(1, 2); } @@ -126,7 +126,7 @@ static Tensor affine_grid_generator_5D_backward( AT_ASSERT(grad_grid.sizes() == IntArrayRef({N, D, H, W, 3})); auto grad_theta = base_grid.view({N, D * H * W, 4}) .transpose(1, 2) - .bmm(grad_grid.view({N, D * H * W, 3})); + .bmm(grad_grid.reshape({N, D * H * W, 3})); return grad_theta.transpose(1, 2); } diff --git a/test/test_nn.py b/test/test_nn.py index 720beb606e..008354ad72 100644 --- a/test/test_nn.py +++ b/test/test_nn.py @@ -5497,6 +5497,30 @@ tensor(..., device='meta', size=(1,), requires_grad=True)""") with self.assertRaisesRegex(NotImplementedError, "affine_grid only supports 4D and 5D sizes"): F.affine_grid(theta, torch.Size([1, 1, 2, 2, 2, 2]), align_corners=False) + @parametrize_test('device', ['cpu'] + (['cuda'] if TEST_CUDA else [])) + @parametrize_test('nd', [2, 3]) + def test_affine_grid_backward_cl_cf_consistency(self, device, nd): + # Test based on reported issue: https://github.com/pytorch/pytorch/issues/124154 + + theta = torch.rand([6, nd, nd + 1], requires_grad=True, device=device) + size = [6, 3, 4, 5] if nd == 2 else [6, 3, 4, 5, 5] + grid = torch.nn.functional.affine_grid(theta, size, align_corners=False) + + grad_tensor = torch.rand(grid.shape, device=device) + + memory_format_cl = torch.channels_last if nd == 2 else torch.channels_last_3d + grad_tensor_cl = grad_tensor.contiguous(memory_format=memory_format_cl) + + assert theta.grad is None + grid.backward(grad_tensor_cl) + theta_grad_cl = theta.grad.clone().contiguous() + + theta.grad.zero_() + grid.backward(grad_tensor) + theta_grad_cf = theta.grad + + self.assertEqual(theta_grad_cf, theta_grad_cl) + @set_default_dtype(torch.double) def test_grid_sample(self): # Backward pass of native C++ and CUDA kernels branch depending on whether input requires gradient,
2.41.0
1062f57382980cbfa123df76556159c21561244
Thu, 18 Apr 2024 16:35:51 +0000
[PATCH 0329/1000] [export] Add a printer to unflattened module. (#124315)
Summary: Add a helper method to print the graph at every level of the unflattened module.

Test Plan: {F1489609684}

Differential Revision: D56263195
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124315
Approved by: https://github.com/tugsbayasgalan
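For illustration, a hypothetical usage sketch (the module and inputs are made up); the helper simply walks named_modules() and prints each submodule's fx graph, as the diff below shows.
```
import torch

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = torch.nn.Linear(4, 4)

    def forward(self, x):
        return self.lin(x).relu()

ep = torch.export.export(M(), (torch.randn(2, 4),))
unflat = torch.export.unflatten(ep)
unflat._print_graph()  # prints "<fqn>:" followed by that submodule's graph
```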
diff --git a/torch/export/unflatten.py b/torch/export/unflatten.py index 0ec948e513..33fd10a4e9 100644 --- a/torch/export/unflatten.py +++ b/torch/export/unflatten.py @@ -237,6 +237,12 @@ class UnflattenedModule(torch.nn.Module): fqn_order.keys() ) + def _print_graph(self): + for fqn, mod in self.named_modules(): + print(fqn + ":") + if hasattr(mod, "graph") and isinstance(mod.graph, torch.fx.Graph): + print(mod.graph) + def forward(self, *args, **kwargs): signature = self.module_call_graph[0].signature
2.41.0
8cf91c39533bb5223b983e309188d346aaa17c2
Thu, 18 Apr 2024 17:02:38 +0000
[PATCH 0330/1000] Fix predispatch tracing for aten::lift_fresh_copy (#124198)
Differential Revision: D56200666

Previously, when we hit the Functionalize kernel for lift_fresh_copy, we directly dispatched self.clone() to proxy dispatch. As a result, we ended up receiving a functional tensor at proxy dispatch. As a workaround, I unwrap self manually. It is not clear why this works fine in aot-dispatch, though.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124198
Approved by: https://github.com/bdhirsh
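For context, a minimal sketch mirroring the test added below; the exact export entry point used by the test suite may differ, but the functionalized graph is expected to contain a plain clone rather than leaking a functional tensor into proxy dispatch.
```
import torch

class M(torch.nn.Module):
    def forward(self, x):
        return torch.ops.aten.lift_fresh_copy(x)

ep = torch.export.export(M(), (torch.ones(6, 4),))
# lift_fresh_copy functionalizes into a single aten.clone in the exported graph.
assert "torch.ops.aten.clone.default" in ep.graph_module.code
```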
diff --git a/aten/src/ATen/FunctionalizeFallbackKernel.cpp b/aten/src/ATen/FunctionalizeFallbackKernel.cpp index 52de86935a..594f627e17 100644 --- a/aten/src/ATen/FunctionalizeFallbackKernel.cpp +++ b/aten/src/ATen/FunctionalizeFallbackKernel.cpp @@ -210,7 +210,13 @@ static at::Tensor lift_fresh_functionalize_copy(const at::Tensor & self) { // but that isn't really a use case today. // Needed for https://github.com/pytorch/pytorch/issues/105327 if (at::functionalization::impl::isFunctionalTensor(self)) { - return self.clone(); + // Note [Composite Functionalization under PreDispatch mode] + // When we are tracing under PreDispatch, PreDispatch key will be + // in the local include TLS. As a result, when we redispatch here, + // we will end up hitting PreDispatch stack first. So, we should + // directly redispatch to the functionalize key manually. + static auto op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::clone", "").typed<at::Tensor(const at::Tensor &, c10::optional<at::MemoryFormat>)>(); + return op.redispatch(c10::DispatchKeySet({c10::DispatchKey::Functionalize}), self, c10::nullopt); } at::AutoDispatchSkipFunctionalize guard; diff --git a/test/export/test_export.py b/test/export/test_export.py index 53e3b3c2ef..7a0bbc7eed 100644 --- a/test/export/test_export.py +++ b/test/export/test_export.py @@ -2626,6 +2626,17 @@ def forward(self, arg_0): torch.allclose(ep.module()(torch.ones(6, 4)), Foo()(torch.ones(6, 4))) ) + def test_aten_lift_fresh_copy(self): + class M(torch.nn.Module): + def forward(self, x): + return torch.ops.aten.lift_fresh_copy(x) + + ep = export(M(), (torch.ones(6, 4),)) + found = False + + op = "torch.ops.aten.clone.default" + FileCheck().check_count(op, 1, exactly=True).run(ep.graph_module.code) + def test_cond_buffers(self): class M(torch.nn.Module): def __init__(self):
2.41.0
a6edb0b6644eb2b28650ea3be1c806e4a57e351
Mon, 15 Apr 2024 11:06:09 -0700
[PATCH 0331/1000] Possible fix for einops warning (#124084)
See https://github.com/arogozhnikov/einops/issues/315 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124084 Approved by: https://github.com/peterbell10
diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py index 0698b5fd8f..daeb8626c1 100644 --- a/torch/_dynamo/trace_rules.py +++ b/torch/_dynamo/trace_rules.py @@ -3036,10 +3036,6 @@ def add_module_init_func(name: str, init_func: Callable[[], None]) -> None: """Register a module without eagerly importing it""" # If the module is already imported, eagerly run init assert "." not in name, f"Expected a root module name, but got {name}" - if name in sys.modules: - init_func() - - # Module is not yet imported, delay processing until needed assert name not in _lazy_module_init _lazy_module_init[name].append(init_func)
2.41.0
b17721899d4d6a55d66d4f7188e36c20a078231
Wed, 17 Apr 2024 12:05:21 -0700
[PATCH 0332/1000] Build device generic torch.Stream and torch.Event based on c10::Stream/Event (#123611)
This diff builds device-generic torch.Stream and torch.Event for newly added accelerators in PyTorch.

------------
**torch.Stream APIs**
```
# Defined in torch/csrc/Stream.cpp
class Stream(_StreamBase):
    stream_id: _int  # Stream id
    device_index: _int
    device_type: _int
    device: _device  # The device of the stream

    @overload
    def __new__(self, device: Optional[DeviceLikeType] = None, priority: _int = 0) -> Stream: ...
    @overload
    def __new__(self, stream_id: _int, device_index: _int, device_type: _int, priority: _int = 0) -> Stream: ...
    def query(self) -> _bool: ...
    def synchronize(self) -> None: ...
    def wait_event(self, event: Event) -> None: ...
    def wait_stream(self, other: Stream) -> None: ...
    def record_event(self, event: Optional[Event] = None) -> Event: ...
    def __hash__(self) -> _int: ...
    def __repr__(self) -> str: ...
    def __eq__(self, other: object) -> _bool: ...
```

------------------
**torch.Event APIs**:
- IPC-related APIs are not implemented, since many device backends don't support them, but we leave the interfaces there for future adaptation of torch.cuda.Stream.
- Currently only enable_timing is supported, since it is the most common flag used by other device backends. We would have to refactor the event flag system in PyTorch to support fancier flags.
- An elapsedTime API is added to c10::Event.
```
# Defined in torch/csrc/Event.cpp
class Event(_EventBase):
    device: _device  # The device of the Event
    event_id: _int  # The raw event created by device backend

    def __new__(self, device: Optional[DeviceLikeType] = None, enable_timing: _bool = False, blocking: _bool = False, interprocess: _bool = False) -> Event: ...
    @classmethod
    def from_ipc_handle(self, device: DeviceLikeType, ipc_handle: bytes) -> Event: ...
    def record(self, stream: Optional[Stream] = None) -> None: ...
    def wait(self, stream: Optional[Stream] = None) -> None: ...
    def query(self) -> _bool: ...
    def elapsed_time(self, other: Event) -> _float: ...
    def synchronize(self) -> None: ...
    def ipc_handle(self) -> bytes: ...
    def __repr__(self) -> str: ...
```

-----------
c10::Event gains new APIs:
- calculate **elapsedTime**
- get the raw event id
- synchronize the event
```
double elapsedTime(const Event& event) const {
  return impl_.elapsedTime(event.impl_);
}

void* eventId() const {
  return impl_.eventId();
}

void synchronize() const {
  return impl_.synchronize();
}
```

----------
TODO: need to find a good way to test these in PyTorch with API mocks.

Differential Revision: [D55351839](https://our.internmc.facebook.com/intern/diff/D55351839/)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123611
Approved by: https://github.com/albanD
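For illustration, a hypothetical usage sketch based on the API listing above. It assumes a CUDA build (any backend that implements the new device-generic hooks would look the same), and the recorded work and timing shown are illustrative only.
```
import torch

s = torch.Stream(device="cuda")        # device-generic stream
e1 = torch.Event(enable_timing=True)
e2 = torch.Event(enable_timing=True)

e1.record(s)
# ... enqueue work on s through the backend's own APIs ...
e2.record(s)
e2.synchronize()                       # block until the event has completed
print(s.query())                       # True once all work on s has run
print(e1.elapsed_time(e2))             # elapsed time between the two records
```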
diff --git a/build_variables.bzl b/build_variables.bzl index 571623076d..71b5c5eda8 100644 --- a/build_variables.bzl +++ b/build_variables.bzl @@ -795,6 +795,7 @@ libtorch_python_core_sources = [ "torch/csrc/StorageMethods.cpp", "torch/csrc/StorageSharing.cpp", "torch/csrc/Stream.cpp", + "torch/csrc/Event.cpp", "torch/csrc/TypeInfo.cpp", "torch/csrc/api/src/python/init.cpp", "torch/csrc/autograd/functions/init.cpp", diff --git a/c10/core/Event.h b/c10/core/Event.h index 2cbaf18022..b94db9f4f2 100644 --- a/c10/core/Event.h +++ b/c10/core/Event.h @@ -118,6 +118,18 @@ struct Event final { return impl_.query(); } + double elapsedTime(const Event& event) const { + return impl_.elapsedTime(event.impl_); + } + + void* eventId() const { + return impl_.eventId(); + } + + void synchronize() const { + return impl_.synchronize(); + } + private: impl::InlineEvent<impl::VirtualGuardImpl> impl_; }; diff --git a/c10/core/impl/DeviceGuardImplInterface.h b/c10/core/impl/DeviceGuardImplInterface.h index 1b168f7821..59210a92d6 100644 --- a/c10/core/impl/DeviceGuardImplInterface.h +++ b/c10/core/impl/DeviceGuardImplInterface.h @@ -122,6 +122,16 @@ struct C10_API DeviceGuardImplInterface { TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.") } + /** + * Return a new stream for a given device and priority. The stream will be + * copied and shared around, device backend should be able to correctly handle + * the lifetime of the stream. + */ + virtual Stream getNewStream(Device, int priority = 0) const { + (void)priority; + TORCH_CHECK(false, "Backend doesn't support create a new Stream.") + } + /** * Set a stream to be the thread local current stream for its device. * Return the previous stream for that device. You are NOT required @@ -194,6 +204,14 @@ struct C10_API DeviceGuardImplInterface { TORCH_CHECK(false, "Backend doesn't support synchronizing streams."); } + /** + * Wait (by blocking the calling thread) until all the work previously + * recorded on the event has completed running on the device. + */ + virtual void synchronizeEvent(void* /*event*/) const { + TORCH_CHECK(false, "Backend doesn't support synchronizing events."); + } + /** * Ensure the caching allocator (if any) is aware that the given DataPtr is * being used on the given stream, and that it should thus avoid recycling the @@ -202,6 +220,13 @@ struct C10_API DeviceGuardImplInterface { virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const { } + /** + * Fetch the elapsed time between two recorded events. + */ + virtual double elapsedTime(void* /*event1*/, void* /*event2*/) const { + TORCH_CHECK(false, "Backend doesn't support elapsedTime."); + } + /** * Intended use of this class is to leak the DeviceGuardImpl at program end. * So you better not call the destructor, buster! 
@@ -234,6 +259,13 @@ struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface { // no-op return Stream(Stream::DEFAULT, Device(D, -1)); } + + Stream getNewStream(Device, int priority = 0) const override { + // no-op + (void)priority; + return Stream(Stream::DEFAULT, Device(D, -1)); + } + // NB: These do NOT set the current device Stream exchangeStream(Stream) const noexcept override { // no-op diff --git a/c10/core/impl/InlineEvent.h b/c10/core/impl/InlineEvent.h index ef1e2c6d6f..3485da37c9 100644 --- a/c10/core/impl/InlineEvent.h +++ b/c10/core/impl/InlineEvent.h @@ -101,6 +101,32 @@ struct InlineEvent final { return backend_.queryEvent(event_); } + void* eventId() const { + return event_; + } + + double elapsedTime(const InlineEvent& other) const { + TORCH_CHECK( + other.was_marked_for_recording(), + "other was not marked for recording."); + TORCH_CHECK( + was_marked_for_recording(), "self was not marked for recording."); + TORCH_CHECK( + other.device_type() == device_type_, + "Event device type ", + DeviceTypeName(device_type_), + " does not match other's device type ", + DeviceTypeName(other.device_type()), + "."); + return backend_.elapsedTime(event_, other.event_); + } + + void synchronize() const { + if (!was_marked_for_recording_) + return; + backend_.synchronizeEvent(event_); + } + private: void* event_ = nullptr; T backend_; diff --git a/c10/core/impl/VirtualGuardImpl.h b/c10/core/impl/VirtualGuardImpl.h index ce32411d3b..2065150535 100644 --- a/c10/core/impl/VirtualGuardImpl.h +++ b/c10/core/impl/VirtualGuardImpl.h @@ -39,6 +39,9 @@ class VirtualGuardImpl final : public DeviceGuardImplInterface { Stream getStream(Device d) const noexcept override { return impl_->getStream(d); } + Stream getNewStream(Device d, int priority = 0) const override { + return impl_->getNewStream(d, priority); + } Stream getDefaultStream(Device d) const override { return impl_->getDefaultStream(d); } @@ -84,6 +87,14 @@ class VirtualGuardImpl final : public DeviceGuardImplInterface { impl_->recordDataPtrOnStream(data_ptr, stream); } + double elapsedTime(void* event1, void* event2) const override { + return impl_->elapsedTime(event1, event2); + } + + void synchronizeEvent(void* event) const override { + return impl_->synchronizeEvent(event); + } + private: const DeviceGuardImplInterface* impl_ = nullptr; }; diff --git a/c10/cuda/impl/CUDAGuardImpl.h b/c10/cuda/impl/CUDAGuardImpl.h index 7c0ea21b12..2d983beaf8 100644 --- a/c10/cuda/impl/CUDAGuardImpl.h +++ b/c10/cuda/impl/CUDAGuardImpl.h @@ -62,6 +62,9 @@ struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface { Stream getDefaultStream(Device d) const override { return getDefaultCUDAStream(d.index()); } + Stream getNewStream(Device d, int priority = 0) const override { + return getStreamFromPool(priority, d.index()); + } Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false) const override { return getStreamFromPool(isHighPriority, d.index()); diff --git a/test/test_public_bindings.py b/test/test_public_bindings.py index 65aa339aff..18e373edaa 100644 --- a/test/test_public_bindings.py +++ b/test/test_public_bindings.py @@ -228,6 +228,7 @@ class TestPublicBindings(TestCase): "StaticModule", "Stream", "StreamObjType", + "Event", "StringType", "SUM", "SymFloat", diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in index 1c6e40b38c..882055e089 100644 --- a/torch/_C/__init__.pyi.in +++ b/torch/_C/__init__.pyi.in @@ -112,7 +112,44 @@ class Stream: device_index: _int device_type: _int - device: device # The 
device of the stream + device: _device # The device of the stream + + @overload + def __new__(self, device: Optional[DeviceLikeType] = None, *, priority: _int = 0) -> Stream: ... + @overload + def __new__(self, stream_id: _int, device_index: _int, device_type: _int, *, priority: _int = 0) -> Stream: ... + def query(self) -> _bool: ... + def synchronize(self) -> None: ... + def wait_event(self, event: Event) -> None: ... + def wait_stream(self, other: Stream) -> None: ... + def record_event(self, event: Optional[Event] = None) -> Event: ... + def __hash__(self) -> _int: ... + def __repr__(self) -> str: ... + def __eq__(self, other: object) -> _bool: ... + + +# Defined in torch/csrc/Event.cpp +class Event: + + device: _device # The device of the Event + event_id: _int # The raw event created by device backend + + def __new__(self, + device: Optional[DeviceLikeType] = None, + *, + enable_timing: _bool = False, + blocking: _bool = False, + interprocess: _bool = False) -> Event: ... + @classmethod + def from_ipc_handle(self, device: _device, ipc_handle: bytes) -> Event: ... + def record(self, stream: Optional[Stream] = None) -> None: ... + def wait(self, stream: Optional[Stream] = None) -> None: ... + def query(self) -> _bool: ... + def elapsed_time(self, other: Event) -> _float: ... + def synchronize(self) -> None: ... + def ipc_handle(self) -> bytes: ... + def __repr__(self) -> str: ... + # Defined in torch/csrc/Size.cpp class Size(Tuple[_int, ...]): diff --git a/torch/csrc/Event.cpp b/torch/csrc/Event.cpp new file mode 100644 index 0000000000..b8cf8b2580 --- /dev/null +++ b/torch/csrc/Event.cpp @@ -0,0 +1,328 @@ +#include <pybind11/pybind11.h> +#include <torch/csrc/Device.h> +#include <torch/csrc/Event.h> +#include <torch/csrc/Stream.h> +#include <torch/csrc/THP.h> +#include <torch/csrc/utils/pybind.h> +#include <torch/csrc/utils/pycfunction_helpers.h> +#include <torch/csrc/utils/python_arg_parser.h> + +#include <c10/core/Event.h> +#include <c10/core/Stream.h> + +#include <c10/core/DeviceType.h> +#include <c10/core/impl/DeviceGuardImplInterface.h> +#include <structmember.h> +#include <string> + +PyObject* THPEventClass = nullptr; + +static PyObject* THPEvent_pynew( + PyTypeObject* type, + PyObject* args, + PyObject* kwargs) { + HANDLE_TH_ERRORS + + unsigned char enable_timing = 0; + unsigned char blocking = 0; + unsigned char interprocess = 0; + + static torch::PythonArgParser parser({ + "Event(Device device=None, *, bool enable_timing=True, bool blocking=False, bool interprocess=False)", + }); + + torch::ParsedArgs<4> parsed_args; + auto r = parser.parse(args, kwargs, parsed_args); + + auto device = r.deviceOptional(0); + + if (!device.has_value()) { + device = at::Device(at::getAccelerator(false).value_or(at::kCPU)); + } + enable_timing = r.toBoolWithDefault(1, true); + blocking = r.toBoolWithDefault(2, false); + interprocess = r.toBoolWithDefault(3, false); + + THPObjectPtr ptr(type->tp_alloc(type, 0)); + if (!ptr) { + TORCH_CHECK(ptr, "Failed to allocate memory for Event"); + } + + THPEvent* self = (THPEvent*)ptr.get(); + + // TODO: blocking and interprocess are not supported yet. To support them, the + // flag system of c10::Event needs to be refactored. C10::Event should also + // provide a generic constructor to support blocking and interprocess events. + (void)blocking; + (void)interprocess; + + new (&self->event) c10::Event( + device->type(), + (enable_timing ? 
c10::EventFlag::PYTORCH_DEFAULT + : c10::EventFlag::BACKEND_DEFAULT)); + + return (PyObject*)ptr.release(); + END_HANDLE_TH_ERRORS +} + +PyObject* THPEvent_new(c10::DeviceType device_type, c10::EventFlag flag) { + auto type = (PyTypeObject*)&THPEventType; + auto self = THPObjectPtr{type->tp_alloc(type, 0)}; + TORCH_CHECK(self, "Failed to allocate memory for Event"); + auto self_ = reinterpret_cast<THPEvent*>(self.get()); + new (&self_->event) c10::Event(device_type, flag); + return self.release(); +} + +static void THPEvent_dealloc(THPEvent* self) { + { + pybind11::gil_scoped_release no_gil{}; + self->event.~Event(); + } + Py_TYPE(self)->tp_free((PyObject*)self); +} + +static PyObject* THPEvent_get_device(THPEvent* self, void* unused) { + HANDLE_TH_ERRORS + at::optional<at::Device> device = self->event.device(); + if (!device) { + Py_RETURN_NONE; + } + return THPDevice_New(device.value()); + END_HANDLE_TH_ERRORS +} + +static PyObject* THPEvent_record( + PyObject* _self, + PyObject* args, + PyObject* kwargs) { + HANDLE_TH_ERRORS + auto self = (THPEvent*)_self; + PyObject* _stream = Py_None; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + constexpr const char* accepted_args[] = {"stream", nullptr}; + if (!PyArg_ParseTupleAndKeywords( + args, + kwargs, + "|O", + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + const_cast<char**>(accepted_args), + &_stream)) { + TORCH_WARN("Parsing THPEvent_record arg fails"); + return nullptr; + } + if (_stream != Py_None) { + auto stream = (THPStream*)_stream; + self->event.record(c10::Stream::unpack3( + stream->stream_id, + stream->device_index, + static_cast<c10::DeviceType>(stream->device_type))); + } else { + c10::impl::VirtualGuardImpl impl{ + static_cast<c10::DeviceType>(self->event.device_type())}; + self->event.record(impl.getStream(impl.getDevice())); + } + Py_RETURN_NONE; + END_HANDLE_TH_ERRORS +} + +static PyObject* THPEvent_from_ipc_handle( + PyObject* _type, + PyObject* args, + PyObject* kwargs) { + HANDLE_TH_ERRORS + auto type = (PyTypeObject*)_type; + + static torch::PythonArgParser parser({ + "from_ipc_handle(Device device, std::string ipc_handle)", + }); + torch::ParsedArgs<2> parsed_args; + auto r = parser.parse(args, kwargs, parsed_args); + + at::Device device = r.device(0); + std::string handle_string = r.string(1); + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "torch.Event ipc is not supported yet, please open an issue if you need this!"); + THPObjectPtr ptr(type->tp_alloc(type, 0)); + if (!ptr) { + return nullptr; + } + THPEvent* self = (THPEvent*)ptr.get(); + + // TODO: for constructing event from ipc handle, the c10::Event needs to have + // more general constructor to achieve that. 
+ new (&self->event) c10::Event(device.type(), c10::EventFlag::PYTORCH_DEFAULT); + + return (PyObject*)ptr.release(); + END_HANDLE_TH_ERRORS +} + +static PyObject* THPEvent_ipc_handle(PyObject* _self, PyObject* noargs) { + HANDLE_TH_ERRORS + auto self = (THPEvent*)_self; + (void)self; + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "torch.Event ipc is not supported yet, please open an issue if you need this!"); + std::string handle = "0"; + return PyBytes_FromStringAndSize((const char*)&handle, sizeof(handle)); + END_HANDLE_TH_ERRORS +} + +static PyObject* THPEvent_wait( + PyObject* _self, + PyObject* args, + PyObject* kwargs) { + HANDLE_TH_ERRORS { + auto self = (THPEvent*)_self; + PyObject* _stream = Py_None; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + constexpr const char* accepted_args[] = {"stream", nullptr}; + if (!PyArg_ParseTupleAndKeywords( + args, + kwargs, + "|O", + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + const_cast<char**>(accepted_args), + &_stream)) { + TORCH_WARN("Parsing THPEvent_wait arg fails"); + return nullptr; + } + if (_stream != Py_None) { + auto stream = (THPStream*)_stream; + self->event.block(c10::Stream::unpack3( + stream->stream_id, + stream->device_index, + static_cast<c10::DeviceType>(stream->device_type))); + } else { + c10::impl::VirtualGuardImpl impl{ + static_cast<c10::DeviceType>(self->event.device_type())}; + self->event.block(impl.getStream(impl.getDevice())); + } + } + Py_RETURN_NONE; + END_HANDLE_TH_ERRORS +} + +static PyObject* THPEvent_query(PyObject* _self, PyObject* noargs) { + HANDLE_TH_ERRORS + auto self = (THPEvent*)_self; + return PyBool_FromLong(self->event.query()); + END_HANDLE_TH_ERRORS +} + +static PyObject* THPEvent_elapsed_time(PyObject* _self, PyObject* _other) { + HANDLE_TH_ERRORS + auto self = (THPEvent*)_self; + auto other = (THPEvent*)_other; + return PyFloat_FromDouble(self->event.elapsedTime(other->event)); + END_HANDLE_TH_ERRORS +} + +static PyObject* THPEvent_synchronize(PyObject* _self, PyObject* noargs) { + HANDLE_TH_ERRORS { + pybind11::gil_scoped_release no_gil{}; + auto self = (THPEvent*)_self; + self->event.synchronize(); + } + Py_RETURN_NONE; + END_HANDLE_TH_ERRORS +} + +static PyObject* THPEvent_evend_id(PyObject* _self, PyObject* noargs) { + HANDLE_TH_ERRORS + auto self = (THPEvent*)_self; + return PyLong_FromVoidPtr(self->event.eventId()); + END_HANDLE_TH_ERRORS +} + +static PyObject* THPEvent_repr(THPEvent* self) { + HANDLE_TH_ERRORS + return THPUtils_packString( + "torch.Event device_type=" + + c10::DeviceTypeName( + static_cast<c10::DeviceType>(self->event.device_type()), true) + + ", device_index=" + std::to_string(self->event.device_index()) + + ", event_flag=" + + std::to_string(static_cast<int64_t>(self->event.flag())) + ", event_id=" + + std::to_string(reinterpret_cast<int64_t>(self->event.eventId()))); + END_HANDLE_TH_ERRORS +} + +// NOLINTNEXTLINE(*c-arrays*, *global-variables) +static struct PyGetSetDef THPEvent_properties[] = { + {"device", (getter)THPEvent_get_device, nullptr, nullptr, nullptr}, + {"event_id", (getter)THPEvent_evend_id, nullptr, nullptr, nullptr}, + {nullptr}}; + +// NOLINTNEXTLINE(*c-arrays*, *global-variables) +static PyMethodDef THPEvent_methods[] = { + {(char*)"from_ipc_handle", + castPyCFunctionWithKeywords(THPEvent_from_ipc_handle), + METH_CLASS | METH_VARARGS | METH_KEYWORDS, + nullptr}, + {(char*)"record", + castPyCFunctionWithKeywords(THPEvent_record), + METH_VARARGS | METH_KEYWORDS, + nullptr}, + {(char*)"wait", + 
castPyCFunctionWithKeywords(THPEvent_wait), + METH_VARARGS | METH_KEYWORDS, + nullptr}, + {(char*)"query", THPEvent_query, METH_NOARGS, nullptr}, + {(char*)"elapsed_time", THPEvent_elapsed_time, METH_O, nullptr}, + {(char*)"synchronize", THPEvent_synchronize, METH_NOARGS, nullptr}, + {(char*)"ipc_handle", THPEvent_ipc_handle, METH_NOARGS, nullptr}, + {nullptr}}; + +PyTypeObject THPEventType = { + PyVarObject_HEAD_INIT(nullptr, 0) "torch.Event", /* tp_name */ + sizeof(THPEvent), /* tp_basicsize */ + 0, /* tp_itemsize */ + (destructor)THPEvent_dealloc, /* tp_dealloc */ + 0, /* tp_vectorcall_offset */ + nullptr, /* tp_getattr */ + nullptr, /* tp_setattr */ + nullptr, /* tp_reserved */ + (reprfunc)THPEvent_repr, /* tp_repr */ + nullptr, /* tp_as_number */ + nullptr, /* tp_as_sequence */ + nullptr, /* tp_as_mapping */ + nullptr, /* tp_hash */ + nullptr, /* tp_call */ + nullptr, /* tp_str */ + nullptr, /* tp_getattro */ + nullptr, /* tp_setattro */ + nullptr, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ + nullptr, /* tp_doc */ + nullptr, /* tp_traverse */ + nullptr, /* tp_clear */ + nullptr, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + nullptr, /* tp_iter */ + nullptr, /* tp_iternext */ + THPEvent_methods, /* tp_methods */ + nullptr, /* tp_members */ + THPEvent_properties, /* tp_getset */ + nullptr, /* tp_base */ + nullptr, /* tp_dict */ + nullptr, /* tp_descr_get */ + nullptr, /* tp_descr_set */ + 0, /* tp_dictoffset */ + nullptr, /* tp_init */ + nullptr, /* tp_alloc */ + THPEvent_pynew, /* tp_new */ +}; + +void THPEvent_init(PyObject* module) { + THPEventClass = (PyObject*)&THPEventType; + if (PyType_Ready(&THPEventType) < 0) { + throw python_error(); + } + Py_INCREF(&THPEventType); + if (PyModule_AddObject(module, "Event", (PyObject*)&THPEventType) < 0) { + throw python_error(); + } +} diff --git a/torch/csrc/Event.h b/torch/csrc/Event.h new file mode 100644 index 0000000000..745610d5dd --- /dev/null +++ b/torch/csrc/Event.h @@ -0,0 +1,21 @@ +#ifndef THP_EVENT_INC +#define THP_EVENT_INC + +#include <c10/core/Event.h> +#include <torch/csrc/python_headers.h> + +struct TORCH_API THPEvent { + PyObject_HEAD c10::Event event; +}; +extern PyObject* THPEventClass; +TORCH_API extern PyTypeObject THPEventType; + +TORCH_API void THPEvent_init(PyObject* module); +TORCH_API PyObject* THPEvent_new( + c10::DeviceType device_type, + c10::EventFlag flag); +inline bool THPEvent_Check(PyObject* obj) { + return THPEventClass && PyObject_IsInstance(obj, THPEventClass); +} + +#endif // THP_EVENT_INC diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp index 9343a48813..8aff73047f 100644 --- a/torch/csrc/Module.cpp +++ b/torch/csrc/Module.cpp @@ -39,6 +39,7 @@ #include <torch/csrc/Device.h> #include <torch/csrc/Dtype.h> #include <torch/csrc/DynamicTypes.h> +#include <torch/csrc/Event.h> #include <torch/csrc/Generator.h> #include <torch/csrc/Layout.h> #include <torch/csrc/MemoryFormat.h> @@ -1603,6 +1604,7 @@ PyObject* initModule() { THPQScheme_init(module); THPDevice_init(module); THPStream_init(module); + THPEvent_init(module); ASSERT_TRUE(THPVariable_initModule(module)); ASSERT_TRUE(THPFunction_initModule(module)); ASSERT_TRUE(THPEngine_initModule(module)); diff --git a/torch/csrc/Stream.cpp b/torch/csrc/Stream.cpp index bd8abb0ecd..06dac515c1 100644 --- a/torch/csrc/Stream.cpp +++ b/torch/csrc/Stream.cpp @@ -1,10 +1,19 @@ #include <pybind11/pybind11.h> #include <torch/csrc/Device.h> +#include <torch/csrc/Event.h> +#include <torch/csrc/Stream.h> #include 
<torch/csrc/THP.h> #include <torch/csrc/utils/pybind.h> +#include <torch/csrc/utils/pycfunction_helpers.h> #include <torch/csrc/utils/python_arg_parser.h> +#include <c10/core/DeviceGuard.h> +#include <c10/core/Stream.h> +#include <c10/core/impl/DeviceGuardImplInterface.h> +#include <c10/util/Exception.h> +#include <c10/util/hash.h> #include <structmember.h> +#include <cstdint> PyTypeObject* THPStreamClass = nullptr; @@ -13,22 +22,53 @@ static PyObject* THPStream_pynew( PyObject* args, PyObject* kwargs) { HANDLE_TH_ERRORS - int64_t stream_id = 0; - int64_t device_index = 0; + + int64_t stream_id = -1; int64_t device_type = 0; - // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) - constexpr const char* kwlist[] = { - "stream_id", "device_index", "device_type", nullptr}; - if (!PyArg_ParseTupleAndKeywords( - args, - kwargs, - "|LLL", - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) - const_cast<char**>(kwlist), - &stream_id, - &device_index, - &device_type)) { - return nullptr; + int64_t device_index = 0; + int64_t priority = 0; + + static torch::PythonArgParser parser({ + "Steram(Device device=None, *, int64_t priority=0)", + "Stream(int64_t stream_id, int64_t device_index, int64_t device_type, *, int64_t priority=0)", + }); + + torch::ParsedArgs<4> parsed_args; + auto r = parser.parse(args, kwargs, parsed_args); + + std::unique_ptr<c10::DeviceGuard> device_guard_ptr; + + if (r.idx == 0) { + auto default_accelerator = at::getAccelerator(false); + auto device = r.deviceOptional(0); + if (device.has_value()) { + device_type = static_cast<int64_t>(device->type()); + device_index = static_cast<int64_t>(device->index()); + // Initialize device guard if device is not None. + device_guard_ptr = std::make_unique<c10::DeviceGuard>(device.value()); + } else { + // If device is None, we will use the current accelerator and index. + // If the current accelerator is not set, we will use the CPU as device + // type. + device_type = static_cast<int64_t>( + default_accelerator.value_or(c10::DeviceType::CPU)); + c10::impl::VirtualGuardImpl impl{ + static_cast<c10::DeviceType>(device_type)}; + const auto current_device = impl.getDevice(); + device_index = current_device.index(); + } + priority = r.toInt64WithDefault(1, 0); + } else if (r.idx == 1) { + stream_id = r.toInt64WithDefault(0, -1); + device_index = r.toInt64WithDefault(1, 0); + device_type = + r.toInt64WithDefault(2, static_cast<int64_t>(c10::DeviceType::CPU)); + priority = r.toInt64WithDefault(3, 0); + } else { + TORCH_CHECK( + false, + "parse stream arg fails please check the usage: ", + parser.get_signatures()); } THPObjectPtr ptr(type->tp_alloc(type, 0)); @@ -37,9 +77,29 @@ static PyObject* THPStream_pynew( } THPStream* self = (THPStream*)ptr.get(); - self->stream_id = stream_id; - self->device_index = device_index; - self->device_type = device_type; + + // If torch.Stream is not created from existing Stream, then create a new one. + // It requires other device backends override getNewStream method. How the new + // stream is created is backend specific. Backend should be able to correctly + // manage the lifetime of streams. 
+ c10::optional<c10::Stream> stream_opt; + if (r.idx == 0) { + c10::impl::VirtualGuardImpl impl{static_cast<c10::DeviceType>(device_type)}; + stream_opt = impl.getNewStream( + c10::Device(static_cast<c10::DeviceType>(device_type), device_index), + static_cast<int>(priority)); + } else { + stream_opt = c10::Stream::unpack3( + stream_id, + static_cast<c10::DeviceIndex>(device_index), + static_cast<c10::DeviceType>(device_type)); + } + + TORCH_CHECK(stream_opt.has_value(), "Failed to create stream"); + self->stream_id = static_cast<int64_t>(stream_opt->id()); + self->device_index = static_cast<int64_t>(stream_opt->device_index()); + self->device_type = static_cast<int64_t>(stream_opt->device_type()); + return (PyObject*)ptr.release(); END_HANDLE_TH_ERRORS } @@ -73,15 +133,167 @@ static PyObject* THPStream_get_device(THPStream* self, void* unused) { END_HANDLE_TH_ERRORS } +static PyObject* THPStream_query(PyObject* _self, PyObject* noargs) { + HANDLE_TH_ERRORS + auto self = (THPStream*)_self; + + return PyBool_FromLong(c10::Stream::unpack3( + self->stream_id, + self->device_index, + static_cast<c10::DeviceType>(self->device_type)) + .query()); + + END_HANDLE_TH_ERRORS +} + +static PyObject* THPStream_synchronize(PyObject* _self, PyObject* noargs) { + HANDLE_TH_ERRORS { + pybind11::gil_scoped_release no_gil; + auto self = (THPStream*)_self; + + c10::Stream::unpack3( + self->stream_id, + self->device_index, + static_cast<c10::DeviceType>(self->device_type)) + .synchronize(); + } + Py_RETURN_NONE; + END_HANDLE_TH_ERRORS +} + +static PyObject* THPStream_wait_event(PyObject* _self, PyObject* _event) { + HANDLE_TH_ERRORS { + auto self = (THPStream*)_self; + auto event = (THPEvent*)_event; + c10::Stream::unpack3( + self->stream_id, + self->device_index, + static_cast<c10::DeviceType>(self->device_type)) + .wait(event->event); + } + Py_RETURN_NONE; + END_HANDLE_TH_ERRORS +} + +static PyObject* THPStream_wait_stream(PyObject* _self, PyObject* _other) { + HANDLE_TH_ERRORS { + auto self = (THPStream*)_self; + auto other_stream = (THPStream*)_other; + c10::Event new_event( + static_cast<c10::DeviceType>(other_stream->device_type), + c10::EventFlag::PYTORCH_DEFAULT); + new_event.record(c10::Stream::unpack3( + other_stream->stream_id, + other_stream->device_index, + static_cast<c10::DeviceType>(other_stream->device_type))); + c10::Stream::unpack3( + self->stream_id, + self->device_index, + static_cast<c10::DeviceType>(self->device_type)) + .wait(new_event); + } + Py_RETURN_NONE; + END_HANDLE_TH_ERRORS +} + +static PyObject* THPStream_record_event( + PyObject* _self, + PyObject* args, + PyObject* kwargs) { + HANDLE_TH_ERRORS + auto self = (THPStream*)_self; + PyObject* _new_event; + PyObject* _event = Py_None; + + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) + constexpr const char* accepted_args[] = {"event", nullptr}; + if (!PyArg_ParseTupleAndKeywords( + args, + kwargs, + "|O", + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + const_cast<char**>(accepted_args), + &_event)) { + TORCH_CHECK(false, "parse record_event arg fails"); + } + if (_event != Py_None) { + // Increase the refcount of the event to avoid it being destroyed. 
+ Py_INCREF(_event); + _new_event = _event; + } else { + _new_event = THPEvent_new( + static_cast<c10::DeviceType>(self->device_type), + c10::EventFlag::PYTORCH_DEFAULT); + } + auto new_event = (THPEvent*)_new_event; + TORCH_CHECK(new_event, "event must not be null"); + new_event->event.record(c10::Stream::unpack3( + self->stream_id, + self->device_index, + static_cast<c10::DeviceType>(self->device_type))); + return (PyObject*)new_event; + END_HANDLE_TH_ERRORS +} + +static PyObject* THPStream_repr(THPStream* self) { + HANDLE_TH_ERRORS + return THPUtils_packString( + "torch.Stream device_type=" + + c10::DeviceTypeName( + static_cast<c10::DeviceType>(self->device_type), true) + + ", device_index=" + std::to_string(self->device_index) + + ", stream_id=" + std::to_string(self->stream_id)); + END_HANDLE_TH_ERRORS +} + +static Py_hash_t THPStream_hash(THPStream* self) { + return static_cast<long>(at::hash_combine( + self->device_type, + (at::hash_combine(self->stream_id, self->device_index)))); +} + static PyObject* THPStream_eq(THPStream* self, THPStream* other) { HANDLE_TH_ERRORS return PyBool_FromLong( - self->stream_id == other->stream_id && - self->device_index == other->device_index && - self->device_type == other->device_type); + (self->stream_id == other->stream_id) && + (self->device_index == other->device_index) && + (self->device_type == other->device_type)); + END_HANDLE_TH_ERRORS +} + +static PyObject* THPStream_ne(THPStream* self, THPStream* other) { + HANDLE_TH_ERRORS + return PyBool_FromLong( + (self->stream_id != other->stream_id) || + (self->device_index != other->device_index) || + (self->device_type != other->device_type)); END_HANDLE_TH_ERRORS } +static PyObject* THPStream_richcompare( + PyObject* self, + PyObject* other, + int op) { + PyObject* result = NULL; + if (other == Py_None) { + result = Py_False; + } else { + switch (op) { + case Py_EQ: + result = THPStream_eq((THPStream*)self, (THPStream*)other); + break; + case Py_NE: + result = THPStream_ne((THPStream*)self, (THPStream*)other); + break; + default: + result = Py_False; + break; + } + } + Py_XINCREF(result); + return result; +} + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables) static struct PyMemberDef THPStream_members[] = { {"stream_id", @@ -108,6 +320,14 @@ static struct PyGetSetDef THPStream_properties[] = { // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables) static PyMethodDef THPStream_methods[] = { + {"query", THPStream_query, METH_NOARGS, nullptr}, + {"synchronize", THPStream_synchronize, METH_NOARGS, nullptr}, + {"wait_event", THPStream_wait_event, METH_O, nullptr}, + {"wait_stream", THPStream_wait_stream, METH_O, nullptr}, + {"record_event", + castPyCFunctionWithKeywords(THPStream_record_event), + METH_VARARGS | METH_KEYWORDS, + nullptr}, {"__eq__", (PyCFunction)THPStream_eq, METH_O, nullptr}, {nullptr}}; @@ -120,11 +340,11 @@ PyTypeObject THPStreamType = { nullptr, /* tp_getattr */ nullptr, /* tp_setattr */ nullptr, /* tp_reserved */ - nullptr, /* tp_repr */ + (reprfunc)THPStream_repr, /* tp_repr */ nullptr, /* tp_as_number */ nullptr, /* tp_as_sequence */ nullptr, /* tp_as_mapping */ - nullptr, /* tp_hash */ + (hashfunc)THPStream_hash, /* tp_hash */ nullptr, /* tp_call */ nullptr, /* tp_str */ nullptr, /* tp_getattro */ @@ -135,7 +355,7 @@ PyTypeObject THPStreamType = { nullptr, /* tp_doc */ nullptr, /* tp_traverse */ nullptr, /* tp_clear */ - 
nullptr, /* tp_richcompare */ + THPStream_richcompare, /* tp_richcompare */ 0, /* tp_weaklistoffset */ nullptr, /* tp_iter */ nullptr, /* tp_iternext */
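With the new tp_hash / tp_richcompare slots above, two torch.Stream wrappers around the same packed (stream_id, device_index, device_type) triple compare equal and hash identically. A small sketch, assuming the current backend supports the new getNewStream hook:

```
import torch

s1 = torch.Stream()  # create a new stream on the current accelerator (or CPU)
# Rebuild a wrapper from the packed triple via the second constructor overload;
# no new backend stream is created for s2.
s2 = torch.Stream(s1.stream_id, s1.device_index, s1.device_type)

assert s1 == s2             # equality compares stream_id, device_index, device_type
assert hash(s1) == hash(s2) # hash combines the same three fields
print(s1)                   # torch.Stream device_type=..., device_index=..., stream_id=...
```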
2.41.0
7215a4fa2399584163631faf8c6a1bbcbbfe0ae
Wed, 17 Apr 2024 17:48:19 -0700
[PATCH 0334/1000] Fix memory leak in pattern_matcher (#124345)
#121313 changed precompiled patterns so they are more integrated with the pattern-matching code. This resulted in a list of "known" patterns (with their example data) being stored globally. Unfortunately, because small FakeTensors stash the original tensor as a constant, the example data ended up keeping CUDA tensors alive. Fix this by clearing out the constant storage for the example data that we keep around.

Fixes #124081

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124345
Approved by: https://github.com/xuzhao9
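A condensed sketch of the fix, mirroring the loop added in the diff below: walk the example inputs and drop any constant tensor stashed on a FakeTensor, so the globally cached patterns cannot keep GPU memory alive. The helper name here is illustrative:

```
import torch
import torch._subclasses.fake_tensor
import torch.utils._pytree as pytree

def drop_fake_constants(example_inputs):
    """Clear cached real-tensor constants from FakeTensor example inputs."""
    for arg in pytree.tree_iter(example_inputs):
        if torch._subclasses.fake_tensor.is_fake(arg) and arg.constant is not None:
            # The stashed constant is only an optimization; clearing it
            # releases the original (possibly CUDA) tensor it refers to.
            arg.constant = None
    return example_inputs
```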
diff --git a/torch/_inductor/pattern_matcher.py b/torch/_inductor/pattern_matcher.py index 177d7c9466..0f2f966185 100644 --- a/torch/_inductor/pattern_matcher.py +++ b/torch/_inductor/pattern_matcher.py @@ -1293,6 +1293,9 @@ def gen_register_replacement( scalar_workaround=(), exclusive_arg_names=(), ): + # Make sure the example_inputs is materialized. + example_inputs = tuple(example_inputs) + if "PYTORCH_GEN_PATTERNS" in os.environ: pat = _serialize_pattern( unique_name, search_fn, example_inputs, trace_fn, scalar_workaround @@ -1309,6 +1312,14 @@ def gen_register_replacement( ) pat = getattr(m, unique_name) + for arg in pytree.tree_iter(example_inputs): + if torch._subclasses.fake_tensor.is_fake(arg) and arg.constant is not None: + # This can be a problem - small fake tensors (e.g. `tensor(2)`) will + # hold onto their original constant value - and by stashing it here + # will cause a memory leak if the constant value is on GPU. + # Since this is just an optimization we can clear it out. + arg.constant = None + _known_precompiled_patterns.append( (search_fn, example_inputs, trace_fn, scalar_workaround, pat) )
2.41.0
6f0159db08c1ad55fe57a5e92d8933e21ea543e
Wed, 17 Apr 2024 12:05:23 -0700
[PATCH 0335/1000] Add test_cpp_extensions tests for stream_and_event and mita_backend (#123614)
Test the generic torch.Stream/Event with a fake device guard and hooks.

@exported-using-ghexport

Differential Revision: [D55902506](https://our.internmc.facebook.com/intern/diff/D55902506/)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123614
Approved by: https://github.com/albanD
ghstack dependencies: #123611, #123612
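In outline, the new tests build the extension below (which registers a fake MTIA DeviceGuardImpl and hooks via C10_REGISTER_GUARD_IMPL / REGISTER_MTIA_HOOKS as a side effect of loading) and then drive the generic stream APIs against it. A condensed sketch of that flow, with the source path relative to test/ as in the test file:

```
import torch
import torch.utils.cpp_extension

# Building the extension registers the fake MTIA guard impl and hooks;
# no Python module is produced or needed.
torch.utils.cpp_extension.load(
    name="mtia_extension",
    sources=["cpp_extensions/mtia_extension.cpp"],
    is_python_module=False,
    verbose=True,
)

s = torch.mtia.Stream(device="mtia:0")
with torch.mtia.stream(s):
    # Inside the context, the user stream is the current stream.
    assert torch.mtia.current_stream() == s
```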
diff --git a/test/cpp_extensions/mtia_extension.cpp b/test/cpp_extensions/mtia_extension.cpp new file mode 100644 index 0000000000..3b02d3968e --- /dev/null +++ b/test/cpp_extensions/mtia_extension.cpp @@ -0,0 +1,219 @@ +#include <ATen/detail/MTIAHooksInterface.h> +#include <c10/core/Device.h> +#include <c10/core/Stream.h> +#include <c10/core/impl/DeviceGuardImplInterface.h> +#include <c10/util/Logging.h> +#include <torch/csrc/utils/device_lazy_init.h> +#include <thread> +namespace torch::mtia { + +constexpr c10::DeviceType kMTIADeviceType = c10::DeviceType::MTIA; +constexpr c10::DeviceIndex kMTIADeviceCount = 2; +static thread_local c10::DeviceIndex current_device = 0; +static thread_local std::array<c10::Stream, kMTIADeviceCount> current_streams = + {c10::Stream::unpack3(0, 0, c10::DeviceType::MTIA), + c10::Stream::unpack3(0, 1, c10::DeviceType::MTIA)}; +static int64_t stream_id_gen = 1; +static int64_t event_id_gen = 1; +static std::array<c10::Stream, kMTIADeviceCount> default_streams = { + c10::Stream::unpack3(0, 0, c10::DeviceType::MTIA), + c10::Stream::unpack3(0, 1, c10::DeviceType::MTIA)}; +struct MTIAGuardImpl final : public c10::impl::DeviceGuardImplInterface { + MTIAGuardImpl() = default; + explicit MTIAGuardImpl(c10::DeviceType t) { + TORCH_INTERNAL_ASSERT(t == kMTIADeviceType); + } + c10::DeviceType type() const override { + return kMTIADeviceType; + } + c10::Device exchangeDevice(c10::Device d) const override { + c10::Device old_device = getDevice(); + if (old_device.index() != d.index()) { + setDevice(d); + } + return old_device; + } + c10::Device getDevice() const override { + return c10::Device(kMTIADeviceType, current_device); + } + + void setDevice(c10::Device d) const override { + c10::Device current_device = getDevice(); + if (current_device.index() != d.index()) { + current_device = d; + } + } + void uncheckedSetDevice(c10::Device d) const noexcept override { + (void)d; + } + c10::Stream getStream(c10::Device d) const noexcept override { + return current_streams[d.index()]; + } + c10::Stream getNewStream(c10::Device d, int priority = 0) const override { + (void)priority; + return c10::Stream::unpack3(stream_id_gen++, d.index(), d.type()); + } + c10::Stream getDefaultStream(c10::Device d) const override { + return default_streams[d.index()]; + } + c10::Stream getStreamFromGlobalPool( + c10::Device d, + bool isHighPriority = false) const override { + return c10::Stream::unpack3(stream_id_gen++, d.index(), d.type()); + } + // NB: These do NOT set the current device + c10::Stream exchangeStream(c10::Stream s) const noexcept override { + c10::Stream old_stream = getStream(s.device()); + return old_stream; + } + c10::DeviceIndex deviceCount() const noexcept override { + return kMTIADeviceCount; + } + + void destroyEvent(void* event, const c10::DeviceIndex device_index) + const noexcept override { + (void)device_index; + } + + void record( + void** event, + const c10::Stream& stream, + const c10::DeviceIndex device_index, + const c10::EventFlag flag) const override { + TORCH_CHECK( + device_index == -1 || device_index == stream.device_index(), + "Event device index ", + device_index, + " does not match recording stream's device index ", + stream.device_index(), + "."); + + const auto orig_device = getDevice(); + + setDevice(stream.device()); + + if (*event == nullptr) { + *event = reinterpret_cast<void*>(event_id_gen++); + } + setDevice(orig_device); + } + + void block(void* event, const c10::Stream& stream) const override { + (void)event; + (void)stream; + } + + // May be 
called from any device + bool queryEvent(void* event) const override { + (void)event; + return true; + } + + // Stream-related functions + bool queryStream(const c10::Stream& stream) const override { + (void)stream; + return true; + } + + void synchronizeStream(const c10::Stream& stream) const override { + (void)stream; + } + + void recordDataPtrOnStream( + const c10::DataPtr& data_ptr, + const c10::Stream& stream) const override { + (void)data_ptr; + (void)stream; + } + + double elapsedTime(void* event1, void* event2) const override { + uint64_t elapsed_time = 1e6; + return (double)(elapsed_time / 1e6); + } + + void synchronizeEvent(void* event) const override { + (void)event; + } +}; + +struct MTIAHooks : public at::MTIAHooksInterface { + explicit MTIAHooks(at::MTIAHooksArgs) {} + void initMTIA() const override {} + + bool hasMTIA() const override { + return true; + } + + c10::DeviceIndex deviceCount() const override { + torch::utils::device_lazy_init(at::kMTIA); + return c10::DeviceIndex(2); + } + + void deviceSynchronize(c10::DeviceIndex device_index) const override { + torch::utils::device_lazy_init(at::kMTIA); + (void)device_index; + } + + std::string showConfig() const override { + return "None config"; + } + + c10::DeviceIndex exchangeDevice(c10::DeviceIndex device) const override { + torch::utils::device_lazy_init(at::kMTIA); + auto orig_device = current_device; + if (current_device != device) { + current_device = device; + } + return orig_device; + } + + c10::DeviceIndex maybeExchangeDevice(c10::DeviceIndex device) const override { + torch::utils::device_lazy_init(at::kMTIA); + + auto orig_device = current_device; + if (current_device != device) { + current_device = device; + } + return orig_device; + } + + c10::Stream getDefaultStream(c10::DeviceIndex device) const override { + torch::utils::device_lazy_init(at::kMTIA); + + return default_streams[device]; + } + + c10::Stream getCurrentStream(c10::DeviceIndex device) const override { + torch::utils::device_lazy_init(at::kMTIA); + + return current_streams[device]; + } + + void setCurrentStream(const c10::Stream& stream) const override { + torch::utils::device_lazy_init(at::kMTIA); + + current_streams[stream.device_index()] = stream; + } + + c10::DeviceIndex getCurrentDevice() const override { + torch::utils::device_lazy_init(at::kMTIA); + + return current_device; + } + + void setCurrentDevice(c10::DeviceIndex device) const override { + torch::utils::device_lazy_init(at::kMTIA); + + if (current_device != device) { + current_device = device; + } + } +}; + +using at::MTIAHooksRegistry; +using at::RegistererMTIAHooksRegistry; + +REGISTER_MTIA_HOOKS(MTIAHooks); +C10_REGISTER_GUARD_IMPL(MTIA, MTIAGuardImpl); + +} // namespace torch::mtia diff --git a/test/run_test.py b/test/run_test.py index c029a96566..d7bc40f521 100755 --- a/test/run_test.py +++ b/test/run_test.py @@ -191,6 +191,8 @@ XPU_TEST = [ RUN_PARALLEL_BLOCKLIST = [ "test_cpp_extensions_jit", "test_cpp_extensions_open_device_registration", + "test_cpp_extensions_stream_and_event", + "test_cpp_extensions_mtia_backend", "test_jit_disabled", "test_mobile_optimizer", "test_multiprocessing", diff --git a/test/test_cpp_extensions_mtia_backend.py b/test/test_cpp_extensions_mtia_backend.py new file mode 100644 index 0000000000..e2ebbf702d --- /dev/null +++ b/test/test_cpp_extensions_mtia_backend.py @@ -0,0 +1,154 @@ +# Owner(s): ["module: mtia"] + +import os +import shutil +import sys +import tempfile +import unittest + +import torch +import torch.testing._internal.common_utils as 
common +import torch.utils.cpp_extension +from torch.testing._internal.common_utils import ( + IS_ARM64, + IS_LINUX, + skipIfTorchDynamo, + TEST_CUDA, + TEST_PRIVATEUSE1, +) +from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME + + +TEST_CUDA = TEST_CUDA and CUDA_HOME is not None +TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None + + +def remove_build_path(): + if sys.platform == "win32": + # Not wiping extensions build folder because Windows + return + default_build_root = torch.utils.cpp_extension.get_default_build_root() + if os.path.exists(default_build_root): + shutil.rmtree(default_build_root, ignore_errors=True) + + +@unittest.skipIf( + IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1, + "Only on linux platform and mutual exclusive to other backends", +) +@torch.testing._internal.common_utils.markDynamoStrictTest +class TestCppExtensionMTIABackend(common.TestCase): + """Tests MTIA backend with C++ extensions.""" + + module = None + + def setUp(self): + super().setUp() + # cpp extensions use relative paths. Those paths are relative to + # this file, so we'll change the working directory temporarily + self.old_working_dir = os.getcwd() + os.chdir(os.path.dirname(os.path.abspath(__file__))) + + def tearDown(self): + super().tearDown() + # return the working directory (see setUp) + os.chdir(self.old_working_dir) + + @classmethod + def tearDownClass(cls): + remove_build_path() + + @classmethod + def setUpClass(cls): + remove_build_path() + build_dir = tempfile.mkdtemp() + # Load the fake device guard impl. + cls.module = torch.utils.cpp_extension.load( + name="mtia_extension", + sources=["cpp_extensions/mtia_extension.cpp"], + build_directory=build_dir, + extra_include_paths=[ + "cpp_extensions", + "path / with spaces in it", + "path with quote'", + ], + is_python_module=False, + verbose=True, + ) + + @skipIfTorchDynamo("Not a TorchDynamo suitable test") + def test_get_device_module(self): + device = torch.device("mtia:0") + default_stream = torch.get_device_module(device).current_stream() + self.assertEqual( + default_stream.device_type, int(torch._C._autograd.DeviceType.MTIA) + ) + print(torch._C.Stream.__mro__) + print(torch.cuda.Stream.__mro__) + + @skipIfTorchDynamo("Not a TorchDynamo suitable test") + def test_stream_basic(self): + default_stream = torch.mtia.current_stream() + user_stream = torch.mtia.Stream() + self.assertEqual(torch.mtia.current_stream(), default_stream) + self.assertNotEqual(default_stream, user_stream) + # Check mtia_extension.cpp, default stream id starts from 0. 
+ self.assertEqual(default_stream.stream_id, 0) + self.assertNotEqual(user_stream.stream_id, 0) + with torch.mtia.stream(user_stream): + self.assertEqual(torch.mtia.current_stream(), user_stream) + self.assertTrue(user_stream.query()) + default_stream.synchronize() + self.assertTrue(default_stream.query()) + + @skipIfTorchDynamo("Not a TorchDynamo suitable test") + def test_stream_context(self): + mtia_stream_0 = torch.mtia.Stream(device="mtia:0") + mtia_stream_1 = torch.mtia.Stream(device="mtia:0") + print(mtia_stream_0) + print(mtia_stream_1) + with torch.mtia.stream(mtia_stream_0): + current_stream = torch.mtia.current_stream() + msg = f"current_stream {current_stream} should be {mtia_stream_0}" + self.assertTrue(current_stream == mtia_stream_0, msg=msg) + + with torch.mtia.stream(mtia_stream_1): + current_stream = torch.mtia.current_stream() + msg = f"current_stream {current_stream} should be {mtia_stream_1}" + self.assertTrue(current_stream == mtia_stream_1, msg=msg) + + @skipIfTorchDynamo("Not a TorchDynamo suitable test") + def test_stream_context_different_device(self): + device_0 = torch.device("mtia:0") + device_1 = torch.device("mtia:1") + mtia_stream_0 = torch.mtia.Stream(device=device_0) + mtia_stream_1 = torch.mtia.Stream(device=device_1) + print(mtia_stream_0) + print(mtia_stream_1) + orig_current_device = torch.mtia.current_device() + with torch.mtia.stream(mtia_stream_0): + current_stream = torch.mtia.current_stream() + self.assertTrue(torch.mtia.current_device() == device_0.index) + msg = f"current_stream {current_stream} should be {mtia_stream_0}" + self.assertTrue(current_stream == mtia_stream_0, msg=msg) + self.assertTrue(torch.mtia.current_device() == orig_current_device) + with torch.mtia.stream(mtia_stream_1): + current_stream = torch.mtia.current_stream() + self.assertTrue(torch.mtia.current_device() == device_1.index) + msg = f"current_stream {current_stream} should be {mtia_stream_1}" + self.assertTrue(current_stream == mtia_stream_1, msg=msg) + self.assertTrue(torch.mtia.current_device() == orig_current_device) + + @skipIfTorchDynamo("Not a TorchDynamo suitable test") + def test_device_context(self): + device_0 = torch.device("mtia:0") + device_1 = torch.device("mtia:1") + with torch.mtia.device(device_0): + self.assertTrue(torch.mtia.current_device() == device_0.index) + + with torch.mtia.device(device_1): + self.assertTrue(torch.mtia.current_device() == device_1.index) + + +if __name__ == "__main__": + common.run_tests() diff --git a/test/test_cpp_extensions_stream_and_event.py b/test/test_cpp_extensions_stream_and_event.py new file mode 100644 index 0000000000..0be81dd492 --- /dev/null +++ b/test/test_cpp_extensions_stream_and_event.py @@ -0,0 +1,108 @@ +# Owner(s): ["module: mtia"] + +import os +import shutil +import sys +import tempfile +import unittest + +import torch +import torch.testing._internal.common_utils as common +import torch.utils.cpp_extension +from torch.testing._internal.common_utils import ( + IS_ARM64, + IS_LINUX, + skipIfTorchDynamo, + TEST_CUDA, + TEST_PRIVATEUSE1, +) +from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME + + +TEST_CUDA = TEST_CUDA and CUDA_HOME is not None +TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None + + +def remove_build_path(): + if sys.platform == "win32": + # Not wiping extensions build folder because Windows + return + default_build_root = torch.utils.cpp_extension.get_default_build_root() + if os.path.exists(default_build_root): + shutil.rmtree(default_build_root, 
ignore_errors=True) + + +# Since we use a fake MTIA device backend to test generic Stream/Event, device backends are mutual exclusive to each other. +# The test will be skipped if any of the following conditions are met: +@unittest.skipIf( + IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1, + "Only on linux platform and mutual exclusive to other backends", +) +@torch.testing._internal.common_utils.markDynamoStrictTest +class TestCppExtensionStreamAndEvent(common.TestCase): + """Tests Stream and Event with C++ extensions.""" + + module = None + + def setUp(self): + super().setUp() + # cpp extensions use relative paths. Those paths are relative to + # this file, so we'll change the working directory temporarily + self.old_working_dir = os.getcwd() + os.chdir(os.path.dirname(os.path.abspath(__file__))) + + def tearDown(self): + super().tearDown() + # return the working directory (see setUp) + os.chdir(self.old_working_dir) + + @classmethod + def tearDownClass(cls): + remove_build_path() + + @classmethod + def setUpClass(cls): + remove_build_path() + build_dir = tempfile.mkdtemp() + # Load the fake device guard impl. + src = f"{os.path.abspath(os.path.dirname(__file__))}/cpp_extensions/mtia_extension.cpp" + cls.module = torch.utils.cpp_extension.load( + name="mtia_extension", + sources=[src], + build_directory=build_dir, + extra_include_paths=[ + "cpp_extensions", + "path / with spaces in it", + "path with quote'", + ], + is_python_module=False, + verbose=True, + ) + + @skipIfTorchDynamo("Not a TorchDynamo suitable test") + def test_stream_event(self): + s = torch.Stream() + self.assertTrue(s.device_type, int(torch._C._autograd.DeviceType.MTIA)) + e = torch.Event() + self.assertTrue(e.device.type, "mtia") + # Should be nullptr by default + self.assertTrue(e.event_id == 0) + s.record_event(event=e) + print(f"recorded event 1: {e}") + self.assertTrue(e.event_id != 0) + e2 = s.record_event() + print(f"recorded event 2: {e2}") + self.assertTrue(e2.event_id != 0) + self.assertTrue(e2.event_id != e.event_id) + e.synchronize() + e2.synchronize() + time_elapsed = e.elapsed_time(e2) + print(f"time elapsed between e1 and e2: {time_elapsed}") + old_event_id = e.event_id + e.record(stream=s) + print(f"recorded event 1: {e}") + self.assertTrue(e.event_id == old_event_id) + + +if __name__ == "__main__": + common.run_tests() diff --git a/tools/testing/modulefinder_determinator.py b/tools/testing/modulefinder_determinator.py index ce55fdb424..ba58d75c57 100644 --- a/tools/testing/modulefinder_determinator.py +++ b/tools/testing/modulefinder_determinator.py @@ -21,6 +21,8 @@ TARGET_DET_LIST = [ "test_cpp_extensions_aot_no_ninja", "test_cpp_extensions_jit", "test_cpp_extensions_open_device_registration", + "test_cpp_extensions_stream_and_event", + "test_cpp_extensions_mtia_backend", "test_cuda", "test_cuda_primary_ctx", "test_dataloader",
2.41.0
4bedbb9e10e59e7c3c971944001055fc3cbfdbb
Thu, 18 Apr 2024 18:20:11 +0000
[PATCH 0336/1000] [export] Serialize rational symint ranges (#123884)
Some symints end up with rational range bounds like 10/3, which causes an error ([example](https://www.internalfb.com/intern/everpaste/?handle=GMG2AxkeoFUrh-UDAFcE8pKPgjoUbsIXAAAB)). Ed will eventually get rid of these rational ranges, but as a workaround export can simply clamp the bounds to integers at serialization time.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123884
Approved by: https://github.com/zhxchen17
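The clamp direction matters for soundness: the serializer tightens the range inward, taking the ceiling of a fractional lower bound and the floor of a fractional upper bound. A small sketch using the values from the new test:

```
import math
import sympy

lower, upper = sympy.Rational(10, 6), sympy.Rational(10, 3)  # 5/3 and 10/3
min_val, max_val = math.ceil(lower), math.floor(upper)       # tighten inward
assert (min_val, max_val) == (2, 3)  # matches the serialized range in the test
```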
diff --git a/test/export/test_serialize.py b/test/export/test_serialize.py index 8645eff8f6..60c148defb 100644 --- a/test/export/test_serialize.py +++ b/test/export/test_serialize.py @@ -28,7 +28,7 @@ from torch._export.serde.serialize import ( from torch._higher_order_ops.torchbind import enable_torchbind_tracing from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode from torch.export import Dim, export, load, save -from torch.fx.experimental.symbolic_shapes import is_concrete_int +from torch.fx.experimental.symbolic_shapes import is_concrete_int, ValueRanges from torch.testing._internal.common_utils import ( find_library_location, instantiate_parametrized_tests, @@ -277,6 +277,29 @@ class TestSerialize(TestCase): self.assertNotIn(name, seen) seen.add(name) + def test_rational_ranges(self) -> None: + class M(torch.nn.Module): + def forward(self, x): + return x + x + + ep = torch.export.export( + M(), (torch.randn(4),), dynamic_shapes=({0: Dim("temp")},) + ) + + range_constraints = list(ep.range_constraints.keys()) + assert len(range_constraints) == 1 + symint = range_constraints[0] + + import sympy + + upper_range = sympy.Rational(10, 3) + lower_range = sympy.Rational(10, 6) + ep.range_constraints[symint] = ValueRanges(lower=lower_range, upper=upper_range) + + serialized = ExportedProgramSerializer().serialize(ep) + self.assertEqual(serialized.exported_program.range_constraints["s0"].min_val, 2) + self.assertEqual(serialized.exported_program.range_constraints["s0"].max_val, 3) + def test_kwargs_default(self) -> None: """ Tests that the kwargs default values are serialized even if they are not diff --git a/torch/_export/serde/serialize.py b/torch/_export/serde/serialize.py index c2b2304d47..9b21fce91c 100644 --- a/torch/_export/serde/serialize.py +++ b/torch/_export/serde/serialize.py @@ -315,7 +315,7 @@ def deserialize_torch_artifact(serialized: Union[Dict[str, Any], Tuple[Any, ...] return artifact -def _sympy_int_to_int(val: sympy.Expr): +def _sympy_int_to_int(val: sympy.Expr, adjust: str): # Convert simple sympy Integers into concrete int if val == sympy.oo: return math.inf @@ -323,7 +323,20 @@ def _sympy_int_to_int(val: sympy.Expr): return -math.inf if isinstance(val, sympy.Integer): return int(val) - raise RuntimeError("Export constraints cannot be non-integer expressions") + + # TODO: Remove this adjustment when Ed gets rid of fractional ranges + log.warning( + "Export constraints cannot be non-integer expressions. Found " + "type %s, and value %s. We will attempt to %s " + "this value.", type(val), val, adjust + ) + + if adjust == "floor": + return math.floor(val) + elif adjust == "ceil": + return math.ceil(val) + else: + raise RuntimeError(f"Got invalid adjustment {adjust}") def _int_to_sympy_int(val) -> sympy.Expr: @@ -340,8 +353,8 @@ def serialize_range_constraints( ) -> Dict[str, RangeConstraint]: return { str(k): RangeConstraint( - _sympy_int_to_int(v.lower), # type: ignore[arg-type] - _sympy_int_to_int(v.upper), # type: ignore[arg-type] + _sympy_int_to_int(v.lower, "ceil"), # type: ignore[arg-type] + _sympy_int_to_int(v.upper, "floor"), # type: ignore[arg-type] ) for k, v in range_constraints.items() }
2.41.0
9407eca3b0be3c0272b5c583f8e77b9108a71f8
Thu, 18 Apr 2024 18:38:26 +0000
[PATCH 0337/1000] Capture triton kernel in execution trace (#124140)
Summary: This diff captures Triton kernels in the execution trace.

Test Plan: buck test mode/dev-nosan caffe2/test:profiler -- test_execution_trace_with_pt2

Differential Revision: D56162599

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124140
Approved by: https://github.com/briancoutinho
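A condensed usage sketch of what the updated test below exercises (requires CUDA and Triton): record an execution trace around a compiled function so the generated Triton kernels show up as nodes carrying kernel_backend / kernel_file attributes, with the kernel sources copied into a `<trace>_resources` directory when the observer is unregistered:

```
import tempfile

import torch
from torch.profiler import ExecutionTraceObserver, profile

@torch.compile  # inductor generates the Triton kernels
def fn(a, b, c):
    return (torch.nn.functional.linear(a, b) + c).cos()

inputs = [torch.randn(4, 4, device="cuda") for _ in range(3)]
fn(*inputs)  # warm up so the Triton kernels are compiled

fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()

et = ExecutionTraceObserver()
et.register_callback(fp.name)
with profile(activities=torch.profiler.supported_activities(), record_shapes=True):
    et.start()
    fn(*inputs)
    et.stop()
# Unregistering also copies the generated kernel source files next to the trace.
et.unregister_callback()
```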
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py index 8e4e31718d..d9012d0e89 100644 --- a/test/profiler/test_profiler.py +++ b/test/profiler/test_profiler.py @@ -21,6 +21,7 @@ import torch.nn as nn import torch.optim import torch.utils.data import torch.utils.data.datapipes as dp +from torch import _dynamo as torchdynamo from torch._C._profiler import _TensorMetadata from torch.autograd import ( _record_function_with_args_enter, @@ -52,7 +53,9 @@ from torch.profiler._pattern_matcher import ( report_all_anti_patterns, SynchronizedDataLoaderPattern, ) -from torch.testing._internal.common_cuda import TEST_MULTIGPU + +from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU + from torch.testing._internal.common_device_type import skipCUDAVersionIn from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, @@ -70,6 +73,8 @@ from torch.testing._internal.common_utils import ( TestCase, ) +from torch.utils._triton import has_triton + Json = Dict[str, Any] try: @@ -513,41 +518,52 @@ class TestExecutionTrace(TestCase): assert loop_count == expected_loop_events @unittest.skipIf(IS_WINDOWS, "torch.compile does not support WINDOWS") + @unittest.skipIf( + sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+" + ) + @unittest.skipIf(not TEST_CUDA or not has_triton(), "need CUDA and triton to run") def test_execution_trace_with_pt2(self): - class ConvAndRelu(nn.Module): - def __init__(self) -> None: - super().__init__() - self.linear = nn.Linear(4096, 4096) - self.relu = nn.ReLU(inplace=True) + @torchdynamo.optimize("inductor") + def fn(a, b, c): + x = torch.nn.functional.linear(a, b) + x = x + c + return x.cos() - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.linear(x) - x = self.relu(x) - return x + a, b, c = (torch.randn(4, 4, requires_grad=True).to("cuda") for _ in range(3)) + + inputs = [a, b, c] + fn(*inputs) # Create a temp file to save execution trace data. 
fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False) fp.close() - test_module = torch.compile(ConvAndRelu()) - - x = torch.rand(128, 4096) - et = ExecutionTraceObserver().register_callback(fp.name) - et.start() - test_module.forward(x) - et.stop() + et_file = fp.name + et = ExecutionTraceObserver() + et.register_callback(et_file) + with profile( + activities=torch.profiler.supported_activities(), record_shapes=True + ): + et.start() + fn(*inputs) + et.stop() assert fp.name == et.get_output_file_path() et.unregister_callback() + nodes = self.get_execution_trace_root(fp.name) - found_root_node = False + found_captured_triton_kernel_node = False for n in nodes: assert "name" in n - if "[pytorch|profiler|execution_trace|process]" in n["name"]: - found_root_node = True - - assert found_root_node + if "triton_" in n["name"]: + for attr in n["attrs"]: + if attr["name"] == "kernel_file" and attr["value"] != "": + found_captured_triton_kernel_node = True + assert len(n["inputs"]["values"]) > 0 + assert len(n["outputs"]["values"]) == 0 + + assert found_captured_triton_kernel_node def test_execution_trace_start_stop(self): use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities() diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/triton_heuristics.py index f5c6dae70d..b8dcbf1fcb 100644 --- a/torch/_inductor/triton_heuristics.py +++ b/torch/_inductor/triton_heuristics.py @@ -801,7 +801,7 @@ class CachingAutotuner(KernelInterface): args, { "kernel_file": self.filename, - "kernel_type": "triton", + "kernel_backend": "triton", "grid": grid_info, "stream": stream, }, diff --git a/torch/csrc/profiler/standalone/execution_trace_observer.cpp b/torch/csrc/profiler/standalone/execution_trace_observer.cpp index 763f449c23..80b6f2221d 100644 --- a/torch/csrc/profiler/standalone/execution_trace_observer.cpp +++ b/torch/csrc/profiler/standalone/execution_trace_observer.cpp @@ -236,6 +236,8 @@ const ExecutionTraceObserver::ID root_id{1}; struct FunctionCallContext : public ObserverContext { std::string name; + std::string kernel_backend; + std::string kernel_file; ExecutionTraceObserver::ID op_id{uninitialized_id}; ExecutionTraceObserver::ID parent_id{uninitialized_id}; ExecutionTraceObserver::ID fw_parent_id{uninitialized_id}; @@ -273,14 +275,24 @@ static void writeJsonNode( const std::string& outputs = "[]", const std::string& output_shapes = "[]", const std::string& output_types = "[]", - const std::string& operator_schema = "") { + const std::string& operator_schema = "", + const std::string& kernel_backend = "", + const std::string& kernel_file = "") { out << fmt::format( R"JSON( {{ "id": {}, "name": "{}", "ctrl_deps": {}, "inputs": {{"values": {}, "shapes": {}, "types": {}}}, "outputs": {{"values": {}, "shapes": {}, "types": {}}}, - "attrs": [{{"name": "rf_id", "type": "uint64", "value": {}}}, {{"name": "fw_parent", "type": "uint64", "value": {}}}, {{"name": "seq_id", "type": "int64", "value": {}}}, {{"name": "scope", "type": "uint64", "value": {}}}, {{"name": "tid", "type": "uint64", "value": {}}}, {{"name": "fw_tid", "type": "uint64", "value": {}}}, {{"name": "op_schema", "type": "string", "value": "{}"}}] + "attrs": [{{"name": "rf_id", "type": "uint64", "value": {}}}, + {{"name": "fw_parent", "type": "uint64", "value": {}}}, + {{"name": "seq_id", "type": "int64", "value": {}}}, + {{"name": "scope", "type": "uint64", "value": {}}}, + {{"name": "tid", "type": "uint64", "value": {}}}, + {{"name": "fw_tid", "type": "uint64", "value": {}}}, + {{"name": 
"op_schema", "type": "string", "value": "{}"}}, + {{"name": "kernel_backend", "type": "string", "value": "{}"}}, + {{"name": "kernel_file", "type": "string", "value": "{}"}}] }})JSON", id, name, @@ -297,7 +309,9 @@ static void writeJsonNode( scope, tid, fw_tid, - operator_schema); + operator_schema, + kernel_backend, + kernel_file); } inline std::string timeString(const std::time_t timepoint) { @@ -442,6 +456,44 @@ inline void appendValueInfo( shapes.push_back(getValueShape(val)); } +inline void handleKernelBackendInfo( + FunctionCallContext& fc, + const RecordFunction& fn) { + // triton kernel related information are in kwinputs + const auto& kwinputs = fn.kwinputs(); + if (kwinputs.find("kernel_backend") != kwinputs.end()) { + fc.kernel_backend = kwinputs.at("kernel_backend").toStringRef(); + if (fc.kernel_backend == "triton") { + fc.kernel_file = kwinputs.at("kernel_file").toStringRef(); + TORCH_INTERNAL_ASSERT( + kwinputs.find("kernel_file") != kwinputs.end(), + "kernel file is missing in triton kernel"); + // Remove the path of the file name + if (fc.kernel_file.find_last_of('/') != std::string::npos) + fc.kernel_file = + fc.kernel_file.substr(fc.kernel_file.find_last_of('/') + 1); + + // get grid information + TORCH_INTERNAL_ASSERT( + kwinputs.find("grid") != kwinputs.end(), + "grid is missing in triton kernel"); + fc.input_values.emplace_back( + "\"" + kwinputs.at("grid").toStringRef() + "\""); + fc.input_types.emplace_back("\"String\""); + fc.input_shapes.emplace_back("[]"); + + // get stream information + TORCH_INTERNAL_ASSERT( + kwinputs.find("stream") != kwinputs.end(), + "stream is missing in triton kernel"); + fc.input_values.emplace_back( + std::to_string(kwinputs.at("stream").toInt())); + fc.input_types.emplace_back("\"Int\""); + fc.input_shapes.emplace_back("[]"); + } + } +} + static void recordOperatorStart( ExecutionTraceObserver& ob, FunctionCallContext& fc, @@ -491,6 +543,9 @@ static void recordOperatorStart( appendValueInfo( ob, inputs[i], fc.input_values, fc.input_types, fc.input_shapes); } + + handleKernelBackendInfo(fc, fn); + fc.parent_id = ob.op_stack[tid].top(); // get parent id from the forward stack, this can be different for // autograd ops, which may execute on a different thread than the original @@ -615,7 +670,9 @@ static void onFunctionExit(const RecordFunction& fn, ObserverContext* ctx_ptr) { vectorToString(output_values), vectorToString(output_shapes), vectorToString(output_types), - op_schema_str); + op_schema_str, + fc.kernel_backend, + fc.kernel_file); ob->out << ","; } catch (const std::exception& e) { LOG(WARNING) << "Exception in execution trace observer: [" << fc.name diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py index fc7a61bf45..bfc725700a 100644 --- a/torch/profiler/profiler.py +++ b/torch/profiler/profiler.py @@ -1,6 +1,7 @@ import gzip import json import os +import shutil import tempfile from abc import ABC, abstractmethod from enum import Enum @@ -784,8 +785,33 @@ class ExecutionTraceObserver(_ITraceObserver): """ Removes ET observer from record function callbacks. 
""" + + def _save_triton_kernels(): + # Save the kernel paths for the generated kernels + from torch._inductor.codecache import PyCodeCache as PyCodeCache + + kernel_files = [ + v.__file__ + for v in PyCodeCache.cache.values() + if getattr(v, "__file__", None) is not None + ] + work_dir, file_name = os.path.split(self._output_file_path) + resource_dir = os.path.join( + work_dir, os.path.splitext(file_name)[0] + "_resources" + ) + if not os.path.exists(resource_dir): + os.mkdir(resource_dir) + + for kernel_file in kernel_files: + if kernel_file is None: + continue + path, name = os.path.split(kernel_file) + dst = os.path.join(resource_dir, name) + shutil.copyfile(kernel_file, dst) + if self._registered: self.stop() + _save_triton_kernels() _remove_execution_trace_observer() self._registered = False
2.41.0
1bc188f42458786bb7e24a5bb9f5b6ddf05adb8
Thu, 18 Apr 2024 18:53:59 +0000
[PATCH 0338/1000] Revert "[Environment Variable][1/N] Use thread-safe env variable API in c10 (#119449)"
This reverts commit b51f66c1950a582dd18d1b2ee67df840a8c4dbbe. Reverted https://github.com/pytorch/pytorch/pull/119449 on behalf of https://github.com/malfet due to Broke gcc9 builds ([comment](https://github.com/pytorch/pytorch/pull/119449#issuecomment-2064936414))
diff --git a/c10/core/impl/alloc_cpu.cpp b/c10/core/impl/alloc_cpu.cpp index def4c3a3a9..9b7ae22f9f 100644 --- a/c10/core/impl/alloc_cpu.cpp +++ b/c10/core/impl/alloc_cpu.cpp @@ -3,7 +3,6 @@ #include <c10/core/alignment.h> #include <c10/util/Flags.h> #include <c10/util/Logging.h> -#include <c10/util/env.h> #include <c10/util/irange.h> #include <c10/util/numa.h> @@ -54,8 +53,8 @@ void memset_junk(void* data, size_t num) { #if defined(__linux__) && !defined(__ANDROID__) static inline bool is_thp_alloc_enabled() { static bool value = [&] { - auto env = c10::utils::check_env("THP_MEM_ALLOC_ENABLE"); - return env.has_value() ? env.value() : 0; + const char* ptr = std::getenv("THP_MEM_ALLOC_ENABLE"); + return ptr != nullptr ? std::atoi(ptr) : 0; }(); return value; } diff --git a/c10/cuda/CUDAAllocatorConfig.cpp b/c10/cuda/CUDAAllocatorConfig.cpp index ca38dfd6a4..1f81ed47b6 100644 --- a/c10/cuda/CUDAAllocatorConfig.cpp +++ b/c10/cuda/CUDAAllocatorConfig.cpp @@ -234,7 +234,7 @@ size_t CUDAAllocatorConfig::parseAllocatorConfig( return i; } -void CUDAAllocatorConfig::parseArgs(const std::optional<std::string>& env) { +void CUDAAllocatorConfig::parseArgs(const char* env) { // If empty, set the default values m_max_split_size = std::numeric_limits<size_t>::max(); m_roundup_power2_divisions.assign(kRoundUpPowerOfTwoIntervals, 0); @@ -242,16 +242,16 @@ void CUDAAllocatorConfig::parseArgs(const std::optional<std::string>& env) { bool used_cudaMallocAsync = false; bool used_native_specific_option = false; - if (!env.has_value()) { + if (env == nullptr) { return; } { std::lock_guard<std::mutex> lock(m_last_allocator_settings_mutex); - m_last_allocator_settings = env.value(); + m_last_allocator_settings = env; } std::vector<std::string> config; - lexArgs(env.value().c_str(), config); + lexArgs(env, config); for (size_t i = 0; i < config.size(); i++) { std::string_view config_item_view(config[i]); diff --git a/c10/cuda/CUDAAllocatorConfig.h b/c10/cuda/CUDAAllocatorConfig.h index db5c9e1c8f..3106fc1b46 100644 --- a/c10/cuda/CUDAAllocatorConfig.h +++ b/c10/cuda/CUDAAllocatorConfig.h @@ -2,7 +2,6 @@ #include <c10/cuda/CUDAMacros.h> #include <c10/util/Exception.h> -#include <c10/util/env.h> #include <atomic> #include <cstddef> @@ -73,13 +72,14 @@ class C10_CUDA_API CUDAAllocatorConfig { static CUDAAllocatorConfig& instance() { static CUDAAllocatorConfig* s_instance = ([]() { auto inst = new CUDAAllocatorConfig(); - inst->parseArgs(c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF")); + const char* env = getenv("PYTORCH_CUDA_ALLOC_CONF"); + inst->parseArgs(env); return inst; })(); return *s_instance; } - void parseArgs(const std::optional<std::string>& env); + void parseArgs(const char* env); private: CUDAAllocatorConfig(); diff --git a/c10/cuda/CUDACachingAllocator.cpp b/c10/cuda/CUDACachingAllocator.cpp index afac5272b6..c472e82ce2 100644 --- a/c10/cuda/CUDACachingAllocator.cpp +++ b/c10/cuda/CUDACachingAllocator.cpp @@ -8,7 +8,6 @@ #include <c10/util/CallOnce.h> #include <c10/util/ScopeExit.h> #include <c10/util/UniqueVoidPtr.h> -#include <c10/util/env.h> #include <c10/util/flat_hash_map.h> #include <c10/util/hash.h> #include <c10/util/irange.h> @@ -2832,7 +2831,7 @@ class DeviceCachingAllocator { // errors, since the caching allocator foils cuda-memcheck. 
bool forceUncachedAllocator() { static bool force_uncached = - c10::utils::has_env("PYTORCH_NO_CUDA_MEMORY_CACHING"); + getenv("PYTORCH_NO_CUDA_MEMORY_CACHING") != nullptr; return force_uncached; } @@ -3364,9 +3363,9 @@ struct BackendStaticInitializer { // version checks, to CUDAAllocatorConfig's runtime doublecheck. If this // works, maybe we should move all of CUDAAllocatorConfig here? CUDAAllocator* parseEnvForBackend() { - const auto val = c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF"); - if (val.has_value()) { - const std::string& config = val.value(); + const char* val = getenv("PYTORCH_CUDA_ALLOC_CONF"); + if (val != nullptr) { + const std::string config(val); std::regex exp("[\\s,]+"); std::sregex_token_iterator it(config.begin(), config.end(), exp, -1); diff --git a/c10/cuda/CUDADeviceAssertionHost.cpp b/c10/cuda/CUDADeviceAssertionHost.cpp index ec41e6230f..1d52af7812 100644 --- a/c10/cuda/CUDADeviceAssertionHost.cpp +++ b/c10/cuda/CUDADeviceAssertionHost.cpp @@ -3,7 +3,6 @@ #include <c10/cuda/CUDAFunctions.h> #include <c10/util/Backtrace.h> #include <c10/util/Exception.h> -#include <c10/util/env.h> #include <c10/util/irange.h> #include <cuda_runtime.h> @@ -81,8 +80,8 @@ bool dsa_check_if_all_devices_support_managed_memory() { } bool env_flag_set(const char* env_var_name) { - const auto env_flag = c10::utils::check_env(env_var_name); - return env_flag.has_value() && env_flag.value(); + const char* const env_string = std::getenv(env_var_name); + return (env_string == nullptr) ? false : std::strcmp(env_string, "0"); } /// Deleter for UVM/managed memory pointers diff --git a/c10/cuda/CUDAMiscFunctions.cpp b/c10/cuda/CUDAMiscFunctions.cpp index 9ef724813e..11ea775366 100644 --- a/c10/cuda/CUDAMiscFunctions.cpp +++ b/c10/cuda/CUDAMiscFunctions.cpp @@ -1,14 +1,12 @@ #include <c10/cuda/CUDAMiscFunctions.h> -#include <c10/util/env.h> +#include <cstdlib> namespace c10::cuda { -// NOLINTNEXTLINE(bugprone-exception-escape,-warnings-as-errors) const char* get_cuda_check_suffix() noexcept { - static auto device_blocking_flag = - c10::utils::check_env("CUDA_LAUNCH_BLOCKING"); + static char* device_blocking_flag = getenv("CUDA_LAUNCH_BLOCKING"); static bool blocking_enabled = - (device_blocking_flag.has_value() && device_blocking_flag.value()); + (device_blocking_flag && atoi(device_blocking_flag)); if (blocking_enabled) { return ""; } else { diff --git a/c10/test/util/DeadlockDetection_test.cpp b/c10/test/util/DeadlockDetection_test.cpp index 05ae154e22..35c4953f6d 100644 --- a/c10/test/util/DeadlockDetection_test.cpp +++ b/c10/test/util/DeadlockDetection_test.cpp @@ -1,8 +1,9 @@ #include <c10/util/DeadlockDetection.h> -#include <c10/util/env.h> #include <gtest/gtest.h> +#include <cstdlib> + using namespace ::testing; using namespace c10::impl; @@ -22,7 +23,7 @@ TEST(DeadlockDetection, basic) { #ifndef _WIN32 TEST(DeadlockDetection, disable) { - c10::utils::set_env("TORCH_DISABLE_DEADLOCK_DETECTION", "1"); + setenv("TORCH_DISABLE_DEADLOCK_DETECTION", "1", 1); DummyPythonGILHooks hooks; SetPythonGILHooks(&hooks); SetPythonGILHooks(&hooks); diff --git a/c10/util/DeadlockDetection.cpp b/c10/util/DeadlockDetection.cpp index 4b00d24534..320fa7873c 100644 --- a/c10/util/DeadlockDetection.cpp +++ b/c10/util/DeadlockDetection.cpp @@ -1,5 +1,6 @@ #include <c10/util/DeadlockDetection.h> -#include <c10/util/env.h> + +#include <cstdlib> namespace c10::impl { @@ -7,7 +8,7 @@ namespace { PythonGILHooks* python_gil_hooks = nullptr; bool disable_detection() { - return 
c10::utils::has_env("TORCH_DISABLE_DEADLOCK_DETECTION"); + return std::getenv("TORCH_DISABLE_DEADLOCK_DETECTION") != nullptr; } } // namespace diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp index 17459f69fa..e9c9e9c2f3 100644 --- a/c10/util/Logging.cpp +++ b/c10/util/Logging.cpp @@ -1,7 +1,6 @@ #include <c10/util/Backtrace.h> #include <c10/util/Flags.h> #include <c10/util/Logging.h> -#include <c10/util/env.h> #ifdef FBCODE_CAFFE2 #include <folly/synchronization/SanitizeThread.h> #endif @@ -11,6 +10,7 @@ #endif #include <algorithm> +#include <cstdlib> #include <iostream> // Common code that we use regardless of whether we use glog or not. @@ -94,8 +94,8 @@ using DDPUsageLoggerType = std::function<void(const DDPLoggingData&)>; namespace { bool IsAPIUsageDebugMode() { - auto val = c10::utils::get_env("PYTORCH_API_USAGE_STDERR"); - return val.has_value() && !val.value().empty(); // any non-empty value + const char* val = getenv("PYTORCH_API_USAGE_STDERR"); + return val && *val; // any non-empty value } void APIUsageDebug(const string& event) { @@ -438,10 +438,10 @@ namespace c10::detail { namespace { void setLogLevelFlagFromEnv() { - auto level_env = c10::utils::get_env("TORCH_CPP_LOG_LEVEL"); + const char* level_str = std::getenv("TORCH_CPP_LOG_LEVEL"); // Not set, fallback to the default level (i.e. WARNING). - std::string level{level_env.has_value() ? level_env.value() : ""}; + std::string level{level_str != nullptr ? level_str : ""}; if (level.empty()) { return; } diff --git a/c10/util/env.cpp b/c10/util/env.cpp deleted file mode 100644 index 865c6b9497..0000000000 --- a/c10/util/env.cpp +++ /dev/null @@ -1,108 +0,0 @@ -#include <c10/util/Exception.h> -#include <c10/util/env.h> -#include <fmt/format.h> -#include <cstdlib> -#include <shared_mutex> - -namespace c10::utils { - -static std::shared_mutex env_mutex; - -// Set an environment variable. -void set_env(const char* name, const char* value, bool overwrite) { - std::lock_guard lk(env_mutex); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif -#ifdef _MSC_VER - if (!overwrite) { - // NOLINTNEXTLINE(concurrency-mt-unsafe) - if (std::getenv(name) != nullptr) { - return; - } - } - auto full_env_variable = fmt::format("{}={}", name, value); - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto err = putenv(full_env_variable.c_str()); - TORCH_INTERNAL_ASSERT( - err == 0, - "putenv failed for environment \"", - name, - "\", the error is: ", - err); -#else - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto err = setenv(name, value, static_cast<int>(overwrite)); - TORCH_INTERNAL_ASSERT( - err == 0, - "setenv failed for environment \"", - name, - "\", the error is: ", - err); -#endif -#ifdef _MSC_VER -#pragma warning(pop) -#endif - return; -} - -// Checks an environment variable is set. 
-bool has_env(const char* name) noexcept { - std::shared_lock lk(env_mutex); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto envar = std::getenv(name); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - return envar != nullptr; -} - -// Reads an environment variable and returns the content if it is set -std::optional<std::string> get_env(const char* name) noexcept { - std::shared_lock lk(env_mutex); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto envar = std::getenv(name); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - if (envar != nullptr) { - return std::string(envar); - } - return std::nullopt; -} - -// Reads an environment variable and returns -// - optional<true>, if set equal to "1" -// - optional<false>, if set equal to "0" -// - nullopt, otherwise -// -// NB: -// Issues a warning if the value of the environment variable is not 0 or 1. -std::optional<bool> check_env(const char* name) { - auto env_opt = get_env(name); - if (env_opt.has_value()) { - if (*env_opt == "0") { - return false; - } - if (*env_opt == "1") { - return true; - } - TORCH_WARN( - "Ignoring invalid value for boolean flag ", - name, - ": ", - *env_opt, - "valid values are 0 or 1."); - } - return std::nullopt; -} -} // namespace c10::utils diff --git a/c10/util/env.h b/c10/util/env.h index 04b7585861..3db116c7db 100644 --- a/c10/util/env.h +++ b/c10/util/env.h @@ -1,20 +1,11 @@ #pragma once -#include <c10/macros/Export.h> +#include <c10/util/Exception.h> +#include <cstdlib> +#include <cstring> #include <optional> -#include <string> namespace c10::utils { - -// Set an environment variable. -C10_API void set_env( - const char* name, - const char* value, - bool overwrite = true); - -// Checks an environment variable is set. -C10_API bool has_env(const char* name) noexcept; - // Reads an environment variable and returns // - optional<true>, if set equal to "1" // - optional<false>, if set equal to "0" @@ -22,10 +13,29 @@ C10_API bool has_env(const char* name) noexcept; // // NB: // Issues a warning if the value of the environment variable is not 0 or 1. -C10_API std::optional<bool> check_env(const char* name); - -// Reads the value of an environment variable if it is set. -// However, check_env should be used if the value is assumed to be a flag. -C10_API std::optional<std::string> get_env(const char* name) noexcept; - +inline std::optional<bool> check_env(const char* name) { +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + auto envar = std::getenv(name); +#ifdef _MSC_VER +#pragma warning(pop) +#endif + if (envar) { + if (strcmp(envar, "0") == 0) { + return false; + } + if (strcmp(envar, "1") == 0) { + return true; + } + TORCH_WARN( + "Ignoring invalid value for boolean flag ", + name, + ": ", + envar, + "valid values are 0 or 1."); + } + return std::nullopt; +} } // namespace c10::utils diff --git a/c10/util/tempfile.cpp b/c10/util/tempfile.cpp index f106885a88..28c3c7f14f 100644 --- a/c10/util/tempfile.cpp +++ b/c10/util/tempfile.cpp @@ -1,5 +1,4 @@ #include <c10/util/Exception.h> -#include <c10/util/env.h> #include <c10/util/tempfile.h> #include <fmt/format.h> @@ -23,11 +22,10 @@ static std::string make_filename(std::string_view name_prefix) { // We see if any of these environment variables is set and use their value, or // else default the temporary directory to `/tmp`. 
- std::string tmp_directory = "/tmp"; + const char* tmp_directory = "/tmp"; for (const char* variable : {"TMPDIR", "TMP", "TEMP", "TEMPDIR"}) { - auto path_opt = c10::utils::get_env(variable); - if (path_opt.has_value()) { - tmp_directory = path_opt.value(); + if (const char* path = getenv(variable)) { + tmp_directory = path; break; } }
2.41.0
a0900d04b94831d0bd2b8c0a3051f0be1f5f202
Thu, 18 Apr 2024 18:55:46 +0000
[PATCH 0339/1000] Revert "[NJT] Inline through torch.nested.nested_tensor_from_jagged instead of graph break (#124343)"
This reverts commit ef93402f619f58d651845981ccd1eba1d68da077. Reverted https://github.com/pytorch/pytorch/pull/124343 on behalf of https://github.com/DanilBaibak due to Broken trunk ([comment](https://github.com/pytorch/pytorch/pull/124343#issuecomment-2064937192))
diff --git a/test/dynamo/test_subclasses.py b/test/dynamo/test_subclasses.py index 8005d6e3a2..387b6bf59b 100644 --- a/test/dynamo/test_subclasses.py +++ b/test/dynamo/test_subclasses.py @@ -1361,14 +1361,6 @@ class TestNestedTensor(torch._dynamo.test_case.TestCase): self._check_recompiles(fn, (nt,), (nt2,), False) self._check_recompiles(fn, (nt,), (nt3,), True) - def test_inline_nested_tensor_from_jagged(self): - nt, _ = self._get_jagged_tensor(((2, 3, 4), 5), None) - - def fn(x): - return torch.nested.nested_tensor_from_jagged(x.values() * 2, x.offsets()) - - torch.compile(fn, fullgraph=True, backend="aot_eager")(nt) - def _get_views(self): # Test all cases with both an NT base and a dense base # Subclass -> Subclass diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py index daeb8626c1..763f6482cb 100644 --- a/torch/_dynamo/trace_rules.py +++ b/torch/_dynamo/trace_rules.py @@ -173,7 +173,6 @@ manual_torch_name_rule_map = { "torch.nn.Parameter": TorchInGraphFunctionVariable, "torch._nested_tensor_from_mask": SkipFunctionVariable, "torch._nested_from_padded": SkipFunctionVariable, - "torch.nested.nested_tensor_from_jagged": UserFunctionVariable, # symbol operators implemented in Python "torch.sym_not": TorchInGraphFunctionVariable, "torch.sym_float": TorchInGraphFunctionVariable,
2.41.0
e48b39603411a41c5025efbe52f89560b827825
Wed, 17 Apr 2024 22:00:41 -0700
[PATCH 0340/1000] Fix example_value of map (#124203)
Previously, we didn't expand the shape of the example_value of map to match the inputs (edit: the first mapped dimension). This PR fixes that bug. To make this easier, we change _call_function_and_unflatten_output to accept example values directly instead of retrieving them from the variable trackers. We also remove a redundant call_function node in the strict_mode higher-order op in Dynamo. Test Plan: existing tests. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124203 Approved by: https://github.com/ezyang, https://github.com/zou3519
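For context, a minimal eager-semantics sketch (not the actual map implementation; eager_map_reference and inner below are hypothetical names) of why the fake output must gain the mapped dimension and be contiguous: map applies the body to each slice along dim 0 and stacks the results, so each output has shape (x.size(0), *body_out.shape) and the stack makes it contiguous, which is exactly what the new expand + clone(memory_format=torch.contiguous_format) reproduces for the example_value.

import torch

def eager_map_reference(body, x):
    # Hypothetical reference: apply `body` to each slice along dim 0 and
    # stack the per-slice results, mirroring the eager semantics of map.
    outs = [body(x[i]) for i in range(x.size(0))]
    return tuple(torch.stack(per_output) for per_output in zip(*outs))

def inner(x):
    return x.sin(), x.cos().T

x = torch.randn(3, 4, 5)
sin_out, cos_t_out = eager_map_reference(inner, x)
# Each output gets the mapped dimension prepended and is contiguous after the stack.
assert sin_out.shape == (3, 4, 5) and sin_out.is_contiguous()
assert cos_t_out.shape == (3, 5, 4) and cos_t_out.is_contiguous()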
diff --git a/test/dynamo/test_higher_order_ops.py b/test/dynamo/test_higher_order_ops.py index 3bbfa4d7a3..4b46c568af 100644 --- a/test/dynamo/test_higher_order_ops.py +++ b/test/dynamo/test_higher_order_ops.py @@ -1292,6 +1292,59 @@ def forward(self, getitem, const): return (sin,)""", ) + def test_map_example_value_metadata_consistent_with_eager(self): + from torch._higher_order_ops.map import map_dense + + backend = EagerAndRecordGraphs() + + def inner(x): + return x.sin(), x.cos().T, x.sin().view(-1) + + rand_44 = torch.randn(4, 4) + inps = [ + torch.randn(3), + torch.randn(3, 4), + torch.randn(3, 4, 5, requires_grad=True), + torch.randn(3, 4, 5, requires_grad=True).permute((2, 0, 1)), + torch.randn(3, 4, 5, requires_grad=True).detach(), + torch.randn(3, 4, 5, requires_grad=True).narrow(1, 1, 2), + rand_44.T, + rand_44[::2], + rand_44[::2, ::2], + rand_44[1::3, 1::3], + rand_44[1::3, 1::2].T, + rand_44.unsqueeze(1), + rand_44.squeeze(0), + rand_44.reshape(2, 8), + ] + for x in inps: + compiled_ret = torch.compile( + control_flow.map, backend=backend, fullgraph=True + )(inner, x) + eager_sin, eager_transpose, eager_view = map_dense(inner, (x,), tuple()) + + map_node = next( + node + for node in backend.graphs[0].graph.nodes + if node.op == "call_function" and "map" in node.name + ) + + fake_sin, fake_transpose, fake_view = map_node.meta["example_value"] + + def _check_size_stride_contiguous(x, y): + self.assertEqual(y.size(), x.size()) + self.assertEqual(y.stride(), x.stride()) + self.assertEqual(y.requires_grad, x.requires_grad) + self.assertEqual(x.is_contiguous(), True) + self.assertEqual(y.is_contiguous(), True) + + _check_size_stride_contiguous(eager_sin, fake_sin) + _check_size_stride_contiguous(eager_transpose, fake_transpose) + _check_size_stride_contiguous(eager_view, fake_view) + + torch._dynamo.reset() + backend.graphs.clear() + def test_cond_subgraph_name_is_valid(self): backend = EagerAndRecordGraphs() cnt = CompileCounterWithBackend(backend) diff --git a/test/export/test_experimental.py b/test/export/test_experimental.py index fc317b495f..66f1a60ca9 100644 --- a/test/export/test_experimental.py +++ b/test/export/test_experimental.py @@ -45,15 +45,15 @@ class TestExperiment(TestCase): """\ def forward(self, b_submodule_buffer1, x): sin = torch.ops.aten.sin.default(x) - strict_graph_1 = self.strict_graph_1 - strict_mode_1 = torch.ops.higher_order.strict_mode(strict_graph_1, (sin, b_submodule_buffer1)); strict_graph_1 = sin = b_submodule_buffer1 = None - getitem_1 = strict_mode_1[0]; strict_mode_1 = None + strict_graph_0 = self.strict_graph_0 + strict_mode = torch.ops.higher_order.strict_mode(strict_graph_0, (sin, b_submodule_buffer1)); strict_graph_0 = sin = b_submodule_buffer1 = None + getitem = strict_mode[0]; strict_mode = None add = torch.ops.aten.add.Tensor(x, 3); x = None - return (getitem_1, add)""", + return (getitem, add)""", ) self.assertExpectedInline( - str(ep.graph_module.strict_graph_1.code.strip()), + str(ep.graph_module.strict_graph_0.code.strip()), """\ def forward(self, arg0_1, arg1_1): add = torch.ops.aten.add.Tensor(arg0_1, 2) diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py index abe8c5b52d..e112e7bc7d 100644 --- a/torch/_dynamo/variables/higher_order_ops.py +++ b/torch/_dynamo/variables/higher_order_ops.py @@ -88,15 +88,11 @@ def _make_inlined(tx, f): return inline_call -def _call_function_and_unflatten_output(tx, fn, args, kwargs, ret_vt, ret_treespec): +def _call_function_and_unflatten_output( + tx, 
fn, args, kwargs, flat_example_value, ret_treespec +): from .builder import wrap_fx_proxy - flat_example_value = pytree.tree_map_only( - torch.fx.Proxy, - lambda a: a.node.meta["example_value"], - ret_vt.as_proxy(), - ) - # Store the invocation as a call flat_variable = wrap_fx_proxy( tx=tx, @@ -731,8 +727,19 @@ class CondHigherOrderVariable(TorchHigherOrderOperatorVariable): true_shared + unique_true + unique_false, ) + flat_example_value = pytree.tree_map_only( + torch.fx.Proxy, + lambda a: a.node.meta["example_value"], + true_r.as_proxy(), + ) + return _call_function_and_unflatten_output( - tx, torch.ops.higher_order.cond, p_args, {}, true_r, true_treespec + tx, + torch.ops.higher_order.cond, + p_args, + {}, + flat_example_value, + true_treespec, ) @@ -885,8 +892,19 @@ class WhileLoopHigherOrderVariable(TorchHigherOrderOperatorVariable): ), ) + flat_example_value = pytree.tree_map_only( + torch.fx.Proxy, + lambda a: a.node.meta["example_value"], + body_r.as_proxy(), + ) + return _call_function_and_unflatten_output( - tx, torch.ops.higher_order.while_loop, p_args, {}, body_r, body_treespec + tx, + torch.ops.higher_order.while_loop, + p_args, + {}, + flat_example_value, + body_treespec, ) @@ -950,6 +968,23 @@ class MapHigherOrderVariable(TorchHigherOrderOperatorVariable): should_flatten_outputs=True, ) + subgraph_example_value = [ + proxy.node.meta["example_value"] for proxy in body_r.as_proxy() + ] + + with tx.output.fake_mode: + # We need to expand the example output from map() so that it has + # the same first dimension as the mapped input. + # We also do a clone with contiguous_format. This is to be consistent with + # eager semantic of map, which stacks the outputs. The result is contiguous + # as a result of the stack operation. + map_example_out = [ + t.expand(sample_shape[0], *t.size()).clone( + memory_format=torch.contiguous_format + ) + for t in subgraph_example_value + ] + body_nn_modules = dict(tx.output.nn_modules) body_name = add_subgraph( @@ -965,8 +1000,9 @@ class MapHigherOrderVariable(TorchHigherOrderOperatorVariable): [args[1].as_proxy()], [arg.as_proxy() for arg in args[2:]] + list(body_lifted_freevars.keys()), ) + return _call_function_and_unflatten_output( - tx, torch.ops.higher_order.map_impl, p_args, {}, body_r, body_spec + tx, torch.ops.higher_order.map_impl, p_args, {}, map_example_out, body_spec ) @@ -1094,8 +1130,14 @@ class WrapHigherOrderVariable(TorchHigherOrderOperatorVariable): if len(p_kwargs) > 0: unimplemented("kwargs should have been flattened into lifted args") + flat_example_value = pytree.tree_map_only( + torch.fx.Proxy, + lambda a: a.node.meta["example_value"], + body_r.as_proxy(), + ) + return _call_function_and_unflatten_output( - tx, self.value, tuple(p_args), p_kwargs, body_r, treespec + tx, self.value, tuple(p_args), p_kwargs, flat_example_value, treespec ) @@ -1138,8 +1180,6 @@ class StrictModeHigherOrderVariable(TorchHigherOrderOperatorVariable): def call_function( self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" ) -> "VariableTracker": - from .builder import wrap_fx_proxy - callable = args[0] unpacked_sequence = args[1].unpack_var_sequence(tx) @@ -1187,20 +1227,13 @@ class StrictModeHigherOrderVariable(TorchHigherOrderOperatorVariable): ret_val.as_proxy(), ) - # Store the invocation as a call - flat_variable = wrap_fx_proxy( - tx=tx, - proxy=tx.output.create_proxy( - "call_function", - torch.ops.higher_order.strict_mode, - args=tuple(p_args), - kwargs={}, - ), - example_value=flat_example_value, - ) - return 
_call_function_and_unflatten_output( - tx, torch.ops.higher_order.strict_mode, p_args, {}, ret_val, ret_treespec + tx, + torch.ops.higher_order.strict_mode, + p_args, + {}, + flat_example_value, + ret_treespec, )
2.41.0
4f6340f21c173da96a9edc6dfaa1b7ecbdcc568
Mon, 15 Apr 2024 21:31:51 +0000
[PATCH 0343/1000] realize inputs to mem bound mm decomposition (#123165)
Differential Revision: [D55639709](https://our.internmc.facebook.com/intern/diff/D55639709) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123165 Approved by: https://github.com/jackiexu1992
diff --git a/test/inductor/test_decompose_mem_bound_mm.py b/test/inductor/test_decompose_mem_bound_mm.py index 4ae770bf22..c26233785e 100644 --- a/test/inductor/test_decompose_mem_bound_mm.py +++ b/test/inductor/test_decompose_mem_bound_mm.py @@ -7,6 +7,8 @@ import torch import torch._inductor from torch._dynamo.utils import counters from torch._inductor.test_case import run_tests, TestCase +from torch._inductor.utils import run_and_get_code +from torch.testing import FileCheck from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, @@ -234,6 +236,23 @@ class TestDecomposeMemMM(TestCase): ) counters.clear() + def test_realize_input(self): + m = 20480 + k = 5 + n = 2 + torch._logging.set_logs(inductor=logging.DEBUG) + input1 = torch.randn(m, k, device="cuda").T.contiguous() + input2 = torch.randn(k, n, device="cuda") + + @torch.compile() + def foo(x, y): + return x.T.contiguous() @ y + + out, code = run_and_get_code(foo, input1, input2) + + # two kernels generated + FileCheck().check_count(".run(", 2, exactly=True).run(code[0]) + if __name__ == "__main__": run_tests() diff --git a/torch/_inductor/fx_passes/decompose_mem_bound_mm.py b/torch/_inductor/fx_passes/decompose_mem_bound_mm.py index d6c650b631..f63bf552fa 100644 --- a/torch/_inductor/fx_passes/decompose_mem_bound_mm.py +++ b/torch/_inductor/fx_passes/decompose_mem_bound_mm.py @@ -32,6 +32,12 @@ def check_device(a: Tensor, b: Tensor) -> bool: return a.is_cuda and b.is_cuda +def realize_inputs(inputs: List[torch.fx.Node]): + for inp in inputs: + if isinstance(inp, torch.fx.node.Node): + inp.meta["inductor_realize_to_strides"] = True + + def should_decompose_bmm(mat1, mat2) -> bool: if is_node_meta_valid(mat1) and is_node_meta_valid(mat2): mat1 = mat1.meta["val"] @@ -98,6 +104,7 @@ def decompose_bmm(match: Match, mat1: torch.fx.Node, mat2: torch.fx.Node): counters["inductor"]["decompose_bmm"] += 1 match.replace_by_example(repl, [mat1, mat2]) print_decompose_pattern(match, [mat1, mat2]) + realize_inputs([mat1, mat2]) return @@ -119,6 +126,7 @@ def decompose_addmm( counters["inductor"]["decompose_addmm"] += 1 match.replace_by_example(repl, [mat1, mat2, mat3]) print_decompose_pattern(match, [mat1, mat2, mat3]) + realize_inputs([mat1, mat2, mat3]) return @@ -139,4 +147,5 @@ def decompose_mm( counters["inductor"]["decompose_mm"] += 1 match.replace_by_example(repl, [mat1, mat2]) print_decompose_pattern(match, [mat1, mat2]) + realize_inputs([mat1, mat2]) return diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py index aaaee95887..e42176a769 100644 --- a/torch/_inductor/graph.py +++ b/torch/_inductor/graph.py @@ -1149,6 +1149,20 @@ class GraphLowering(torch.fx.Interpreter): is_input_for_as_strided = any( user.target in as_strided_ops for user in n.users ) + + if n.meta.get("inductor_realize_to_strides", False) and isinstance( + result, TensorBox + ): + result.realize() + strides = n.meta["val"].stride() + sym_strides = torch._inductor.utils.any_is_symbolic(*strides) + if ( + not hasattr(result, "get_stride") + or result.get_stride() != strides + and not sym_strides + ): + stride_order = ir.get_stride_order(strides) + result = ir.ExternKernel.require_stride_order(result, stride_order) if ( is_output and isinstance(result, TensorBox)
2.41.0
0792cf3d6e0542354a87f7548cc3c7890c4defd
Thu, 18 Apr 2024 23:14:55 +0000
[PATCH 0344/1000] Make copy_cast, softmax and cat_out unranked (#123191)
Fixes #ISSUE_NUMBER This helps with performance, as it removes the multiple copies of the same graph that were cached solely because of differing input shapes. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123191 Approved by: https://github.com/DenisVieriu97
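As a rough illustration of the workload this targets (a hedged usage sketch, not code from this PR; it assumes an MPS-capable build): before this change each distinct input shape contributed its own entry to the MPSGraph cache for ops like softmax, cat, and copy/cast because the cache key embedded the tensor shapes; with unranked placeholders the key no longer encodes shapes, so loops like the one below stop multiplying cached graphs.

import torch

if torch.backends.mps.is_available():
    for rows in (8, 16, 32, 64):  # same dtype, varying shape
        x = torch.randn(rows, 10, device="mps")
        y = torch.softmax(x, dim=-1)   # previously one cached graph per shape
        z = torch.cat([x, x], dim=0)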
diff --git a/aten/src/ATen/native/mps/operations/Copy.mm b/aten/src/ATen/native/mps/operations/Copy.mm index da1fe731ef..572582f5cb 100644 --- a/aten/src/ATen/native/mps/operations/Copy.mm +++ b/aten/src/ATen/native/mps/operations/Copy.mm @@ -48,9 +48,10 @@ static void copy_cast_mps(at::Tensor& dst, @autoreleasepool { const bool needs_conj = src.is_conj() != dst.is_conj(); - string key = "copy_cast_mps" + getTensorsStringKey({src, dst}) + ":" + std::to_string(needs_conj); + string key = "copy_cast_mps" + getTensorsStringKey({src, dst}, true, /*exclude_shape*/ true) + ":" + + std::to_string(needs_conj); auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) { - auto inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, src); + MPSGraphTensor* inputTensor = mpsGraphUnrankedPlaceHolder(mpsGraph, srcDType); auto outputTensor = inputTensor; if (isFloatingType(src.scalar_type()) && dstDType == MPSDataTypeUInt8) { outputTensor = [mpsGraph castTensor:inputTensor toType:MPSDataTypeInt32 name:@"cast"]; diff --git a/aten/src/ATen/native/mps/operations/Shape.mm b/aten/src/ATen/native/mps/operations/Shape.mm index 97ea642bc6..135041be1f 100644 --- a/aten/src/ATen/native/mps/operations/Shape.mm +++ b/aten/src/ATen/native/mps/operations/Shape.mm @@ -320,8 +320,16 @@ TORCH_IMPL_FUNC(cat_out_mps) }; @autoreleasepool { - string key = "cat_out_mps:" + to_string(dimension) + getTensorsStringKey(input_tensors, /*short_dtype*/ true) + - ":" + (memory_format == MemoryFormat::ChannelsLast ? "NHWC" : "NCHW"); + string key = + "cat_out_mps:" + to_string(dimension) + ":" + (memory_format == MemoryFormat::ChannelsLast ? "NHWC" : "NCHW"); + if (!all_same_dtype) { + key += getTensorsStringKey(input_tensors, true, all_same_sizes_and_stride); + } else { + key += ":" + getMPSTypeString(input_tensors[0].scalar_type(), true) + ":" + to_string(inputs.size()); + } + for (auto idx : skipped_tensor_indices) { + key += "," + std::to_string(idx); + } auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) { auto len_tensor_array = inputs.size() - skipped_tensor_indices.size(); @@ -334,8 +342,7 @@ TORCH_IMPL_FUNC(cat_out_mps) if (tensor.scalar_type() == kBool) { scalar_type = MPSDataTypeInt8; } - newCachedGraph->inputTensors_[idx] = - mpsGraphRankedPlaceHolder(mpsGraph, scalar_type, getMPSShape(tensor, MemoryFormat::Contiguous)); + newCachedGraph->inputTensors_[idx] = mpsGraphUnrankedPlaceHolder(mpsGraph, scalar_type); if (tensor.scalar_type() != out_dtype) { castInputTensors[idx] = [mpsGraph castTensor:newCachedGraph->inputTensors_[idx] toType:getMPSDataType(out_dtype) @@ -364,11 +371,7 @@ TORCH_IMPL_FUNC(cat_out_mps) if (tensor.scalar_type() == kBool) { scalar_type = MPSDataTypeInt8; } - inputPlaceholders.emplace_back(cachedGraph->inputTensors_[t_idx], - tensor, - getMPSShape(tensor, MemoryFormat::Contiguous), - /*gatherTensorData*/ true, - scalar_type); + inputPlaceholders.emplace_back(cachedGraph->inputTensors_[t_idx], tensor, nullptr, true, scalar_type); t_idx++; } i++; diff --git a/aten/src/ATen/native/mps/operations/SoftMax.mm b/aten/src/ATen/native/mps/operations/SoftMax.mm index 346663bc73..4687ac6b96 100644 --- a/aten/src/ATen/native/mps/operations/SoftMax.mm +++ b/aten/src/ATen/native/mps/operations/SoftMax.mm @@ -92,10 +92,11 @@ TORCH_IMPL_FUNC(softmax_mps_out) NSString* ns_shape_key = [[input_shape valueForKey:@"description"] componentsJoinedByString:@","]; - string key = "softmax_mps_out:" + mem_format_key + ":" + 
getMPSTypeString(input) + ":" + [ns_shape_key UTF8String] + + string key = "softmax_mps_out" + getTensorsStringKey(input, true, /*exclude_shape*/ true) + ":" + mem_format_key + ":" + std::to_string(dim_); + auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) { - MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(input), input_shape); + MPSGraphTensor* inputTensor = mpsGraphUnrankedPlaceHolder(mpsGraph, getMPSDataType(input.scalar_type())); // passing selector of softMaxWithTensor on the mpsGraph object MPSGraphTensor* outputTensor = [mpsGraph softMaxWithTensor:inputTensor axis:(NSInteger)dim_ name:nil];
2.41.0
a6a0e1348ba7dcade1833d983b1b4ca12a5c1e1
Thu, 18 Apr 2024 10:26:43 -0700
[PATCH 0346/1000] [c10d] remove the env of TORCH_NCCL_ABORT_IN_DESTROY_PG (#124334)
Summary: This env var was introduced to safely roll out the behavior change in destroy_process_group (e.g., calling ncclCommAbort). Now that this behavior change has already been rolled out, we no longer need the env var, and we remove it to keep the code cleaner. Test Plan: Modified/existing unit tests pass. Tags: Pull Request resolved: https://github.com/pytorch/pytorch/pull/124334 Approved by: https://github.com/wconstab
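A hedged sketch of the behavior that is now unconditional (mirroring the modified unit test; the single-node env:// setup below is assumed, not part of this PR): destroy_process_group shuts down the NCCL backend, so collectives issued afterwards on the old process group raise DistBackendError.

import torch
import torch.distributed as dist

# Assumes MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE are set and CUDA is available.
dist.init_process_group(backend="nccl")
device = f"cuda:{dist.get_rank() % torch.cuda.device_count()}"
t = torch.rand(10, 10, device=device)
dist.all_reduce(t)              # first collective initializes communicator state

pg = dist.group.WORLD
dist.destroy_process_group()    # now always shuts down/aborts the NCCL backend

try:
    pg.allreduce([t])           # the old pg is no longer usable
except dist.DistBackendError:
    pass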
diff --git a/test/distributed/test_c10d_nccl.py b/test/distributed/test_c10d_nccl.py index bbe4461e0c..8743b37157 100644 --- a/test/distributed/test_c10d_nccl.py +++ b/test/distributed/test_c10d_nccl.py @@ -1221,39 +1221,11 @@ class ProcessGroupNCCLTest(MultiProcessTestCase): # First allreduce to initialize state. pg.allreduce(t) - # Destroy pg and validate pg is still in working condition since we hold a - # reference above. + # Destroy pg and validate pg is no longer valid dist.destroy_process_group() - pg.allreduce([t]) - - # Now close pg and validate it no longer works. - pg._get_backend(torch.device(device))._shutdown() - - # Try another collective. with self.assertRaises(dist.DistBackendError): pg.allreduce([t]) - @requires_nccl() - @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs") - def test_terminate_before_destruct_pg(self): - # Disable ASYNC_ERROR_HANDLING for this test to ensure we can programmatically - # abort the process group. - os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0" - store = c10d.FileStore(self.file_name, self.world_size) - pg = self._create_process_group_nccl(store, self.opts()) - device = self.rank_to_GPU[self.rank][0] - - t = torch.rand(10, 10, device=device) - # First allreduce to initialize state. - pg.allreduce(t) - - # Destroy pg and validate pg is still in working condition since we hold a - # reference above. - dist.destroy_process_group() - pg.allreduce([t]) - - # Now close pg and validate it no longer works. - pg._get_backend(torch.device(device))._shutdown() del pg @requires_nccl() @@ -1262,7 +1234,6 @@ class ProcessGroupNCCLTest(MultiProcessTestCase): # Disable ASYNC_ERROR_HANDLING for this test to ensure we can programmatically # abort the process group. os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0" - os.environ["TORCH_NCCL_ABORT_IN_DESTROY_PG"] = "1" store = c10d.FileStore(self.file_name, self.world_size) pg = self._create_process_group_nccl(store, self.opts()) device = self.rank_to_GPU[self.rank][0] @@ -1279,7 +1250,6 @@ class ProcessGroupNCCLTest(MultiProcessTestCase): # Disable ASYNC_ERROR_HANDLING for this test to ensure we can programmatically # abort the process group. 
os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "0" - os.environ["TORCH_NCCL_ABORT_IN_DESTROY_PG"] = "1" store = c10d.FileStore(self.file_name, self.world_size) pg = self._create_process_group_nccl(store, self.opts()) @@ -1332,7 +1302,6 @@ class ProcessGroupNCCLTest(MultiProcessTestCase): torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs" ) def test_abort_in_destroy_multi_pgs(self): - os.environ["TORCH_NCCL_ABORT_IN_DESTROY_PG"] = "1" store = c10d.FileStore(self.file_name, self.world_size) pg = self._create_process_group_nccl(store, self.opts()) device = self.rank_to_GPU[self.rank][0] @@ -1356,7 +1325,6 @@ class ProcessGroupNCCLTest(MultiProcessTestCase): torch.cuda.device_count() < 2, "NCCL test requires 2+ GPUs" ) def test_abort_in_destroy_mixed_empty_pgs(self): - os.environ["TORCH_NCCL_ABORT_IN_DESTROY_PG"] = "1" store = c10d.FileStore(self.file_name, self.world_size) pg = self._create_process_group_nccl(store, self.opts()) device = self.rank_to_GPU[self.rank][0] diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp index 29e4616be9..a84647cfc6 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp @@ -732,8 +732,6 @@ ProcessGroupNCCL::ProcessGroupNCCL( this->setGroupName(options_->group_name); logPrefix_ = createLogPrefix(); blockingWait_ = getCvarBool(TORCH_NCCL_BLOCKING_WAIT, false); - abortInDestroyProcessGroup_ = - getCvarBool(TORCH_NCCL_ABORT_IN_DESTROY_PG, false); asyncErrorHandling_ = static_cast<ErrorHandlingMode>( getCvarInt(TORCH_NCCL_ASYNC_ERROR_HANDLING, 3 /*SkipCleanUp*/)); desyncDebug_ = getCvarBool(TORCH_NCCL_DESYNC_DEBUG, false) || @@ -1109,17 +1107,13 @@ ProcessGroupNCCL::~ProcessGroupNCCL() { LOG(INFO) << logPrefix() << "ProcessGroupNCCL destructor entered."; if (!terminateProcessGroup_.load()) { - // Only if TORCH_NCCL_ABORT_IN_DESTROY_PG is enabled, terminateProcessGroup_ - // will be set to true through destroy_process_group - if (abortInDestroyProcessGroup_) { - LOG(WARNING) << c10::str( - "WARNING: process group has NOT been destroyed before it is being destructed. ", - "On normal program exit, the application should call destroy_process_group to ", - "ensure that any pending NCCL data transfers have finished in this process. " - "In rare cases this process can exit before this point and block the progress of " - "another member of the process group. This constraint has always been present, " - " but this warning has only been added since PyTorch 2.3"); - } + LOG(WARNING) << c10::str( + "WARNING: process group has NOT been destroyed before it is being destructed. ", + "On normal program exit, the application should call destroy_process_group to ", + "ensure that any pending NCCL data transfers have finished in this process. " + "In rare cases this process can exit before this point and block the progress of " + "another member of the process group. 
This constraint has always been present, " + " but this warning has only been added since PyTorch 2.4"); // If user haven't explicitly destroy/shutdown process group, destructor // needs to do so shutdown(); diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp index 18201db287..a2b819f2f9 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp @@ -100,12 +100,6 @@ static std::vector<std::string> TORCH_NCCL_WAIT_TIMEOUT_DUMP_MILSEC = { static std::vector<std::string> TORCH_NCCL_COORD_CHECK_MILSEC = { "TORCH_NCCL_COORD_CHECK_MILSEC"}; -// Whether to abort the communicators when users call destroy_process_group(). -// If yes, communicators will be aborted when destroy_process_group is called, -// but not in destructor. -static std::vector<std::string> TORCH_NCCL_ABORT_IN_DESTROY_PG = { - "TORCH_NCCL_ABORT_IN_DESTROY_PG"}; - constexpr const char* NCCL_BACKEND_NAME = "nccl"; constexpr const char* EXCEPTION_DUMP = "exception_dump"; @@ -1015,11 +1009,6 @@ class TORCH_API ProcessGroupNCCL : public Backend { // for the operation to complete. bool blockingWait_ = false; - // Whether to abort the communicators when users call destroy_process_group(). - // If yes, communicators will be aborted when destroy_process_group is called, - // but not in destructor. - bool abortInDestroyProcessGroup_ = false; - // Whether or not to hook the cache allocator to register all allocated // tensors bool useTensorRegisterAllocatorHook_ = false; diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py index 7877331294..fed2a80d3f 100644 --- a/torch/distributed/distributed_c10d.py +++ b/torch/distributed/distributed_c10d.py @@ -996,12 +996,6 @@ def _is_barrier_after_init() -> int: return int(os.getenv("TORCH_DIST_INIT_BARRIER", "0")) -def _abort_in_destroy_pg() -> bool: - # Environment variable to control whether to abort the communicators when users call destroy_process_group() - env = os.getenv("TORCH_NCCL_ABORT_IN_DESTROY_PG", "0") - return env == "1" or env.lower() == "true" - - def _get_default_group() -> ProcessGroup: """Get the default process group created by init_process_group.""" if not is_initialized(): @@ -1422,7 +1416,7 @@ def _shutdown_backend(pg): backend = pg._get_backend(torch.device("cuda")) except RuntimeError: pass - if isinstance(backend, ProcessGroupNCCL): + if is_nccl_available() and isinstance(backend, ProcessGroupNCCL): # explictly call shutdown to ensure that NCCL resources are released backend._shutdown() @@ -1701,11 +1695,10 @@ def destroy_process_group(group: Optional[ProcessGroup] = None): pg._wait_for_pending_works() if group is None or group == GroupMember.WORLD: - if _abort_in_destroy_pg(): - # shutdown all backends in the order of pg names. shutting down in order because - # ncclCommAbort() was a 'collective' call in some versions of NCCL. - for pg_to_shutdown in sorted(_world.pg_names, key=lambda x: _world.pg_names[x], reverse=True): - _shutdown_backend(pg_to_shutdown) + # shutdown all backends in the order of pg names. shutting down in order because + # ncclCommAbort() was a 'collective' call in some versions of NCCL. 
+ for pg_to_shutdown in sorted(_world.pg_names, key=lambda x: _world.pg_names[x], reverse=True): + _shutdown_backend(pg_to_shutdown) _update_default_pg(None) _world.pg_map.clear() @@ -1728,8 +1721,7 @@ def destroy_process_group(group: Optional[ProcessGroup] = None): # process group is in good state, we aren't dealing with failures. _world.group_count = 0 else: - if _abort_in_destroy_pg(): - _shutdown_backend(pg) + _shutdown_backend(pg) del _world.pg_map[pg] del _world.pg_names[pg] del _world.pg_group_ranks[pg]
2.41.0
ed9b22ec01d6b56046f483c36a2f13d2d6b4f67
Thu, 18 Apr 2024 03:06:42 +0000
[PATCH 0347/1000] Implement efficient_conv_bn_eval_decomp_graph_transform to handle conv and bn fusion after decomp (#123680)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123680 Approved by: https://github.com/ezyang, https://github.com/youkaichao
diff --git a/test/inductor/test_efficient_conv_bn_eval.py b/test/inductor/test_efficient_conv_bn_eval.py index ec83bf3a4b..c65b7585f9 100644 --- a/test/inductor/test_efficient_conv_bn_eval.py +++ b/test/inductor/test_efficient_conv_bn_eval.py @@ -103,7 +103,12 @@ class MultiUserConvOp(nn.Module): class EfficientConvBNEvalTemplate(TestCase): @inductor_config.patch({"efficient_conv_bn_eval_fx_passes": True}) def test_basic(self): - def test_conv_bn_eval(test_class, use_bias, module, sync_bn): + def test_conv_bn_eval( + test_class, use_bias, module, sync_bn, decompose_nn_module + ): + from functorch import make_fx + from torch._dispatch.python import enable_python_dispatcher + kwargs = {"kernel_size": 3, "stride": 2} if module[0] != nn.Linear else {} mod_eager = test_class( module[0], @@ -122,7 +127,6 @@ class EfficientConvBNEvalTemplate(TestCase): mod_optimized ).eval() torch._dynamo.reset() - mod_optimized = torch.compile(mod_optimized) inps = [4, 3] # Conv shape goes from big to small, and ConvTranspose shape goes from small to big @@ -137,6 +141,11 @@ class EfficientConvBNEvalTemplate(TestCase): inps += [spatial_d] * 3 inp = torch.rand(inps).to(self.device) + if decompose_nn_module: + with enable_python_dispatcher(): + mod_optimized = make_fx(mod_optimized, pre_dispatch=True)(inp) + mod_optimized = torch.compile(mod_optimized) + original_value = counters["inductor"]["efficient_conv_bn_eval"] optim_eager = torch.optim.SGD(mod_eager.parameters(), lr=1e-3) @@ -179,10 +188,23 @@ class EfficientConvBNEvalTemplate(TestCase): ] test_classes = [ConvOp, MultiUserConvOp] sync_bns = [False, True] - for test_class, use_bias, module, sync_bn in itertools.product( - test_classes, conv_bias, modules, sync_bns + decompose_nn_modules = [False, True] + for ( + test_class, + use_bias, + module, + sync_bn, + decompose_nn_module, + ) in itertools.product( + test_classes, + conv_bias, + modules, + sync_bns, + decompose_nn_modules, ): - test_conv_bn_eval(test_class, use_bias, module, sync_bn) + test_conv_bn_eval( + test_class, use_bias, module, sync_bn, decompose_nn_module + ) if HAS_CPU and not torch.backends.mps.is_available(): diff --git a/torch/_inductor/fx_passes/efficient_conv_bn_eval.py b/torch/_inductor/fx_passes/efficient_conv_bn_eval.py index a0a5b7ccd6..7ab01e0abb 100644 --- a/torch/_inductor/fx_passes/efficient_conv_bn_eval.py +++ b/torch/_inductor/fx_passes/efficient_conv_bn_eval.py @@ -5,7 +5,12 @@ from torch._dynamo.utils import counters from torch._inductor import config as inductor_config from torch.func import functional_call -from ..pattern_matcher import CallModuleVarArgs, Match, register_graph_pattern +from ..pattern_matcher import ( + CallFunctionVarArgs, + CallModuleVarArgs, + Match, + register_graph_pattern, +) from .pre_grad import efficient_conv_bn_eval_pass @@ -15,7 +20,7 @@ def efficient_conv_bn_eval( ): """ Implementation based on https://arxiv.org/abs/2305.11624 - "Tune-Mode ConvBN Blocks For Efficient Transfer Learning" + "Efficient ConvBN Blocks for Transfer Learning and Beyond" It leverages the associative law between convolution and affine transform, i.e., normalize (weight conv feature) = (normalize weight) conv feature. 
It works for Eval mode of ConvBN blocks during validation, and can be used @@ -70,6 +75,160 @@ def efficient_conv_bn_eval( return output +def efficient_conv_bn_eval_decomposed( + bn_weight, + bn_bias, + bn_running_mean, + bn_running_var, + bn_eps, + conv: torch._ops.OpOverload, + conv_weight, + conv_bias, + x, + conv_remainging_args, +): + """ + Implementation based on https://arxiv.org/abs/2305.11624 + "Efficient ConvBN Blocks for Transfer Learning and Beyond" + It leverages the associative law between convolution and affine transform, + i.e., normalize (weight conv feature) = (normalize weight) conv feature. + It works for Eval mode of ConvBN blocks during validation, and can be used + for **training** as well, but only if one sets `bn.training=False`. It + reduces memory footprint and computation cost, at the cost of slightly + reduced numerical stability. + Args: + """ + assert bn_running_var is not None + + # These lines of code are designed to deal with various cases + # like bn without affine transform, and conv without bias + weight_on_the_fly = conv_weight + if conv_bias is not None: + bias_on_the_fly = conv_bias + else: + bias_on_the_fly = torch.zeros_like(bn_running_var) + + if bn_weight is not None: + bn_weight = bn_weight + else: + bn_weight = torch.ones_like(bn_running_var) + + if bn_bias is not None: + bn_bias = bn_bias + else: + bn_bias = torch.zeros_like(bn_running_var) + + # shape of [C_out, 1, 1, 1] in Conv2d + target_shape = [-1] + [1] * (conv_weight.ndim - 1) + if "conv_transpose" in conv.__str__(): + # for transposed conv, the C_out dimension should at index 1. + target_shape[:2] = [target_shape[1], target_shape[0]] + weight_coeff = torch.rsqrt(bn_running_var + bn_eps).reshape(target_shape) + # shape of [C_out, 1, 1, 1] in Conv2d + coefff_on_the_fly = bn_weight.view_as(weight_coeff) * weight_coeff + + # shape of [C_out, C_in, k, k] in Conv2d + weight_on_the_fly = weight_on_the_fly * coefff_on_the_fly + # shape of [C_out] in Conv2d + bias_on_the_fly = bn_bias + coefff_on_the_fly.flatten() * ( + bias_on_the_fly - bn_running_mean + ) + + input = x + return conv(*((input, weight_on_the_fly, bias_on_the_fly) + conv_remainging_args)) + + +@register_graph_pattern( + CallFunctionVarArgs( + [ + torch.ops.aten.batch_norm.default, + ] + ), + pass_dict=efficient_conv_bn_eval_pass, + extra_check=lambda match: not inductor_config.freezing + and inductor_config.efficient_conv_bn_eval_fx_passes, +) +def efficient_conv_bn_eval_graph_transform_decomposed(match: Match, *args, **kwargs): + bn_node = match.nodes[0] + graph = match.graph + assert len(bn_node.args) == 9 + + # We can only use efficient conv-bn for eval mode with track_running_stats + # bn_node.args is `training` + if bn_node.args[-4]: + return + + # Check if the input is Conv + input_node = bn_node.args[0] + + if input_node.op != "call_function": # type: ignore[union-attr] + return + + input_fn = input_node.target # type: ignore[arg-type, union-attr] + supported_convs = [ + torch.ops.aten.linear.default, + torch.ops.aten.conv1d.default, + torch.ops.aten.conv2d.default, + torch.ops.aten.conv3d.default, + torch.ops.aten.conv_transpose1d.default, + torch.ops.aten.conv_transpose2d.input, + torch.ops.aten.conv_transpose3d.input, + ] + + if not any(input_fn is cls for cls in supported_convs): + return + + conv_node = input_node + # Output of conv is used by other nodes, cannot optimize + if len(conv_node.users) > 1: # type: ignore[union-attr] + return + + counters["inductor"]["efficient_conv_bn_eval"] += 1 + + with 
graph.inserting_before(bn_node): + # prepare args for the fused function + bn_weight = bn_node.args[1] + bn_bias = bn_node.args[2] + bn_running_mean = bn_node.args[3] + bn_running_var = bn_node.args[4] + bn_eps = bn_node.args[7] + assert len(conv_node.args) >= 2 # type: ignore[union-attr] + conv_input = conv_node.args[0] # type: ignore[union-attr] + conv_weight = conv_node.args[1] # type: ignore[union-attr] + conv_bias = conv_node.args[2] if len(conv_node.args) >= 3 else None # type: ignore[union-attr] + conv_remainging_args = conv_node.args[3:] # type: ignore[union-attr] + args = ( + bn_weight, + bn_bias, + bn_running_mean, + bn_running_var, + bn_eps, + conv_node.target, # type: ignore[union-attr] + conv_weight, + conv_bias, + conv_input, + conv_remainging_args, + ) + + # create a new node + new_node = graph.create_node( + op="call_function", + target=efficient_conv_bn_eval_decomposed, + args=args, + name="efficient_conv_bn_eval", + ) + + # this node replaces the original conv + bn, and therefore + # should replace the uses of bn_node + bn_node.replace_all_uses_with(new_node) + # take care of the deletion order: + # delete bn_node first, and then conv_node + graph.erase_node(bn_node) + graph.erase_node(conv_node) + + return + + @register_graph_pattern( CallModuleVarArgs( [
2.41.0
befaf2a37b5f0659395fea3e1301a4e36a40013
Thu, 11 Apr 2024 08:18:42 -0700
[PATCH 0348/1000] [AOTI] Move c10/util ostream function implementations to their headers (#123847)
Summary: AOTInductor-generated code for CPU models may have direct references to these c10-implemented data types; see _inductor/codegen/cpp_prefix.h. To make sure the AOTI-generated code is ABI backward-compatible, we need to change those headers to a header-only implementation. The next PR in this stack will add tests that use those data types without linking against libtorch. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123847 Approved by: https://github.com/jansel
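To spell out the linking concern (an illustrative sketch, not code from this PR): with a declaration-only header, any AOTI-generated translation unit that streams one of these types needs libc10 at link time and is tied to that library's exported symbol; defining the operator inline in the header resolves it at compile time instead.

// Before (declaration only): the call below needed libc10 at link time.
//   C10_API std::ostream& operator<<(std::ostream&, const BFloat16&);
// After this PR the definition is inline in the header, so there is no link dependency.
#include <c10/util/BFloat16.h>
#include <sstream>
#include <string>

std::string format_bf16(c10::BFloat16 v) {
  std::ostringstream oss;
  oss << v;  // picks up the inline operator<< from BFloat16.h
  return oss.str();
}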
diff --git a/c10/util/BFloat16.h b/c10/util/BFloat16.h index ab65eebedb..95bc5f9183 100644 --- a/c10/util/BFloat16.h +++ b/c10/util/BFloat16.h @@ -7,8 +7,8 @@ #include <cmath> #include <cstdint> #include <cstring> - #include <iosfwd> +#include <ostream> #if defined(__CUDACC__) && !defined(USE_ROCM) #include <cuda_bf16.h> @@ -114,7 +114,12 @@ struct alignas(2) BFloat16 { #endif }; -C10_API std::ostream& operator<<(std::ostream& out, const BFloat16& value); +C10_API inline std::ostream& operator<<( + std::ostream& out, + const BFloat16& value) { + out << (float)value; + return out; +} } // namespace c10 diff --git a/c10/util/Bfloat16.cpp b/c10/util/Bfloat16.cpp index 2ea7f74cf6..ae6a839455 100644 --- a/c10/util/Bfloat16.cpp +++ b/c10/util/Bfloat16.cpp @@ -1,5 +1,4 @@ #include <c10/util/BFloat16.h> -#include <ostream> #include <type_traits> namespace c10 { @@ -8,8 +7,4 @@ static_assert( std::is_standard_layout_v<BFloat16>, "c10::BFloat16 must be standard layout."); -std::ostream& operator<<(std::ostream& out, const BFloat16& value) { - out << (float)value; - return out; -} } // namespace c10 diff --git a/c10/util/Float8_e4m3fn.cpp b/c10/util/Float8_e4m3fn.cpp index 2a4ed5159c..9cb648b2b4 100644 --- a/c10/util/Float8_e4m3fn.cpp +++ b/c10/util/Float8_e4m3fn.cpp @@ -1,5 +1,4 @@ #include <c10/util/Float8_e4m3fn.h> -#include <ostream> #include <type_traits> namespace c10 { @@ -8,8 +7,4 @@ static_assert( std::is_standard_layout_v<Float8_e4m3fn>, "c10::Float8_e4m3fn must be standard layout."); -std::ostream& operator<<(std::ostream& out, const Float8_e4m3fn& value) { - out << (float)value; - return out; -} } // namespace c10 diff --git a/c10/util/Float8_e4m3fn.h b/c10/util/Float8_e4m3fn.h index a952b8330e..d51feabcc8 100644 --- a/c10/util/Float8_e4m3fn.h +++ b/c10/util/Float8_e4m3fn.h @@ -239,7 +239,12 @@ struct alignas(1) Float8_e4m3fn { inline C10_HOST_DEVICE bool isnan() const; }; -C10_API std::ostream& operator<<(std::ostream& out, const Float8_e4m3fn& value); +C10_API inline std::ostream& operator<<( + std::ostream& out, + const Float8_e4m3fn& value) { + out << (float)value; + return out; +} } // namespace c10 diff --git a/c10/util/Float8_e4m3fnuz.cpp b/c10/util/Float8_e4m3fnuz.cpp index 5c790e3053..b18167f692 100644 --- a/c10/util/Float8_e4m3fnuz.cpp +++ b/c10/util/Float8_e4m3fnuz.cpp @@ -1,5 +1,4 @@ #include <c10/util/Float8_e4m3fnuz.h> -#include <ostream> namespace c10 { @@ -7,9 +6,4 @@ static_assert( std::is_standard_layout_v<Float8_e4m3fnuz>, "c10::Float8_e4m3fnuz must be standard layout."); -std::ostream& operator<<(std::ostream& out, const Float8_e4m3fnuz& value) { - out << (float)value; - return out; -} - } // namespace c10 diff --git a/c10/util/Float8_e4m3fnuz.h b/c10/util/Float8_e4m3fnuz.h index e51630d1bd..bed2989174 100644 --- a/c10/util/Float8_e4m3fnuz.h +++ b/c10/util/Float8_e4m3fnuz.h @@ -127,9 +127,12 @@ struct alignas(1) Float8_e4m3fnuz { inline C10_HOST_DEVICE bool isnan() const; }; -C10_API std::ostream& operator<<( +C10_API inline std::ostream& operator<<( std::ostream& out, - const Float8_e4m3fnuz& value); + const Float8_e4m3fnuz& value) { + out << (float)value; + return out; +} } // namespace c10 diff --git a/c10/util/Float8_e5m2.cpp b/c10/util/Float8_e5m2.cpp index 8833283f16..3a9fc99981 100644 --- a/c10/util/Float8_e5m2.cpp +++ b/c10/util/Float8_e5m2.cpp @@ -1,5 +1,4 @@ #include <c10/util/Float8_e5m2.h> -#include <ostream> namespace c10 { @@ -7,8 +6,4 @@ static_assert( std::is_standard_layout<Float8_e5m2>::value, "c10::Float8_e5m2 must be standard layout."); 
-std::ostream& operator<<(std::ostream& out, const Float8_e5m2& value) { - out << (float)value; - return out; -} } // namespace c10 diff --git a/c10/util/Float8_e5m2.h b/c10/util/Float8_e5m2.h index c05f974067..442b7ee87e 100644 --- a/c10/util/Float8_e5m2.h +++ b/c10/util/Float8_e5m2.h @@ -136,7 +136,12 @@ struct alignas(1) Float8_e5m2 { inline C10_HOST_DEVICE bool isinf() const; }; -C10_API std::ostream& operator<<(std::ostream& out, const Float8_e5m2& value); +C10_API inline std::ostream& operator<<( + std::ostream& out, + const Float8_e5m2& value) { + out << (float)value; + return out; +} } // namespace c10 diff --git a/c10/util/Float8_e5m2fnuz.cpp b/c10/util/Float8_e5m2fnuz.cpp index 261355051e..e3349b5872 100644 --- a/c10/util/Float8_e5m2fnuz.cpp +++ b/c10/util/Float8_e5m2fnuz.cpp @@ -1,5 +1,4 @@ #include <c10/util/Float8_e5m2fnuz.h> -#include <ostream> namespace c10 { @@ -7,9 +6,4 @@ static_assert( std::is_standard_layout_v<Float8_e5m2fnuz>, "c10::Float8_e5m2 must be standard layout."); -std::ostream& operator<<(std::ostream& out, const Float8_e5m2fnuz& value) { - out << (float)value; - return out; -} - } // namespace c10 diff --git a/c10/util/Float8_e5m2fnuz.h b/c10/util/Float8_e5m2fnuz.h index f43a912c01..f63773914c 100644 --- a/c10/util/Float8_e5m2fnuz.h +++ b/c10/util/Float8_e5m2fnuz.h @@ -126,9 +126,12 @@ struct alignas(1) Float8_e5m2fnuz { inline C10_HOST_DEVICE bool isinf() const; }; -C10_API std::ostream& operator<<( +C10_API inline std::ostream& operator<<( std::ostream& out, - const Float8_e5m2fnuz& value); + const Float8_e5m2fnuz& value) { + out << (float)value; + return out; +} } // namespace c10 diff --git a/c10/util/Half.cpp b/c10/util/Half.cpp index 7c75112c88..e977aedf9d 100644 --- a/c10/util/Half.cpp +++ b/c10/util/Half.cpp @@ -1,5 +1,4 @@ #include <c10/util/Half.h> -#include <ostream> #include <type_traits> namespace c10 { @@ -8,8 +7,4 @@ static_assert( std::is_standard_layout_v<Half>, "c10::Half must be standard layout."); -std::ostream& operator<<(std::ostream& out, const Half& value) { - out << (float)value; - return out; -} } // namespace c10 diff --git a/c10/util/Half.h b/c10/util/Half.h index 979e0d8cfe..3d5a38cb36 100644 --- a/c10/util/Half.h +++ b/c10/util/Half.h @@ -30,6 +30,7 @@ #include <cstring> #include <iosfwd> #include <limits> +#include <ostream> #ifdef __CUDACC__ #include <cuda_fp16.h> @@ -531,7 +532,10 @@ std::enable_if_t<is_complex<From>::value, bool> overflows( typename From::value_type>(f.imag()); } -C10_API std::ostream& operator<<(std::ostream& out, const Half& value); +C10_API inline std::ostream& operator<<(std::ostream& out, const Half& value) { + out << (float)value; + return out; +} } // namespace c10
2.41.0
946638f06e5916ea9bd0f790ff620bdb78a92a3
Thu, 11 Apr 2024 19:55:18 -0700
[PATCH 0349/1000] [AOTI] Add ABI-compatibility tests (#123848)
Summary: In AOTInductor-generated CPU model code, there can be direct references to some aten/c10 utility functions and data structures, e.g. at::vec and c10::Half. These are performance critical, so it doesn't make sense to create a C shim for them. Instead, we make sure they are implemented in a header-only way, and we use this set of tests to guard future changes. There are more header files to be updated, but we will do that in follow-up PRs. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123848 Approved by: https://github.com/jansel ghstack dependencies: #123847
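For context, a hedged illustration of how these headers end up in user-visible generated code: when Inductor's C++ backend compiles a low-precision CPU function, the emitted kernels include cpp_prefix.h and, through it, types such as c10::BFloat16; AOTInductor's CPU path shares that codegen. The toy function below is not from the PR, and inspecting the generated C++ via TORCH_COMPILE_DEBUG=1 is only a suggested way to see the references in practice.

```
# Toy sketch: any bfloat16 CPU op works; the point is only that the C++ code
# Inductor emits for this (viewable with TORCH_COMPILE_DEBUG=1) references the
# c10 dtype headers that the new ABI-check tests are meant to guard.
import torch

def f(x):
    return torch.sigmoid(x) * x

compiled = torch.compile(f)
x = torch.randn(1024, dtype=torch.bfloat16)  # CPU tensor
print(compiled(x).dtype)  # torch.bfloat16
```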
diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh index 5408e0f596..23eaf8a2dd 100755 --- a/.ci/pytorch/test.sh +++ b/.ci/pytorch/test.sh @@ -334,7 +334,7 @@ test_inductor() { # TODO: need a faster way to build if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then BUILD_AOT_INDUCTOR_TEST=1 python setup.py develop - CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aot_inductor + CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference fi } diff --git a/.github/labeler.yml b/.github/labeler.yml index 74ea0abd43..8a572bfcba 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -36,7 +36,8 @@ - torch/distributed/_tensor/** - torch/distributed/fsdp/** - torch/csrc/inductor/**
+- test/cpp/aoti_abi_check/**
dd0ed1b430f3241490d9bc071036188466b0f22
Fri, 19 Apr 2024 00:57:05 +0000
[PATCH 0350/1000] distributed: templated ring attention (#124215)
This adds a templated version of the ring attention forward function and tests it with memory-efficient attention. It does not add support for memory-efficient attention in DTensor; that will be added in a follow-up PR. The templating is also a proof of concept for supporting other attention ops, such as jagged/nested tensor attention, as well as for implementing striped attention in a scalable way.

Misc changes:
* Fixes the all_to_all_single autograd implementation with CUDA and adds an NCCL test
* Adds compile support to the ring attention implementations (required some tweaks to process groups)

Test plan:
```
pytest test/distributed/_tensor/test_attention.py
pytest test/distributed/test_functional_api.py
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124215 Approved by: https://github.com/wanchaol
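The numerical core that the templated implementation relies on is the logsumexp merge performed by `_merge_sdpa` in the diff below: each step computes attention over one KV chunk plus the per-query logsumexp, and the partial outputs are recombined exactly. Here is a minimal single-process sketch of that merge with toy shapes, no DeviceMesh or collectives; the `partial_attention` helper is made up for illustration.

```
# Verify that chunked attention merged via logsumexp matches full attention.
import torch

torch.manual_seed(0)
q = torch.randn(4, 8)    # (query_len, head_dim)
k = torch.randn(16, 8)   # (context_len, head_dim)
v = torch.randn(16, 8)
scale = q.size(-1) ** -0.5

def partial_attention(q, k, v):
    # Attention over one KV chunk plus its per-query logsumexp statistic.
    scores = (q @ k.T) * scale               # (query_len, chunk_len)
    lse = torch.logsumexp(scores, dim=-1)    # (query_len,)
    out = torch.softmax(scores, dim=-1) @ v  # (query_len, head_dim)
    return out, lse

# Reference: attention over the full context in one shot.
ref, _ = partial_attention(q, k, v)

# Ring-style: process the context in two chunks, then merge with the LSE trick.
chunks = [partial_attention(q, k[:8], v[:8]), partial_attention(q, k[8:], v[8:])]
lse = torch.logsumexp(torch.stack([l for _, l in chunks]), dim=0)
merged = sum(out * (l - lse).exp().unsqueeze(-1) for out, l in chunks)

print(torch.allclose(merged, ref, atol=1e-5))  # True
```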
diff --git a/test/distributed/_tensor/test_attention.py b/test/distributed/_tensor/test_attention.py index 3a34af11d9..db5a26d438 100644 --- a/test/distributed/_tensor/test_attention.py +++ b/test/distributed/_tensor/test_attention.py @@ -10,6 +10,8 @@ from torch.distributed._tensor.experimental.attention import ( _CausalBehavior, _is_causal_behavior, _scaled_dot_product_chunk_flash_attention, + _scaled_dot_product_ring_efficient_attention, + _scaled_dot_product_ring_flash_attention, attention_context_parallel, AttentionContextParallel, ) @@ -295,6 +297,86 @@ class RingAttentionTest(DTensorTestBase): }, ) + @skip_if_lt_x_gpu(2) + @unittest.skipIf( + not PLATFORM_SUPPORTS_FLASH_ATTENTION, "Does not support flash attention" + ) + @with_comms + @parametrize( + "attention_fn", + [ + _scaled_dot_product_ring_flash_attention, + _scaled_dot_product_ring_efficient_attention, + # _scaled_dot_product_ring_cudnn_attention, # TODO: not built by default + ], + ) + def test_ring_attention_compile(self, attention_fn: object) -> None: + device_mesh = DeviceMesh( + self.device_type, + torch.arange(0, self.world_size), + ) + dtype = torch.bfloat16 + bs = 8 + query_tokens = 8 + context_tokens = 24 + dim = 32 + nheads = 8 + query = torch.rand( + (bs, nheads, self.world_size * query_tokens, dim), + device=self.device_type, + dtype=dtype, + requires_grad=True, + ) + key = torch.rand( + (bs, nheads, self.world_size * context_tokens, dim), + device=self.device_type, + dtype=dtype, + ) + value = torch.rand( + (bs, nheads, self.world_size * context_tokens, dim), + device=self.device_type, + dtype=dtype, + ) + + query_placement = [Shard(2)] + dquery = distribute_tensor(query, device_mesh, query_placement) + self.assertEqual(query.shape, (bs, nheads, self.world_size * query_tokens, dim)) + + context_placement = [Shard(2)] + dkey = distribute_tensor(key, device_mesh, context_placement) + dvalue = distribute_tensor(value, device_mesh, context_placement) + + # compiled = attention_fn + compiled = torch.compile(attention_fn, fullgraph=True, backend="aot_eager") + + out, lse, *args = compiled( + device_mesh.get_group(), + dquery.to_local(), + dkey.to_local(), + dvalue.to_local(), + ) + self.assertEqual(out.shape, (bs, nheads, query_tokens, dim)) + self.assertIsInstance(lse, torch.Tensor) + + ( + out_chunk, + *others, + ) = _scaled_dot_product_chunk_flash_attention( + query, + key, + value, + size=self.world_size, + is_causal=False, + ) + self.assertEqual( + out, + out_chunk[ + :, :, self.rank * query_tokens : (self.rank + 1) * query_tokens, : + ], + ) + + out.sum().backward() + instantiate_parametrized_tests(RingAttentionTest) diff --git a/test/distributed/test_functional_api.py b/test/distributed/test_functional_api.py index d26dcf970a..f2255637a6 100644 --- a/test/distributed/test_functional_api.py +++ b/test/distributed/test_functional_api.py @@ -634,14 +634,14 @@ class TestFunctionalAutograd(MultiThreadedTestCase): def test_all_to_all_single(self, compile: bool = True) -> None: group = dist.group.WORLD.group_name - t = torch.rand((self.world_size, 2), requires_grad=True) + t = torch.ones((self.world_size, 2), requires_grad=True) def my_func(t: torch.Tensor, world_size: int) -> torch.Tensor: sizes = [1] * world_size - t = t * 10 + t = t * 2 assert t.requires_grad out = ft_c.all_to_all_single_autograd(t, sizes, sizes, group) - out = out + 2 + out = out + 0 return out if compile: @@ -650,11 +650,13 @@ class TestFunctionalAutograd(MultiThreadedTestCase): compiled = my_func out = compiled(t, self.world_size) + 
self.assertEqual(out.shape, t.shape) + self.assertEqual(out, torch.full_like(t, 2.0)) self.assertIsNotNone(out.grad_fn) self.assertTrue(out.requires_grad) loss = out.sum() loss.backward() - self.assertIsNotNone(t.grad) + self.assertEqual(t.grad, torch.full_like(t, 2.0)) def test_all_to_all_single_inductor(self) -> None: group = dist.group.WORLD.group_name @@ -752,5 +754,61 @@ class TestFunctionalAutograd(MultiThreadedTestCase): self.assertEqual(input_tensor.grad, torch.full(output_size, fill_value=1.0)) +class TestFunctionalAutogradWithNCCL(MultiProcessTestCase): + def setUp(self): + super().setUp() + os.environ["WORLD_SIZE"] = str(self.world_size) + os.environ["BACKEND"] = dist.Backend.NCCL + self._spawn_processes() + + @property + def device(self): + return torch.device(self.rank) + + @property + def world_size(self): + return 2 + + @property + def process_group(self): + return dist.group.WORLD + + def dist_init(self): + dist.init_process_group( + backend=BACKEND, + world_size=self.world_size, + rank=self.rank, + init_method=f"file://{self.file_name}", + ) + + # set device for nccl pg for collectives + if BACKEND == "nccl": + torch.cuda.set_device(self.rank) + + def destroy_comms(self): + # Wait for all ranks to reach here before starting shutdown. + dist.barrier() + dist.destroy_process_group() + + @requires_nccl() + @with_comms() + def test_all_to_all_single(self) -> None: + group = self.process_group.group_name + + t = torch.ones((self.world_size, 2), requires_grad=True, device=self.device) + + sizes = [1] * self.world_size + assert t.requires_grad + out = ft_c.all_to_all_single_autograd(t * 2, sizes, sizes, group) + 0 + + self.assertEqual(out.shape, t.shape) + self.assertEqual(out, torch.full_like(t, 2.0)) + self.assertIsNotNone(out.grad_fn) + self.assertTrue(out.requires_grad) + loss = out.sum() + loss.backward() + self.assertEqual(t.grad, torch.full_like(t, 2.0)) + + if __name__ == "__main__": run_tests() diff --git a/torch/csrc/distributed/c10d/Functional.cpp b/torch/csrc/distributed/c10d/Functional.cpp index 942ae7358d..5728774f74 100644 --- a/torch/csrc/distributed/c10d/Functional.cpp +++ b/torch/csrc/distributed/c10d/Functional.cpp @@ -409,7 +409,7 @@ class AllToAllSingle : public torch::autograd::Function<AllToAllSingle> { const std::string& group_name = ctx->saved_data["group_name"].toStringRef(); DCHECK(grad_out_list.size() == 1); - auto grad_out = grad_out_list[0]; + auto grad_out = grad_out_list[0].contiguous(); auto out = c10::Dispatcher::singleton() @@ -434,7 +434,7 @@ at::Tensor all_to_all_single_autograd( const std::vector<int64_t>& input_split_sizes, const std::string& group_name) { return AllToAllSingle::apply( - input, output_split_sizes, input_split_sizes, group_name)[0]; + input, output_split_sizes, input_split_sizes, group_name); } class ReduceScatterTensor diff --git a/torch/distributed/_tensor/debug/comm_mode.py b/torch/distributed/_tensor/debug/comm_mode.py index 43def0b9d6..b195b30154 100644 --- a/torch/distributed/_tensor/debug/comm_mode.py +++ b/torch/distributed/_tensor/debug/comm_mode.py @@ -8,6 +8,7 @@ from torch.utils._python_dispatch import TorchDispatchMode funcol_native = torch.ops._c10d_functional funcol_py = torch.ops.c10d_functional +funcol_autograd = torch.ops._c10d_functional_autograd NATIVE_TO_PY_MAPPING = { funcol_native.all_gather_into_tensor: funcol_py.all_gather_into_tensor, @@ -17,6 +18,8 @@ NATIVE_TO_PY_MAPPING = { funcol_native.broadcast: funcol_py.broadcast, funcol_native.reduce_scatter_tensor: funcol_py.reduce_scatter_tensor, 
funcol_native.reduce_scatter_tensor_coalesced: funcol_py.reduce_scatter_tensor_coalesced, + # functional ops + funcol_autograd.all_to_all_single: funcol_py.all_to_all_single, } diff --git a/torch/distributed/_tensor/experimental/attention.py b/torch/distributed/_tensor/experimental/attention.py index 195a94fed8..eb7703a96b 100644 --- a/torch/distributed/_tensor/experimental/attention.py +++ b/torch/distributed/_tensor/experimental/attention.py @@ -1,7 +1,7 @@ import contextlib import weakref from enum import Enum -from typing import Any, Dict, Generator, List, Optional, Tuple, Union +from typing import Any, Dict, Generator, List, Optional, Protocol, Tuple, Union import torch import torch.distributed as dist @@ -54,6 +54,10 @@ def _merge_sdpa( """ assert len(chunks) == len(logsumexps) + # LSE may be padded in the sequence dimension such as with memory efficient attention. + seq_len = chunks[0].size(2) + logsumexps = [lse[:, :, :seq_len] for lse in logsumexps] + softmax_lse = torch.stack([lse.exp() for lse in logsumexps]).sum(dim=0).log_() out = [] @@ -80,19 +84,148 @@ def _scaled_dot_product_ring_flash_attention( if return_debug_mask: raise NotImplementedError("return_debug_mask is not supported yet") + return _templated_ring_attention( + mesh, + torch.ops.aten._scaled_dot_product_flash_attention, + query=query, + key=key, + value=value, + dropout_p=dropout_p, + is_causal=is_causal, + scale=scale, + ) + + +def _scaled_dot_product_ring_efficient_attention( + mesh: DeviceMesh, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attn_bias: Optional[torch.Tensor] = None, + dropout_p: float = 0.0, + is_causal: bool = False, + compute_log_sumexp: bool = True, + *, + scale: Optional[float] = None, +) -> Tuple[torch.Tensor, ...]: + if attn_bias is not None: + raise NotImplementedError("attn_bias is not supported yet") + if not compute_log_sumexp: + raise NotImplementedError("compute_log_sumexp must be set") + + return _templated_ring_attention( + mesh, + torch.ops.aten._scaled_dot_product_efficient_attention, + query=query, + key=key, + value=value, + attn_bias=attn_bias, + dropout_p=dropout_p, + is_causal=is_causal, + scale=scale, + compute_log_sumexp=compute_log_sumexp, + ) + + +def _scaled_dot_product_ring_cudnn_attention( + mesh: DeviceMesh, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attn_bias: Optional[torch.Tensor] = None, + dropout_p: float = 0.0, + is_causal: bool = False, + return_debug_mask: bool = True, + *, + scale: Optional[float] = None, +) -> Tuple[torch.Tensor, ...]: + if not return_debug_mask: + raise NotImplementedError("return_debug_mask must be set") + + return _templated_ring_attention( + mesh, + torch.ops.aten._scaled_dot_product_cudnn_attention, + query=query, + key=key, + value=value, + dropout_p=dropout_p, + is_causal=is_causal, + return_debug_mask=return_debug_mask, + scale=scale, + ) + + +def _ring_rotate(block: torch.Tensor, pg: dist.ProcessGroup) -> torch.Tensor: + rank = dist.get_rank(pg) + size = dist.get_world_size(pg) + + # rank 0 sends to rank 1, rank 1 sends to rank 2, ..., rank n-1 sends to rank 0 + input_split_sizes = [0] * size + input_split_sizes[(rank + 1) % size] = len(block) + output_split_sizes = [0] * size + output_split_sizes[(rank - 1) % size] = len(block) + + out = ft_c.all_to_all_single_autograd( + block, input_split_sizes, output_split_sizes, pg + ) + return out + + +class AttentionOp(Protocol): + def __call__( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + *args: object, + 
is_causal: bool = False, + **kwargs: object, + ) -> Tuple[torch.Tensor, ...]: + ... + + +def _templated_ring_attention( + mesh: DeviceMesh, + op: AttentionOp, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + *args: object, + is_causal: bool = False, + **kwargs: object, +) -> Tuple[torch.Tensor, ...]: + """ + This is a generalized ring attention implementation that can support multiple attention ops. + + Parameters + ---------- + op: + The attention op to use + *args: + additional args are passed to the op + **kwargs: + additional kwargs are passed to the op + + Returns + ------- + out: + The merged attention output + softmax_lse: + The logsumexp of the merged attention output + """ if is_causal and (query.size(2) != key.size(2)): raise NotImplementedError( "is_causal requires the same query and context sequence lengths" ) - pg = mesh.get_group() - assert isinstance(pg, dist.ProcessGroup), "must be single dimension" + if isinstance(mesh, dist.ProcessGroup): + pg: Union[dist.ProcessGroup, List[dist.ProcessGroup]] = mesh + else: + pg = mesh.get_group() + assert isinstance(pg, dist.ProcessGroup), "process group must be single dimension" rank = dist.get_rank(pg) size = dist.get_world_size(pg) - # rank 0 sends to rank 1, rank 1 sends to rank 2, ..., rank n-1 sends to rank 0 - right_dsts = list(range(1, size)) + [0] - next_kv = None chunks = [] @@ -106,20 +239,20 @@ def _scaled_dot_product_ring_flash_attention( if i < (size - 1): next_kv = torch.cat([key.flatten(), value.flatten()]) - next_kv = ft_c.permute_tensor(next_kv, right_dsts, pg) + next_kv = _ring_rotate(next_kv, pg) is_causal_behavior = _is_causal_behavior( rank=rank, world_size=size, i=i, is_causal=is_causal ) if is_causal_behavior != _CausalBehavior.SKIP: - local_results = torch.ops.aten._scaled_dot_product_flash_attention( + local_results = op( query, key, value, - dropout_p=dropout_p, + *args, is_causal=is_causal_behavior.value, - scale=scale, + **kwargs, ) chunks.append(local_results[0]) logsumexps.append(local_results[1])
2.41.0
affd230140e1cdb5b96e9a4a83c8519c08251fe
Fri, 19 Apr 2024 00:57:16 +0000
[PATCH 0351/1000] Enable UFMT on test/test_python_dispatch.py (#124373)
Part of https://github.com/pytorch/pytorch/issues/123062 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124373 Approved by: https://github.com/ezyang
diff --git a/.lintrunner.toml b/.lintrunner.toml index e914794148..6bd56ba7ac 100644 --- a/.lintrunner.toml +++ b/.lintrunner.toml @@ -1152,7 +1152,6 @@ exclude_patterns = [ 'test/test_proxy_tensor.py', 'test/test_pruning_op.py', 'test/test_public_bindings.py', - 'test/test_python_dispatch.py', 'test/test_quantization.py', 'test/test_reductions.py', 'test/test_scatter_gather_ops.py', diff --git a/test/test_python_dispatch.py b/test/test_python_dispatch.py index bd027ff47b..a86223d8b6 100644 --- a/test/test_python_dispatch.py +++ b/test/test_python_dispatch.py @@ -1,34 +1,47 @@ # Owner(s): ["module: __torch_dispatch__"] import tempfile -import torch +import unittest from copy import deepcopy -from torch.library import Library, impl, fallthrough_kernel, _scoped_library -from torch.fx.experimental.symbolic_shapes import ShapeEnv + +import torch from torch import SymInt from torch._subclasses.fake_tensor import FakeTensorMode from torch.cuda.jiterator import _create_jit_fn -import unittest +from torch.fx.experimental.symbolic_shapes import ShapeEnv +from torch.library import _scoped_library, fallthrough_kernel, impl, Library from torch.testing._internal.common_utils import * # noqa: F403 -from torch.utils._mode_utils import no_dispatch, all_same_mode -from torch.testing._internal.logging_tensor import LoggingTensor, LoggingTensorReentrant, LoggingTensorMode, \ - log_input, capture_logs, capture_logs_with_logging_tensor_mode -from torch.testing._internal.two_tensor import TwoTensor -from torch.utils._pytree import tree_map, tree_map_only -from torch.utils import _pytree as pytree -from torch.utils._python_dispatch import TorchDispatchMode, _get_current_dispatch_mode, _get_current_dispatch_mode_stack +import logging +import sys + +import torch._dynamo +from torch._C import DispatchKey, DispatchKeySet from torch._custom_op.functional import register_functional_op -from torch._C import DispatchKeySet, DispatchKey from torch.fx.experimental.proxy_tensor import make_fx -from torch.testing._internal.common_device_type import ops +from torch.multiprocessing.reductions import StorageWeakRef +from torch.testing._internal.common_device_type import ( + instantiate_device_type_tests, + ops, +) from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.custom_op_db import custom_op_db -from torch.testing._internal.common_device_type import instantiate_device_type_tests -from torch.multiprocessing.reductions import StorageWeakRef - -import logging -import sys -import torch._dynamo +from torch.testing._internal.logging_tensor import ( + capture_logs, + capture_logs_with_logging_tensor_mode, + log_input, + LoggingTensor, + LoggingTensorMode, + LoggingTensorReentrant, +) +from torch.testing._internal.two_tensor import TwoTensor +from torch.utils import _pytree as pytree +from torch.utils._mode_utils import all_same_mode, no_dispatch +from torch.utils._python_dispatch import ( + _get_current_dispatch_mode, + _get_current_dispatch_mode_stack, + TorchDispatchMode, +) +from torch.utils._pytree import tree_map, tree_map_only class TestDispatcherPythonBindings(TestCase): @@ -40,7 +53,7 @@ class TestDispatcherPythonBindings(TestCase): class TestPythonRegistration(TestCase): - test_ns = '_test_python_registration' + test_ns = "_test_python_registration" def tearDown(self): if hasattr(torch.ops, self.test_ns): @@ -56,13 +69,15 @@ class TestPythonRegistration(TestCase): # Now we are secretly making the operator a view op so autograd needs to know how # to handle it - 
my_lib1.impl('neg', my_neg, "AutogradCPU") + my_lib1.impl("neg", my_neg, "AutogradCPU") self.assertTrue(torch.neg(x).is_neg()) # RuntimeError: impl("aten::neg", ...): # Explicitly provided namespace (aten) in operator name does not match ... - with self.assertRaisesRegex(RuntimeError, "operator name does not match namespace"): + with self.assertRaisesRegex( + RuntimeError, "operator name does not match namespace" + ): with _scoped_library("foo", "DEF") as my_lib3: my_lib3.define("neg(Tensor self) -> Tensor") my_lib3.impl(torch.ops.aten.neg.default, my_neg, "AutogradCPU") @@ -79,7 +94,9 @@ class TestPythonRegistration(TestCase): # Assert that a user can't override the behavior of a (ns, op, dispatch_key) # combination if someone overrided the behavior for the same before them - with self.assertRaisesRegex(RuntimeError, 'already a kernel registered from python'): + with self.assertRaisesRegex( + RuntimeError, "already a kernel registered from python" + ): my_lib2.impl(torch.ops.aten.mul.Tensor, my_mul, "ZeroTensor") # Validate that lib2 is not affected by removing lib1 @@ -90,7 +107,9 @@ class TestPythonRegistration(TestCase): self.assertTrue(torch.mul(x, y)._is_zerotensor()) def test_error_if_fn_not_callable(self): - with self.assertRaisesRegex(TypeError, "Input function is required to be a callable"): + with self.assertRaisesRegex( + TypeError, "Input function is required to be a callable" + ): with _scoped_library("aten", "IMPL") as my_lib: my_lib.impl(torch.ops.aten.neg.default, [], "AutogradCPU") @@ -112,7 +131,7 @@ class TestPythonRegistration(TestCase): pass lib.impl(f"{self.test_ns}::foo123", foo123, "CPU") - key = f'{self.test_ns}/foo123/CPU' + key = f"{self.test_ns}/foo123/CPU" self.assertTrue(key in torch.library._impls) saved_op_impls = lib._op_impls @@ -142,7 +161,7 @@ class TestPythonRegistration(TestCase): return args[0].clone() with _scoped_library("aten", "IMPL") as my_lib1: - my_lib1.impl('aten::sum', my_sum, "CPU") + my_lib1.impl("aten::sum", my_sum, "CPU") x = torch.tensor([1, 2]) self.assertEqual(torch.sum(x), x) self.assertTrue(run[0]) @@ -152,11 +171,11 @@ class TestPythonRegistration(TestCase): def test_override_cuda_with_jiterator(self) -> None: def override_where_cuda() -> None: # Example 1: Invert the behavior of where's condition input - not_where_code_string = ''' + not_where_code_string = """ template <typename T> T inverted_where(bool cond, T a, T b){ return !cond ? a : b; } - ''' + """ jitted_where = _create_jit_fn(not_where_code_string) CALLED = [False] @@ -167,10 +186,12 @@ class TestPythonRegistration(TestCase): # overriding where's cuda kernel with Jiterator generated kernel with _scoped_library("aten", "IMPL") as my_lib: - my_lib.impl('aten::where.self', inverted_where, "CUDA") + my_lib.impl("aten::where.self", inverted_where, "CUDA") - device = 'cuda' - cond = torch.tensor([True, True, False], device=device, dtype=torch.bool) + device = "cuda" + cond = torch.tensor( + [True, True, False], device=device, dtype=torch.bool + ) x = torch.tensor([1, 2, 3], device=device) y = torch.tensor([-1, -2, -3], device=device) @@ -182,11 +203,11 @@ class TestPythonRegistration(TestCase): def override_gelu_cuda() -> None: # Example 2: Use relu to approximate gelu for faster compute - fastest_gelu_code_string = ''' + fastest_gelu_code_string = """ template <typename T> T fast_gelu(T a){ return a > 0 ? 
a : 0; } - ''' + """ jitted_gelu = _create_jit_fn(fastest_gelu_code_string) CALLED = [False] @@ -197,22 +218,26 @@ class TestPythonRegistration(TestCase): # overriding gelu's cuda kernel with Jiterator generated relu kernel with _scoped_library("aten", "IMPL") as my_lib: - my_lib.impl('aten::gelu', fast_gelu, "CUDA") + my_lib.impl("aten::gelu", fast_gelu, "CUDA") - x = torch.rand([3, 3], device='cuda', dtype=torch.float) - self.assertEqual(torch.nn.functional.gelu(x), torch.nn.functional.relu(x)) + x = torch.rand([3, 3], device="cuda", dtype=torch.float) + self.assertEqual( + torch.nn.functional.gelu(x), torch.nn.functional.relu(x) + ) self.assertTrue(CALLED[0]) # behavior restored after deregistration - self.assertNotEqual(torch.nn.functional.gelu(x), torch.nn.functional.relu(x)) + self.assertNotEqual( + torch.nn.functional.gelu(x), torch.nn.functional.relu(x) + ) def override_exp_cuda() -> None: # Example 3: Preventing exp from exploding for float16 - clipped_exp_code_string = ''' + clipped_exp_code_string = """ template <typename T> T clipped_exp(T a){ return a > T(10.0) ? T(22026.4657948) : exp(a); } - ''' + """ jitted_exp = _create_jit_fn(clipped_exp_code_string) CALLED = [False] @@ -223,22 +248,27 @@ class TestPythonRegistration(TestCase): # overriding exp's cuda kernel with clipped_exp kernel with _scoped_library("aten", "IMPL") as my_lib: - my_lib.impl('aten::exp', clipped_exp, "CUDA") + my_lib.impl("aten::exp", clipped_exp, "CUDA") - x = torch.tensor([0.0, 100.0], device='cuda', dtype=torch.float16) - self.assertEqual(torch.exp(x), torch.tensor([1.0, 22026.4657948], dtype=torch.float16)) + x = torch.tensor([0.0, 100.0], device="cuda", dtype=torch.float16) + self.assertEqual( + torch.exp(x), + torch.tensor([1.0, 22026.4657948], dtype=torch.float16), + ) self.assertTrue(CALLED[0]) # behavior restored after deregistration - self.assertEqual(torch.exp(x), torch.tensor([1.0, torch.inf], dtype=torch.float16)) + self.assertEqual( + torch.exp(x), torch.tensor([1.0, torch.inf], dtype=torch.float16) + ) def override_add_cuda() -> None: # Example 4: simulate a hardware bug, where the adder is always off by 1 - buggy_add_code_string = ''' + buggy_add_code_string = """ template <typename T> T buggy_add(T a, T b){ return a + b + T(1); } - ''' + """ jitted_add = _create_jit_fn(buggy_add_code_string) CALLED = [False] @@ -248,10 +278,10 @@ class TestPythonRegistration(TestCase): return jitted_add(*args, **kwargs) with _scoped_library("aten", "IMPL") as my_lib: - my_lib.impl('aten::add.Tensor', buggy_add, "CUDA") + my_lib.impl("aten::add.Tensor", buggy_add, "CUDA") - x_cpu = torch.rand([3, 3], device='cpu') - y_cpu = torch.rand([3], device='cpu') + x_cpu = torch.rand([3, 3], device="cpu") + y_cpu = torch.rand([3], device="cpu") x_cuda = x_cpu.cuda() y_cuda = y_cpu.cuda() @@ -271,12 +301,15 @@ class TestPythonRegistration(TestCase): def test_extend_library_with_dispatch_key_arg(self): def my_sum(*args, **kwargs): return args[0].clone() + with _scoped_library("aten", "IMPL", dispatch_key="CPU") as my_lib1: # RuntimeError: Explicitly provided dispatch key (Conjugate) is # inconsistent with the dispatch key of the enclosing TORCH_LIBRARY_IMPL block - with self.assertRaisesRegex(RuntimeError, "inconsistent with the dispatch key"): - my_lib1.impl('sum', my_sum, "Conjugate") - my_lib1.impl('aten::sum', my_sum) + with self.assertRaisesRegex( + RuntimeError, "inconsistent with the dispatch key" + ): + my_lib1.impl("sum", my_sum, "Conjugate") + my_lib1.impl("aten::sum", my_sum) x = torch.tensor([1, 2]) 
self.assertEqual(torch.sum(x), x) @@ -348,7 +381,9 @@ class TestPythonRegistration(TestCase): called = [0] - @torch.library.define(my_lib1, "_op() -> None", alias_analysis=alias_analysis) + @torch.library.define( + my_lib1, "_op() -> None", alias_analysis=alias_analysis + ) def _op(*args, **kwargs): called[0] += 1 @@ -367,7 +402,7 @@ class TestPythonRegistration(TestCase): with self.assertRaisesRegex(ValueError, "Unsupported kind"): my_lib1 = Library("myns", "BLA") # noqa: TOR901 - for kind in ('DEF', 'FRAGMENT'): + for kind in ("DEF", "FRAGMENT"): with self.assertRaisesRegex(ValueError, "reserved namespace"): my_lib1 = Library("prim", kind) # noqa: TOR901 @@ -400,9 +435,9 @@ class TestPythonRegistration(TestCase): register_functional_op(lib, "abs", torch.ops.aten.abs.out) schemas = [ - 'foo(Tensor x, Tensor(a!)[] y) -> ()', - 'foo(Tensor x, Tensor(a!) y, Tensor(b) z) -> Tensor(b)', - 'foo(Tensor x, Tensor(a!) y) -> (Tensor, Tensor(a))', + "foo(Tensor x, Tensor(a!)[] y) -> ()", + "foo(Tensor x, Tensor(a!) y, Tensor(b) z) -> Tensor(b)", + "foo(Tensor x, Tensor(a!) y) -> (Tensor, Tensor(a))", ] for schema in schemas: @@ -412,7 +447,8 @@ class TestPythonRegistration(TestCase): register_functional_op( lib, "foo_functional", - getattr(torch.ops, self.test_ns).foo.default) + getattr(torch.ops, self.test_ns).foo.default, + ) def _check_is_functional_variant(self, mutable_op, functional_op, args): # functional op should not mutate @@ -428,12 +464,23 @@ class TestPythonRegistration(TestCase): flat_mutable_result = pytree.tree_leaves(mutable_result) flat_functional_result = pytree.tree_leaves(functional_result) assert len(flat_functional_result) > len(flat_mutable_result) - self.assertEqual(flat_functional_result[:len(flat_mutable_result)], flat_mutable_result) + self.assertEqual( + flat_functional_result[: len(flat_mutable_result)], flat_mutable_result + ) # check rest of functional_result is the mutated args - mutated_args = [maybe_mutated_arg for maybe_mutated_arg, arg in zip(cloned_args, args) - if not (maybe_mutated_arg is not None and arg is not None and torch.allclose(maybe_mutated_arg, arg))] - self.assertEqual(flat_functional_result[len(flat_mutable_result):], mutated_args) + mutated_args = [ + maybe_mutated_arg + for maybe_mutated_arg, arg in zip(cloned_args, args) + if not ( + maybe_mutated_arg is not None + and arg is not None + and torch.allclose(maybe_mutated_arg, arg) + ) + ] + self.assertEqual( + flat_functional_result[len(flat_mutable_result) :], mutated_args + ) # check that functionalization kernel was indeed registered def fn(*args): @@ -451,28 +498,31 @@ class TestPythonRegistration(TestCase): def test_register_functional_op_no_returns(self): with _scoped_library(self.test_ns, "FRAGMENT") as lib: - lib.define('foo(Tensor x, Tensor(a!) y, Tensor z, Tensor(b!) w) -> ()') + lib.define("foo(Tensor x, Tensor(a!) y, Tensor z, Tensor(b!) 
w) -> ()") def foo_impl(x, y, z, w): y.fill_(3.14) w.fill_(2.71) - lib.impl('foo', foo_impl, 'CPU') + lib.impl("foo", foo_impl, "CPU") register_functional_op( - lib, - 'foo_functional', - getattr(torch.ops, self.test_ns).foo.default) + lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default + ) x = torch.randn([]) y = torch.randn([]) z = torch.randn([]) w = torch.randn([]) self._check_is_functional_variant( getattr(torch.ops, self.test_ns).foo.default, - getattr(torch.ops, self.test_ns).foo_functional.default, (x, y, z, w)) + getattr(torch.ops, self.test_ns).foo_functional.default, + (x, y, z, w), + ) def test_register_functional_op_with_optional(self): with _scoped_library(self.test_ns, "FRAGMENT") as lib: - lib.define('foo(Tensor x, Tensor(a!) y, Tensor (b!) z, Tensor(c!)? w) -> ()') + lib.define( + "foo(Tensor x, Tensor(a!) y, Tensor (b!) z, Tensor(c!)? w) -> ()" + ) def foo_impl(x, y, z, w): y.fill_(3.14) @@ -480,25 +530,30 @@ class TestPythonRegistration(TestCase): if w is not None: w.fill_(1.618) - lib.impl('foo', foo_impl, 'CPU') + lib.impl("foo", foo_impl, "CPU") register_functional_op( - lib, - 'foo_functional', - getattr(torch.ops, self.test_ns).foo.default) + lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default + ) x = torch.randn([]) y = torch.randn([]) z = torch.randn([]) w = torch.randn([]) self._check_is_functional_variant( getattr(torch.ops, self.test_ns).foo.default, - getattr(torch.ops, self.test_ns).foo_functional.default, (x, y, z, w)) + getattr(torch.ops, self.test_ns).foo_functional.default, + (x, y, z, w), + ) self._check_is_functional_variant( getattr(torch.ops, self.test_ns).foo.default, - getattr(torch.ops, self.test_ns).foo_functional.default, (x, y, z, None)) + getattr(torch.ops, self.test_ns).foo_functional.default, + (x, y, z, None), + ) def test_register_functional_op_one_return(self): with _scoped_library(self.test_ns, "FRAGMENT") as lib: - lib.define('foo(Tensor x, Tensor(a!) y, Tensor(c!) z, Tensor(b!) w) -> Tensor') + lib.define( + "foo(Tensor x, Tensor(a!) y, Tensor(c!) z, Tensor(b!) w) -> Tensor" + ) def foo_impl(x, y, z, w): y.fill_(3.14) @@ -506,33 +561,35 @@ class TestPythonRegistration(TestCase): z.fill_(0.99) return x.clone() - lib.impl('foo', foo_impl, 'CPU') + lib.impl("foo", foo_impl, "CPU") register_functional_op( - lib, - "foo_functional", - getattr(torch.ops, self.test_ns).foo.default) + lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default + ) x = torch.randn([]) y = torch.randn([]) z = torch.randn([]) w = torch.randn([]) self._check_is_functional_variant( getattr(torch.ops, self.test_ns).foo.default, - getattr(torch.ops, self.test_ns).foo_functional.default, (x, y, z, w)) + getattr(torch.ops, self.test_ns).foo_functional.default, + (x, y, z, w), + ) def test_register_functional_op_multiple_returns(self): with _scoped_library(self.test_ns, "FRAGMENT") as lib: - lib.define('foo(Tensor x, Tensor(a!) y, Tensor z, Tensor(b!) w) -> (Tensor, Tensor)') + lib.define( + "foo(Tensor x, Tensor(a!) y, Tensor z, Tensor(b!) 
w) -> (Tensor, Tensor)" + ) def foo_impl(x, y, z, w): y.fill_(3.14) w.fill_(2.71) return x.clone(), z.clone() - lib.impl('foo', foo_impl, 'CPU') + lib.impl("foo", foo_impl, "CPU") register_functional_op( - lib, - 'foo_functional', - getattr(torch.ops, self.test_ns).foo.default) + lib, "foo_functional", getattr(torch.ops, self.test_ns).foo.default + ) x = torch.randn([]) y = torch.randn([]) @@ -540,14 +597,16 @@ class TestPythonRegistration(TestCase): w = torch.randn([]) self._check_is_functional_variant( getattr(torch.ops, self.test_ns).foo.default, - getattr(torch.ops, self.test_ns).foo_functional.default, (x, y, z, w)) + getattr(torch.ops, self.test_ns).foo_functional.default, + (x, y, z, w), + ) def test_register_fallthrough(self): - with _scoped_library('aten', 'IMPL') as my_lib: + with _scoped_library("aten", "IMPL") as my_lib: my_lib.impl("mm", fallthrough_kernel, "AutocastCPU") - a = torch.randn(2, 3, device='cpu', dtype=torch.float32) - b = torch.randn(3, 2, device='cpu', dtype=torch.float32) + a = torch.randn(2, 3, device="cpu", dtype=torch.float32) + b = torch.randn(3, 2, device="cpu", dtype=torch.float32) with torch.autocast(device_type="cpu", dtype=torch.bfloat16): # dtype for mm should be float32 since we registered a fallthrough self.assertEqual(torch.mm(a, b).dtype, torch.float32) @@ -558,6 +617,7 @@ class TestPythonRegistration(TestCase): # default behavior should have been restored self.assertEqual(torch.mm(a, b).dtype, torch.bfloat16) + class TestPythonDispatch(TestCase): def test_basic(self) -> None: with capture_logs() as logs: @@ -567,7 +627,7 @@ class TestPythonDispatch(TestCase): saved_x = y.grad_fn._saved_self grad_y = LoggingTensor(torch.tensor([1.0])) log_input("grad_y", grad_y) - g, = torch.autograd.grad((y,), (x,), (grad_y,)) + (g,) = torch.autograd.grad((y,), (x,), (grad_y,)) self.assertEqual(g.elem, torch.tensor([6.0])) with torch.no_grad(): @@ -577,13 +637,16 @@ class TestPythonDispatch(TestCase): self.assertEqual(saved_x, x) # TODO: figure out why broken # self.assertEqual(saved_x._version, x._version) - self.assertExpectedInline('\n'.join(logs), '''\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[1] = input('x') $1: f32[1] = torch._ops.aten.mul.Tensor($0, $0) $2: f32[1] = input('grad_y') $3: f32[1] = torch._ops.aten.mul.Tensor($2, $0) $4: f32[1] = torch._ops.aten.mul.Tensor($2, $0) -$5: f32[1] = torch._ops.aten.add.Tensor($4, $3)''') +$5: f32[1] = torch._ops.aten.add.Tensor($4, $3)""", + ) def test_out(self) -> None: with capture_logs() as logs: @@ -596,10 +659,13 @@ $5: f32[1] = torch._ops.aten.add.Tensor($4, $3)''') self.assertEqual(y.elem, torch.ones(1)) # TODO: arguably this shouldn't pass and we should complain # that out isn't a kwarg - self.assertExpectedInline('\n'.join(logs), '''\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[1] = input('x') $1: f32[1] = input('y') -$2: f32[1] = torch._ops.aten.abs.out($0, out=$1)''') +$2: f32[1] = torch._ops.aten.abs.out($0, out=$1)""", + ) def test_kwarg_only(self) -> None: with capture_logs() as logs: @@ -617,7 +683,9 @@ $2: f32[1] = torch._ops.aten.abs.out($0, out=$1)''') # The expectation is that beta/alpha don't show up when they're # defaulted. This is even if the user explicitly specified it. 
- self.assertExpectedInline('\n'.join(logs), '''\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[1] = input('x') $1: f32[1, 1] = input('y') $2: f32[1] = input('z') @@ -625,7 +693,8 @@ $3: f32[1] = torch._ops.aten.addmv.default($0, $1, $2) $4: f32[1] = torch._ops.aten.addmv.default($0, $1, $2) $5: f32[1] = torch._ops.aten.addmv.default($0, $1, $2, beta=2) $6: f32[1] = torch._ops.aten.addmv.default($0, $1, $2, alpha=2) -$7: f32[1] = torch._ops.aten.addmv.default($0, $1, $2, beta=2, alpha=2)''') +$7: f32[1] = torch._ops.aten.addmv.default($0, $1, $2, beta=2, alpha=2)""", + ) def test_kwarg_only_and_positional_default(self) -> None: with capture_logs() as logs: @@ -638,12 +707,15 @@ $7: f32[1] = torch._ops.aten.addmv.default($0, $1, $2, beta=2, alpha=2)''') # What we are testing here is that we omit arg2 # if it is defaulted, even if a kwarg is set - self.assertExpectedInline('\n'.join(logs), '''\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[1] = input('x') $1: f32[1] = torch._ops.aten._foobar.default($0) $2: f32[1] = torch._ops.aten._foobar.default($0, False) $3: f32[1] = torch._ops.aten._foobar.default($0, arg3=False) -$4: f32[1] = torch._ops.aten._foobar.default($0, False, arg3=False)''') +$4: f32[1] = torch._ops.aten._foobar.default($0, False, arg3=False)""", + ) def test_produce_real_type(self) -> None: with capture_logs() as logs: @@ -651,17 +723,22 @@ $4: f32[1] = torch._ops.aten._foobar.default($0, False, arg3=False)''') log_input("x", x) x.to(dtype=torch.double) # non-optional dtype torch.cumprod(x, 0, dtype=torch.double) # optional dtype - x[:, 1].contiguous(memory_format=torch.contiguous_format) # optional memory format + x[:, 1].contiguous( + memory_format=torch.contiguous_format + ) # optional memory format # There doesn't appear to be any layout signatures which are # triggerable using tensor subclasses (need to use a mode) - self.assertExpectedInline('\n'.join(logs), '''\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[2, 2] = input('x') $1: f64[2, 2] = torch._ops.aten._to_copy.default($0, dtype=torch.float64) $2: f64[2, 2] = torch._ops.aten.cumprod.default($0, 0, dtype=torch.float64) $3: f32[2, 2] = torch._ops.aten.slice.Tensor($0, 0, 0, 9223372036854775807) $4: f32[2] = torch._ops.aten.select.int($3, 1, 1) -$5: f32[2] = torch._ops.aten.clone.default($4, memory_format=torch.contiguous_format)''') +$5: f32[2] = torch._ops.aten.clone.default($4, memory_format=torch.contiguous_format)""", + ) def test_optional_tensor_list(self) -> None: def weird(xs): @@ -676,13 +753,17 @@ $5: f32[2] = torch._ops.aten.clone.default($4, memory_format=torch.contiguous_fo log_input("x", x) torch.ops.my_lib.weird.default([None, x]) - self.assertExpectedInline('\n'.join(logs), '''\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[2, 2] = input('x') -$1: f32[] = torch._ops.my_lib.weird.default(['None', '$0'])''') +$1: f32[] = torch._ops.my_lib.weird.default(['None', '$0'])""", + ) def test_list_ret(self) -> None: # test all sequence types are permissible returns for list_type in (list, tuple): + class A(torch._C.TensorBase): @staticmethod def __new__(cls, elem): @@ -698,7 +779,7 @@ $1: f32[] = torch._ops.my_lib.weird.default(['None', '$0'])''') self.assertEqual( torch.split(A(torch.tensor([0, 1])), 2), - torch.split(torch.tensor([0, 1]), 2) + torch.split(torch.tensor([0, 1]), 2), ) def test_invalid_ret(self) -> None: @@ -714,10 +795,14 @@ $1: f32[] = torch._ops.my_lib.weird.default(['None', '$0'])''') # Wobbles depending on NDEBUG mode of 
pybind11 self.assertRaisesRegex( - RuntimeError, "Unable to cast", lambda: A(torch.zeros(1)).neg(), + RuntimeError, + "Unable to cast", + lambda: A(torch.zeros(1)).neg(), ) self.assertRaisesRegex( - RuntimeError, "Unable to cast", lambda: A(torch.zeros(1)).detach(), + RuntimeError, + "Unable to cast", + lambda: A(torch.zeros(1)).detach(), ) def test_detach_appears_twice_when_called_once(self) -> None: @@ -729,10 +814,13 @@ $1: f32[] = torch._ops.my_lib.weird.default(['None', '$0'])''') # it currently emits two, for reasons unclear to us. Leaving # this test here to make sure we don't regress even further (it # would be bad if calling .detach() once emits 3+ detaches). - self.assertExpectedInline('\n'.join(logs), '''\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[1] = input('x') $1: f32[1] = torch._ops.aten.detach.default($0) -$2: f32[1] = torch._ops.aten.detach.default($1)''') +$2: f32[1] = torch._ops.aten.detach.default($1)""", + ) def test_storage(self) -> None: # For now, just make sure it doesn't crash. Ideally, we should @@ -783,10 +871,18 @@ $2: f32[1] = torch._ops.aten.detach.default($1)''') def __torch_dispatch__(cls, func, types, args=(), kwargs=None): raise ErrorB - self.assertRaises(ErrorA, lambda: torch.add(A(torch.empty(1)), A(torch.empty(1)))) - self.assertRaises(ErrorB, lambda: torch.add(A(torch.empty(1)), B(torch.empty(1)))) - self.assertRaises(ErrorB, lambda: torch.add(B(torch.empty(1)), A(torch.empty(1)))) - self.assertRaises(ErrorB, lambda: torch.add(B(torch.empty(1)), B(torch.empty(1)))) + self.assertRaises( + ErrorA, lambda: torch.add(A(torch.empty(1)), A(torch.empty(1))) + ) + self.assertRaises( + ErrorB, lambda: torch.add(A(torch.empty(1)), B(torch.empty(1))) + ) + self.assertRaises( + ErrorB, lambda: torch.add(B(torch.empty(1)), A(torch.empty(1))) + ) + self.assertRaises( + ErrorB, lambda: torch.add(B(torch.empty(1)), B(torch.empty(1))) + ) def test_format(self) -> None: x = LoggingTensor(torch.ones(1)) @@ -803,14 +899,14 @@ $2: f32[1] = torch._ops.aten.detach.default($1)''') class Square(torch.autograd.Function): @staticmethod def forward(ctx, x): - y = x ** 2 + y = x**2 ctx.save_for_backward(x) return y @staticmethod def backward(ctx, grad_output): assert isinstance(grad_output, LoggingTensor) - x, = ctx.saved_tensors + (x,) = ctx.saved_tensors assert isinstance(x, LoggingTensor) escape[0] = x return grad_output * 2 * x @@ -835,14 +931,17 @@ $2: f32[1] = torch._ops.aten.detach.default($1)''') # TODO: figure out why this is broken # self.assertEqual(escape[0]._version, x._version) - self.assertExpectedInline('\n'.join(logs), '''\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[1] = input('x') $1: f32[1] = input('x.grad') $2: f32[1] = torch._ops.aten.pow.Tensor_Scalar($0, 2) $3: f32[1] = input('grad_output') $4: f32[1] = torch._ops.aten.mul.Tensor($3, 2) $5: f32[1] = torch._ops.aten.mul.Tensor($4, $0) -$6: f32[1] = torch._ops.aten.add_.Tensor($1, $5)''') +$6: f32[1] = torch._ops.aten.add_.Tensor($1, $5)""", + ) def test_subclass_creation(self): # Make sure these statements runs without error @@ -880,13 +979,14 @@ $6: f32[1] = torch._ops.aten.add_.Tensor($1, $5)''') f_name = f + "_like" self.assertEqual(type(getattr(torch, f_name)(MyTensor(2))), MyTensor) - self.assertEqual(type(torch.full_like(MyTensor(2), 1.)), MyTensor) + self.assertEqual(type(torch.full_like(MyTensor(2), 1.0)), MyTensor) self.assertEqual(type(torch.randint_like(MyTensor(2), high=3)), MyTensor) def test_make_fx_with_subclass(self) -> None: def f(x, y): # Returns 
(TwoTensor, Tensor) return x * y, y + y + x_a = torch.zeros(4) x_b = torch.zeros(4) y = torch.ones(4) @@ -901,8 +1001,11 @@ $6: f32[1] = torch._ops.aten.add_.Tensor($1, $5)''') out1, out2 = f(x, y) out1_unwrapped_attrs, _ = out1.__tensor_flatten__() return (*[getattr(out1, attr) for attr in out1_unwrapped_attrs], out2) - fx_g = make_fx(f_to_trace, tracing_mode='fake')(x_a, x_b, y) - self.assertExpectedInline(fx_g.code, """\ + + fx_g = make_fx(f_to_trace, tracing_mode="fake")(x_a, x_b, y) + self.assertExpectedInline( + fx_g.code, + """\ @@ -911,7 +1014,8 @@ def forward(self, x_a_1, x_b_1, y_1): mul_1 = torch.ops.aten.mul.Tensor(x_b_1, y_1); x_b_1 = None add = torch.ops.aten.add.Tensor(y_1, y_1); y_1 = None return (mul, mul_1, add) - """) + """, + ) # See https://github.com/pytorch/pytorch/issues/117794 def test_return_and_correct_aliasing_gives_correct_stride(self): @@ -924,15 +1028,20 @@ def forward(self, x_a_1, x_b_1, y_1): class WrapperTensor(torch.Tensor): elem: torch.Tensor - __slots__ = ['elem'] + __slots__ = ["elem"] @staticmethod def __new__(cls, elem, *args, **kwargs): r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] - cls, elem.size(), - dtype=elem.dtype, layout=elem.layout, - device=elem.device, requires_grad=elem.requires_grad, - strides=elem.stride(), storage_offset=elem.storage_offset()) + cls, + elem.size(), + dtype=elem.dtype, + layout=elem.layout, + device=elem.device, + requires_grad=elem.requires_grad, + strides=elem.stride(), + storage_offset=elem.storage_offset(), + ) r.elem = elem return r @@ -964,20 +1073,26 @@ def forward(self, x_a_1, x_b_1, y_1): self.assertEqual(x.elem, x_copy.elem) self.assertFalse(x is x_copy) - def test_deepcopy_wrapper_subclass_with_clone_returning_different_type(self) -> None: - + def test_deepcopy_wrapper_subclass_with_clone_returning_different_type( + self, + ) -> None: class MyWrapperTensor(torch.Tensor): elem: torch.Tensor - __slots__ = ['elem'] + __slots__ = ["elem"] @staticmethod def __new__(cls, elem, *args, **kwargs): r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] - cls, elem.size(), - dtype=elem.dtype, layout=elem.layout, - device=elem.device, requires_grad=elem.requires_grad, - strides=elem.stride(), storage_offset=elem.storage_offset()) + cls, + elem.size(), + dtype=elem.dtype, + layout=elem.layout, + device=elem.device, + requires_grad=elem.requires_grad, + strides=elem.stride(), + storage_offset=elem.storage_offset(), + ) r.elem = elem return r @@ -993,12 +1108,13 @@ def forward(self, x_a_1, x_b_1, y_1): # explicitly disable __torch_function__ for this subclass. x = MyWrapperTensor(torch.randn(3)) - with self.assertRaisesRegex(RuntimeError, - "for which cloning returns another instance of the same subclass"): + with self.assertRaisesRegex( + RuntimeError, + "for which cloning returns another instance of the same subclass", + ): x_copy = deepcopy(x) def test_deepcopy_non_wrapper_subclass(self) -> None: - # Ensure correct error is thrown for common error cases. class SubTensorError1(torch.Tensor): # Default implementation of new_empty() returns a plain tensor. 
@@ -1011,8 +1127,10 @@ def forward(self, x_a_1, x_b_1, y_1): for error_cls in [SubTensorError1, SubTensorError2]: x = error_cls(3) - with self.assertRaisesRegex(RuntimeError, - "for which that function returns another instance of the same subclass"): + with self.assertRaisesRegex( + RuntimeError, + "for which that function returns another instance of the same subclass", + ): x_copy = deepcopy(x) # Ensure a correctly implemented new_empty() causes deepcopy() to work. @@ -1032,11 +1150,21 @@ def forward(self, x_a_1, x_b_1, y_1): # extra dispatch keys. We probably want to unify the two APIs # in the future. r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] - cls, elem.size(), elem.stride(), elem.storage_offset(), + cls, + elem.size(), + elem.stride(), + elem.storage_offset(), torch.contiguous_format, - elem.dtype, elem.layout, - elem.device, False, False, None, False, False, - DispatchKeySet(DispatchKey.NestedTensor)) + elem.dtype, + elem.layout, + elem.device, + False, + False, + None, + False, + False, + DispatchKeySet(DispatchKey.NestedTensor), + ) return r @classmethod @@ -1045,21 +1173,26 @@ def forward(self, x_a_1, x_b_1, y_1): x = ExtraKeysTensor(torch.randn(3)) self.assertTrue(torch._C._dispatch_keys(x).has(DispatchKey.NestedTensor)) - self.assertFalse(torch._C._dispatch_keys(x).has(DispatchKey.AutogradNestedTensor)) + self.assertFalse( + torch._C._dispatch_keys(x).has(DispatchKey.AutogradNestedTensor) + ) def test_index_put_where_only_index_is_subclass(self) -> None: called_funcs = [] class MyTensor(torch.Tensor): elem: torch.Tensor - __slots__ = ['elem'] + __slots__ = ["elem"] @staticmethod def __new__(cls, elem, *args, **kwargs): r = torch.Tensor._make_wrapper_subclass( - cls, elem.size(), - dtype=elem.dtype, layout=elem.layout, - device=elem.device, requires_grad=elem.requires_grad + cls, + elem.size(), + dtype=elem.dtype, + layout=elem.layout, + device=elem.device, + requires_grad=elem.requires_grad, ) r.elem = elem return r @@ -1079,8 +1212,11 @@ def forward(self, x_a_1, x_b_1, y_1): with capture_logs(is_mode=True) as logs: with LoggingTensorMode(): torch.empty([]) - self.assertExpectedInline('\n'.join(logs), """\ -$0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False)""") + self.assertExpectedInline( + "\n".join(logs), + """\ +$0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False)""", + ) def test_torch_dispatch_mode_unrelated_tensors(self) -> None: x = torch.randn([]) @@ -1088,7 +1224,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p with capture_logs(is_mode=True) as logs: with LoggingTensorMode(): x + y - self.assertExpectedInline('\n'.join(logs), """$2: f32[] = torch._ops.aten.add.Tensor($0, $1)""") + self.assertExpectedInline( + "\n".join(logs), """$2: f32[] = torch._ops.aten.add.Tensor($0, $1)""" + ) def test_nested_push_logging_tensor_mode(self): x = torch.randn([]) @@ -1099,11 +1237,14 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p torch.empty([]) x + y - self.assertExpectedInline('\n'.join(logs), """\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False) $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False) $3: f32[] = torch._ops.aten.add.Tensor($1, $2) -$3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") +$3: f32[] = torch._ops.aten.add.Tensor($1, $2)""", + 
) def test_capture_logs_with_torch_dispatch_mode(self): x = torch.randn([]) @@ -1111,9 +1252,12 @@ $3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") with capture_logs_with_logging_tensor_mode() as logs: torch.empty([]) x + y - self.assertExpectedInline('\n'.join(logs), """\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False) -$3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") +$3: f32[] = torch._ops.aten.add.Tensor($1, $2)""", + ) x = torch.randn([]) y = torch.randn([]) @@ -1123,11 +1267,14 @@ $3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") torch.empty([]) x + y - self.assertExpectedInline('\n'.join(logs2), """\ + self.assertExpectedInline( + "\n".join(logs2), + """\ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False) $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False) $3: f32[] = torch._ops.aten.add.Tensor($1, $2) -$3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") +$3: f32[] = torch._ops.aten.add.Tensor($1, $2)""", + ) self.assertEqual(logs1, logs2) @@ -1217,7 +1364,9 @@ $3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") class TestMode(TorchDispatchMode): def __torch_dispatch__(self, func, types, args=(), kwargs=None): - tree_map_only(torch.Tensor, lambda t: test_case.assertIn(t, seen), (args, kwargs)) + tree_map_only( + torch.Tensor, lambda t: test_case.assertIn(t, seen), (args, kwargs) + ) if kwargs is None: kwargs = {} r = func(*args, **kwargs) @@ -1237,7 +1386,7 @@ $3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") class AMode(TorchDispatchMode): def __torch_dispatch__(self, func, types, args=(), kwargs=None): - if func.__name__ == 'randn.default': + if func.__name__ == "randn.default": raise RuntimeError return A(torch.zeros(())) @@ -1310,7 +1459,7 @@ $3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") def __torch_dispatch__(self, func, types, args=(), kwargs=None): return func(*args, **kwargs) - x = torch.tensor(4.) + x = torch.tensor(4.0) with Mode(): y = x + x z = y + y @@ -1324,7 +1473,10 @@ $3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") self.assertIsInstance(y, ModeTensor) self.assertIsInstance(z, ModeTensor) - assert self.assertRaisesRegex(RuntimeError, "subclass Mode but.* associated to a python object of type Mode") + assert self.assertRaisesRegex( + RuntimeError, + "subclass Mode but.* associated to a python object of type Mode", + ) def test_notimplemented_mode(self): sub_count = 0 @@ -1380,10 +1532,12 @@ $3: f32[] = torch._ops.aten.add.Tensor($1, $2)""") with LoggingTensorMode() as reenabled: with reenabled: torch.empty([]) - self.assertExpectedInline('\n'.join(logs), """\ + self.assertExpectedInline( + "\n".join(logs), + """\ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False) -$0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False)""") - +$0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), pin_memory=False)""", + ) def test_error_using_class_method_on_mode(self): class A(TorchDispatchMode): @@ -1391,8 +1545,10 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p def __torch_dispatch__(cls, func, types, args=(), kwargs=None): return func(args, kwargs) - x = torch.tensor(5.) 
- with self.assertRaisesRegex(RuntimeError, "classmethod is not supported, please make it a plain method"): + x = torch.tensor(5.0) + with self.assertRaisesRegex( + RuntimeError, "classmethod is not supported, please make it a plain method" + ): with A(): x + x @@ -1433,9 +1589,13 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p def test_tolist_numpy_with_torch_dispatch_mode(self) -> None: x = LoggingTensor(torch.tensor([2.0, 3.0])) - with self.assertRaisesRegex(RuntimeError, "is not supported for tensor subclasses."): + with self.assertRaisesRegex( + RuntimeError, "is not supported for tensor subclasses." + ): x.tolist() - with self.assertRaisesRegex(RuntimeError, "is not supported for tensor subclasses."): + with self.assertRaisesRegex( + RuntimeError, "is not supported for tensor subclasses." + ): x.numpy() with self.assertRaises(AssertionError): self.assertEqual(x, None) @@ -1453,7 +1613,7 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p self.testcase.assertEqual(args[1].device_index, 2) self.testcase.assertEqual(args[1].device_type, 3) - t = torch.tensor(5.) + t = torch.tensor(5.0) s = torch.Stream(stream_id=1, device_index=2, device_type=3) with TestMode(self): t.record_stream(s) @@ -1462,14 +1622,16 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p with _scoped_library("test_return_stream", "DEF") as l_def: l_def.define("return_stream(Tensor self) -> Stream") with _scoped_library("test_return_stream", "IMPL", "CPU") as l_impl: - l_impl.impl("return_stream", - lambda _: torch.Stream(stream_id=0, device_index=1, device_type=2)) + l_impl.impl( + "return_stream", + lambda _: torch.Stream(stream_id=0, device_index=1, device_type=2), + ) class TestMode(TorchDispatchMode): def __torch_dispatch__(self, func, types, args=(), kwargs=None): return torch.Stream(stream_id=1, device_index=2, device_type=3) - t = torch.tensor(5.) + t = torch.tensor(5.0) s = torch.ops.test_return_stream.return_stream(t) self.assertIsInstance(s, torch.Stream) self.assertEqual(s.stream_id, 0) @@ -1487,12 +1649,14 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class NonWrapperSubclass(torch.Tensor): elem: torch.Tensor - __slots__ = ['elem'] + __slots__ = ["elem"] @staticmethod def __new__(cls, elem, *args, **kwargs): # Wrong device here! - r = torch.Tensor._make_subclass(cls, elem.to("meta"), elem.requires_grad) + r = torch.Tensor._make_subclass( + cls, elem.to("meta"), elem.requires_grad + ) # ...the real tensor is held as an element on the tensor. 
r.elem = elem return r @@ -1505,8 +1669,12 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p def wrap(e): return NonWrapperSubclass(e) if isinstance(e, torch.Tensor) else e - rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs))) - logging.getLogger("NonWrapperSubclass").info(f"{func.__module__}.{func.__name__}", args, kwargs, rs) + rs = tree_map( + wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs)) + ) + logging.getLogger("NonWrapperSubclass").info( + f"{func.__module__}.{func.__name__}", args, kwargs, rs + ) return rs x = NonWrapperSubclass(torch.tensor([3.0, 4.0], requires_grad=True)) @@ -1524,9 +1692,12 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p @staticmethod def __new__(cls, elem, *args, **kwargs): r = torch.Tensor._make_wrapper_subclass( - cls, elem.size(), - dtype=elem.dtype, layout=elem.layout, - device=elem.device, requires_grad=elem.requires_grad + cls, + elem.size(), + dtype=elem.dtype, + layout=elem.layout, + device=elem.device, + requires_grad=elem.requires_grad, ) r.elem = elem return r @@ -1539,7 +1710,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p def wrap(e): return SubclassWithNone(e) if isinstance(e, torch.Tensor) else e - rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs))) + rs = tree_map( + wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs)) + ) if func.overloadpacket.__name__ == "add": return None else: @@ -1672,6 +1845,7 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p def test_construct_int_tensor(self): class SubTensor(torch.Tensor): pass + # should not fail SubTensor(torch.zeros(2, dtype=torch.int)) @@ -1729,10 +1903,13 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p not_contiguous_data = torch.as_strided(data.clone(), (2, 2), (1, 2)) for use_wrapper_subclass in [True, False]: + class ExampleTensor1(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="strides" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1741,7 +1918,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class ExampleTensor2(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="strides" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1752,7 +1931,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class ExampleTensor3(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="strides" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1783,7 +1964,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class ExampleTensor(torch.Tensor): @staticmethod def __new__(cls, data): - return TestPythonDispatch.subclass_helper(cls, data, False, 
dispatch_sizes_strides_policy="strides") + return TestPythonDispatch.subclass_helper( + cls, data, False, dispatch_sizes_strides_policy="strides" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1792,7 +1975,7 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p torch.ops.aten.is_contiguous.memory_format, torch.ops.aten.is_strides_like_format.default, torch.ops.aten.is_non_overlapping_and_dense.default, - torch.ops.aten.stride.default + torch.ops.aten.stride.default, ]: calls.append((func, list(args)[1:])) return None @@ -1801,20 +1984,32 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p e = ExampleTensor(torch.randn(2, 2)) self.assertFalse(e.is_contiguous(memory_format=torch.channels_last)) - self.assertEqual(calls, [(torch.ops.aten.is_contiguous.memory_format, [torch.channels_last])]) + self.assertEqual( + calls, [(torch.ops.aten.is_contiguous.memory_format, [torch.channels_last])] + ) calls.clear() - self.assertFalse(torch.ops.aten.is_strides_like_format.default(e, torch.channels_last)) - self.assertEqual(calls, [(torch.ops.aten.is_strides_like_format.default, [torch.channels_last])]) + self.assertFalse( + torch.ops.aten.is_strides_like_format.default(e, torch.channels_last) + ) + self.assertEqual( + calls, + [(torch.ops.aten.is_strides_like_format.default, [torch.channels_last])], + ) calls.clear() self.assertTrue(torch.ops.aten.is_non_overlapping_and_dense.default(e)) - self.assertEqual(calls, [(torch.ops.aten.is_non_overlapping_and_dense.default, [])]) + self.assertEqual( + calls, [(torch.ops.aten.is_non_overlapping_and_dense.default, [])] + ) def test_device_slowpath(self): for use_wrapper_subclass in [True]: + class ExampleTensor1(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_device=True) + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_device=True + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1823,23 +2018,27 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class ExampleTensor2(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_device=True) + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_device=True + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.prim.device: - return torch.device('meta') + return torch.device("meta") return NotImplemented class ExampleTensor3(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_device=True) + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_device=True + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if func.overloadpacket == torch.ops.prim.device: - return torch.device('meta') + return torch.device("meta") return NotImplemented err_msg = "Multiple dispatch failed for 'torch.ops.prim.device'" @@ -1848,22 +2047,25 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p e.device() ten = torch.rand([1]) - e = ExampleTensor2(torch.randn(3, 3, device='cpu'), use_wrapper_subclass) - self.assertEqual(e.device.type, 'meta') - self.assertEqual(ten.type_as(e).device.type, 'meta') + e = ExampleTensor2(torch.randn(3, 3, device="cpu"), use_wrapper_subclass) + 
self.assertEqual(e.device.type, "meta") + self.assertEqual(ten.type_as(e).device.type, "meta") - e = ExampleTensor3(torch.randn(3, 3, device='cpu'), use_wrapper_subclass) - self.assertEqual(e.device.type, 'meta') - self.assertEqual(ten.type_as(e).device.type, 'meta') + e = ExampleTensor3(torch.randn(3, 3, device="cpu"), use_wrapper_subclass) + self.assertEqual(e.device.type, "meta") + self.assertEqual(ten.type_as(e).device.type, "meta") def test_dim_slowpath(self): data = torch.randn(3, 3) for use_wrapper_subclass in [True, False]: + class DimNotImplementedTensor(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="sizes" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1872,7 +2074,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class DimImplementedTensor(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="sizes" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1893,6 +2097,7 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p @classmethod def __torch_function__(cls, *args, **kwargs): pass + a = torch.rand(3) a[[T(), T()]] @@ -1906,17 +2111,22 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p @staticmethod def __new__(cls, *args, **kwargs): r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] - cls, (0,), dispatch_sizes_strides_policy="sizes") + cls, (0,), dispatch_sizes_strides_policy="sizes" + ) return r @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): if func in ( torch.ops.aten.sym_size.default, - torch.ops.aten.sym_stride.default + torch.ops.aten.sym_stride.default, ): from torch._dynamo.source import ConstantSource - from torch.fx.experimental.symbolic_shapes import ShapeEnv, DimDynamic + from torch.fx.experimental.symbolic_shapes import ( + DimDynamic, + ShapeEnv, + ) + shape_env = ShapeEnv() si = shape_env.create_symintnode( shape_env.create_symbol( @@ -1925,7 +2135,7 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p dynamic_dim=DimDynamic.DUCK, constraint_dim=None, ), - hint=123 + hint=123, ) return (si,) @@ -1937,10 +2147,13 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p def test_strides_slow_path(self): for use_wrapper_subclass in [True, False]: + class StridesNotImplemented(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="strides" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1949,7 +2162,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class StridesCustomReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, 
dispatch_sizes_strides_policy="strides" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1960,7 +2175,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class StridesDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="strides") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="strides" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1986,7 +2203,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class SizesNotImplemented(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="sizes" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -1997,7 +2216,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class SizesCustomReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="sizes" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -2010,7 +2231,9 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p class SizesDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="sizes" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -2071,12 +2294,16 @@ $0: f32[] = torch._ops.aten.empty.memory_format([], device=device(type='cpu'), p def trace_fn(x): x_wrapper = CustomSizeDynamicShapesTensor(x) return x_wrapper.size(), x_wrapper.stride() + fx_g = make_fx(trace_fn, tracing_mode="symbolic")(x) - self.assertExpectedInline(fx_g.code.strip(), """\ + self.assertExpectedInline( + fx_g.code.strip(), + """\ def forward(self, x_1): sym_size_int = torch.ops.aten.sym_size.int(x_1, 0) sym_size_int_1 = torch.ops.aten.sym_size.int(x_1, 1); x_1 = None - return ((sym_size_int, sym_size_int_1), (sym_size_int, sym_size_int_1))""") + return ((sym_size_int, sym_size_int_1), (sym_size_int, sym_size_int_1))""", + ) def test_data_ptr_respects_numel_slow_path(self): data = torch.randn(6, 2) @@ -2084,7 +2311,9 @@ def forward(self, x_1): class NumelDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_sizes_strides_policy="sizes") + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_sizes_strides_policy="sizes" + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -2108,7 +2337,9 @@ def forward(self, x_1): class LayoutNotImplemented(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_layout=True) + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_layout=True + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ 
-2117,7 +2348,9 @@ def forward(self, x_1): class LayoutCustomReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_layout=True) + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_layout=True + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -2128,7 +2361,9 @@ def forward(self, x_1): class LayoutDefaultReturn(torch.Tensor): @staticmethod def __new__(cls, data, wrapper): - return TestPythonDispatch.subclass_helper(cls, data, wrapper, dispatch_layout=True) + return TestPythonDispatch.subclass_helper( + cls, data, wrapper, dispatch_layout=True + ) @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): @@ -2147,6 +2382,7 @@ def forward(self, x_1): e = LayoutDefaultReturn(torch.randn(4, 2), use_wrapper_subclass) self.assertEqual(e.layout, torch.strided) + class TestPythonDispatcher(TestCase): def test_basic(self): x = torch.randn(2, requires_grad=True) @@ -2161,8 +2397,8 @@ class TestPythonDispatcher(TestCase): python_disp_shape = torch.linalg.lstsq(a, b).solution.shape self.assertEqual(expected_shape, python_disp_shape) -class TestWrapperSubclassAliasing(TestCase): +class TestWrapperSubclassAliasing(TestCase): def _test_wrapper_subclass_aliasing(self, op, args, kwargs): def to_subclass(t: torch.Tensor): return TwoTensor(t, t.clone()) @@ -2175,16 +2411,24 @@ class TestWrapperSubclassAliasing(TestCase): result_test = op(*args_subclass, **kwargs_subclass) args_ref_flat = pytree.arg_tree_leaves(*args, **kwargs) - args_ref_flat_tensors = [x for x in args_ref_flat if isinstance(x, torch.Tensor)] + args_ref_flat_tensors = [ + x for x in args_ref_flat if isinstance(x, torch.Tensor) + ] args_test_flat = pytree.tree_leaves((args_subclass, kwargs_subclass)) - args_test_flat_tensors = [x for x in args_test_flat if isinstance(x, torch.Tensor)] + args_test_flat_tensors = [ + x for x in args_test_flat if isinstance(x, torch.Tensor) + ] result_ref_flat = pytree.tree_leaves(result_ref) - result_ref_flat_tensors = [x for x in result_ref_flat if isinstance(x, torch.Tensor)] + result_ref_flat_tensors = [ + x for x in result_ref_flat if isinstance(x, torch.Tensor) + ] result_test_flat = pytree.tree_leaves(result_test) - result_test_flat_tensors = [x for x in result_test_flat if isinstance(x, torch.Tensor)] + result_test_flat_tensors = [ + x for x in result_test_flat if isinstance(x, torch.Tensor) + ] for o_ref, o_test in zip(result_ref_flat_tensors, result_test_flat_tensors): for a_ref, a_test in zip(args_ref_flat_tensors, args_test_flat_tensors): @@ -2192,26 +2436,42 @@ class TestWrapperSubclassAliasing(TestCase): if out_is_inpt: self.assertTrue(o_test is a_test) - out_aliases_inpt = StorageWeakRef(o_ref.untyped_storage()) == StorageWeakRef(a_ref.untyped_storage()) + out_aliases_inpt = StorageWeakRef( + o_ref.untyped_storage() + ) == StorageWeakRef(a_ref.untyped_storage()) if out_aliases_inpt: - self.assertTrue(StorageWeakRef(o_test.untyped_storage()) == StorageWeakRef(a_test.untyped_storage())) + self.assertTrue( + StorageWeakRef(o_test.untyped_storage()) + == StorageWeakRef(a_test.untyped_storage()) + ) else: - self.assertFalse(StorageWeakRef(o_test.untyped_storage()) == StorageWeakRef(a_test.untyped_storage())) + self.assertFalse( + StorageWeakRef(o_test.untyped_storage()) + == StorageWeakRef(a_test.untyped_storage()) + ) # This tests the correctness of `torch.utils._python_dispatch.return_and_correct_aliasing`, # a util for wrapper subclasses to promise 
correct aliasing behavior. # It's probably overkill to test every OpInfo, # so I picked a sampling of ops with representative schemas. - @ops([op for op in op_db if op.name in [ - 'mul', # out-of-place - 'cat', # out-of-place (TensorList input) - 'index', # out-of-place (Optional TensorList input) - 'mul_', # inplace - 'view', # view - 't_', # inplace-view - 'split', # view (multi-return) - 'native_batch_norm', # mutable op (returns outputs and mutates some inputs) - ]], allowed_dtypes=(torch.float,)) + @ops( + [ + op + for op in op_db + if op.name + in [ + "mul", # out-of-place + "cat", # out-of-place (TensorList input) + "index", # out-of-place (Optional TensorList input) + "mul_", # inplace + "view", # view + "t_", # inplace-view + "split", # view (multi-return) + "native_batch_norm", # mutable op (returns outputs and mutates some inputs) + ] + ], + allowed_dtypes=(torch.float,), + ) def test_wrapper_subclass_aliasing(self, device, dtype, op): samples = op.sample_inputs(device, dtype) sample = first_sample(self, samples) @@ -2235,15 +2495,18 @@ class TestWrapperSubclassAliasing(TestCase): # Make sure that _return_and_correct_aliasing can handle this case # (I'm using inference_mode to make sure conv2d doesn't decompose and goes to torch_dispatch) with torch.inference_mode(): - self._test_wrapper_subclass_aliasing(torch.ops.aten.conv2d.default, args, kwargs) + self._test_wrapper_subclass_aliasing( + torch.ops.aten.conv2d.default, args, kwargs + ) def test_wrapper_subclass_aliasing_out_op(self, device): # Make sure that _return_and_correct_aliasing can handle kwargs w mutable tensors args = (torch.ones(4), torch.ones(4)) - kwargs = {'out': torch.empty(4)} + kwargs = {"out": torch.empty(4)} self._test_wrapper_subclass_aliasing(torch.ops.aten.add.out, args, kwargs) + instantiate_device_type_tests(TestWrapperSubclassAliasing, globals()) -if __name__ == '__main__': +if __name__ == "__main__": run_tests()
2.41.0
bde4efa842957ef304aac44e72c6782213a1fea
Fri, 19 Apr 2024 01:54:26 +0000
[PATCH 0352/1000] Fix broken test in `test_aot_inductor.py` (#124329)
This test doesn't seem to run in upstream CI due to its sm90 requirement, but it is failing on our end because of the extra positional argument in the model's forward signature. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124329 Approved by: https://github.com/chenyang78
diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py index f5e38e48de..4c89d978fd 100644 --- a/test/inductor/test_aot_inductor.py +++ b/test/inductor/test_aot_inductor.py @@ -2375,7 +2375,7 @@ class AOTInductorTestsTemplate: def __init__(self): super().__init__() - def forward(self, x0, x1, x2, x3, x4): + def forward(self, x0, x1, x2, x3): t = ( x0.to(torch.float) + x1.to(torch.float)
2.41.0
620c3e814ee85f252d62b2ca07090a2b7253131
Thu, 18 Apr 2024 11:40:41 -0700
[PATCH 0353/1000] Optimized templated attention to use exp2 (#124356)
Performance relative to FA2 improves from 0.705 to 0.860 after this change. Before: <img width="1270" alt="image" src="https://github.com/pytorch/pytorch/assets/6355099/d58f57ba-e50e-44ea-8a8a-4f13b8650adf"> After: <img width="1277" alt="image" src="https://github.com/pytorch/pytorch/assets/6355099/f1945b67-0cfc-463c-a2f6-5812b90677fe"> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124356 Approved by: https://github.com/drisspg
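As an aside (not part of this patch), the reason the kernel can switch from `exp` to `exp2` is the change of base `exp(x) = 2^(x * log2(e))`; the hard-coded `1.44269504` in the diff is `log2(e)`. Because the online softmax only takes ratios of these exponentials, replacing every `exp` with `exp2` on rescaled inputs leaves the attention weights unchanged while using the cheaper `exp2` path. A small sanity check of the identity:

```python
# Quick numerical check of the exp -> exp2 change of base used in the kernel.
# Uses only the standard library; 1.44269504 is log2(e) as written in the diff.
import math

LOG2_E = 1.44269504
for x in (-3.7, 0.0, 2.5):
    assert math.isclose(math.exp(x), 2.0 ** (x * LOG2_E), rel_tol=1e-6)
print("exp(x) == exp2(x * log2(e)) holds to float precision")
```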
diff --git a/benchmarks/transformer/score_mod.py b/benchmarks/transformer/score_mod.py index c2fe082f63..067f18f8e3 100644 --- a/benchmarks/transformer/score_mod.py +++ b/benchmarks/transformer/score_mod.py @@ -6,9 +6,9 @@ from typing import Callable, List import numpy as np import torch -import torch.utils.benchmark as benchmark +import torch.nn.functional as F from tabulate import tabulate -from torch.nn.attention._templated_attention import _compose, _templated_attention +from torch.nn.attention._templated_attention import _templated_attention from tqdm import tqdm torch._dynamo.config.automatic_dynamic_shapes = False @@ -16,15 +16,14 @@ torch._dynamo.config.automatic_dynamic_shapes = False torch._dynamo.config.cache_size_limit = 1000 +from triton.testing import do_bench + + def benchmark_torch_function_in_microseconds(func: Callable, *args, **kwargs) -> float: # warmup for _ in range(5): func(*args, **kwargs) - t0 = benchmark.Timer( - stmt="func(*args, **kwargs)", - globals={"args": args, "kwargs": kwargs, "func": func}, - ) - return t0.adaptive_autorange(min_run_time=0.1).median * 1e6 + return do_bench(lambda: func(*args, **kwargs)) * 1e3 @dataclass(frozen=True) @@ -110,8 +109,11 @@ def run_single_experiment(config: ExperimentConfig) -> ExperimentResults: config.dtype, device, ) - eager_sdpa = _templated_attention - compiled_sdpa = torch.compile(eager_sdpa) + + def eager_sdpa(query, key, value, _): + return F.scaled_dot_product_attention(query, key, value) + + compiled_sdpa = torch.compile(_templated_attention) score_mod = config.score_mod @@ -190,6 +192,9 @@ def print_results(results: List[Experiment]): def generate_score_mods() -> List[Callable]: + def noop(score, b, h, m, n): + return score + def causal_mask(score, b, h, token_q, token_kv): return torch.where(token_q >= token_kv, score, float("-inf")) @@ -199,14 +204,7 @@ def generate_score_mods() -> List[Callable]: def head_bias(score, b, h, m, n): return score + 2 * h - def pathological(score, b, h, m, n): - def sin(score, b, h, m, n): - return torch.sin(score) - - composed_mod = _compose(*(sin for _ in range(10))) - return composed_mod(score, b, h, m, n) - - return [causal_mask, relative_bias, head_bias, pathological] + return [noop, causal_mask, relative_bias, head_bias] def generate_experiment_configs() -> List[ExperimentConfig]: diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py index b374d6d77a..52c923e353 100644 --- a/test/inductor/test_templated_attention.py +++ b/test/inductor/test_templated_attention.py @@ -20,6 +20,7 @@ supported_platform = skipUnless( ) Tolerances = namedtuple("Tolerances", ["atol", "rtol"]) +torch.set_float32_matmul_precision("high") def create_attention(score_mod): @@ -49,18 +50,23 @@ class TestTemplatedSDPA(InductorTestCase): q = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda") k = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda") v = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda") - ref_out = sdpa_partial( + golden_out = sdpa_partial( q.to(torch.float64), k.to(torch.float64), v.to(torch.float64) ) + ref_out = sdpa_partial(q, k, v) compiled_out = compiled_sdpa(q, k, v) - tolerance = Tolerances(atol=2e-2, rtol=2e-2) - torch.testing.assert_close( - ref_out.to(dtype=torch.float32), - compiled_out.to(dtype=torch.float32), - atol=tolerance.atol, - rtol=tolerance.rtol, - ) + compiled_error = (golden_out - compiled_out).abs().mean() + ref_error = (golden_out - ref_out).abs().mean() + # Note, it seems like we really are less accurate 
than the float32 + # computation, likely due to the online softmax + if dtype == torch.float32: + fudge_factor = 4.0 + else: + fudge_factor = 1.1 + if compiled_error > ref_error * fudge_factor: + msg = f"Compiled error {compiled_error} is greater than ref error {ref_error} by more than 10%." + self.assertTrue(False, msg) @supported_platform @common_utils.parametrize("dtype", test_dtypes) @@ -102,6 +108,14 @@ class TestTemplatedSDPA(InductorTestCase): self.run_test(score_mod, dtype) + @supported_platform + @common_utils.parametrize("dtype", test_dtypes) + def test_skip_odd_keys(self, dtype: torch.dtype): + def score_mod(score, b, h, q, kv): + return torch.where(kv % 2 == 0, score, float("-inf")) + + self.run_test(score_mod, dtype) + @supported_platform @common_utils.parametrize("dtype", test_dtypes) def test_alibi_causal(self, dtype: torch.dtype): diff --git a/torch/_inductor/kernel/templated_attention.py b/torch/_inductor/kernel/templated_attention.py index f068091406..6153c63a0f 100644 --- a/torch/_inductor/kernel/templated_attention.py +++ b/torch/_inductor/kernel/templated_attention.py @@ -1,8 +1,10 @@ """ Triton Implementation of the Templated SDPA Kernel""" import logging +from typing import Any, List import torch -from ..select_algorithm import TritonTemplate +from ..lowering import lowerings, register_lowering +from ..select_algorithm import autotune_select_algorithm, TritonTemplate log = logging.getLogger(__name__) aten = torch.ops.aten @@ -28,6 +30,13 @@ sdpa_template = TritonTemplate( # Q: Query, K: Key, V: Value # M: Number of queries, N: Number of keys/values, D: Model dimension # z: Batch size, h: Number of heads, m: Number of queries per head, k: Number of keys per head + # (Modifiable) Config options: + # BLOCK_M + # BLOCK_N + # SCORE_MOD_IS_LINEAR: Is the score modifier linear? If so, we can lift the + # change of base out of the loop + # ROWS_GUARANTEED_SAFE: Is it guaranteed that at least one value in each row + # is not masked out? 
If so, we can skip an extra safety check # Define Q Strides stride_qz = {{stride("Q", 0)}} @@ -49,10 +58,8 @@ sdpa_template = TritonTemplate( H = {{size("Q", 1)}} N_CTX = {{size("Q", 2)}} - # TODO I think we should do some performance work - # to find the optimal calls for perf/accuracy to tl.dot qk_scale = 1.0 - MATMUL_PRECISION = tl.float16 + MATMUL_PRECISION = Q.dtype.element_ty start_m = tl.program_id(0) off_hz = tl.program_id(1) @@ -89,12 +96,10 @@ sdpa_template = TritonTemplate( m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") l_i = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) - # scale sm_scale by log_2(e) and use - # 2^x instead of exp in the loop because CSE and LICM - # don't work as expected with `exp` in the loop - # TODO fix me - # qk_scale = sm_scale * 1.44269504 + q = tl.load(Q_block_ptr) + if SCORE_MOD_IS_LINEAR: + qk_scale *= 1.44269504 q = (q * qk_scale).to(MATMUL_PRECISION) # loop over k, v and update accumulator lo = 0 @@ -106,9 +111,8 @@ sdpa_template = TritonTemplate( v = tl.load(V_block_ptr) # -- compute qk --- qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - qk += tl.dot(q, k.to(MATMUL_PRECISION)) + qk = tl.dot(q, k.to(MATMUL_PRECISION), acc=qk) # ~~~~~~~~~~~~~~~~~~~ Apply score modification ~~~~~~~~~~~~~~~~~~~ - {{ modification( score="qk", b="off_hz // H", @@ -117,6 +121,9 @@ sdpa_template = TritonTemplate( n="start_n + offs_n[None, :]", out="qk" ) | indent_except_first(2) }} + # TODO: In the case that score_mod is linear, this can be LICMed + if not SCORE_MOD_IS_LINEAR: + qk *= 1.44269504 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # -- compute scaling constant --- @@ -124,18 +131,16 @@ sdpa_template = TritonTemplate( m_i_new = tl.maximum(m_i, row_max) masked_out_rows = (m_i_new == float("-inf")) - # TODO FIX ME and use 2^x instead of exp - # alpha = tl.math.exp2(m_i - m_i_new) - # p = tl.math.exp2(qk - m_i_new[:, None]) - alpha = tl.math.exp(m_i - m_i_new) - alpha = tl.where(masked_out_rows, 0, alpha) - p = tl.math.exp(qk - m_i_new[:, None]) - p = tl.where(masked_out_rows[:, None], 0, p) + alpha = tl.math.exp2(m_i - m_i_new) + p = tl.math.exp2(qk - m_i_new[:, None]) + if not ROWS_GUARANTEED_SAFE: + alpha = tl.where(masked_out_rows, 0, alpha) + p = tl.where(masked_out_rows[:, None], 0, p) # -- scale and update acc -- acc_scale = l_i * 0 + alpha # workaround some compiler bug acc *= acc_scale[:, None] - acc += tl.dot(p.to(MATMUL_PRECISION), v.to(MATMUL_PRECISION)) + acc = tl.dot(p.to(MATMUL_PRECISION), v.to(MATMUL_PRECISION), acc) # -- update m_i and l_i -- l_i = l_i * alpha + tl.sum(p, 1) @@ -159,3 +164,125 @@ sdpa_template = TritonTemplate( {{store_output(("idx_z", "idx_h", "idx_m", "idx_d"), "acc")}} """, ) + + +@register_lowering(torch.ops.higher_order.templated_attention) +def templated_attention(*args, **kwargs): + from torch._prims_common import make_contiguous_strides_for + from ..ir import ( + ComputedBuffer, + FixedLayout, + FlexibleLayout, + InputBuffer, + StorageBox, + TensorBox, + ) + + query, key, value, subgraph = args + + def create_placeholder(name: str, dtype: torch.dtype) -> InputBuffer: + return TensorBox.create( + InputBuffer( + name, + FixedLayout( + query.get_device(), + dtype, + [ + 1, + ], + [ + 1, + ], + ), + ) + ) + + scalar_inps = ["score", "b", "h", "m", "n"] + env = {} + cnt = 0 + placeholder_inps = [ + create_placeholder(name, dtype) + for name, dtype in [ + ("score", query.get_dtype()), + ("b", torch.int64), + ("h", torch.int64), + ("m", 
torch.int64), + ("n", torch.int64), + ] + ] + for node in subgraph.graph_module.graph.nodes: + # There are two classes of placeholder inpts that we need + # to handle differently. For the first n_scalar_inps inputs + # we expect that these placeholders were generated by the make_fx call + # in the templated Attention HOP. So we need to create a new placeholder + # TensorBox for each of these inputs. For the rest of the inputs we + # expect that these are lifted inputs that fill up the '*other_buffers' + # tuple and already have corresponding TensorBoxes passed in as args. + if node.op == "placeholder": + is_lifted_input = cnt >= len(scalar_inps) + env[node] = args[cnt - 1] if is_lifted_input else placeholder_inps[cnt] + cnt += 1 + elif node.op == "call_function": + # For call_function we use the defulat lowerings and pass in the + # already created TensorBoxes as args + from torch.utils._pytree import tree_map + + env[node] = lowerings[node.target]( + *tree_map(lambda x: env[x] if x in env else x, node.args) + ) + elif node.op == "output": + # For the output node we need to create a ComputedBuffer + # which represents the actual score modification + + output_buffer = env[node.args[0]] + assert isinstance(output_buffer.data, StorageBox), ( + "The output node for the templated attention subgraph must be a StorageBox, but got: ", + type(output_buffer), + ) + # Create the ComputedBuffere directly that will be inlined into the modfication block + subgraph_buffer = ComputedBuffer( + name=None, + layout=FlexibleLayout( + device=output_buffer.data.get_device(), + dtype=output_buffer.data.get_dtype(), + size=output_buffer.data.get_size(), + ), + data=output_buffer.data.data, # type: ignore[arg-type] + ) + + layout = FixedLayout( + output_buffer.get_device(), + query.get_dtype(), + query.get_size(), + make_contiguous_strides_for(query.get_size()), + ) + choices: List[Any] = [] + configs: List[Any] = [] + if query.get_dtype() == torch.float32: + configs.append((64, 64, 4, 3)) + configs += [ + (128, 64, 4, 3), + (128, 128, 4, 3), + (128, 128, 8, 2), + (64, 128, 4, 3), + ] + + for BLOCK_M, BLOCK_N, num_warps, num_stages in configs: + sdpa_template.maybe_append_choice( + choices=choices, + input_nodes=(query, key, value), + layout=layout, + subgraphs=subgraph_buffer, + num_stages=num_stages, + num_warps=num_warps, + BLOCK_M=BLOCK_M, + BLOCK_N=BLOCK_N, + BLOCK_DMODEL=query.get_size()[-1], + # For now, we always assume the "sound" option + SCORE_MOD_IS_LINEAR=False, + ROWS_GUARANTEED_SAFE=False, + ) + return autotune_select_algorithm( + "sdpa", choices, [query, key, value], layout + ) + raise ValueError("TemplatedAttention was passed a subgraph with no output node!") diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py index b44ee2805b..57d48f1f4c 100644 --- a/torch/_inductor/lowering.py +++ b/torch/_inductor/lowering.py @@ -5623,123 +5623,6 @@ def while_loop(cond_fn, body_fn, carried_inputs, additional_inputs): return list(map(TensorBox.create, result)) -@register_lowering(torch.ops.higher_order.templated_attention) -def templated_attention(*args, **kwargs): - from torch._prims_common import make_contiguous_strides_for - from .ir import ( - ComputedBuffer, - FixedLayout, - FlexibleLayout, - InputBuffer, - StorageBox, - TensorBox, - ) - - query, key, value, subgraph = args - - def create_placeholder(name: str, dtype: torch.dtype) -> InputBuffer: - return TensorBox.create( - InputBuffer( - name, - FixedLayout( - query.get_device(), - dtype, - [ - 1, - ], - [ - 1, - ], - ), - ) - ) - - 
scalar_inps = ["score", "b", "h", "m", "n"] - env = {} - cnt = 0 - placeholder_inps = [ - create_placeholder(name, dtype) - for name, dtype in [ - ("score", query.get_dtype()), - ("b", torch.int64), - ("h", torch.int64), - ("m", torch.int64), - ("n", torch.int64), - ] - ] - for node in subgraph.graph_module.graph.nodes: - # There are two classes of placeholder inpts that we need - # to handle differently. For the first n_scalar_inps inputs - # we expect that these placeholders were generated by the make_fx call - # in the templated Attention HOP. So we need to create a new placeholder - # TensorBox for each of these inputs. For the rest of the inputs we - # expect that these are lifted inputs that fill up the '*other_buffers' - # tuple and already have corresponding TensorBoxes passed in as args. - if node.op == "placeholder": - is_lifted_input = cnt >= len(scalar_inps) - env[node] = args[cnt - 1] if is_lifted_input else placeholder_inps[cnt] - cnt += 1 - elif node.op == "call_function": - # For call_function we use the defulat lowerings and pass in the - # already created TensorBoxes as args - from torch.utils._pytree import tree_map - - env[node] = lowerings[node.target]( - *tree_map(lambda x: env[x] if x in env else x, node.args) - ) - elif node.op == "output": - # For the output node we need to create a ComputedBuffer - # which represents the actual score modification - - output_buffer = env[node.args[0]] - assert isinstance(output_buffer.data, StorageBox), ( - "The output node for the templated attention subgraph must be a StorageBox, but got: ", - type(output_buffer), - ) - # Create the ComputedBuffere directly that will be inlined into the modfication block - subgraph_buffer = ComputedBuffer( - name=None, - layout=FlexibleLayout( - device=output_buffer.data.get_device(), - dtype=output_buffer.data.get_dtype(), - size=output_buffer.data.get_size(), - ), - data=output_buffer.data.data, # type: ignore[arg-type] - ) - from .kernel.templated_attention import sdpa_template - - layout = FixedLayout( - output_buffer.get_device(), - query.get_dtype(), - query.get_size(), - make_contiguous_strides_for(query.get_size()), - ) - choices: List[Any] = [] - from .select_algorithm import autotune_select_algorithm - - for BLOCK_M, BLOCK_N, num_warps, num_stages in [ - (128, 64, 4, 3), - (128, 128, 4, 3), - (128, 128, 8, 2), - (64, 128, 4, 3), - ]: - sdpa_template.maybe_append_choice( - choices=choices, - input_nodes=(query, key, value), - layout=layout, - subgraphs=subgraph_buffer, - num_stages=num_stages, - num_warps=num_warps, - BLOCK_M=BLOCK_M, - BLOCK_N=BLOCK_N, - BLOCK_DMODEL=query.get_size()[-1], - ) - return autotune_select_algorithm( - "sdpa", choices, [query, key, value], layout - ) - raise ValueError("TemplatedAttention was passed a subgraph with no output node!") - - @register_lowering(torch.ops.prims._sink_tokens.default) def _sink_tokens(tokens): return None diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/triton_heuristics.py index b8dcbf1fcb..1371608f62 100644 --- a/torch/_inductor/triton_heuristics.py +++ b/torch/_inductor/triton_heuristics.py @@ -206,7 +206,13 @@ class CachingAutotuner(KernelInterface): compiled_binary, launcher = self._precompile_config( c, warm_cache_only_with_cc ) - except OutOfResources: + except OutOfResources as e: + if len(self.configs) == 1: + raise RuntimeError( + f"Failed to compile triton config: {c}. " + f"Report a fatal compilation error. " + f"{e}" + ) # Skip the config if we run out of resource continue self.launchers.append(launcher)
2.41.0
89e3eeed319ecbceded1cb9b007d2670d80a4ad
Thu, 18 Apr 2024 10:45:17 -0700
[PATCH 0354/1000] Avoid cuda init to FakeTensorMode (#124413)
Also partially fixes #122109

This PR:
- We add a C++ flag (only_lift_cpu_tensors) to toggle the torch.tensor(1, device='cuda') ctor strategy. When false (default), it does the current PyTorch behavior of unconditionally constructing a concrete CUDA tensor then calling lift_fresh on it. When true, we instead construct a concrete CPU tensor, call lift_fresh, and then call Tensor.to(device) (under any ambient modes).
- FakeTensorMode flips this flag depending on if CUDA is available or not. We don't unconditionally set the flag to True because that is likely BC-breaking.

Test Plan:
- existing tests

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124413
Approved by: https://github.com/eellison
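A minimal usage sketch of the new behavior (illustrative, not part of the patch; it assumes a machine without CUDA, which is exactly when FakeTensorMode flips `only_lift_cpu_tensors` on): constructing a CUDA tensor from data inside the mode now yields a fake tensor without ever touching the CUDA runtime.

```python
import torch
from torch._subclasses.fake_tensor import FakeTensorMode

with FakeTensorMode():
    # The ctor builds a CPU tensor, lifts it, then moves it to "cuda" under the
    # ambient fake mode, so no concrete CUDA tensor is materialized.
    t = torch.tensor([3.14, 2.0], device="cuda")
    print(type(t).__name__, t.device)  # expected: FakeTensor cuda:0

# On a CUDA-less machine this stays False, since device init was avoided.
assert not torch.cuda.is_initialized()
```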
diff --git a/test/test_fake_tensor.py b/test/test_fake_tensor.py index bc820153d8..1ffb0a6cb3 100644 --- a/test/test_fake_tensor.py +++ b/test/test_fake_tensor.py @@ -1167,6 +1167,8 @@ class FakeTensorOperatorInvariants(TestCase): torch.ones(10, device='cuda') torch.zeros(10, device='cuda') torch.rand(10, device='cuda') + torch.tensor(3.14, device='cuda') + torch.tensor([[3.14, 2], [1, 2]], device='cuda') @skipIfRocm @unittest.skipIf(not RUN_CUDA, "requires cuda") diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py index ef6ab4c637..7b0835b813 100644 --- a/torch/_subclasses/fake_tensor.py +++ b/torch/_subclasses/fake_tensor.py @@ -860,6 +860,15 @@ class FakeTensorMode(TorchDispatchMode): def is_our_fake(self, t): return isinstance(t, FakeTensor) and t.fake_mode is self + # If we should avoid device init. This changes the behavior of various APIs: + # - We avoid constant-prop on Tensors with ops that move them to another device + # - We change the torch.tensor ctor contract to never materialize + # tensors on device + # (see NOTE: [torch.tensor, lift_fresh, and device movement]) + @property + def avoid_device_init(self): + return not torch.cuda.is_available() + @count def __torch_dispatch__(self, func, types, args=(), kwargs=None): # FakeTensorMode should not be set when we're inside of it. @@ -874,23 +883,36 @@ class FakeTensorMode(TorchDispatchMode): # No-op if FakeTensorMode is already in use def __enter__(self): + prev_only_lift_cpu_tensors = None + if self.avoid_device_init: + # See NOTE: [torch.tensor, lift_fresh, and device movement] + prev_only_lift_cpu_tensors = torch._C._only_lift_cpu_tensors() + torch._C._set_only_lift_cpu_tensors(True) maybe_prev_fake_mode = torch._C._unset_dispatch_mode(self._mode_key) if self is not maybe_prev_fake_mode: - self.enter_stack.append((True, maybe_prev_fake_mode)) + self.enter_stack.append( + (True, maybe_prev_fake_mode, prev_only_lift_cpu_tensors) + ) return super().__enter__() else: # no-op (still need to re-set the fake mode though since we unset it) torch._C._set_dispatch_mode(self) - self.enter_stack.append((False, None)) + self.enter_stack.append((False, None, prev_only_lift_cpu_tensors)) return self def __exit__(self, a, b, c): - live, maybe_prev_fake_mode = self.enter_stack.pop() + ( + live, + maybe_prev_fake_mode, + maybe_prev_only_lift_cpu_tensors, + ) = self.enter_stack.pop() if live: out = super().__exit__(a, b, c) # Re-enable the previous fake mode, if there was one. if maybe_prev_fake_mode is not None: torch._C._set_dispatch_mode(maybe_prev_fake_mode) + if maybe_prev_only_lift_cpu_tensors is not None: + torch._C._set_only_lift_cpu_tensors(maybe_prev_only_lift_cpu_tensors) @classmethod def cache_info(cls) -> DispatchCacheInfo: @@ -1287,6 +1309,19 @@ class FakeTensorMode(TorchDispatchMode): if type(args[0]) is torch.Tensor: return converter.from_real_tensor(self, args[0]) + # If we are trying to avoid device init, then we need to avoid constant + # prop on constant tensors for ops that change devices. 
+ avoiding_device_init = False + if self.avoid_device_init: + if ( + func == torch.ops.aten._to_copy.default + and "device" in kwargs + and kwargs["device"] != "cpu" + ): + avoiding_device_init = True + if func == torch.ops.prims.device_put.default: + avoiding_device_init = True + # Recompute flat_arg_fake_tensors here again in case some of the inputs # were real tensors and fakified in validate_and_convert_non_fake_tensors (flat_args, flat_arg_fake_tensors) = self.validate_and_convert_non_fake_tensors( @@ -1311,6 +1346,7 @@ class FakeTensorMode(TorchDispatchMode): and all_constant and len(flat_arg_fake_tensors) != 0 and not has_symbolic_sizes + and not avoiding_device_init ): const_flat_args = [maybe_to_constant(a) for a in flat_args] const_args, const_kwargs = pytree.tree_unflatten(const_flat_args, args_spec) diff --git a/torch/csrc/utils/device_lazy_init.cpp b/torch/csrc/utils/device_lazy_init.cpp index c748c7a2d7..d032071412 100644 --- a/torch/csrc/utils/device_lazy_init.cpp +++ b/torch/csrc/utils/device_lazy_init.cpp @@ -12,13 +12,18 @@ std::array<bool, at::COMPILE_TIME_MAX_DEVICE_TYPES> is_initialized{}; } // anonymous namespace +bool is_device_initialized(at::DeviceType device_type) { + pybind11::gil_scoped_acquire g; + return is_initialized[static_cast<int>(device_type)]; +} + void device_lazy_init(at::DeviceType device_type) { pybind11::gil_scoped_acquire g; // Protected by the GIL. We don't use call_once because under ASAN it // has a buggy implementation that deadlocks if an instance throws an // exception. In any case, call_once isn't necessary, because we // have taken a lock. - if (is_initialized[static_cast<int>(device_type)]) { + if (is_device_initialized(device_type)) { return; } diff --git a/torch/csrc/utils/device_lazy_init.h b/torch/csrc/utils/device_lazy_init.h index d55485bcae..b290ae04d7 100644 --- a/torch/csrc/utils/device_lazy_init.h +++ b/torch/csrc/utils/device_lazy_init.h @@ -45,4 +45,6 @@ static inline void maybe_initialize_device(const at::TensorOptions& options) { maybe_initialize_device(device); } +bool is_device_initialized(at::DeviceType device_type); + } // namespace torch::utils diff --git a/torch/csrc/utils/python_dispatch.cpp b/torch/csrc/utils/python_dispatch.cpp index 118528b503..780f37a675 100644 --- a/torch/csrc/utils/python_dispatch.cpp +++ b/torch/csrc/utils/python_dispatch.cpp @@ -16,6 +16,7 @@ #include <torch/csrc/PyInterpreter.h> #include <torch/csrc/autograd/python_variable.h> #include <torch/csrc/jit/python/pybind_utils.h> +#include <torch/csrc/utils/tensor_new.h> #include <c10/util/flat_hash_map.h> #include <pybind11/operators.h> @@ -903,6 +904,9 @@ void initDispatchBindings(PyObject* module) { ->set_warn_deprecated_on_mutable_data_ptr(); }); + m.def("_only_lift_cpu_tensors", &torch::utils::only_lift_cpu_tensors); + m.def("_set_only_lift_cpu_tensors", &torch::utils::set_only_lift_cpu_tensors); + using c10::impl::TorchDispatchModeKey; py::enum_<TorchDispatchModeKey>(m, "_TorchDispatchModeKey") .value("FUNCTIONAL", TorchDispatchModeKey::FUNCTIONAL) diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp index 001dbc5521..e1755b5b36 100644 --- a/torch/csrc/utils/tensor_new.cpp +++ b/torch/csrc/utils/tensor_new.cpp @@ -48,6 +48,8 @@ namespace torch::utils { namespace { const int MAX_DIMS = 128; +thread_local bool kOnlyLiftCPUTensors = false; + TensorOptions build_options( c10::TensorOptions options, at::ScalarType scalar_type, @@ -453,19 +455,35 @@ Tensor internal_new_from_data( // "no observable data dependence". 
In an ideal world, we wouldn't trace // a to() call but I need to think harder about what exactly we should trace // in this case. - tensor = tensor.to( - device, inferred_scalar_type, /*non_blocking=*/false, /*copy=*/false); + if (only_lift_cpu_tensors()) { + tensor = tensor.to( + inferred_scalar_type, /*non_blocking=*/false, /*copy=*/false); + + } else { + tensor = tensor.to( + device, inferred_scalar_type, /*non_blocking=*/false, /*copy=*/false); + } } // torch.jit.trace will continue to trace out `.to()` instead of `.lift()`, // since changing it is BC-breaking. at::tracer::impl::NoTracerDispatchMode tracer_guard; - // lift has no autograd implementation, so we need to make sure we don't try - // to dispatch to it. - // TODO: arguably it should have an autograd implementation that noops - at::AutoDispatchBelowADInplaceOrView guard; - - return at::lift_fresh(tensor); + { + // lift has no autograd implementation, so we need to make sure we don't try + // to dispatch to it. + // TODO: arguably it should have an autograd implementation that noops + at::AutoDispatchBelowADInplaceOrView guard; + tensor = at::lift_fresh(tensor); + } + if (only_lift_cpu_tensors() && device.type() != DeviceType::CPU) { + if (!device.has_index() && + !torch::utils::is_device_initialized(device.type())) { + // Infer device 0 to avoid device init + device = c10::Device(device.type(), 0); + } + tensor = tensor.to(device, /*non_blocking=*/false, /*copy=*/false); + } + return tensor; } Tensor new_from_data_copy( @@ -1788,4 +1806,12 @@ Tensor asarray( return tensor; } +bool only_lift_cpu_tensors() { + return kOnlyLiftCPUTensors; +} + +void set_only_lift_cpu_tensors(bool value) { + kOnlyLiftCPUTensors = value; +} + } // namespace torch::utils diff --git a/torch/csrc/utils/tensor_new.h b/torch/csrc/utils/tensor_new.h index 7048660ec3..a1c34bd448 100644 --- a/torch/csrc/utils/tensor_new.h +++ b/torch/csrc/utils/tensor_new.h @@ -8,6 +8,28 @@ namespace torch { namespace utils { +// NOTE: [torch.tensor, lift_fresh, and device movement] +// +// The `only_lift_cpu_tensors` flag controls what happens on torch.tensor([1, 2, +// 3], device="cuda") (or any non-CPU devices). +// +// If false (default): +// - the data gets moved into a CPU Tensor +// - then, it gets moved to cuda (via .to) +// - finally, we call lift_fresh() on it. +// Steps 1 and 2 happen with all modes disabled. +// +// If true: +// - the data gets moved into a CPU Tensor (with correct dtype) +// - we call lift_fresh() on it +// - finally, we move it to cuda (via .to) +// Step 1 happens with all modes disabled. +// +// `only_lift_cpu_tensors=true` is useful to prevent CUDA initialization under +// FakeTensorMode because it avoids moving concrete data to CUDA. +TORCH_API bool only_lift_cpu_tensors(); +TORCH_API void set_only_lift_cpu_tensors(bool value); + at::Tensor base_tensor_ctor(PyObject* args, PyObject* kwargs); at::Tensor legacy_tensor_ctor( c10::DispatchKey dispatch_key,
2.41.0
90e3e7abb8ad9e1d716b2218147d4c5dfe2049b
Thu, 18 Apr 2024 09:36:51 -0700
[PATCH 0355/1000] Add ability to save TORCH_COMPILE_DEBUG logs for CI failures (#124408)
Summary: The intent is that we can whitelist certain benchmarks to a) enable TORCH_COMPILE_DEBUG=1, and b) save the generated artifacts in test/debug in case of a failure. Via the rules in action.yml, we can then upload test/debug/ to S3 whenever it exists.

I chose to introduce a new directory (test/debug/) rather than using an existing one (e.g., test/test-reports/), because these don't seem like test reports and we can later add other debug-related artifacts if we find it useful. For example, we might want to later explore including the inductor cache artifacts.

Test Plan: See artifacts generated when I force a failure: https://hud.pytorch.org/pr/124234
Specifically: https://gha-artifacts.s3.amazonaws.com/pytorch/pytorch/8729891826/1/artifact/debug-test-inductor_torchbench-2-2-linux.g5.4xlarge.nvidia.gpu_23953679574.zip

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124408
Approved by: https://github.com/desertfire
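For concreteness, whitelisting works by adding an entry to the new dictionary in `benchmarks/dynamo/common.py`; the (hypothetical) entry below mirrors the commented-out example in the diff — no model is whitelisted by default.

```python
# Maps a benchmark model name to the result statuses that should preserve its
# TORCH_COMPILE_DEBUG artifacts. Listed models are re-run with the env var set,
# and on a matching status the debug dir is moved to test/debug/ for S3 upload.
CI_PRESERVE_COMPILE_DEBUG = {
    "mnasnet1_0": ["fail_accuracy"],
}
```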
diff --git a/.github/actions/upload-test-artifacts/action.yml b/.github/actions/upload-test-artifacts/action.yml index ae27729720..04cb43b20c 100644 --- a/.github/actions/upload-test-artifacts/action.yml +++ b/.github/actions/upload-test-artifacts/action.yml @@ -46,7 +46,7 @@ runs: env: FILE_SUFFIX: ${{ inputs.file-suffix }} run: | - # Remove any previous test reports if they exist + # Remove any previous usage logs if they exist rm -f logs-*.zip # this workflow is also run in bazel build test, but we dont generate usage reports for it # so check to see if the file exists first @@ -57,6 +57,18 @@ runs: zip -r "logs-${FILE_SUFFIX}.zip" test -i '*.log' fi + - name: Zip debugging artifacts for upload + if: runner.os != 'Windows' && !inputs.use-gha + shell: bash + env: + FILE_SUFFIX: ${{ inputs.file-suffix }} + run: | + # Remove any previous debugging artifacts if they exist + rm -f debug-*.zip + if [ -d 'test/debug' ]; then + zip -r "debug-${FILE_SUFFIX}.zip" test/debug + fi + # Windows zip - name: Zip JSONs for upload if: runner.os == 'Windows' && !inputs.use-gha @@ -121,6 +133,18 @@ runs: if-no-files-found: ignore path: logs-*.zip + - name: Store Debug Artifacts on S3 + uses: seemethere/upload-artifact-s3@v5 + if: ${{ !inputs.use-gha }} + continue-on-error: true + with: + s3-bucket: ${{ inputs.s3-bucket }} + s3-prefix: | + ${{ github.repository }}/${{ github.run_id }}/${{ github.run_attempt }}/artifact + retention-days: 14 + if-no-files-found: ignore + path: debug-*.zip + # GHA upload - name: Store Test Downloaded JSONs on Github uses: actions/upload-artifact@v3 diff --git a/.gitignore b/.gitignore index dda13467a6..3bfb7a2e52 100644 --- a/.gitignore +++ b/.gitignore @@ -54,6 +54,7 @@ test/.coverage test/.hypothesis/ test/cpp/api/mnist test/custom_operator/model.pt +test/debug/ test/jit_hooks/*.pt test/data/legacy_modules.t7 test/data/*.pt diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py index 9e946df174..2dd5736f55 100644 --- a/benchmarks/dynamo/common.py +++ b/benchmarks/dynamo/common.py @@ -258,6 +258,15 @@ CI_USE_SGD = { DO_NOT_CAST_INPUTS = {"stable_diffusion"} +# Maps a benchmark model name to a list of status codes. For any listed entry, we'll +# capture TORCH_COMPILE_DEBUG logs in CI runs and preseve them (i.e., for upload) if +# the result status matches one listed. 
+CI_PRESERVE_COMPILE_DEBUG = { + # For example: + # "mnasnet1_0": ["fail_accuracy"], +} + + def model_specified_by_path(path_and_class_str): return ":" in path_and_class_str @@ -2854,6 +2863,24 @@ class BenchmarkRunner: repro_dir, ) + def maybe_preserve_compile_debug(self, name, status): + if ( + name in CI_PRESERVE_COMPILE_DEBUG + and status in CI_PRESERVE_COMPILE_DEBUG[name] + ): + src_dir = torch._dynamo.utils.get_debug_dir() + if os.path.isdir(src_dir): + dbg_dir = os.path.join( + os.getcwd(), "test", "debug", "torch_compile_debug" + ) + dst_dir = os.path.join(dbg_dir, os.path.basename(src_dir)) + try: + os.makedirs(dbg_dir, exist_ok=True) + os.rename(src_dir, dst_dir) + log.warning("Moved %s to %s", src_dir, dst_dir) + except OSError: + log.exception("Failed to preserve %s", src_dir) + def run_one_model( self, name, @@ -2891,6 +2918,8 @@ class BenchmarkRunner: print(status) torch.cuda.empty_cache() + self.maybe_preserve_compile_debug(name, status) + if self.args.timing: from torch._dynamo.utils import op_count, print_time_report from torch.utils._stats import simple_call_counter @@ -4068,8 +4097,13 @@ def run(runner, args, original_dir=None): timeout = args.timeout if should_diff_branch(args): timeout *= 2 + env = os.environ.copy() + if args.ci and name in CI_PRESERVE_COMPILE_DEBUG: + env["TORCH_COMPILE_DEBUG"] = "1" subprocess.check_call( - [sys.executable] + sys.argv + [f"--only={name}"], timeout=timeout + [sys.executable] + sys.argv + [f"--only={name}"], + timeout=timeout, + env=env, ) except subprocess.TimeoutExpired: write_csv_when_exception(args, name, "timeout")
2.41.0
03a08f8ae4a60882bdcedd4862e8e6eef81bd47
Fri, 19 Apr 2024 02:57:12 +0000
[PATCH 0356/1000] [ROCm] Add cublasGemmAlgo_t -> hipblasGemmAlgo_t (#121030)
This PR adds the cublasGemmAlgo_t -> hipblasGemmAlgo_t mapping to cuda_to_hip_mappings.py, which is required to build the DeepSpeed transformer extension on ROCm. Pull Request resolved: https://github.com/pytorch/pytorch/pull/121030 Approved by: https://github.com/jeffdaily, https://github.com/ezyang
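A quick way to confirm the mapping is picked up (illustrative, not part of the PR): the hipify tables are plain module-level dictionaries keyed by the CUDA identifier, so the new entry can be inspected directly.

```python
from torch.utils.hipify.cuda_to_hip_mappings import CUDA_TYPE_NAME_MAP

entry = CUDA_TYPE_NAME_MAP["cublasGemmAlgo_t"]
print(entry[0])  # "hipblasGemmAlgo_t"; the remaining fields are conversion/API tags
```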
diff --git a/torch/utils/hipify/cuda_to_hip_mappings.py b/torch/utils/hipify/cuda_to_hip_mappings.py index 86ff5daf3e..6652240938 100644 --- a/torch/utils/hipify/cuda_to_hip_mappings.py +++ b/torch/utils/hipify/cuda_to_hip_mappings.py @@ -454,6 +454,7 @@ CUDA_TYPE_NAME_MAP = collections.OrderedDict( ("cublasDiagType_t", ("hipblasDiagType_t", CONV_TYPE, API_BLAS)), ("cublasSideMode_t", ("hipblasSideMode_t", CONV_TYPE, API_BLAS)), ("cublasPointerMode_t", ("hipblasPointerMode_t", CONV_TYPE, API_BLAS)), + ("cublasGemmAlgo_t", ("hipblasGemmAlgo_t", CONV_TYPE, API_BLAS)), ( "cublasAtomicsMode_t", ("hipblasAtomicsMode_t", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED),
2.41.0
68ce2cddad2057349d1194274a5f93c47c5ac88
Fri, 19 Apr 2024 03:31:13 +0000
[PATCH 0357/1000] [Profiler] Unify the device(CUDA, XPU, PrivateUse1) in torch profiler post processing (#123247)
This PR unifies the CUDA, XPU and PrivateUse1 devices in the torch profiler post processing. CUDA, XPU and PrivateUse1 now share the string attribute `use_device` to distinguish one another and a single device path for computing Kineto time durations and memory statistics during post processing. #suppress-api-compatibility-check Co-authored-by: Aaron Enye Shi <enye.shi@gmail.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123247 Approved by: https://github.com/aaronenyeshi, https://github.com/gujinghui
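A minimal usage sketch (assuming a machine with a CUDA device; the same pattern applies to `use_device="xpu"`): device timings are requested via the `use_device` string and read back through the unified `device_time_total` attribute rather than the old `cuda_time_total`.

```python
import torch
from torch.autograd import profiler

x = torch.randn(256, 256, device="cuda")
with profiler.profile(use_device="cuda", profile_memory=True) as prof:
    y = x @ x

# CUDA, XPU and PrivateUse1 all report through the same unified attribute now.
total_device_us = sum(evt.device_time_total for evt in prof.key_averages())
print(f"total device time across ops: {total_device_us:.1f} us")
```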
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py index d9012d0e89..56771eb188 100644 --- a/test/profiler/test_profiler.py +++ b/test/profiler/test_profiler.py @@ -1095,7 +1095,7 @@ class TestProfiler(TestCase): stats = run_profiler(create_cuda_tensor) check_metrics( stats, - "cuda_memory_usage", + "device_memory_usage", allocs=[ "test_user_scope_alloc", "aten::to", @@ -1147,7 +1147,7 @@ class TestProfiler(TestCase): deallocs=["[memory]"], ) if torch.cuda.is_available(): - check_metrics(stats, "cuda_memory_usage", deallocs=["[memory]"]) + check_metrics(stats, "device_memory_usage", deallocs=["[memory]"]) @unittest.skipIf( IS_JETSON, "Jetson has a guard against OOM since host and gpu memory are shared" diff --git a/test/test_autograd.py b/test/test_autograd.py index 95432aaa6a..5f2c4d28e4 100644 --- a/test/test_autograd.py +++ b/test/test_autograd.py @@ -4628,11 +4628,11 @@ Done""", self.assertEqual(avg.count, 4) self.assertEqual(avg.cpu_time_total, 30) self.assertEqual(avg.self_cpu_time_total, 30) - self.assertEqual(avg.cuda_time_total, 0) + self.assertEqual(avg.device_time_total, 0) # average stats self.assertEqual(avg.cpu_time, 7.5) - self.assertEqual(avg.cuda_time_total, 0) + self.assertEqual(avg.device_time_total, 0) def test_profiler_shapes(self): print("") diff --git a/torch/_C/_autograd.pyi b/torch/_C/_autograd.pyi index 7e503a8e90..e6c4c3ec9d 100644 --- a/torch/_C/_autograd.pyi +++ b/torch/_C/_autograd.pyi @@ -15,6 +15,7 @@ from ._profiler import ( class DeviceType(Enum): CPU = ... CUDA = ... + XPU = ... MKLDNN = ... OPENGL = ... OPENCL = ... diff --git a/torch/_C/_profiler.pyi b/torch/_C/_profiler.pyi index e1481dd9c1..d19e72f573 100644 --- a/torch/_C/_profiler.pyi +++ b/torch/_C/_profiler.pyi @@ -39,6 +39,7 @@ class ActiveProfilerType(Enum): class ProfilerActivity(Enum): CPU = ... CUDA = ... + XPU = ... MTIA = ... PrivateUse1 = ... diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index 53319bd2dd..77e84d0829 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -136,7 +136,7 @@ def do_bench_using_profiling(fn: Callable[[], Any], warmup=25, rep=100) -> float log.debug("profiling time breakdown") log.debug(actual_events.table(row_limit=-1)) - res = sum(event.cuda_time_total for event in actual_events) / 1000.0 / n_repeat + res = sum(event.device_time_total for event in actual_events) / 1000.0 / n_repeat log.debug("profiling results: %s ms", res) return res diff --git a/torch/autograd/profiler.py b/torch/autograd/profiler.py index ba020fb3cb..f233277b7e 100644 --- a/torch/autograd/profiler.py +++ b/torch/autograd/profiler.py @@ -7,7 +7,6 @@ from warnings import warn import torch import torch.cuda -from torch._C import _get_privateuse1_backend_name from torch._C._profiler import _ExperimentalConfig from torch.autograd import ( @@ -112,8 +111,12 @@ class profile: Args: enabled (bool, optional): Setting this to False makes this context manager a no-op. - use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API. - Adds approximately 4us of overhead to each tensor operation. + use_cuda (bool, optional): Enables timing of CUDA events as well + using the cudaEvent API. (will be deprecated) + + use_device (str, optional): Enables timing of device events. + Adds approximately 4us of overhead to each tensor operation when use cuda. + The valid devices options are 'cuda', 'xpu' and 'privateuseone'. 
record_shapes (bool, optional): If shapes recording is set, information about input dimensions will be collected. This allows one to see which @@ -161,9 +164,9 @@ class profile: .. warning: Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_), - one cannot use the profiler with ``use_cuda = True`` to benchmark + one cannot use the profiler with ``use_device = 'cuda'`` to benchmark DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading, - please use ``use_cuda = False`` or ``num_workers = 0``. + please use ``use_device = None`` or ``num_workers = 0``. Example: >>> # xdoctest: +SKIP @@ -207,9 +210,13 @@ class profile: if not self.enabled: return self.use_cuda = use_cuda - self.use_device: Optional[str] = ( - use_device if use_device != "privateuseone" else None - ) + if self.use_cuda: + warn( + "The attribute `use_cuda` will be deprecated soon, please use ``use_device = 'cuda'`` instead." + ) + self.use_device: Optional[str] = "cuda" + else: + self.use_device = use_device self.function_events: Optional[EventList] = None self.entered = False self.record_shapes = record_shapes @@ -233,17 +240,19 @@ class profile: use_kineto ), "Device-only events supported only with Kineto (use_kineto=True)" - if self.use_device == "cuda": - self.use_device = None - self.use_cuda = True - - if self.use_device and self.use_device != _get_privateuse1_backend_name(): - warn(f"{self.use_device} doesn't support profile.") + VALID_DEVICE_OPTIONS = ["cuda", "xpu", "privateuseone"] + if self.use_device not in VALID_DEVICE_OPTIONS: + warn(f"The {self.use_device} is not a valid device option.") self.use_device = None - if self.use_cuda and not torch.cuda.is_available(): + if self.use_device == "cuda" and not torch.cuda.is_available(): warn("CUDA is not available, disabling CUDA profiling") self.use_cuda = False + self.use_device = None + + if self.use_device == "xpu" and not torch.xpu.is_available(): + warn("XPU is not available, disabling XPU profiling") + self.use_device = None self.kineto_activities = set() if self.use_cpu: @@ -252,14 +261,18 @@ class profile: self.kineto_activities.add(ProfilerActivity.MTIA) self.profiler_kind = ProfilerState.KINETO - if self.use_cuda: + if self.use_device == "cuda": if not use_kineto or ProfilerActivity.CUDA not in _supported_activities(): assert self.use_cpu, "Legacy CUDA profiling requires use_cpu=True" self.profiler_kind = ProfilerState.KINETO_GPU_FALLBACK else: self.kineto_activities.add(ProfilerActivity.CUDA) - - if self.use_device: + elif self.use_device == "xpu": + assert ( + use_kineto and ProfilerActivity.XPU in _supported_activities() + ), "Legacy XPU profiling is not supported. Requires use_kineto=True on XPU devices." 
+ self.kineto_activities.add(ProfilerActivity.XPU) + elif self.use_device is not None and self.use_device != "privateuseone": if ( not use_kineto or ProfilerActivity.PrivateUse1 not in _supported_activities() @@ -315,8 +328,10 @@ class profile: def __exit__(self, exc_type, exc_val, exc_tb): if not self.enabled: return - if self.use_cuda: + if self.use_device == "cuda": torch.cuda.synchronize() + elif self.use_device == "xpu": + torch.xpu.synchronize() t0 = perf_counter_ns() self.kineto_results = _disable_profiler() @@ -332,7 +347,6 @@ class profile: self.function_events = EventList( parsed_results, - use_cuda=self.use_cuda, use_device=self.use_device, profile_memory=self.profile_memory, with_flops=self.with_flops, @@ -445,17 +459,11 @@ class profile: else 0 ) - def _cuda_memory_usage(mem_record): + def _device_memory_usage(mem_record): return ( mem_record.nbytes() - if mem_record.device_type() in [DeviceType.CUDA, DeviceType.HIP] - else 0 - ) - - def _privateuse1_memory_usage(mem_record): - return ( - mem_record.nbytes() - if mem_record.device_type() in [DeviceType.PrivateUse1] + if mem_record.device_type() + in [DeviceType.CUDA, DeviceType.PrivateUse1, DeviceType.HIP] else 0 ) @@ -471,16 +479,14 @@ class profile: abs_end_ns = kineto_event.start_ns() + kineto_event.duration_ns() cpu_memory_usage = 0 - cuda_memory_usage = 0 - privateuse1_memory_usage = 0 + device_memory_usage = 0 if kineto_event.device_type() == DeviceType.CPU: # find the corresponding memory allocation events for mem_record in mem_records_acc.in_interval( kineto_event.start_ns() / 1000, abs_end_ns / 1000 ): cpu_memory_usage += _cpu_memory_usage(mem_record[0]) - cuda_memory_usage += _cuda_memory_usage(mem_record[0]) - privateuse1_memory_usage += _privateuse1_memory_usage(mem_record[0]) + device_memory_usage += _device_memory_usage(mem_record[0]) mem_record[1] = True is_async = kineto_event.is_async() or ( @@ -505,8 +511,7 @@ class profile: scope=kineto_event.scope(), use_device=self.use_device, cpu_memory_usage=cpu_memory_usage, - cuda_memory_usage=cuda_memory_usage, - privateuse1_memory_usage=privateuse1_memory_usage, + device_memory_usage=device_memory_usage, is_async=is_async, sequence_nr=kineto_event.sequence_nr(), device_type=kineto_event.device_type(), @@ -516,12 +521,12 @@ class profile: ) max_evt_id = max(max_evt_id, fe.id) if fe.device_type == DeviceType.CPU and not fe.is_async: - if self.use_device: + if self.use_device == "privateuseone": privateuse1_time = kineto_event.privateuse1_elapsed_us() if privateuse1_time > 0: fe.append_kernel(fe.name, fe.device_index, privateuse1_time) fe.is_legacy = True - else: + elif self.use_device == "cuda": # Check if we have CUDA time as a fallback cuda_time = kineto_event.cuda_elapsed_us() if cuda_time > 0: @@ -534,7 +539,7 @@ class profile: device_corr_map[corr_id] = [] device_corr_map[corr_id].append(fe) - # associate CUDA kernels and CUDA runtime (CPU) with CPU events + # associate device kernels and device runtime (CPU) with CPU events for fe in function_events: if ( fe.device_type == DeviceType.CPU @@ -549,7 +554,7 @@ class profile: f_evt.time_range.end - f_evt.time_range.start, ) elif f_evt.device_type == DeviceType.CPU: - # make sure that 'thread' of a CPU Kineto (e.g. CUDA Runtime) event is associated + # make sure that 'thread' of a CPU Kineto (e.g. 
Device Runtime) event is associated # with the 'thread' of the corresponding linked PyTorch event to properly track # parents and children f_evt.thread = fe.thread @@ -569,8 +574,7 @@ class profile: scope=0, # RecordScope::FUNCTION use_device=self.use_device, cpu_memory_usage=_cpu_memory_usage(evt), - cuda_memory_usage=_cuda_memory_usage(evt), - privateuse1_memory_usage=_privateuse1_memory_usage(evt), + device_memory_usage=_device_memory_usage(evt), is_async=False, sequence_nr=-1, device_type=DeviceType.CPU, diff --git a/torch/autograd/profiler_legacy.py b/torch/autograd/profiler_legacy.py index 32700ffb1c..c491f9797a 100644 --- a/torch/autograd/profiler_legacy.py +++ b/torch/autograd/profiler_legacy.py @@ -93,7 +93,7 @@ class profile: parsed_results = _parse_legacy_records(records) self.function_events = EventList( parsed_results, - use_cuda=self.use_cuda, + use_device="cuda" if self.use_cuda else None, profile_memory=self.profile_memory, with_flops=self.with_flops, ) @@ -251,7 +251,7 @@ def _parse_legacy_records(thread_records): ], scope=start.scope(), cpu_memory_usage=cpu_memory_usage, - cuda_memory_usage=cuda_memory_usage, + device_memory_usage=cuda_memory_usage, is_async=is_async, is_remote=is_remote_event, sequence_nr=start.sequence_nr(), @@ -287,7 +287,7 @@ def _parse_legacy_records(thread_records): end_us=0, stack=[], cpu_memory_usage=record.cpu_memory_usage(), - cuda_memory_usage=record.cuda_memory_usage(), + device_memory_usage=record.cuda_memory_usage(), is_legacy=True, ) functions.append(fe) diff --git a/torch/autograd/profiler_util.py b/torch/autograd/profiler_util.py index 4db601ad7b..6d446d6ade 100644 --- a/torch/autograd/profiler_util.py +++ b/torch/autograd/profiler_util.py @@ -26,12 +26,10 @@ class EventList(list): """A list of Events (for pretty printing).""" def __init__(self, *args, **kwargs): - use_cuda = kwargs.pop("use_cuda", True) use_device = kwargs.pop("use_device", None) profile_memory = kwargs.pop("profile_memory", False) with_flops = kwargs.pop("with_flops", False) super().__init__(*args, **kwargs) - self._use_cuda = use_cuda self._use_device = use_device self._profile_memory = profile_memory self._tree_built = False @@ -181,14 +179,16 @@ class EventList(list): Args: sort_by (str, optional): Attribute used to sort entries. By default they are printed in the same order as they were registered. - Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``, - ``cuda_time_total``, ``cpu_memory_usage``, ``cuda_memory_usage``, - ``self_cpu_memory_usage``, ``self_cuda_memory_usage``, ``count``. + Valid keys include: ``cpu_time``, ``cuda_time``, ``xpu_time``, + ``cpu_time_total``, ``cuda_time_total``, ``xpu_time_total``, + ``cpu_memory_usage``, ``cuda_memory_usage``, ``xpu_memory_usage``, + ``self_cpu_memory_usage``, ``self_cuda_memory_usage``, + ``self_xpu_memory_usage``, ``count``. top_level_events_only(bool, optional): Boolean flag to determine the selection of events to display. If true, the profiler will only display events at top level like top-level invocation of python `lstm`, python `add` or other functions, nested events like low-level - cpu/cuda ops events are omitted for profiler result readability. + cpu/cuda/xpu ops events are omitted for profiler result readability. Returns: A string containing the table. 
@@ -267,6 +267,7 @@ class EventList(list): return [ "self_cpu_time_total", "self_cuda_time_total", + "self_xpu_time_total", "self_privateuse1_time_total", ] @@ -280,7 +281,12 @@ class EventList(list): with open(path, "w") as f: for evt in self: if evt.stack and len(evt.stack) > 0: - metric_value = getattr(evt, metric) + metric_value = getattr( + evt, + metric.replace("cuda", "device") + .replace("xpu", "device") + .replace("privateuse1", "device"), + ) if int(metric_value) > 0: stack_str = "" for entry in reversed(evt.stack): @@ -325,7 +331,6 @@ class EventList(list): avg_list = EventList( stats.values(), - use_cuda=self._use_cuda, use_device=self._use_device, profile_memory=self._profile_memory, with_flops=self._with_flops, @@ -395,26 +400,23 @@ class FormattedTimesMixin: """ cpu_time_str = _attr_formatter("cpu_time") - cuda_time_str = _attr_formatter("cuda_time") - privateuse1_time_str = _attr_formatter("privateuse1_time") + device_time_str = _attr_formatter("device_time") cpu_time_total_str = _attr_formatter("cpu_time_total") - cuda_time_total_str = _attr_formatter("cuda_time_total") - privateuse1_time_total_str = _attr_formatter("privateuse1_time_total") + device_time_total_str = _attr_formatter("device_time_total") self_cpu_time_total_str = _attr_formatter("self_cpu_time_total") - self_cuda_time_total_str = _attr_formatter("self_cuda_time_total") - self_privateuse1_time_total_str = _attr_formatter("self_privateuse1_time_total") + self_device_time_total_str = _attr_formatter("self_device_time_total") @property def cpu_time(self): return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count # type: ignore[attr-defined] @property - def cuda_time(self): - return 0.0 if self.count == 0 else 1.0 * self.cuda_time_total / self.count # type: ignore[attr-defined] + def device_time(self): + return 0.0 if self.count == 0 else 1.0 * self.device_time_total / self.count # type: ignore[attr-defined] @property - def privateuse1_time(self): - return 0.0 if self.count == 0 else 1.0 * self.privateuse1_time_total / self.count # type: ignore[attr-defined] + def cuda_time(self): # To be deprecated + return self.device_time class Interval: @@ -448,8 +450,7 @@ class FunctionEvent(FormattedTimesMixin): scope=0, use_device=None, cpu_memory_usage=0, - cuda_memory_usage=0, - privateuse1_memory_usage=0, + device_memory_usage=0, is_async=False, is_remote=False, sequence_nr=-1, @@ -479,8 +480,7 @@ class FunctionEvent(FormattedTimesMixin): self.scope: int = scope self.use_device: Optional[str] = use_device self.cpu_memory_usage: int = cpu_memory_usage - self.cuda_memory_usage: int = cuda_memory_usage - self.privateuse1_memory_usage: int = privateuse1_memory_usage + self.device_memory_usage: int = device_memory_usage self.is_async: bool = is_async self.is_remote: bool = is_remote self.sequence_nr: int = sequence_nr @@ -530,20 +530,23 @@ class FunctionEvent(FormattedTimesMixin): ) @property - def self_cuda_memory_usage(self): + def self_device_memory_usage(self): if self.is_async or self.device_type != DeviceType.CPU: return 0 - return self.cuda_memory_usage - sum( - child.cuda_memory_usage for child in self.cpu_children + return self.device_memory_usage - sum( + child.device_memory_usage for child in self.cpu_children ) @property - def self_privateuse1_memory_usage(self): - if self.is_async or self.device_type != DeviceType.CPU: + def self_cuda_memory_usage(self): # To be deprecated + self.self_device_memory_usage + + @property + def cpu_time_total(self): + if self.device_type == DeviceType.CPU: + return 
self.time_range.elapsed_us() + else: return 0 - return self.privateuse1_memory_usage - sum( - child.privateuse1_memory_usage for child in self.cpu_children - ) @property def self_cpu_time_total(self): @@ -554,84 +557,50 @@ class FunctionEvent(FormattedTimesMixin): ) @property - def cuda_time_total(self): - if self.is_async or self.use_device: + def device_time_total(self): + if self.is_async or not self.use_device: return 0 if self.device_type == DeviceType.CPU: if not self.is_legacy: # account for the kernels in the children ops return sum(kinfo.duration for kinfo in self.kernels) + sum( - ch.cuda_time_total for ch in self.cpu_children + ch.device_time_total for ch in self.cpu_children ) else: # each legacy cpu events has a single (fake) kernel return sum(kinfo.duration for kinfo in self.kernels) else: - assert self.device_type == DeviceType.CUDA + assert self.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1] return self.time_range.elapsed_us() @property - def self_cuda_time_total(self): - if self.is_async or self.use_device: - return 0 - if self.device_type == DeviceType.CPU: - return self.cuda_time_total - sum( - child.cuda_time_total for child in self.cpu_children - ) - else: - assert self.device_type == DeviceType.CUDA - return self.cuda_time_total + def cuda_time_total(self): # To be deprecated + self.device_time_total @property - def cpu_time_total(self): - if self.device_type == DeviceType.CPU: - return self.time_range.elapsed_us() - else: - return 0 - - @property - def self_privateuse1_time_total(self): + def self_device_time_total(self): if self.is_async or not self.use_device: return 0 if self.device_type == DeviceType.CPU: - return self.privateuse1_time_total - sum( - child.privateuse1_time_total for child in self.cpu_children + return self.device_time_total - sum( + [child.device_time_total for child in self.cpu_children] ) else: - assert self.device_type == DeviceType.CUDA - return self.privateuse1_time_total + assert self.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1] + return self.device_time_total @property - def privateuse1_time_total(self): - if self.is_async or not self.use_device: - return 0 - if self.device_type == DeviceType.CPU: - if not self.is_legacy: - # account for the kernels in the children ops - return sum(kinfo.duration for kinfo in self.kernels) + sum( - ch.privateuse1_time_total for ch in self.cpu_children - ) - else: - # each legacy cpu events has a single (fake) kernel - return sum(kinfo.duration for kinfo in self.kernels) - else: - assert self.device_type == DeviceType.PrivateUse1 - return self.time_range.elapsed_us() + def self_cuda_time_total(self): # To be deprecated + self.self_device_time_total @property def key(self): return self.name def __repr__(self): - device_name = "cuda" if not self.use_device else self.use_device - device_time = ( - self.cuda_time_str if not self.use_device else self.privateuse1_time_str - ) - device_memory_usage = ( - self.cuda_memory_usage - if not self.use_device - else self.privateuse1_memory_usage - ) + device_name = self.use_device + device_time = self.device_time_str + device_memory_usage = self.device_memory_usage return ( "<FunctionEvent id={} name={} device_type={} node_id={} cpu_time={} start_us={} end_us={} " "cpu_children={} {}_time={} name={} thread={} input_shapes={} " @@ -671,20 +640,16 @@ class FunctionEventAvg(FormattedTimesMixin): self.is_remote: bool = False self.use_device: Optional[str] = None self.cpu_time_total: int = 0 - self.cuda_time_total: int = 0 - self.privateuse1_time_total: 
int = 0 + self.device_time_total: int = 0 self.self_cpu_time_total: int = 0 - self.self_cuda_time_total: int = 0 - self.self_privateuse1_time_total: int = 0 + self.self_device_time_total: int = 0 self.input_shapes: Optional[List[List[int]]] = None self.stack: Optional[List] = None self.scope: Optional[int] = None self.cpu_memory_usage: int = 0 - self.cuda_memory_usage: int = 0 - self.privateuse1_memory_usage: int = 0 + self.device_memory_usage: int = 0 self.self_cpu_memory_usage: int = 0 - self.self_cuda_memory_usage: int = 0 - self.self_privateuse1_memory_usage: int = 0 + self.self_device_memory_usage: int = 0 self.cpu_children: Optional[List[FunctionEvent]] = None self.cpu_parent: Optional[FunctionEvent] = None self.device_type: DeviceType = DeviceType.CPU @@ -712,17 +677,13 @@ class FunctionEventAvg(FormattedTimesMixin): assert isinstance(other, (FunctionEvent, FunctionEventAvg)) assert other.key == self.key self.cpu_time_total += other.cpu_time_total - self.cuda_time_total += other.cuda_time_total - self.privateuse1_time_total += other.privateuse1_time_total + self.device_time_total += other.device_time_total self.self_cpu_time_total += other.self_cpu_time_total - self.self_cuda_time_total += other.self_cuda_time_total - self.self_privateuse1_time_total += other.self_privateuse1_time_total + self.self_device_time_total += other.self_device_time_total self.cpu_memory_usage += other.cpu_memory_usage - self.cuda_memory_usage += other.cuda_memory_usage - self.privateuse1_memory_usage += other.privateuse1_memory_usage + self.device_memory_usage += other.device_memory_usage self.self_cpu_memory_usage += other.self_cpu_memory_usage - self.self_cuda_memory_usage += other.self_cuda_memory_usage - self.self_privateuse1_memory_usage += other.self_privateuse1_memory_usage + self.self_device_memory_usage += other.self_device_memory_usage self.count += other.count if self.flops is None: self.flops = other.flops @@ -735,19 +696,9 @@ class FunctionEventAvg(FormattedTimesMixin): def __repr__(self): device_name = "cuda" if not self.use_device else self.use_device - self_device_time = ( - self.self_cuda_time_total_str - if not self.use_device - else self.self_privateuse1_time_total_str - ) - device_time = ( - self.cuda_time_str if not self.use_device else self.privateuse1_time_str - ) - device_memory = ( - self.cuda_memory_usage - if not self.use_device - else self.privateuse1_memory_usage - ) + self_device_time = self.self_device_time_total_str + device_time = self.device_time_str + device_memory = self.device_memory_usage return ( "<FunctionEventAvg key={} self_cpu_time={} cpu_time={} " " self_{}_time={} {}_time={} input_shapes={} " @@ -858,19 +809,14 @@ def _build_table( if len(events) == 0: return "" - has_cuda_time = any(event.self_cuda_time_total > 0 for event in events) - has_cuda_mem = any(event.self_cuda_memory_usage > 0 for event in events) - has_privateuse1_time = any( - event.self_privateuse1_time_total > 0 for event in events - ) - has_privateuse1_mem = any( - event.self_privateuse1_memory_usage > 0 for event in events - ) + has_device_time = any(event.self_device_time_total > 0 for event in events) + has_device_mem = any(event.self_device_memory_usage > 0 for event in events) use_device = events[0].use_device - if not use_device and (has_privateuse1_mem or has_privateuse1_time): - raise RuntimeError( - "use_device is None, but there is private device performance data." 
- ) + # Running on PrivateUse1 device with profiler but not enable + # ProfilerActivity.PrivateUse1 can also catch privateuse1 memory usage. + # Here only need to check has_privateuse1_time if not use_device. + if not use_device and has_device_time: + raise RuntimeError("use_device is None, but there is device performance data.") has_input_shapes = any( (event.input_shapes is not None and len(event.input_shapes) > 0) @@ -879,8 +825,16 @@ def _build_table( if sort_by is not None: events = EventList( - sorted(events, key=lambda evt: getattr(evt, sort_by), reverse=True), - use_cuda=has_cuda_time, + sorted( + events, + key=lambda evt: getattr( + evt, + sort_by.replace("cuda", "device") + .replace("xpu", "device") + .replace("privateuse1", "device"), + ), + reverse=True, + ), use_device=use_device, profile_memory=profile_memory, with_flops=with_flops, @@ -918,23 +872,14 @@ def _build_table( "CPU total", "CPU time avg", ] - if has_cuda_time: - headers.extend( - [ - "Self CUDA", - "Self CUDA %", - "CUDA total", - "CUDA time avg", - ] - ) - if has_privateuse1_time: - privateuse1 = use_device.upper() + device_name = use_device.upper() if use_device is not None else "None" + if has_device_time: headers.extend( [ - f"Self {privateuse1}", - f"Self {privateuse1} %", - f"{privateuse1} total", - f"{privateuse1} time avg", + f"Self {device_name}", + f"Self {device_name} %", + f"{device_name} total", + f"{device_name} time avg", ] ) if profile_memory: @@ -944,19 +889,11 @@ def _build_table( "Self CPU Mem", ] ) - if has_cuda_mem: + if has_device_mem: headers.extend( [ - "CUDA Mem", - "Self CUDA Mem", - ] - ) - if has_privateuse1_mem: - privateuse1 = use_device.upper() - headers.extend( - [ - f"{privateuse1} Mem", - f"Self {privateuse1} Mem", + f"{device_name} Mem", + f"Self {device_name} Mem", ] ) headers.append("# of Calls") @@ -1030,22 +967,16 @@ def _build_table( result.append(s) result.append("\n") # Yes, newline after the end as well - sum_self_cpu_time_total = sum(event.self_cpu_time_total for event in events) - sum_self_cuda_time_total = 0 - sum_self_privateuse1_time_total = 0 + sum_self_cpu_time_total = 0 + sum_self_device_time_total = 0 for evt in events: - if evt.device_type == DeviceType.CPU: + sum_self_cpu_time_total += evt.self_cpu_time_total + if evt.device_type == DeviceType.CPU and evt.is_legacy: # in legacy profiler, kernel info is stored in cpu events - if evt.is_legacy: - if not use_device: - sum_self_cuda_time_total += evt.self_cuda_time_total - else: - sum_self_privateuse1_time_total += evt.self_privateuse1_time_total - elif evt.device_type == DeviceType.CUDA: + sum_self_device_time_total += evt.self_device_time_total + elif evt.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1]: # in kineto profiler, there're events with the correct device type (e.g. 
CUDA) - sum_self_cuda_time_total += evt.self_cuda_time_total - elif evt.device_type == DeviceType.PrivateUse1: - sum_self_privateuse1_time_total += evt.self_privateuse1_time_total + sum_self_device_time_total += evt.self_device_time_total # Actual printing if header is not None: @@ -1090,28 +1021,16 @@ def _build_table( evt.cpu_time_total_str, # CPU total evt.cpu_time_str, # CPU time avg ] - if has_cuda_time: + if has_device_time: row_values.extend( [ - evt.self_cuda_time_total_str, - # CUDA time total % + evt.self_device_time_total_str, + # device time total % _format_time_share( - evt.self_cuda_time_total, sum_self_cuda_time_total + evt.self_device_time_total, sum_self_device_time_total ), - evt.cuda_time_total_str, - evt.cuda_time_str, # Cuda time avg - ] - ) - if has_privateuse1_time: - row_values.extend( - [ - evt.self_privateuse1_time_total_str, - # PrivateUse1 time total % - _format_time_share( - evt.self_privateuse1_time_total, sum_self_privateuse1_time_total - ), - evt.privateuse1_time_total_str, - evt.privateuse1_time_str, # PrivateUse1 time avg + evt.device_time_total_str, + evt.device_time_str, # device time avg ] ) if profile_memory: @@ -1123,22 +1042,13 @@ def _build_table( _format_memory(evt.self_cpu_memory_usage), ] ) - if has_cuda_mem: - row_values.extend( - [ - # CUDA Mem Total - _format_memory(evt.cuda_memory_usage), - # Self CUDA Mem Total - _format_memory(evt.self_cuda_memory_usage), - ] - ) - if has_privateuse1_mem: + if has_device_mem: row_values.extend( [ - # PrivateUse1 Mem Total - _format_memory(evt.privateuse1_memory_usage), - # Self PrivateUse1 Mem Total - _format_memory(evt.self_privateuse1_memory_usage), + # Device Mem Total + _format_memory(evt.device_memory_usage), + # Self Device Mem Total + _format_memory(evt.self_device_memory_usage), ] ) row_values.append( @@ -1174,10 +1084,9 @@ def _build_table( append(header_sep) append(f"Self CPU time total: {_format_time(sum_self_cpu_time_total)}") - if has_cuda_time: - append(f"Self CUDA time total: {_format_time(sum_self_cuda_time_total)}") - if has_privateuse1_time: + if has_device_time: append( - f"Self {use_device.upper()} time total: {_format_time(sum_self_privateuse1_time_total)}" + f"Self {use_device.upper() if use_device is not None else 'None'} " + f"time total: {_format_time(sum_self_device_time_total)}" ) return "".join(result) diff --git a/torch/csrc/profiler/kineto_shim.cpp b/torch/csrc/profiler/kineto_shim.cpp index 85f91bf8b2..41561c6f3e 100644 --- a/torch/csrc/profiler/kineto_shim.cpp +++ b/torch/csrc/profiler/kineto_shim.cpp @@ -342,6 +342,7 @@ c10::DeviceType deviceTypeFromActivity(libkineto::ActivityType activity_type) { case libkineto::ActivityType::USER_ANNOTATION: case libkineto::ActivityType::EXTERNAL_CORRELATION: case libkineto::ActivityType::CUDA_RUNTIME: + case libkineto::ActivityType::XPU_RUNTIME: case libkineto::ActivityType::CPU_INSTANT_EVENT: case libkineto::ActivityType::GLOW_RUNTIME: case libkineto::ActivityType::MTIA_RUNTIME: diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py index bfc725700a..120b2acad2 100644 --- a/torch/profiler/profiler.py +++ b/torch/profiler/profiler.py @@ -13,7 +13,6 @@ from typing_extensions import Self import torch import torch.autograd.profiler as prof -from torch._C import _get_privateuse1_backend_name from torch._C._profiler import ( _add_execution_trace_observer, _disable_execution_trace_observer, @@ -72,8 +71,10 @@ class _KinetoProfile: Args: activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported 
values: - ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. - Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. + ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``, + ``torch.profiler.ProfilerActivity.XPU``. + Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA + or (when available) ProfilerActivity.XPU. record_shapes (bool): save information about operator's input shapes. profile_memory (bool): track tensor memory allocation/deallocation (see ``export_memory_timeline`` for more details). @@ -126,9 +127,13 @@ class _KinetoProfile: self.profiler: Optional[prof.profile] = None self.mem_tl: Optional[MemoryProfileTimeline] = None self.use_device = None - privateuse1_backend = _get_privateuse1_backend_name() - if privateuse1_backend != "privateuseone": - self.use_device = privateuse1_backend + if ProfilerActivity.CUDA in self.activities: + self.use_device = "cuda" + elif ProfilerActivity.XPU in self.activities: + self.use_device = "xpu" + else: + self.use_device = "privateuseone" + # user-defined metadata to be amended to the trace self.preset_metadata: Dict[str, str] = dict() @@ -144,7 +149,7 @@ class _KinetoProfile: use_cuda=(ProfilerActivity.CUDA in self.activities), use_cpu=(ProfilerActivity.CPU in self.activities), use_mtia=(ProfilerActivity.MTIA in self.activities), - use_device=None, + use_device=self.use_device, record_shapes=self.record_shapes, with_flops=self.with_flops, profile_memory=self.profile_memory, @@ -444,8 +449,10 @@ class profile(_KinetoProfile): Args: activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values: - ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. - Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. + ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``, + ``torch.profiler.ProfilerActivity.XPU``. + Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA + or (when available) ProfilerActivity.XPU. schedule (Callable): callable that takes step (int) as a single parameter and returns ``ProfilerAction`` value that specifies the profiler action to perform at each step. 
on_trace_ready (Callable): callable that is called at each step when ``schedule`` diff --git a/torch/testing/_internal/distributed/rpc/rpc_test.py b/torch/testing/_internal/distributed/rpc/rpc_test.py index 25495f0bf8..9f1a8f8411 100644 --- a/torch/testing/_internal/distributed/rpc/rpc_test.py +++ b/torch/testing/_internal/distributed/rpc/rpc_test.py @@ -4606,22 +4606,22 @@ class CudaRpcTest(RpcAgentTestFixture): function_events = p.function_events for event in function_events: if event.is_async: - self.assertEqual(0, event.cuda_time_total) + self.assertEqual(0, event.device_time_total) self.assertEqual([], event.kernels) - self.assertEqual(0, event.cuda_time) + self.assertEqual(0, event.device_time) else: if event.node_id == 1: continue self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1]) if get_name(event) in EXPECTED_REMOTE_EVENTS: - self.assertGreater(event.cuda_time_total, 0) + self.assertGreater(event.device_time_total, 0) self.assertEqual(1, len(event.kernels)) kernel = event.kernels[0] if event.node_id == dst_cuda_0: self.assertEqual(kernel.device, 0) if event.node_id == dst_cuda_1: self.assertEqual(kernel.device, 1) - self.assertGreater(event.cuda_time, 0) + self.assertGreater(event.device_time, 0) # Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled # events.
2.41.0
7f44d70b195d9f4bdfa3f564b47e015d8e2ec8f
Fri, 19 Apr 2024 04:07:00 +0000
[PATCH 0358/1000] [torch/distributed] Check gloo availability when doing isinstance(pg,… (#124233)
Fixes a bug where a reference to `_ProcessGroupWrapper` is used without first checking whether gloo is available. This fails on PyTorch builds that do not include gloo because `_ProcessGroupWrapper` is only exposed through pybind when building with `USE_GLOO=1`. Therefore, creating a new process group fails with a `NameError` when NCCL is the only available backend. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124233 Approved by: https://github.com/rohan-varma, https://github.com/d4l3k
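The fix boils down to a guard pattern: never reference an optionally-pybound symbol without first checking its availability flag. A self-contained sketch of the same pattern follows; `_GLOO_AVAILABLE`, `_ProcessGroupWrapper` and `wrapped_pg` mirror the names in the diff, while the try/except import and the helper function are illustrative, not the actual `distributed_c10d` code:

```python
# Guarding an optionally-bound symbol behind an availability flag.
# `_ProcessGroupWrapper` is only importable when PyTorch was built with USE_GLOO=1,
# so derive the flag from the import itself (illustrative, not the real module setup).
try:
    from torch._C._distributed_c10d import _ProcessGroupWrapper
    _GLOO_AVAILABLE = True
except ImportError:
    _GLOO_AVAILABLE = False


def unwrap_process_group(pg):
    """Peel ProcessGroupWrapper layers only when the gloo-backed wrapper exists."""
    # Short-circuit on the flag first so the wrapper symbol is never evaluated
    # on builds where it was not bound (avoids the NameError described above).
    while _GLOO_AVAILABLE and isinstance(pg, _ProcessGroupWrapper):
        pg = pg.wrapped_pg
    return pg
```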
diff --git a/test/distributed/test_pg_wrapper.py b/test/distributed/test_pg_wrapper.py index 1305ddd042..d7e59f1c90 100644 --- a/test/distributed/test_pg_wrapper.py +++ b/test/distributed/test_pg_wrapper.py @@ -3,15 +3,19 @@ import os import sys from datetime import timedelta +from unittest.mock import patch import torch import torch.distributed as c10d +from torch._C._distributed_c10d import _ProcessGroupWrapper + if not c10d.is_available(): print("c10d not available, skipping tests", file=sys.stderr) sys.exit(0) from test_c10d_common import LOOPBACK + from torch.testing._internal.common_distributed import ( create_device, MultiProcessTestCase, @@ -346,6 +350,36 @@ if not TEST_WITH_DEV_DBG_ASAN: pg.allreduce([torch.ones(1, device=dev)]) pg._end_coalescing(torch.device(dev)) + @requires_nccl() + @skip_if_lt_x_gpu(2) + @with_dist_debug_levels(levels=["DETAIL"]) + @patch("torch.distributed.distributed_c10d._GLOO_AVAILABLE", False) + def test_debug_level_detail_no_gloo(self): + with self.assertRaisesRegex( + AssertionError, "ProcessGroupWrapper unsupported without GLOO backend" + ): + self._create_wrapper_pg() + + @requires_nccl() + @skip_if_lt_x_gpu(2) + @patch("torch.distributed.distributed_c10d._GLOO_AVAILABLE", False) + def test_new_group_no_gloo(self): + def patched_isinstance(obj, clazz): + if clazz is _ProcessGroupWrapper: + raise NameError + else: + return isinstance(obj, clazz) + + with patch( + "torch.distributed.distributed_c10d.isinstance", + side_effect=patched_isinstance, + ): + self._create_wrapper_pg(with_new_group=True) + # nothing to assert, isinstance(pg, _ProcessGroupWrapper) + # should never be invoked since it is preceeded by + # _GLOO_AVAILABLE check, this test will fail on + # an unexpected NameError if not. + @requires_gloo() class ProcessGroupGlooWrapperTest(AbstractProcessGroupWrapperTest): diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py index fed2a80d3f..1834347865 100644 --- a/torch/distributed/distributed_c10d.py +++ b/torch/distributed/distributed_c10d.py @@ -1400,7 +1400,7 @@ def _get_split_source(pg): # If necessary, find a backend to split from by peeling process # group wrappers from our potentially wrapped process group. - while isinstance(split_from, _ProcessGroupWrapper): + while _GLOO_AVAILABLE and isinstance(split_from, _ProcessGroupWrapper): split_from = split_from.wrapped_pg return split_from @@ -3795,6 +3795,8 @@ def _create_process_group_wrapper( world_size: int, timeout: timedelta = default_pg_timeout, ): + assert _GLOO_AVAILABLE, "ProcessGroupWrapper unsupported without GLOO backend." + # (whc) this appears to be just for the gloo backend? if so, `default_pg_timeout` is appropriate... # Create a separate prefix store for the helper process group.
2.41.0
ba85b34dd5b1eafa71c158ef2aec433ebf86e8f
Fri, 19 Apr 2024 04:47:27 +0000
[PATCH 0359/1000] [AOTI] Enable mmapped weights when CUDA is used (#124346)
This is done by refactoring the logic that returns a pointer to the start of the constants into a `_get_constants_start()` method and calling it from both the CUDA and CPU readers. It has no runtime impact, but export time drops from 10m to 3m when mmapped weights are used on an AWS p4d.24xlarge. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124346 Approved by: https://github.com/mikekgfb, https://github.com/desertfire
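For background, the mmap path appends the serialized constants to the compiled artifact and maps them back at load time instead of copying them into memory. A rough, Unix-only Python sketch of that idea follows; the file layout, `MAGIC` value and function name are hypothetical and only analogous to the AOTInductor C++ reader shown in the diff:

```python
import mmap
import os
import struct

MAGIC = 0x41AB71C0DE  # hypothetical magic number stored at the end of the blob


def read_appended_weights(path, weights_size):
    """Memory-map a weights blob appended to the end of a binary file."""
    fd = os.open(path, os.O_RDONLY)
    try:
        fsize = os.lseek(fd, 0, os.SEEK_END)
        offset = fsize - weights_size
        # mmap offsets must be aligned to the platform granularity, analogous
        # to the 16K alignment check in the C++ reader.
        assert offset % mmap.ALLOCATIONGRANULARITY == 0
        mapped = mmap.mmap(fd, weights_size, prot=mmap.PROT_READ, offset=offset)
    finally:
        os.close(fd)  # the mapping stays valid after the fd is closed
    # Verify a trailing magic number so a corrupt append is detected early.
    (tail,) = struct.unpack_from("<Q", mapped, weights_size - 8)
    assert tail == MAGIC, "weights data seems corrupt"
    return mapped
```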
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index 7c0ee56e19..e120e6446a 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -1827,10 +1827,8 @@ class AotCodeCompiler: if name not in graph.folded_constants ) # TODO: Fix mmap weights with cuda - use_mmap_weights = ( - not cuda and not config.is_fbcode() and consts_size > 2_000_000_000 - ) - if config.aot_inductor.force_mmap_weights and not cuda: + use_mmap_weights = not config.is_fbcode() and consts_size > 2_000_000_000 + if config.aot_inductor.force_mmap_weights: use_mmap_weights = True compile_cmd = cpp_compile_command( input=input_path, diff --git a/torch/csrc/inductor/aoti_runtime/model.h b/torch/csrc/inductor/aoti_runtime/model.h index f03bf6d0fa..7ea53dc24b 100644 --- a/torch/csrc/inductor/aoti_runtime/model.h +++ b/torch/csrc/inductor/aoti_runtime/model.h @@ -268,51 +268,16 @@ class AOTInductorModelBase { if (!skip_copy) { AOTI_RUNTIME_DEVICE_CHECK(cudaMemcpy( internal_ptr, - _binary_constants_bin_start + bytes_read, + _get_constants_start() + bytes_read, data_size, cudaMemcpyHostToDevice)); } return internal_ptr; -#elif USE_MMAP_SELF - // get pointer to constant which is packed in model during compile time. - AOTI_RUNTIME_CHECK(!skip_copy, "pure cpu mode doesn't support skip copy"); - if (!self_mmap) { - Dl_info dl_info; - // get pointer to constant which are appended to the binary - AOTI_RUNTIME_CHECK( - dladdr(__func__, &dl_info), "Can't find shared library name"); - int fd = open(dl_info.dli_fname, O_RDONLY); - AOTI_RUNTIME_CHECK(fd >= 0, "Shared library file cannot be opened"); - auto fsize = lseek(fd, 0, SEEK_END); - auto weights_size = - reinterpret_cast<const uint64_t*>(_binary_constants_bin_start)[0]; - auto magic_number = - reinterpret_cast<const uint64_t*>(_binary_constants_bin_start)[1]; - auto weights_offset = fsize - weights_size; - AOTI_RUNTIME_CHECK( - (weights_offset & 0x3fff) == 0, - "weights_offset must be aligned to 16K boundary"); - auto ptr = mmap( - NULL, - weights_size, - PROT_READ | PROT_WRITE, - MAP_PRIVATE, - fd, - weights_offset); - close(fd); - AOTI_RUNTIME_CHECK(ptr != MAP_FAILED, "mmap() failed"); - self_mmap = static_cast<uint8_t*>(ptr); - AOTI_RUNTIME_CHECK( - reinterpret_cast<uint64_t*>( - self_mmap + weights_size - sizeof(uint64_t))[0] == magic_number, - "Weigths data seems corrupt"); - } - return self_mmap + bytes_read; -#else // !USE_CUDA&& !USE_MMAP_SELF +#else // get pointer to constant which is packed in model during compile time. 
AOTI_RUNTIME_CHECK(!skip_copy, "pure cpu mode doesn't support skip copy"); - return const_cast<uint8_t*>(_binary_constants_bin_start) + bytes_read; + return _get_constants_start() + bytes_read; #endif // USE_CUDA } @@ -470,6 +435,45 @@ class AOTInductorModelBase { } protected: + uint8_t* _get_constants_start() { +#ifndef USE_MMAP_SELF + return const_cast<uint8_t*>(_binary_constants_bin_start); +#else + if (self_mmap) { + return self_mmap; + } + Dl_info dl_info; + // get pointer to constant which are appended to the binary + AOTI_RUNTIME_CHECK( + dladdr(__func__, &dl_info), "Can't find shared library name"); + int fd = open(dl_info.dli_fname, O_RDONLY); + AOTI_RUNTIME_CHECK(fd >= 0, "Shared library file cannot be opened"); + auto fsize = lseek(fd, 0, SEEK_END); + auto weights_size = + reinterpret_cast<const uint64_t*>(_binary_constants_bin_start)[0]; + auto magic_number = + reinterpret_cast<const uint64_t*>(_binary_constants_bin_start)[1]; + auto weights_offset = fsize - weights_size; + AOTI_RUNTIME_CHECK( + (weights_offset & 0x3fff) == 0, + "weights_offset must be aligned to 16K boundary"); + auto ptr = mmap( + NULL, + weights_size, + PROT_READ | PROT_WRITE, + MAP_PRIVATE, + fd, + weights_offset); + close(fd); + AOTI_RUNTIME_CHECK(ptr != MAP_FAILED, "mmap() failed"); + self_mmap = static_cast<uint8_t*>(ptr); + AOTI_RUNTIME_CHECK( + reinterpret_cast<uint64_t*>( + self_mmap + weights_size - sizeof(uint64_t))[0] == magic_number, + "Weigths data seems corrupt"); + return self_mmap; +#endif + } struct ParamInfo { const char* name = nullptr; };
2.41.0
20bc1080e3cdf432257178756553f5f34064771
Fri, 19 Apr 2024 09:09:03 +0000
[PATCH 0364/1000] Revert "[Profiler] Unify the device(CUDA, XPU, PrivateUse1) in torch profiler post processing (#123247)"
This reverts commit 768ce2cddad2057349d1194274a5f93c47c5ac88. Reverted https://github.com/pytorch/pytorch/pull/123247 on behalf of https://github.com/DanilBaibak due to Broken trunk ([comment](https://github.com/pytorch/pytorch/pull/123247#issuecomment-2066152611))
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py index 56771eb188..d9012d0e89 100644 --- a/test/profiler/test_profiler.py +++ b/test/profiler/test_profiler.py @@ -1095,7 +1095,7 @@ class TestProfiler(TestCase): stats = run_profiler(create_cuda_tensor) check_metrics( stats, - "device_memory_usage", + "cuda_memory_usage", allocs=[ "test_user_scope_alloc", "aten::to", @@ -1147,7 +1147,7 @@ class TestProfiler(TestCase): deallocs=["[memory]"], ) if torch.cuda.is_available(): - check_metrics(stats, "device_memory_usage", deallocs=["[memory]"]) + check_metrics(stats, "cuda_memory_usage", deallocs=["[memory]"]) @unittest.skipIf( IS_JETSON, "Jetson has a guard against OOM since host and gpu memory are shared" diff --git a/test/test_autograd.py b/test/test_autograd.py index 5f2c4d28e4..95432aaa6a 100644 --- a/test/test_autograd.py +++ b/test/test_autograd.py @@ -4628,11 +4628,11 @@ Done""", self.assertEqual(avg.count, 4) self.assertEqual(avg.cpu_time_total, 30) self.assertEqual(avg.self_cpu_time_total, 30) - self.assertEqual(avg.device_time_total, 0) + self.assertEqual(avg.cuda_time_total, 0) # average stats self.assertEqual(avg.cpu_time, 7.5) - self.assertEqual(avg.device_time_total, 0) + self.assertEqual(avg.cuda_time_total, 0) def test_profiler_shapes(self): print("") diff --git a/torch/_C/_autograd.pyi b/torch/_C/_autograd.pyi index e6c4c3ec9d..7e503a8e90 100644 --- a/torch/_C/_autograd.pyi +++ b/torch/_C/_autograd.pyi @@ -15,7 +15,6 @@ from ._profiler import ( class DeviceType(Enum): CPU = ... CUDA = ... - XPU = ... MKLDNN = ... OPENGL = ... OPENCL = ... diff --git a/torch/_C/_profiler.pyi b/torch/_C/_profiler.pyi index d19e72f573..e1481dd9c1 100644 --- a/torch/_C/_profiler.pyi +++ b/torch/_C/_profiler.pyi @@ -39,7 +39,6 @@ class ActiveProfilerType(Enum): class ProfilerActivity(Enum): CPU = ... CUDA = ... - XPU = ... MTIA = ... PrivateUse1 = ... diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index 77e84d0829..53319bd2dd 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -136,7 +136,7 @@ def do_bench_using_profiling(fn: Callable[[], Any], warmup=25, rep=100) -> float log.debug("profiling time breakdown") log.debug(actual_events.table(row_limit=-1)) - res = sum(event.device_time_total for event in actual_events) / 1000.0 / n_repeat + res = sum(event.cuda_time_total for event in actual_events) / 1000.0 / n_repeat log.debug("profiling results: %s ms", res) return res diff --git a/torch/autograd/profiler.py b/torch/autograd/profiler.py index f233277b7e..ba020fb3cb 100644 --- a/torch/autograd/profiler.py +++ b/torch/autograd/profiler.py @@ -7,6 +7,7 @@ from warnings import warn import torch import torch.cuda +from torch._C import _get_privateuse1_backend_name from torch._C._profiler import _ExperimentalConfig from torch.autograd import ( @@ -111,12 +112,8 @@ class profile: Args: enabled (bool, optional): Setting this to False makes this context manager a no-op. - use_cuda (bool, optional): Enables timing of CUDA events as well - using the cudaEvent API. (will be deprecated) - - use_device (str, optional): Enables timing of device events. - Adds approximately 4us of overhead to each tensor operation when use cuda. - The valid devices options are 'cuda', 'xpu' and 'privateuseone'. + use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API. + Adds approximately 4us of overhead to each tensor operation. 
record_shapes (bool, optional): If shapes recording is set, information about input dimensions will be collected. This allows one to see which @@ -164,9 +161,9 @@ class profile: .. warning: Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_), - one cannot use the profiler with ``use_device = 'cuda'`` to benchmark + one cannot use the profiler with ``use_cuda = True`` to benchmark DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading, - please use ``use_device = None`` or ``num_workers = 0``. + please use ``use_cuda = False`` or ``num_workers = 0``. Example: >>> # xdoctest: +SKIP @@ -210,13 +207,9 @@ class profile: if not self.enabled: return self.use_cuda = use_cuda - if self.use_cuda: - warn( - "The attribute `use_cuda` will be deprecated soon, please use ``use_device = 'cuda'`` instead." - ) - self.use_device: Optional[str] = "cuda" - else: - self.use_device = use_device + self.use_device: Optional[str] = ( + use_device if use_device != "privateuseone" else None + ) self.function_events: Optional[EventList] = None self.entered = False self.record_shapes = record_shapes @@ -240,19 +233,17 @@ class profile: use_kineto ), "Device-only events supported only with Kineto (use_kineto=True)" - VALID_DEVICE_OPTIONS = ["cuda", "xpu", "privateuseone"] - if self.use_device not in VALID_DEVICE_OPTIONS: - warn(f"The {self.use_device} is not a valid device option.") + if self.use_device == "cuda": + self.use_device = None + self.use_cuda = True + + if self.use_device and self.use_device != _get_privateuse1_backend_name(): + warn(f"{self.use_device} doesn't support profile.") self.use_device = None - if self.use_device == "cuda" and not torch.cuda.is_available(): + if self.use_cuda and not torch.cuda.is_available(): warn("CUDA is not available, disabling CUDA profiling") self.use_cuda = False - self.use_device = None - - if self.use_device == "xpu" and not torch.xpu.is_available(): - warn("XPU is not available, disabling XPU profiling") - self.use_device = None self.kineto_activities = set() if self.use_cpu: @@ -261,18 +252,14 @@ class profile: self.kineto_activities.add(ProfilerActivity.MTIA) self.profiler_kind = ProfilerState.KINETO - if self.use_device == "cuda": + if self.use_cuda: if not use_kineto or ProfilerActivity.CUDA not in _supported_activities(): assert self.use_cpu, "Legacy CUDA profiling requires use_cpu=True" self.profiler_kind = ProfilerState.KINETO_GPU_FALLBACK else: self.kineto_activities.add(ProfilerActivity.CUDA) - elif self.use_device == "xpu": - assert ( - use_kineto and ProfilerActivity.XPU in _supported_activities() - ), "Legacy XPU profiling is not supported. Requires use_kineto=True on XPU devices." 
- self.kineto_activities.add(ProfilerActivity.XPU) - elif self.use_device is not None and self.use_device != "privateuseone": + + if self.use_device: if ( not use_kineto or ProfilerActivity.PrivateUse1 not in _supported_activities() @@ -328,10 +315,8 @@ class profile: def __exit__(self, exc_type, exc_val, exc_tb): if not self.enabled: return - if self.use_device == "cuda": + if self.use_cuda: torch.cuda.synchronize() - elif self.use_device == "xpu": - torch.xpu.synchronize() t0 = perf_counter_ns() self.kineto_results = _disable_profiler() @@ -347,6 +332,7 @@ class profile: self.function_events = EventList( parsed_results, + use_cuda=self.use_cuda, use_device=self.use_device, profile_memory=self.profile_memory, with_flops=self.with_flops, @@ -459,11 +445,17 @@ class profile: else 0 ) - def _device_memory_usage(mem_record): + def _cuda_memory_usage(mem_record): return ( mem_record.nbytes() - if mem_record.device_type() - in [DeviceType.CUDA, DeviceType.PrivateUse1, DeviceType.HIP] + if mem_record.device_type() in [DeviceType.CUDA, DeviceType.HIP] + else 0 + ) + + def _privateuse1_memory_usage(mem_record): + return ( + mem_record.nbytes() + if mem_record.device_type() in [DeviceType.PrivateUse1] else 0 ) @@ -479,14 +471,16 @@ class profile: abs_end_ns = kineto_event.start_ns() + kineto_event.duration_ns() cpu_memory_usage = 0 - device_memory_usage = 0 + cuda_memory_usage = 0 + privateuse1_memory_usage = 0 if kineto_event.device_type() == DeviceType.CPU: # find the corresponding memory allocation events for mem_record in mem_records_acc.in_interval( kineto_event.start_ns() / 1000, abs_end_ns / 1000 ): cpu_memory_usage += _cpu_memory_usage(mem_record[0]) - device_memory_usage += _device_memory_usage(mem_record[0]) + cuda_memory_usage += _cuda_memory_usage(mem_record[0]) + privateuse1_memory_usage += _privateuse1_memory_usage(mem_record[0]) mem_record[1] = True is_async = kineto_event.is_async() or ( @@ -511,7 +505,8 @@ class profile: scope=kineto_event.scope(), use_device=self.use_device, cpu_memory_usage=cpu_memory_usage, - device_memory_usage=device_memory_usage, + cuda_memory_usage=cuda_memory_usage, + privateuse1_memory_usage=privateuse1_memory_usage, is_async=is_async, sequence_nr=kineto_event.sequence_nr(), device_type=kineto_event.device_type(), @@ -521,12 +516,12 @@ class profile: ) max_evt_id = max(max_evt_id, fe.id) if fe.device_type == DeviceType.CPU and not fe.is_async: - if self.use_device == "privateuseone": + if self.use_device: privateuse1_time = kineto_event.privateuse1_elapsed_us() if privateuse1_time > 0: fe.append_kernel(fe.name, fe.device_index, privateuse1_time) fe.is_legacy = True - elif self.use_device == "cuda": + else: # Check if we have CUDA time as a fallback cuda_time = kineto_event.cuda_elapsed_us() if cuda_time > 0: @@ -539,7 +534,7 @@ class profile: device_corr_map[corr_id] = [] device_corr_map[corr_id].append(fe) - # associate device kernels and device runtime (CPU) with CPU events + # associate CUDA kernels and CUDA runtime (CPU) with CPU events for fe in function_events: if ( fe.device_type == DeviceType.CPU @@ -554,7 +549,7 @@ class profile: f_evt.time_range.end - f_evt.time_range.start, ) elif f_evt.device_type == DeviceType.CPU: - # make sure that 'thread' of a CPU Kineto (e.g. Device Runtime) event is associated + # make sure that 'thread' of a CPU Kineto (e.g. 
CUDA Runtime) event is associated # with the 'thread' of the corresponding linked PyTorch event to properly track # parents and children f_evt.thread = fe.thread @@ -574,7 +569,8 @@ class profile: scope=0, # RecordScope::FUNCTION use_device=self.use_device, cpu_memory_usage=_cpu_memory_usage(evt), - device_memory_usage=_device_memory_usage(evt), + cuda_memory_usage=_cuda_memory_usage(evt), + privateuse1_memory_usage=_privateuse1_memory_usage(evt), is_async=False, sequence_nr=-1, device_type=DeviceType.CPU, diff --git a/torch/autograd/profiler_legacy.py b/torch/autograd/profiler_legacy.py index c491f9797a..32700ffb1c 100644 --- a/torch/autograd/profiler_legacy.py +++ b/torch/autograd/profiler_legacy.py @@ -93,7 +93,7 @@ class profile: parsed_results = _parse_legacy_records(records) self.function_events = EventList( parsed_results, - use_device="cuda" if self.use_cuda else None, + use_cuda=self.use_cuda, profile_memory=self.profile_memory, with_flops=self.with_flops, ) @@ -251,7 +251,7 @@ def _parse_legacy_records(thread_records): ], scope=start.scope(), cpu_memory_usage=cpu_memory_usage, - device_memory_usage=cuda_memory_usage, + cuda_memory_usage=cuda_memory_usage, is_async=is_async, is_remote=is_remote_event, sequence_nr=start.sequence_nr(), @@ -287,7 +287,7 @@ def _parse_legacy_records(thread_records): end_us=0, stack=[], cpu_memory_usage=record.cpu_memory_usage(), - device_memory_usage=record.cuda_memory_usage(), + cuda_memory_usage=record.cuda_memory_usage(), is_legacy=True, ) functions.append(fe) diff --git a/torch/autograd/profiler_util.py b/torch/autograd/profiler_util.py index 6d446d6ade..4db601ad7b 100644 --- a/torch/autograd/profiler_util.py +++ b/torch/autograd/profiler_util.py @@ -26,10 +26,12 @@ class EventList(list): """A list of Events (for pretty printing).""" def __init__(self, *args, **kwargs): + use_cuda = kwargs.pop("use_cuda", True) use_device = kwargs.pop("use_device", None) profile_memory = kwargs.pop("profile_memory", False) with_flops = kwargs.pop("with_flops", False) super().__init__(*args, **kwargs) + self._use_cuda = use_cuda self._use_device = use_device self._profile_memory = profile_memory self._tree_built = False @@ -179,16 +181,14 @@ class EventList(list): Args: sort_by (str, optional): Attribute used to sort entries. By default they are printed in the same order as they were registered. - Valid keys include: ``cpu_time``, ``cuda_time``, ``xpu_time``, - ``cpu_time_total``, ``cuda_time_total``, ``xpu_time_total``, - ``cpu_memory_usage``, ``cuda_memory_usage``, ``xpu_memory_usage``, - ``self_cpu_memory_usage``, ``self_cuda_memory_usage``, - ``self_xpu_memory_usage``, ``count``. + Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``, + ``cuda_time_total``, ``cpu_memory_usage``, ``cuda_memory_usage``, + ``self_cpu_memory_usage``, ``self_cuda_memory_usage``, ``count``. top_level_events_only(bool, optional): Boolean flag to determine the selection of events to display. If true, the profiler will only display events at top level like top-level invocation of python `lstm`, python `add` or other functions, nested events like low-level - cpu/cuda/xpu ops events are omitted for profiler result readability. + cpu/cuda ops events are omitted for profiler result readability. Returns: A string containing the table. 
@@ -267,7 +267,6 @@ class EventList(list): return [ "self_cpu_time_total", "self_cuda_time_total", - "self_xpu_time_total", "self_privateuse1_time_total", ] @@ -281,12 +280,7 @@ class EventList(list): with open(path, "w") as f: for evt in self: if evt.stack and len(evt.stack) > 0: - metric_value = getattr( - evt, - metric.replace("cuda", "device") - .replace("xpu", "device") - .replace("privateuse1", "device"), - ) + metric_value = getattr(evt, metric) if int(metric_value) > 0: stack_str = "" for entry in reversed(evt.stack): @@ -331,6 +325,7 @@ class EventList(list): avg_list = EventList( stats.values(), + use_cuda=self._use_cuda, use_device=self._use_device, profile_memory=self._profile_memory, with_flops=self._with_flops, @@ -400,23 +395,26 @@ class FormattedTimesMixin: """ cpu_time_str = _attr_formatter("cpu_time") - device_time_str = _attr_formatter("device_time") + cuda_time_str = _attr_formatter("cuda_time") + privateuse1_time_str = _attr_formatter("privateuse1_time") cpu_time_total_str = _attr_formatter("cpu_time_total") - device_time_total_str = _attr_formatter("device_time_total") + cuda_time_total_str = _attr_formatter("cuda_time_total") + privateuse1_time_total_str = _attr_formatter("privateuse1_time_total") self_cpu_time_total_str = _attr_formatter("self_cpu_time_total") - self_device_time_total_str = _attr_formatter("self_device_time_total") + self_cuda_time_total_str = _attr_formatter("self_cuda_time_total") + self_privateuse1_time_total_str = _attr_formatter("self_privateuse1_time_total") @property def cpu_time(self): return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count # type: ignore[attr-defined] @property - def device_time(self): - return 0.0 if self.count == 0 else 1.0 * self.device_time_total / self.count # type: ignore[attr-defined] + def cuda_time(self): + return 0.0 if self.count == 0 else 1.0 * self.cuda_time_total / self.count # type: ignore[attr-defined] @property - def cuda_time(self): # To be deprecated - return self.device_time + def privateuse1_time(self): + return 0.0 if self.count == 0 else 1.0 * self.privateuse1_time_total / self.count # type: ignore[attr-defined] class Interval: @@ -450,7 +448,8 @@ class FunctionEvent(FormattedTimesMixin): scope=0, use_device=None, cpu_memory_usage=0, - device_memory_usage=0, + cuda_memory_usage=0, + privateuse1_memory_usage=0, is_async=False, is_remote=False, sequence_nr=-1, @@ -480,7 +479,8 @@ class FunctionEvent(FormattedTimesMixin): self.scope: int = scope self.use_device: Optional[str] = use_device self.cpu_memory_usage: int = cpu_memory_usage - self.device_memory_usage: int = device_memory_usage + self.cuda_memory_usage: int = cuda_memory_usage + self.privateuse1_memory_usage: int = privateuse1_memory_usage self.is_async: bool = is_async self.is_remote: bool = is_remote self.sequence_nr: int = sequence_nr @@ -530,23 +530,20 @@ class FunctionEvent(FormattedTimesMixin): ) @property - def self_device_memory_usage(self): + def self_cuda_memory_usage(self): if self.is_async or self.device_type != DeviceType.CPU: return 0 - return self.device_memory_usage - sum( - child.device_memory_usage for child in self.cpu_children + return self.cuda_memory_usage - sum( + child.cuda_memory_usage for child in self.cpu_children ) @property - def self_cuda_memory_usage(self): # To be deprecated - self.self_device_memory_usage - - @property - def cpu_time_total(self): - if self.device_type == DeviceType.CPU: - return self.time_range.elapsed_us() - else: + def self_privateuse1_memory_usage(self): + if self.is_async or 
self.device_type != DeviceType.CPU: return 0 + return self.privateuse1_memory_usage - sum( + child.privateuse1_memory_usage for child in self.cpu_children + ) @property def self_cpu_time_total(self): @@ -557,50 +554,84 @@ class FunctionEvent(FormattedTimesMixin): ) @property - def device_time_total(self): - if self.is_async or not self.use_device: + def cuda_time_total(self): + if self.is_async or self.use_device: return 0 if self.device_type == DeviceType.CPU: if not self.is_legacy: # account for the kernels in the children ops return sum(kinfo.duration for kinfo in self.kernels) + sum( - ch.device_time_total for ch in self.cpu_children + ch.cuda_time_total for ch in self.cpu_children ) else: # each legacy cpu events has a single (fake) kernel return sum(kinfo.duration for kinfo in self.kernels) else: - assert self.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1] + assert self.device_type == DeviceType.CUDA return self.time_range.elapsed_us() @property - def cuda_time_total(self): # To be deprecated - self.device_time_total + def self_cuda_time_total(self): + if self.is_async or self.use_device: + return 0 + if self.device_type == DeviceType.CPU: + return self.cuda_time_total - sum( + child.cuda_time_total for child in self.cpu_children + ) + else: + assert self.device_type == DeviceType.CUDA + return self.cuda_time_total @property - def self_device_time_total(self): + def cpu_time_total(self): + if self.device_type == DeviceType.CPU: + return self.time_range.elapsed_us() + else: + return 0 + + @property + def self_privateuse1_time_total(self): if self.is_async or not self.use_device: return 0 if self.device_type == DeviceType.CPU: - return self.device_time_total - sum( - [child.device_time_total for child in self.cpu_children] + return self.privateuse1_time_total - sum( + child.privateuse1_time_total for child in self.cpu_children ) else: - assert self.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1] - return self.device_time_total + assert self.device_type == DeviceType.CUDA + return self.privateuse1_time_total @property - def self_cuda_time_total(self): # To be deprecated - self.self_device_time_total + def privateuse1_time_total(self): + if self.is_async or not self.use_device: + return 0 + if self.device_type == DeviceType.CPU: + if not self.is_legacy: + # account for the kernels in the children ops + return sum(kinfo.duration for kinfo in self.kernels) + sum( + ch.privateuse1_time_total for ch in self.cpu_children + ) + else: + # each legacy cpu events has a single (fake) kernel + return sum(kinfo.duration for kinfo in self.kernels) + else: + assert self.device_type == DeviceType.PrivateUse1 + return self.time_range.elapsed_us() @property def key(self): return self.name def __repr__(self): - device_name = self.use_device - device_time = self.device_time_str - device_memory_usage = self.device_memory_usage + device_name = "cuda" if not self.use_device else self.use_device + device_time = ( + self.cuda_time_str if not self.use_device else self.privateuse1_time_str + ) + device_memory_usage = ( + self.cuda_memory_usage + if not self.use_device + else self.privateuse1_memory_usage + ) return ( "<FunctionEvent id={} name={} device_type={} node_id={} cpu_time={} start_us={} end_us={} " "cpu_children={} {}_time={} name={} thread={} input_shapes={} " @@ -640,16 +671,20 @@ class FunctionEventAvg(FormattedTimesMixin): self.is_remote: bool = False self.use_device: Optional[str] = None self.cpu_time_total: int = 0 - self.device_time_total: int = 0 + self.cuda_time_total: int = 0 
+ self.privateuse1_time_total: int = 0 self.self_cpu_time_total: int = 0 - self.self_device_time_total: int = 0 + self.self_cuda_time_total: int = 0 + self.self_privateuse1_time_total: int = 0 self.input_shapes: Optional[List[List[int]]] = None self.stack: Optional[List] = None self.scope: Optional[int] = None self.cpu_memory_usage: int = 0 - self.device_memory_usage: int = 0 + self.cuda_memory_usage: int = 0 + self.privateuse1_memory_usage: int = 0 self.self_cpu_memory_usage: int = 0 - self.self_device_memory_usage: int = 0 + self.self_cuda_memory_usage: int = 0 + self.self_privateuse1_memory_usage: int = 0 self.cpu_children: Optional[List[FunctionEvent]] = None self.cpu_parent: Optional[FunctionEvent] = None self.device_type: DeviceType = DeviceType.CPU @@ -677,13 +712,17 @@ class FunctionEventAvg(FormattedTimesMixin): assert isinstance(other, (FunctionEvent, FunctionEventAvg)) assert other.key == self.key self.cpu_time_total += other.cpu_time_total - self.device_time_total += other.device_time_total + self.cuda_time_total += other.cuda_time_total + self.privateuse1_time_total += other.privateuse1_time_total self.self_cpu_time_total += other.self_cpu_time_total - self.self_device_time_total += other.self_device_time_total + self.self_cuda_time_total += other.self_cuda_time_total + self.self_privateuse1_time_total += other.self_privateuse1_time_total self.cpu_memory_usage += other.cpu_memory_usage - self.device_memory_usage += other.device_memory_usage + self.cuda_memory_usage += other.cuda_memory_usage + self.privateuse1_memory_usage += other.privateuse1_memory_usage self.self_cpu_memory_usage += other.self_cpu_memory_usage - self.self_device_memory_usage += other.self_device_memory_usage + self.self_cuda_memory_usage += other.self_cuda_memory_usage + self.self_privateuse1_memory_usage += other.self_privateuse1_memory_usage self.count += other.count if self.flops is None: self.flops = other.flops @@ -696,9 +735,19 @@ class FunctionEventAvg(FormattedTimesMixin): def __repr__(self): device_name = "cuda" if not self.use_device else self.use_device - self_device_time = self.self_device_time_total_str - device_time = self.device_time_str - device_memory = self.device_memory_usage + self_device_time = ( + self.self_cuda_time_total_str + if not self.use_device + else self.self_privateuse1_time_total_str + ) + device_time = ( + self.cuda_time_str if not self.use_device else self.privateuse1_time_str + ) + device_memory = ( + self.cuda_memory_usage + if not self.use_device + else self.privateuse1_memory_usage + ) return ( "<FunctionEventAvg key={} self_cpu_time={} cpu_time={} " " self_{}_time={} {}_time={} input_shapes={} " @@ -809,14 +858,19 @@ def _build_table( if len(events) == 0: return "" - has_device_time = any(event.self_device_time_total > 0 for event in events) - has_device_mem = any(event.self_device_memory_usage > 0 for event in events) + has_cuda_time = any(event.self_cuda_time_total > 0 for event in events) + has_cuda_mem = any(event.self_cuda_memory_usage > 0 for event in events) + has_privateuse1_time = any( + event.self_privateuse1_time_total > 0 for event in events + ) + has_privateuse1_mem = any( + event.self_privateuse1_memory_usage > 0 for event in events + ) use_device = events[0].use_device - # Running on PrivateUse1 device with profiler but not enable - # ProfilerActivity.PrivateUse1 can also catch privateuse1 memory usage. - # Here only need to check has_privateuse1_time if not use_device. 
- if not use_device and has_device_time: - raise RuntimeError("use_device is None, but there is device performance data.") + if not use_device and (has_privateuse1_mem or has_privateuse1_time): + raise RuntimeError( + "use_device is None, but there is private device performance data." + ) has_input_shapes = any( (event.input_shapes is not None and len(event.input_shapes) > 0) @@ -825,16 +879,8 @@ def _build_table( if sort_by is not None: events = EventList( - sorted( - events, - key=lambda evt: getattr( - evt, - sort_by.replace("cuda", "device") - .replace("xpu", "device") - .replace("privateuse1", "device"), - ), - reverse=True, - ), + sorted(events, key=lambda evt: getattr(evt, sort_by), reverse=True), + use_cuda=has_cuda_time, use_device=use_device, profile_memory=profile_memory, with_flops=with_flops, @@ -872,14 +918,23 @@ def _build_table( "CPU total", "CPU time avg", ] - device_name = use_device.upper() if use_device is not None else "None" - if has_device_time: + if has_cuda_time: + headers.extend( + [ + "Self CUDA", + "Self CUDA %", + "CUDA total", + "CUDA time avg", + ] + ) + if has_privateuse1_time: + privateuse1 = use_device.upper() headers.extend( [ - f"Self {device_name}", - f"Self {device_name} %", - f"{device_name} total", - f"{device_name} time avg", + f"Self {privateuse1}", + f"Self {privateuse1} %", + f"{privateuse1} total", + f"{privateuse1} time avg", ] ) if profile_memory: @@ -889,11 +944,19 @@ def _build_table( "Self CPU Mem", ] ) - if has_device_mem: + if has_cuda_mem: headers.extend( [ - f"{device_name} Mem", - f"Self {device_name} Mem", + "CUDA Mem", + "Self CUDA Mem", + ] + ) + if has_privateuse1_mem: + privateuse1 = use_device.upper() + headers.extend( + [ + f"{privateuse1} Mem", + f"Self {privateuse1} Mem", ] ) headers.append("# of Calls") @@ -967,16 +1030,22 @@ def _build_table( result.append(s) result.append("\n") # Yes, newline after the end as well - sum_self_cpu_time_total = 0 - sum_self_device_time_total = 0 + sum_self_cpu_time_total = sum(event.self_cpu_time_total for event in events) + sum_self_cuda_time_total = 0 + sum_self_privateuse1_time_total = 0 for evt in events: - sum_self_cpu_time_total += evt.self_cpu_time_total - if evt.device_type == DeviceType.CPU and evt.is_legacy: + if evt.device_type == DeviceType.CPU: # in legacy profiler, kernel info is stored in cpu events - sum_self_device_time_total += evt.self_device_time_total - elif evt.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1]: + if evt.is_legacy: + if not use_device: + sum_self_cuda_time_total += evt.self_cuda_time_total + else: + sum_self_privateuse1_time_total += evt.self_privateuse1_time_total + elif evt.device_type == DeviceType.CUDA: # in kineto profiler, there're events with the correct device type (e.g. 
CUDA) - sum_self_device_time_total += evt.self_device_time_total + sum_self_cuda_time_total += evt.self_cuda_time_total + elif evt.device_type == DeviceType.PrivateUse1: + sum_self_privateuse1_time_total += evt.self_privateuse1_time_total # Actual printing if header is not None: @@ -1021,16 +1090,28 @@ def _build_table( evt.cpu_time_total_str, # CPU total evt.cpu_time_str, # CPU time avg ] - if has_device_time: + if has_cuda_time: row_values.extend( [ - evt.self_device_time_total_str, - # device time total % + evt.self_cuda_time_total_str, + # CUDA time total % _format_time_share( - evt.self_device_time_total, sum_self_device_time_total + evt.self_cuda_time_total, sum_self_cuda_time_total ), - evt.device_time_total_str, - evt.device_time_str, # device time avg + evt.cuda_time_total_str, + evt.cuda_time_str, # Cuda time avg + ] + ) + if has_privateuse1_time: + row_values.extend( + [ + evt.self_privateuse1_time_total_str, + # PrivateUse1 time total % + _format_time_share( + evt.self_privateuse1_time_total, sum_self_privateuse1_time_total + ), + evt.privateuse1_time_total_str, + evt.privateuse1_time_str, # PrivateUse1 time avg ] ) if profile_memory: @@ -1042,13 +1123,22 @@ def _build_table( _format_memory(evt.self_cpu_memory_usage), ] ) - if has_device_mem: + if has_cuda_mem: + row_values.extend( + [ + # CUDA Mem Total + _format_memory(evt.cuda_memory_usage), + # Self CUDA Mem Total + _format_memory(evt.self_cuda_memory_usage), + ] + ) + if has_privateuse1_mem: row_values.extend( [ - # Device Mem Total - _format_memory(evt.device_memory_usage), - # Self Device Mem Total - _format_memory(evt.self_device_memory_usage), + # PrivateUse1 Mem Total + _format_memory(evt.privateuse1_memory_usage), + # Self PrivateUse1 Mem Total + _format_memory(evt.self_privateuse1_memory_usage), ] ) row_values.append( @@ -1084,9 +1174,10 @@ def _build_table( append(header_sep) append(f"Self CPU time total: {_format_time(sum_self_cpu_time_total)}") - if has_device_time: + if has_cuda_time: + append(f"Self CUDA time total: {_format_time(sum_self_cuda_time_total)}") + if has_privateuse1_time: append( - f"Self {use_device.upper() if use_device is not None else 'None'} " - f"time total: {_format_time(sum_self_device_time_total)}" + f"Self {use_device.upper()} time total: {_format_time(sum_self_privateuse1_time_total)}" ) return "".join(result) diff --git a/torch/csrc/profiler/kineto_shim.cpp b/torch/csrc/profiler/kineto_shim.cpp index 41561c6f3e..85f91bf8b2 100644 --- a/torch/csrc/profiler/kineto_shim.cpp +++ b/torch/csrc/profiler/kineto_shim.cpp @@ -342,7 +342,6 @@ c10::DeviceType deviceTypeFromActivity(libkineto::ActivityType activity_type) { case libkineto::ActivityType::USER_ANNOTATION: case libkineto::ActivityType::EXTERNAL_CORRELATION: case libkineto::ActivityType::CUDA_RUNTIME: - case libkineto::ActivityType::XPU_RUNTIME: case libkineto::ActivityType::CPU_INSTANT_EVENT: case libkineto::ActivityType::GLOW_RUNTIME: case libkineto::ActivityType::MTIA_RUNTIME: diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py index 120b2acad2..bfc725700a 100644 --- a/torch/profiler/profiler.py +++ b/torch/profiler/profiler.py @@ -13,6 +13,7 @@ from typing_extensions import Self import torch import torch.autograd.profiler as prof +from torch._C import _get_privateuse1_backend_name from torch._C._profiler import ( _add_execution_trace_observer, _disable_execution_trace_observer, @@ -71,10 +72,8 @@ class _KinetoProfile: Args: activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported 
values: - ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``, - ``torch.profiler.ProfilerActivity.XPU``. - Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA - or (when available) ProfilerActivity.XPU. + ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. + Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. record_shapes (bool): save information about operator's input shapes. profile_memory (bool): track tensor memory allocation/deallocation (see ``export_memory_timeline`` for more details). @@ -127,13 +126,9 @@ class _KinetoProfile: self.profiler: Optional[prof.profile] = None self.mem_tl: Optional[MemoryProfileTimeline] = None self.use_device = None - if ProfilerActivity.CUDA in self.activities: - self.use_device = "cuda" - elif ProfilerActivity.XPU in self.activities: - self.use_device = "xpu" - else: - self.use_device = "privateuseone" - + privateuse1_backend = _get_privateuse1_backend_name() + if privateuse1_backend != "privateuseone": + self.use_device = privateuse1_backend # user-defined metadata to be amended to the trace self.preset_metadata: Dict[str, str] = dict() @@ -149,7 +144,7 @@ class _KinetoProfile: use_cuda=(ProfilerActivity.CUDA in self.activities), use_cpu=(ProfilerActivity.CPU in self.activities), use_mtia=(ProfilerActivity.MTIA in self.activities), - use_device=self.use_device, + use_device=None, record_shapes=self.record_shapes, with_flops=self.with_flops, profile_memory=self.profile_memory, @@ -449,10 +444,8 @@ class profile(_KinetoProfile): Args: activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values: - ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``, - ``torch.profiler.ProfilerActivity.XPU``. - Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA - or (when available) ProfilerActivity.XPU. + ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. + Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. schedule (Callable): callable that takes step (int) as a single parameter and returns ``ProfilerAction`` value that specifies the profiler action to perform at each step. 
on_trace_ready (Callable): callable that is called at each step when ``schedule`` diff --git a/torch/testing/_internal/distributed/rpc/rpc_test.py b/torch/testing/_internal/distributed/rpc/rpc_test.py index 9f1a8f8411..25495f0bf8 100644 --- a/torch/testing/_internal/distributed/rpc/rpc_test.py +++ b/torch/testing/_internal/distributed/rpc/rpc_test.py @@ -4606,22 +4606,22 @@ class CudaRpcTest(RpcAgentTestFixture): function_events = p.function_events for event in function_events: if event.is_async: - self.assertEqual(0, event.device_time_total) + self.assertEqual(0, event.cuda_time_total) self.assertEqual([], event.kernels) - self.assertEqual(0, event.device_time) + self.assertEqual(0, event.cuda_time) else: if event.node_id == 1: continue self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1]) if get_name(event) in EXPECTED_REMOTE_EVENTS: - self.assertGreater(event.device_time_total, 0) + self.assertGreater(event.cuda_time_total, 0) self.assertEqual(1, len(event.kernels)) kernel = event.kernels[0] if event.node_id == dst_cuda_0: self.assertEqual(kernel.device, 0) if event.node_id == dst_cuda_1: self.assertEqual(kernel.device, 1) - self.assertGreater(event.device_time, 0) + self.assertGreater(event.cuda_time, 0) # Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled # events.
2.41.0
8e403c739e67e3377d6dcb391f4cad86625a196
Fri, 19 Apr 2024 09:22:58 +0000
[PATCH 0365/1000] Added a docstring for torch.Size.numel. (#124186)
Fixes #61231. Fixes #124167. This PR documents a rather long-standing issue w.r.t. unexpected behavior of `torch.Size.numel`, first reported almost 5 years ago. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124186 Approved by: https://github.com/janeyx99
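A quick way to see the behavior the new docstring describes (the gotcha being that `numel()` counts elements, not dimensions) is the following illustration, which uses only the public torch API and is not part of the patch itself:
```
import torch

x = torch.ones(10, 20)   # a 2-D tensor
s = x.size()             # torch.Size([10, 20])

# numel() is the number of elements a tensor of this size would hold,
# not the number of dimensions -- that is len(s) / x.dim().
print(s.numel())   # 200
print(x.numel())   # 200, always equal to x.size().numel()
print(len(s))      # 2
```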
diff --git a/docs/source/index.rst b/docs/source/index.rst index 2d6d3eea13..a7afe60bc2 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -108,6 +108,7 @@ Features described in this documentation are classified by release status: torch.random <random> masked torch.nested <nested> + size sparse storage torch.testing <testing> diff --git a/docs/source/size.rst b/docs/source/size.rst new file mode 100644 index 0000000000..340836e000 --- /dev/null +++ b/docs/source/size.rst @@ -0,0 +1,25 @@ +torch.Size +=================================== + +:class:`torch.Size` is the result type of a call to :func:`torch.Tensor.size`. It describes the size of all dimensions +of the original tensor. As a subclass of :class:`tuple`, it supports common sequence operations like indexing and +length. + + +Example:: + + >>> x = torch.ones(10, 20, 30) + >>> s = x.size() + >>> s + torch.Size([10, 20, 30]) + >>> s[1] + 20 + >>> len(s) + 3 + + + +.. autoclass:: torch.Size + :members: + :undoc-members: + :inherited-members: diff --git a/torch/__init__.py b/torch/__init__.py index 013e5aa832..846038e351 100644 --- a/torch/__init__.py +++ b/torch/__init__.py @@ -1624,8 +1624,8 @@ import torch.nn.intrinsic _C._init_names(list(torch._storage_classes)) # attach docstrings to torch and tensor functions -from . import _torch_docs, _tensor_docs, _storage_docs -del _torch_docs, _tensor_docs, _storage_docs +from . import _torch_docs, _tensor_docs, _storage_docs, _size_docs +del _torch_docs, _tensor_docs, _storage_docs, _size_docs def compiled_with_cxx11_abi() -> builtins.bool: diff --git a/torch/_size_docs.py b/torch/_size_docs.py new file mode 100644 index 0000000000..58587be32f --- /dev/null +++ b/torch/_size_docs.py @@ -0,0 +1,38 @@ +"""Adds docstrings to torch.Size functions""" + +import torch._C +from torch._C import _add_docstr as add_docstr + + +def add_docstr_all(method, docstr): + add_docstr(getattr(torch._C.Size, method), docstr) + + +add_docstr_all( + "numel", + """ +numel() -> int + +Returns the number of elements a :class:`torch.Tensor` with the given size would contain. + +More formally, for a tensor ``x = tensor.ones(10, 10)`` with size ``s = torch.Size([10, 10])``, +``x.numel() == x.size().numel() == s.numel() == 100`` holds true. + +Example:: + >>> x=torch.ones(10, 10) + >>> s=x.size() + >>> s + torch.Size([10, 10]) + >>> s.numel() + 100 + >>> x.numel() == s.numel() + True + + +.. warning:: + + This function does not return the number of dimensions described by :class:`torch.Size`, but instead the number + of elements a :class:`torch.Tensor` with that size would contain. + +""", +)
2.41.0
a71d12d920f3cafd552623e82e251d143b0c614
Fri, 19 Apr 2024 10:32:08 +0000
[PATCH 0366/1000] [CUDAGraphTree] Support mutated inputs from prior cudagraph pool (#123231)
# PR

This PR supports mutating inputs in cudagraph trees if these inputs are outputs from a previous cudagraph. Please check #121861 for more details.

# Note on Optimistic Mutation Check

To determine whether to apply cudagraph, we need to check input mutations, which fall into four categories: a) no mutation, b) mutation on parameters/buffers, c) mutation on cudagraph-recorded tensors, d) mutation on non-cudagraph-recorded tensors. We can apply cudagraph for types a, b, c but cannot for type d. The input mutation type depends on the function, the current_node, and the inputs. Since `check_for_mutation` is slow, there is a trade-off between making type c or type d faster.

- To make type d faster, we want to `check_for_mutation` and call the eager function early. However, this adds unnecessary overhead to types a, b, c due to the extra check.
- To make type c faster, we want to skip `check_for_mutation` at the beginning and only `check_for_mutation` before `record_function` for a new function. This removes the overhead of `check_for_mutation` for types a, b, c. However, it adds extra overhead to type d due to `check_invariants` on all child nodes.

Instead, we design an optimistic mutation check. The assumption is that, given a function and a node, the input mutation type usually remains the same across inputs. So, if we have ever detected a function on a node as type d, we will never detect it as type c. The detailed design is:

- [Slow Path] On the first invocation of a function on a node, we run `check_for_mutation` once and cache the input mutation type as `non_cudagraph_managed_mutation[node_id][func_id]`.
- [Fast Path] On subsequent invocations of a function on a node, we skip `check_for_mutation`. If `non_cudagraph_managed_mutation[node_id][func_id]` is true, we directly call the eager function. Otherwise, we `check_invariants` and call the cudagraph function.
- [Slow Path] Before `record_function`, we run `check_for_mutation` again.

**Q1: Would there be overhead for types a, b, c, d?**
A: No. We only check input mutation types on the first invocation of a function on a node.

**Q2: If a function happens to be type c during the first invocation on a node, could we detect it as type d in the future?**
A: Yes. This is done by `check_invariants` and guarantees correctness.

**Q3: If a function happens to be type d during the first invocation on a node, could it still be recognized as type c in the future?**
A: No. But this should happen rarely according to our assumption. In the rare case that it does happen, there are no correctness issues and the performance is the same as the eager (or inductor-optimized) function.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123231
Approved by: https://github.com/eellison
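The caching scheme above can be summarized with a small standalone sketch. The helpers below (`check_for_mutation`, `run_eager`, `run_cudagraph`, `MutationHintCache`) are toy stand-ins, not the real functions in `torch/_inductor/cudagraph_trees.py`, so this only illustrates the control flow:
```
from collections import defaultdict

# Toy stand-ins for the real helpers in torch/_inductor/cudagraph_trees.py.
def check_for_mutation(func, inputs) -> bool:
    # Pretend the function advertises whether it mutates inputs that are
    # neither parameters/buffers nor cudagraph-recorded tensors (type d).
    return getattr(func, "mutates_unmanaged_inputs", False)

def run_eager(func, inputs):
    return func(*inputs)

def run_cudagraph(node_id, function_id, func, inputs):
    # The real tree manager would check invariants, then record or replay.
    return func(*inputs)

class MutationHintCache:
    def __init__(self):
        # node_id -> function_id -> True if the function is "type d" on this node.
        self.non_cudagraph_managed_mutation = defaultdict(dict)

    def run(self, node_id, function_id, func, inputs):
        hints = self.non_cudagraph_managed_mutation[node_id]
        if function_id not in hints:
            # Slow path: first invocation of this function on this node.
            hints[function_id] = check_for_mutation(func, inputs)
        if hints[function_id]:
            # Type d: skip cudagraphs and call the eager function directly.
            return run_eager(func, inputs)
        # Types a/b/c: record or replay; invariants are still checked there,
        # so correctness never relies on this cached hint.
        return run_cudagraph(node_id, function_id, func, inputs)

cache = MutationHintCache()
print(cache.run(node_id=0, function_id=0, func=lambda x: x + 1, inputs=(41,)))  # 42
```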
diff --git a/test/inductor/test_cudagraph_trees.py b/test/inductor/test_cudagraph_trees.py index 88881d542f..cc701337c7 100644 --- a/test/inductor/test_cudagraph_trees.py +++ b/test/inductor/test_cudagraph_trees.py @@ -312,8 +312,9 @@ if HAS_CUDA and not TEST_WITH_ASAN: ).run(captured_output[0]) # mutation on inp doesnt hit cudagraphs - self.assertIsNone(self.get_manager()) + self.assertEqual(len(self.get_manager().roots), 0) + # mutation on parameters/buffers hits cudagraphs class Mod(torch.nn.Module): def __init__(self): super().__init__() @@ -323,10 +324,10 @@ if HAS_CUDA and not TEST_WITH_ASAN: self.buf.add_(x) return self.buf + x - @torch.compile() def foo(mod, x): return mod(x) + foo = get_compile_fn(backend)(foo) mod = Mod() mod2 = Mod() @@ -336,6 +337,102 @@ if HAS_CUDA and not TEST_WITH_ASAN: self.assertIsNotNone(self.get_manager()) + @parametrize("backend", ("inductor", "cudagraphs")) + @torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True) + def test_mutation_cudagraph_managed_tensors(self, backend): + def foo(x): + return x + 1 + + def mut(x): + x.add_(2) + return x + + def non_mut(x): + return x.add(2) + + mut = get_compile_fn(backend)(mut) + foo = get_compile_fn(backend)(foo) + + with capture_stderr() as captured_output: + for i in range(3): + torch.compiler.cudagraph_mark_step_begin() + inp = torch.rand([4], device="cuda") + + tmp = foo(inp) + mut_out = mut(tmp) + self.assertEqual(mut_out, non_mut(foo(inp))) + FileCheck().check_count( + "skipping cudagraphs due to mutation on input.", 0, exactly=True + ).run(captured_output[0]) + + torch.compiler.cudagraph_mark_step_begin() + inp = torch.rand([4], device="cuda") + tmp = foo(inp) + mut_inp = tmp.clone() + # in this case, what previously a mutated cudagraph managed tensor is no longer, + # now its an input from eager we should fallback to inductor without cudagraphs + with capture_stderr() as captured_output: + mut(mut_inp) + FileCheck().check("skipping cudagraphs due to mutation on input.").check( + "x.add_(2)" + ).run(captured_output[0]) + self.assertEqual(mut_inp, non_mut(foo(inp))) + + @parametrize("backend", ("inductor", "cudagraphs")) + @torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True) + def test_mutation_cudagraph_managed_tensor_warn(self, backend): + def foo(x): + return x.add_(1) + + def fee(y, z): + return z.add(3) + + def inp(): + return torch.rand([4], device="cuda") + + foo = get_compile_fn(backend)(foo) + fee = get_compile_fn(backend)(fee) + + with capture_stderr() as captured_output: + for _ in range(3): + torch.compiler.cudagraph_mark_step_begin() + fee(inp(), foo(inp())) + FileCheck().check_count( + "skipping cudagraphs due to mutation on input.", 1, exactly=True + ).run(captured_output[0]) + + @parametrize("backend", ("inductor", "cudagraphs")) + @torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True) + def test_mutation_cudagraph_managed_tensor_warn_only_once(self, backend): + def foo(x): + return x + 1 + + def mut(x): + x.add_(2) + return x + + def inp(): + return torch.rand([4], device="cuda") + + mut = get_compile_fn(backend)(mut) + foo = get_compile_fn(backend)(foo) + + with capture_stderr() as captured_output: + # Should warn for current_node=None + mut(inp()) + + for i in range(3): + torch.compiler.cudagraph_mark_step_begin() + tmp = foo(inp()) + mut(tmp) # should not warn + + mut_inp = tmp.clone() + mut(mut_inp) # should not warn since mut has warned + + FileCheck().check_count( + "skipping cudagraphs due to mutation on input.", 1, 
exactly=True + ).run(captured_output[0]) + def test_function_compiled_multiple_times(self): def foo(x): y = foo2(x) diff --git a/torch/_dynamo/backends/cudagraphs.py b/torch/_dynamo/backends/cudagraphs.py index 6854bc8f17..41de419dc8 100644 --- a/torch/_dynamo/backends/cudagraphs.py +++ b/torch/_dynamo/backends/cudagraphs.py @@ -11,7 +11,7 @@ from torch._dynamo.backends.debugging import boxed_nop from torch._inductor.cudagraph_utils import ( BoxedDeviceIndex, check_multiple_devices_or_any_cpu_nodes, - get_mutation_stack_trace, + get_placeholders, ) from torch._inductor.utils import ( BoxedBool, @@ -74,18 +74,7 @@ def get_device_node_mapping(gm: torch.fx.GraphModule): return device_node_mapping -def check_for_mutation(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]: - mutation_indices = find_input_mutations(aot_model.graph) - set(range(num_fixed)) - if not mutation_indices: - return None - - return get_mutation_stack_trace(aot_model, mutation_indices) - - def check_for_skip(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]: - if mut_skip := check_for_mutation(aot_model, num_fixed): - return mut_skip - if skip := check_multiple_devices_or_any_cpu_nodes( get_device_node_mapping(aot_model) ): @@ -127,7 +116,6 @@ def cudagraphs(dynamo_model, dynamo_inputs): return interp boxed_device_index.set(get_device_index(aot_model)) - out = cudagraphify_impl( interp, aot_inputs, @@ -136,6 +124,8 @@ def cudagraphs(dynamo_model, dynamo_inputs): is_backward=False, is_inference=False, stack_traces=get_stack_traces(aot_model), + placeholders=get_placeholders(aot_model.graph), + mutated_input_idxs=find_input_mutations(aot_model.graph), ) out._boxed_call = True return out @@ -170,6 +160,8 @@ def cudagraphs(dynamo_model, dynamo_inputs): is_backward=True, is_inference=False, stack_traces=get_stack_traces(aot_model), + placeholders=get_placeholders(aot_model.graph), + mutated_input_idxs=find_input_mutations(aot_model.graph), ) out._boxed_call = True return out diff --git a/torch/_inductor/compile_fx.py b/torch/_inductor/compile_fx.py index f5e0878d39..3b7ffaf381 100644 --- a/torch/_inductor/compile_fx.py +++ b/torch/_inductor/compile_fx.py @@ -29,7 +29,7 @@ from torch._dynamo.utils import ( from torch._functorch import config as functorch_config from torch._functorch.aot_autograd import aot_export_module, make_boxed_func from torch._inductor.codecache import code_hash, CompiledFxGraph, FxGraphCache -from torch._inductor.cudagraph_utils import BoxedDeviceIndex +from torch._inductor.cudagraph_utils import BoxedDeviceIndex, get_placeholders from torch._inductor.debug import save_args_for_compile_fx_inner from torch._inductor.utils import BoxedBool, count_tangents @@ -513,16 +513,7 @@ def compile_fx_inner( if isinstance(t, torch.Tensor) ) - from torch._inductor.cudagraph_utils import check_for_mutation - - has_mutation_str = check_for_mutation(gm, compiled_graph, num_fixed) - has_mutation = has_mutation_str is not None - - if has_mutation: - compiled_graph.disabled_cudagraphs_reason = has_mutation_str - cudagraph_tests = [ - (not has_mutation, "mutated inputs"), (not has_incompatible_cudagraph_ops(gm), "incompatible ops"), (not complex_memory_overlap_inputs, "complex memory overlap"), ( @@ -557,6 +548,8 @@ def compile_fx_inner( is_backward=is_backward, is_inference=is_inference, constants=tuple(compiled_graph.constants.values()), + placeholders=tuple(get_placeholders(gm.graph)), + mutated_input_idxs=tuple(compiled_graph.mutated_input_idxs), ) else: BoxedBool.disable(cudagraphs) @@ -878,6 +871,8 
@@ def cudagraphify( is_backward: bool, is_inference: bool, constants: Tuple[torch.Tensor, ...] = (), + placeholders: Tuple[torch.fx.Node, ...] = (), + mutated_input_idxs: Tuple[int, ...] = (), ): from torch._inductor.cudagraph_trees import ( cudagraphify_impl as new_cudagraphify_impl, @@ -892,6 +887,8 @@ def cudagraphify( is_backward=is_backward, is_inference=is_inference, constants=constants, + placeholders=placeholders, + mutated_input_idxs=mutated_input_idxs, ) else: cudagraphify_fn = cudagraphify_impl diff --git a/torch/_inductor/cudagraph_trees.py b/torch/_inductor/cudagraph_trees.py index b6a24e5e5e..141354d43a 100644 --- a/torch/_inductor/cudagraph_trees.py +++ b/torch/_inductor/cudagraph_trees.py @@ -76,6 +76,11 @@ from torch._inductor.compile_fx import ( remove_unaligned_input_idxs, static_input, ) +from torch._inductor.cudagraph_utils import ( + check_for_mutation, + FunctionID, + WrappedFunction, +) from torch.multiprocessing.reductions import StorageWeakRef from torch.storage import UntypedStorage from torch.types import _bool @@ -106,32 +111,15 @@ log = torch._logging.getArtifactLogger(__name__, "cudagraphs") from . import config -@dataclasses.dataclass(frozen=True) -class GraphID: - "Unique counter of a cuda graph recording" - id: int +perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints") @dataclasses.dataclass(frozen=True) -class FunctionID: - "Unique counter of a function wrapped in cudagraphify_impl" +class GraphID: + "Unique counter of a cuda graph recording" id: int -@dataclasses.dataclass(frozen=True) -class WrappedFunction: - """ - Represents a function that you want to record for CUDA graph replay, - with a little more metadata so we can identify if we have an applicable - CUDA graph in our CUDA graph tree for it. - """ - - model: Callable[..., Any] - static_input_idxs: List[int] - id: FunctionID - constants: Tuple[torch.Tensor, ...] - - def clear_cublass_cache(): """ Cublas keeps a persistent workspace allocation for running matmuls. This poses a problem for @@ -398,6 +386,8 @@ def cudagraphify( is_inference: bool, stack_traces: Optional[StackTraces] = None, constants: Tuple[torch.Tensor, ...] = (), + placeholders: Tuple[torch.fx.Node, ...] = (), + mutated_input_idxs: Tuple[int, ...] 
= (), ): manager = get_container(device_index).get_tree_manager() assert not (is_backward and is_inference) @@ -414,6 +404,8 @@ def cudagraphify( stack_traces, mode, constants, + placeholders, + mutated_input_idxs, ) @@ -569,6 +561,7 @@ class CUDAWarmupNode: stack_traces: Optional[StackTraces], stream: torch.cuda.Stream, already_warm: bool, + id: GraphID, ): self.wrapped_function = wrapped_function self.parent = parent @@ -581,6 +574,7 @@ class CUDAWarmupNode: self.stack_traces = stack_traces self.stream = stream self.already_warm = already_warm + self.id = id def run(self, new_inputs): assert not self.has_run, "Wrapped function should never be run twice" @@ -662,6 +656,12 @@ class CUDAWarmupNode: def all_outputs_are_dead(self): return not list(self.path_live_weakrefs()) + def _is_cuda_graph_recorded_tensor(self, t: torch.Tensor): + for storage_weak_ref in self.path_live_weakrefs(): + if t.untyped_storage().data_ptr() == storage_weak_ref.data_ptr(): + return True + return False + # Aliases for List that say what the indices denote InputList = List # input indexes @@ -1710,6 +1710,9 @@ class CUDAGraphTreeManager: self.warned_functions: Set[FunctionID] = set() torch._C._set_cached_tensors_enabled(True) + # warn only once if a function mutates inputs + self.warned_mutation: Set[FunctionID] = set() + # NB: cuda caching allocator will remember the stream a segment is allocated to # and only allocate that segment to the same stream. we need to use a single stream # for all allocations to the memory pool, otherwise the allocations to separate streams @@ -1736,6 +1739,13 @@ class CUDAGraphTreeManager: self.graph_counter = itertools.count(0) self.func_counter = itertools.count(0) + # mapping from graph_id to (function id to mutation type hint) since we are + # specializing on a particular combination of Parent Node -> Function ID. + self.non_cudagraph_managed_mutation_hint: Dict[ + Optional[GraphID], Dict[FunctionID, bool] + ] = defaultdict(dict) + self.warmup_node_counter = itertools.count(start=-1, step=-1) + # whether we the current node is in a state of warmup, recording, execution. If # there is no current node the state will be ExecutionState.None. 
self.path_state = ExecutionState.NONE @@ -1792,6 +1802,42 @@ class CUDAGraphTreeManager: def set_to_running_backward(self): self.running_forwards_with_pending_backwards = False + def _get_cuda_graph_recorded_tensor_checker(self) -> Callable[[Tensor], bool]: + return ( + self.current_node._is_cuda_graph_recorded_tensor + if isinstance(self.current_node, (CUDAGraphNode, CUDAWarmupNode)) + else lambda _: False + ) + + def new_warmup_node_id(self) -> GraphID: + return GraphID(next(self.warmup_node_counter)) + + def _update_non_cudagraph_managed_mutation( + self, function_id: FunctionID, inputs: List[Tensor] + ): + node_id = self._get_node_id() + if has_mutation_str := check_for_mutation( + self.ids_to_funcs[function_id], + inputs, + self._get_cuda_graph_recorded_tensor_checker(), + ): + self.non_cudagraph_managed_mutation_hint[node_id][function_id] = True + # warn once per function_id + if function_id in self.warned_mutation: + return + self.warned_mutation.add(function_id) + perf_hint_log.warning(has_mutation_str) + else: + self.non_cudagraph_managed_mutation_hint[node_id][function_id] = False + + def _get_node_id(self) -> Optional[GraphID]: + if self.current_node is None: + return None + elif isinstance(self.current_node, (CUDAGraphNode, CUDAWarmupNode)): + return self.current_node.id + else: + raise RuntimeError(f"Unknown node type {type(self.current_node)}") + def _run(self, new_inputs: List[Tensor], function_id: FunctionID): # we will try to end the current execution lazily, since # we dont want to do unnecessary checking of the existing outputs @@ -1803,6 +1849,16 @@ class CUDAGraphTreeManager: if self.in_warmup: self.try_end_curr_warmup(function_id) + node_id = self._get_node_id() + if function_id not in self.non_cudagraph_managed_mutation_hint[node_id]: + self._update_non_cudagraph_managed_mutation(function_id, new_inputs) + + # Early exit if the function mutates inputs which are neither parameters/buffers nor + # cudagraph recorded tensors. This check should happen after `try_end_curr_recording` + # and `try_end_curr_warmup` which may change self.current_node. + if self.non_cudagraph_managed_mutation_hint[node_id][function_id]: + return self.ids_to_funcs[function_id].model(new_inputs) + # warming up a function and subsequentally recording may use different memory addresses # because both depend on the state of the caching allocator. 
if we warm up graph A, # then warm up graph B and make more allocations, the subsequent recording of A will not @@ -1848,6 +1904,13 @@ class CUDAGraphTreeManager: if self.current_node is None: return self.run(new_inputs, function_id) + if len(self.ids_to_funcs[function_id].mutated_input_idxs) > 0: + self._update_non_cudagraph_managed_mutation(function_id, new_inputs) + if self.non_cudagraph_managed_mutation_hint[self._get_node_id()][ + function_id + ]: + return self.ids_to_funcs[function_id].model(new_inputs) + # at this point, we necessarily will do a new recording self.debug_fail_counter += 1 @@ -1934,6 +1997,7 @@ class CUDAGraphTreeManager: self.ids_to_stack_traces[function_id], self.stream, already_warm, + self.new_warmup_node_id(), ) self.current_node = node self.path_state = ExecutionState.WARMUP @@ -1954,6 +2018,8 @@ class CUDAGraphTreeManager: stack_traces, mode, constants, + placeholders, + mutated_input_idxs, ) -> Tuple[Callable[..., Any], List[Optional[Tensor]]]: id = self.new_func_id() self.ids_to_stack_traces[id] = stack_traces @@ -1962,6 +2028,8 @@ class CUDAGraphTreeManager: list(static_input_idxs), id, tuple(t for t in constants if isinstance(t, torch.Tensor) and t.is_cuda), + placeholders, + mutated_input_idxs, ) self.id_to_mode[id] = mode fn = functools.partial(self.run, function_id=id) diff --git a/torch/_inductor/cudagraph_utils.py b/torch/_inductor/cudagraph_utils.py index dbb073acda..0d79b88b40 100644 --- a/torch/_inductor/cudagraph_utils.py +++ b/torch/_inductor/cudagraph_utils.py @@ -1,9 +1,35 @@ import dataclasses -from typing import Dict, Iterable, Optional +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import torch +@dataclasses.dataclass(frozen=True) +class FunctionID: + "Unique counter of a function wrapped in cudagraphify_impl" + id: int + + +@dataclasses.dataclass(frozen=True) +class WrappedFunction: + """ + Represents a function that you want to record for CUDA graph replay, + with a little more metadata so we can identify if we have an applicable + CUDA graph in our CUDA graph tree for it. + """ + + model: Callable[..., Any] + static_input_idxs: List[int] + id: FunctionID + constants: Tuple[torch.Tensor, ...] 
+ placeholders: List[torch.fx.Node] + mutated_input_idxs: List[int] + + +def get_placeholders(graph: torch.fx.Graph) -> List[torch.fx.Node]: + return [node for node in graph.nodes if node.op == "placeholder"] + + def get_mutating_use_stack_trace(placeholder_node: torch.fx.Node) -> Optional[str]: # reinplaced uses might have a single, non-copy_ use if len(placeholder_node.users) == 1: @@ -22,10 +48,9 @@ def format_default_skip_message(reason: str) -> str: def get_mutation_stack_trace( - gm: torch.fx.GraphModule, mutation_indices: Iterable[int] + placeholders: List[torch.fx.Node], mutation_indices: Iterable[int] ) -> str: stack_trace: Optional[str] = "" - placeholders = [node for node in gm.graph.nodes if node.op == "placeholder"] for idx in mutation_indices: placeholder = placeholders[idx] @@ -40,7 +65,9 @@ def get_mutation_stack_trace( def check_for_mutation( - gm: torch.fx.GraphModule, compiled_graph, num_fixed: int + func: WrappedFunction, + inputs: List[torch.Tensor], + is_cuda_graph_recorded_tensor: Callable[[torch.Tensor], bool], ) -> Optional[str]: default_msg = format_default_skip_message("mutated inputs") @@ -48,16 +75,21 @@ def check_for_mutation( if torch._inductor.config.triton.cudagraph_trees: # checking if mutation is only on parameters/static inputs mutation_indices = [ - idx for idx in compiled_graph.mutated_input_idxs if idx >= num_fixed + idx + for idx in func.mutated_input_idxs + if not ( + idx in func.static_input_idxs + or is_cuda_graph_recorded_tensor(inputs[idx]) + ) ] has_mutation = len(mutation_indices) != 0 if not has_mutation: return None - return get_mutation_stack_trace(gm, mutation_indices) + return get_mutation_stack_trace(func.placeholders, mutation_indices) else: - has_mutation = len(compiled_graph.mutated_inputs) != 0 + has_mutation = len(func.mutated_input_idxs) != 0 return None if not has_mutation else default_msg
2.41.0
412b75b42b27918c7cccd7a4f1121c8da14cb71
Fri, 19 Apr 2024 09:54:05 +0000
[PATCH 0367/1000] [optim] add fused_adam/adamw_kernel support for CPU device (#123074)
On par with the `CUDA` implementation. The `autocast` logic is the same as for `CUDA` + `Fused Adam`:
- check for inf in `gradscaler.step`
- in the fused kernel, if there is `inf`, do nothing; otherwise unscale the grad (also write it back) and update the param.

**TestPlan**:
```
# extend CUDA-only tests to CPU fused adam
python test_optim.py -k test_fused_matches_forloop
python test_optim.py -k test_fused_large_tensor
python test_torch.py -k test_grad_scaling_autocast_fused
# extend fused test
python test_torch.py -k test_params_invalidated_with_grads_invalidated_between_unscale_and_step
python test_optim.py -k test_can_load_older_state_dict
# newly added test (follow https://github.com/pytorch/pytorch/blob/6b1f13ea2f3b1bcd575620eecd7d84a4d2e3eb76/test/test_cuda.py#L1108)
python test_optim.py -k test_grad_scaling_autocast_fused_optimizers
```

**Benchmark**: **5.1x** speedup on a 56-core SPR. **Parameter-size=1M**, **Nparams=10**. [test script](https://gist.github.com/zhuhaozhe/ef9a290ad3f8f4067b3373a3bdaa33e7)
```
numactl -C 0-55 -m 0 python bench_adam.py
non-fused 6.0174267292022705 s
fused 1.1787631511688232 s
```

**Note: Fused kernel accuracy**
The accuracy failure in CI shows a difference slightly larger than the default tolerance:
```
2024-04-02T06:09:16.2213887Z Mismatched elements: 21 / 64 (32.8%)
2024-04-02T06:09:16.2214339Z Greatest absolute difference: 1.5735626220703125e-05 at index (6, 6) (up to 1e-05 allowed)
2024-04-02T06:09:16.2214813Z Greatest relative difference: 1.0073336852656212e-05 at index (4, 1) (up to 1.3e-06 allowed)
```
I have debugged it step by step and, unfortunately, we may not be able to make the `fused kernel` exactly match the `non fused` one due to compiler optimizations. For example, in the non-fused impl
```
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
```
and in the fused impl
```
exp_avg_sq_ptr[d] = scalar_t(beta2) * exp_avg_sq_ptr[d];
// std::cout << "exp_avg_sq " << exp_avg_sq_ptr[d] << std::endl;
exp_avg_sq_ptr[d] = exp_avg_sq_ptr[d] + scalar_t(exp_avg_sq_grad_coefficient) * grad_val * grad_val;
```
If I keep the `std::cout`, I get exactly the same results in the UT
```
===============param
0.6796758770942688
0.6796758770942688
```
but when I comment it out, there is a difference
```
===============param
0.6796758770942688
0.6796759366989136
```
So I will make the tolerance a little higher than the default one.

Co-authored-by: Jane Xu <janeyx@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123074
Approved by: https://github.com/jgong5, https://github.com/janeyx99
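For users, the change surfaces through the existing `fused=True` flag on `torch.optim.Adam`/`AdamW`. A minimal usage sketch, assuming a build that already contains this kernel so that `fused=True` is accepted for CPU parameters:
```
import torch

# With the CPU kernel in place, fused=True is usable for CPU parameters
# as well (previously CUDA-only). Assumes a build that includes this change.
model = torch.nn.Linear(1024, 1024)                        # CPU parameters
opt = torch.optim.AdamW(model.parameters(), lr=1e-3, fused=True)

x = torch.randn(64, 1024)
model(x).sum().backward()
opt.step()        # should dispatch to _fused_adamw_kernel_cpu_
opt.zero_grad()
```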
diff --git a/aten/src/ATen/native/FusedAdam.cpp b/aten/src/ATen/native/FusedAdam.cpp new file mode 100644 index 0000000000..b3be769b24 --- /dev/null +++ b/aten/src/ATen/native/FusedAdam.cpp @@ -0,0 +1,175 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +#include <ATen/core/Tensor.h> +#include <ATen/native/DispatchStub.h> +#include <ATen/native/FusedAdam.h> + +#ifndef AT_PER_OPERATOR_HEADERS +#include <ATen/Functions.h> +#include <ATen/NativeFunctions.h> +#else +#include <ATen/ops/_fused_adam.h> +#include <ATen/ops/_fused_adam_native.h> +#include <ATen/ops/_fused_adamw.h> +#include <ATen/ops/_fused_adamw_native.h> +#endif +namespace at { + +namespace native { + +void _fused_adam_kernel_cpu_( + at::TensorList params, + at::TensorList grads, + at::TensorList exp_avgs, + at::TensorList exp_avg_sqs, + at::TensorList max_exp_avg_sqs, + at::TensorList state_steps, + const double lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool amsgrad, + const bool maximize, + const c10::optional<at::Tensor>& grad_scale, + const c10::optional<at::Tensor>& found_inf) { + const float* grad_scale_ptr = + grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr; + const float* found_inf_ptr = + found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr; + if (found_inf_ptr && *found_inf_ptr == 1.0) { + return; + } + size_t n_tensors = params.size(); + TORCH_CHECK(grads.size() == n_tensors); + TORCH_CHECK(exp_avgs.size() == n_tensors); + TORCH_CHECK(exp_avg_sqs.size() == n_tensors); + if (amsgrad) { + TORCH_CHECK(max_exp_avg_sqs.size() == n_tensors); + } else { + TORCH_CHECK(max_exp_avg_sqs.size() == 0); + } + TORCH_CHECK(state_steps.size() == n_tensors); + at::Tensor max_exp_avg_sq = at::Tensor(); + for (size_t i = 0; i < n_tensors; i++){ + if (amsgrad) max_exp_avg_sq = max_exp_avg_sqs[i]; + fused_adam_stub( + kCPU, + params[i], + grads[i], + exp_avgs[i], + exp_avg_sqs[i], + max_exp_avg_sq, + state_steps[i], + lr, + beta1, + beta2, + weight_decay, + eps, + amsgrad, + maximize, + grad_scale_ptr, + ADAM_MODE::ORIGINAL); + } +} + +// The following overload simply has a Tensor lr +void _fused_adam_kernel_cpu_( + at::TensorList params, + at::TensorList grads, + at::TensorList exp_avgs, + at::TensorList exp_avg_sqs, + at::TensorList max_exp_avg_sqs, + at::TensorList state_steps, + const at::Tensor& lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool amsgrad, + const bool maximize, + const c10::optional<at::Tensor>& grad_scale, + const c10::optional<at::Tensor>& found_inf) { + _fused_adam_kernel_cpu_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr.item<double>(), beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); +} + +void _fused_adamw_kernel_cpu_( + at::TensorList params, + at::TensorList grads, + at::TensorList exp_avgs, + at::TensorList exp_avg_sqs, + at::TensorList max_exp_avg_sqs, + at::TensorList state_steps, + const double lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool amsgrad, + const bool maximize, + const c10::optional<at::Tensor>& grad_scale, + const c10::optional<at::Tensor>& found_inf) { + const float* grad_scale_ptr = + grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr; + const float* found_inf_ptr = + found_inf.has_value() ? 
found_inf->data_ptr<float>() : nullptr; + if (found_inf_ptr && *found_inf_ptr == 1.0) { + return; + } + size_t n_tensors = params.size(); + TORCH_CHECK(grads.size() == n_tensors); + TORCH_CHECK(exp_avgs.size() == n_tensors); + TORCH_CHECK(exp_avg_sqs.size() == n_tensors); + if (amsgrad) { + TORCH_CHECK(max_exp_avg_sqs.size() == n_tensors); + } else { + TORCH_CHECK(max_exp_avg_sqs.size() == 0); + } + TORCH_CHECK(state_steps.size() == n_tensors); + at::Tensor max_exp_avg_sq = at::Tensor(); + for (size_t i = 0; i < n_tensors; i++){ + if (amsgrad) max_exp_avg_sq = max_exp_avg_sqs[i]; + fused_adam_stub( + kCPU, + params[i], + grads[i], + exp_avgs[i], + exp_avg_sqs[i], + max_exp_avg_sq, + state_steps[i], + lr, + beta1, + beta2, + weight_decay, + eps, + amsgrad, + maximize, + grad_scale_ptr, + ADAM_MODE::ADAMW); + } +} + +// The following overload simply has a Tensor lr +void _fused_adamw_kernel_cpu_( + at::TensorList params, + at::TensorList grads, + at::TensorList exp_avgs, + at::TensorList exp_avg_sqs, + at::TensorList max_exp_avg_sqs, + at::TensorList state_steps, + const at::Tensor& lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool amsgrad, + const bool maximize, + const c10::optional<at::Tensor>& grad_scale, + const c10::optional<at::Tensor>& found_inf) { + _fused_adamw_kernel_cpu_(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr.item<double>(), beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf); +} + + +DEFINE_DISPATCH(fused_adam_stub); + +} +} diff --git a/aten/src/ATen/native/FusedAdam.h b/aten/src/ATen/native/FusedAdam.h new file mode 100644 index 0000000000..6fbbaf2441 --- /dev/null +++ b/aten/src/ATen/native/FusedAdam.h @@ -0,0 +1,30 @@ +#include <ATen/core/Tensor.h> +#include <ATen/native/DispatchStub.h> + +namespace at { + +namespace native { + +enum class ADAM_MODE : uint8_t { ORIGINAL = 0, ADAMW = 1 }; + +using fused_adam_fn = void (*)( + const at::Tensor& param, + const at::Tensor& grad, + const at::Tensor& exp_avg, + const at::Tensor& exp_avg_sq, + const at::Tensor& max_exp_avg_sq, + const at::Tensor& state_step, + const double lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool amsgrad, + const bool maximize, + const float* grad_scale_ptr, + const ADAM_MODE); + +DECLARE_DISPATCH(fused_adam_fn, fused_adam_stub); + +} +} diff --git a/aten/src/ATen/native/cpu/FusedAdamKernel.cpp b/aten/src/ATen/native/cpu/FusedAdamKernel.cpp new file mode 100644 index 0000000000..4a10fe202c --- /dev/null +++ b/aten/src/ATen/native/cpu/FusedAdamKernel.cpp @@ -0,0 +1,379 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +#include <ATen/core/Tensor.h> +#include <ATen/Parallel.h> +#include <ATen/OpMathType.h> +#include <ATen/native/DispatchStub.h> +#include <ATen/native/FusedAdam.h> +#include <ATen/Dispatch.h> +#include <ATen/cpu/vec/vec.h> +#include <ATen/cpu/vec/functional.h> +namespace at::native { + +namespace{ + +template <typename scalar_t, typename opmath_t, ADAM_MODE adam_mode> +typename std::enable_if< + std::is_same<scalar_t, Half>::value || std::is_same<scalar_t, BFloat16>::value, + void>:: + type inline adam_math( + scalar_t* param_ptr, + scalar_t* exp_avg_ptr, + scalar_t* exp_avg_sq_ptr, + scalar_t* grad_ptr, + scalar_t* max_exp_avg_sq_ptr, + double lr, + double bias_correction1, + double bias_correction2, + double exp_avg_grad_coefficient, + double exp_avg_sq_grad_coefficient, + double bias_correction2_sqrt, + double eps, + 
double weight_decay, + double beta2, + bool amsgrad, + bool maximize, + const float* grad_scale_ptr, + int64_t size +){ + double step_size = lr / bias_correction1; + using lpVec = at::vec::Vectorized<scalar_t>; + using fVec = at::vec::Vectorized<opmath_t>; + lpVec grad_vec_to_store; + int64_t d = 0; + fVec param_vec1, param_vec2; + fVec grad_vec1, grad_vec2; + fVec exp_avg_vec1, exp_avg_vec2; + fVec exp_avg_sq_vec1, exp_avg_sq_vec2; + fVec max_exp_avg_sq_vec1, max_exp_avg_sq_vec2; + for (; d < size - (size % lpVec::size()); d += lpVec::size()) { + lpVec param_lpvec = lpVec::loadu(param_ptr + d); + std::tie(param_vec1, param_vec2) = vec::convert_to_float<scalar_t>(param_lpvec); + lpVec grad_lpvec = lpVec::loadu(grad_ptr + d); + std::tie(grad_vec1, grad_vec2) = vec::convert_to_float<scalar_t>(grad_lpvec); + if (grad_scale_ptr) { + grad_vec1 = grad_vec1 / fVec(float(*grad_scale_ptr)); + grad_vec2 = grad_vec2 / fVec(float(*grad_scale_ptr)); + grad_vec_to_store = vec::convert_from_float<scalar_t>(grad_vec1, grad_vec2); + grad_vec_to_store.store(grad_ptr + d); + } + if (maximize){ + grad_vec1 = grad_vec1 * fVec(opmath_t(-1.0)); + grad_vec2 = grad_vec2 * fVec(opmath_t(-1.0)); + } + if (weight_decay != 0.f){ + if constexpr (adam_mode == ADAM_MODE::ORIGINAL) { + grad_vec1 += param_vec1 * fVec(opmath_t(weight_decay)); + grad_vec2 += param_vec2 * fVec(opmath_t(weight_decay)); + } else if constexpr (adam_mode == ADAM_MODE::ADAMW) { + param_vec1 = param_vec1 * fVec(opmath_t(1 - lr * weight_decay)); + param_vec2 = param_vec2 * fVec(opmath_t(1 - lr * weight_decay)); + } + } + + lpVec exp_avg_lpvec = lpVec::loadu(exp_avg_ptr + d); + std::tie(exp_avg_vec1, exp_avg_vec2) = vec::convert_to_float<scalar_t>(exp_avg_lpvec); + + // exp_avg.lerp_(grad, 1 - beta1) + const fVec lerp_weight = fVec(opmath_t(exp_avg_grad_coefficient)); + auto mask = lerp_weight.abs() < fVec(0.5); + auto coeff = fVec::blendv(lerp_weight - fVec(1), lerp_weight, mask); + + auto base1 = fVec::blendv(grad_vec1, exp_avg_vec1, mask); + exp_avg_vec1 = vec::fmadd(coeff, grad_vec1 - exp_avg_vec1, base1); + + auto base2 = fVec::blendv(grad_vec2, exp_avg_vec2, mask); + exp_avg_vec2 = vec::fmadd(coeff, grad_vec2 - exp_avg_vec2, base2); + + lpVec exp_avg_sq_lpvec = lpVec::loadu(exp_avg_sq_ptr + d); + std::tie(exp_avg_sq_vec1, exp_avg_sq_vec2) = vec::convert_to_float<scalar_t>(exp_avg_sq_lpvec); + exp_avg_sq_vec1 = exp_avg_sq_vec1 * fVec(opmath_t(beta2)) + + fVec(opmath_t(exp_avg_sq_grad_coefficient)) * grad_vec1 * grad_vec1; + exp_avg_sq_vec2 = exp_avg_sq_vec2 * fVec(opmath_t(beta2)) + + fVec(opmath_t(exp_avg_sq_grad_coefficient)) * grad_vec2 * grad_vec2; + + vec::convert_from_float<scalar_t>(exp_avg_vec1, exp_avg_vec2).store(exp_avg_ptr + d); + vec::convert_from_float<scalar_t>(exp_avg_sq_vec1, exp_avg_sq_vec2).store(exp_avg_sq_ptr + d); + + fVec denom_vec1, denom_vec2; + if (amsgrad) { + lpVec max_exp_avg_sq_lpvec = lpVec::loadu(max_exp_avg_sq_ptr + d); + std::tie(max_exp_avg_sq_vec1, max_exp_avg_sq_vec2) = vec::convert_to_float<scalar_t>(max_exp_avg_sq_lpvec); + max_exp_avg_sq_vec1 = maximum(max_exp_avg_sq_vec1, exp_avg_sq_vec1); + max_exp_avg_sq_vec2 = maximum(max_exp_avg_sq_vec2, exp_avg_sq_vec2); + vec::convert_from_float<scalar_t>(max_exp_avg_sq_vec1, max_exp_avg_sq_vec2).store(max_exp_avg_sq_ptr + d); + denom_vec1 = + (max_exp_avg_sq_vec1.sqrt() / fVec(opmath_t(bias_correction2_sqrt))) + fVec(opmath_t(eps)); + denom_vec2 = + (max_exp_avg_sq_vec2.sqrt() / fVec(opmath_t(bias_correction2_sqrt))) + fVec(opmath_t(eps)); + } else { + denom_vec1 = 
+ (exp_avg_sq_vec1.sqrt() / fVec(opmath_t(bias_correction2_sqrt))) + fVec(opmath_t(eps)); + denom_vec2 = + (exp_avg_sq_vec2.sqrt() / fVec(opmath_t(bias_correction2_sqrt))) + fVec(opmath_t(eps)); + } + param_vec1 = param_vec1 + fVec(opmath_t(-step_size)) * exp_avg_vec1 / denom_vec1; + param_vec2 = param_vec2 + fVec(opmath_t(-step_size)) * exp_avg_vec2 / denom_vec2; + vec::convert_from_float<scalar_t>(param_vec1, param_vec2).store(param_ptr + d); + } + scalar_t grad_val_to_store; + for (; d < size; d++) { + opmath_t grad_val = grad_ptr[d]; + opmath_t param_val = param_ptr[d]; + if (grad_scale_ptr) { + grad_val = grad_ptr[d] / float(*grad_scale_ptr); + grad_val_to_store = scalar_t(grad_val); + grad_ptr[d] = grad_val_to_store; + } + if (maximize) grad_val = -grad_val; + if (weight_decay != 0.f){ + if constexpr (adam_mode == ADAM_MODE::ORIGINAL) { + grad_val += param_val * opmath_t(weight_decay); + } else if constexpr (adam_mode == ADAM_MODE::ADAMW) { + param_val = param_val * opmath_t(1 - lr * weight_decay); + } + } + // exp_avg.lerp_(grad, 1 - beta1) + opmath_t exp_avg_var = exp_avg_ptr[d]; + auto is_lerp_weight_small = std::abs(opmath_t(exp_avg_grad_coefficient)) < opmath_t(0.5); + if (is_lerp_weight_small) { + exp_avg_var = exp_avg_var + opmath_t(exp_avg_grad_coefficient) * (grad_val - exp_avg_var); + } else { + exp_avg_var = grad_val - (grad_val - exp_avg_var) * (opmath_t(1) - opmath_t(exp_avg_grad_coefficient)); + } + exp_avg_ptr[d] = scalar_t(exp_avg_var); + opmath_t exp_avg_sq_var = exp_avg_sq_ptr[d]; + exp_avg_sq_var = exp_avg_sq_var * opmath_t(beta2); + exp_avg_sq_var = exp_avg_sq_var + + opmath_t(exp_avg_sq_grad_coefficient) * grad_val * grad_val; + exp_avg_sq_ptr[d] = scalar_t(exp_avg_sq_var); + opmath_t demon_val; + if (amsgrad) { + opmath_t max_exp_avg_sq_var = max_exp_avg_sq_ptr[d]; + max_exp_avg_sq_var = std::max(max_exp_avg_sq_var, exp_avg_sq_var); + max_exp_avg_sq_ptr[d] = + scalar_t(max_exp_avg_sq_var); + demon_val = + std::sqrt(max_exp_avg_sq_var) / opmath_t(bias_correction2_sqrt) + opmath_t(eps); + } else { + demon_val = std::sqrt(exp_avg_sq_var) / opmath_t(bias_correction2_sqrt) + opmath_t(eps); + } + param_ptr[d] = param_val - opmath_t(step_size) * exp_avg_var / demon_val; + } +} + + +template <typename scalar_t, typename opmath_t, ADAM_MODE adam_mode> +typename std::enable_if< + std::is_same<scalar_t, float>::value || std::is_same<scalar_t, double>::value, + void>:: + type inline adam_math( + scalar_t* param_ptr, + scalar_t* exp_avg_ptr, + scalar_t* exp_avg_sq_ptr, + scalar_t* grad_ptr, + scalar_t* max_exp_avg_sq_ptr, + double lr, + double bias_correction1, + double bias_correction2, + double exp_avg_grad_coefficient, + double exp_avg_sq_grad_coefficient, + double bias_correction2_sqrt, + double eps, + double weight_decay, + double beta2, + bool amsgrad, + bool maximize, + const float* grad_scale_ptr, + int64_t size +){ + double step_size = lr / bias_correction1; + using Vec = at::vec::Vectorized<scalar_t>; + Vec grad_vec_to_store; + int64_t d = 0; + for (; d < size - (size % Vec::size()); d += Vec::size()) { + Vec param_vec = Vec::loadu(param_ptr + d); + Vec grad_vec = Vec::loadu(grad_ptr + d); + if (grad_scale_ptr) { + grad_vec = grad_vec / Vec(scalar_t(*grad_scale_ptr)); + grad_vec_to_store = grad_vec; + grad_vec_to_store.store(grad_ptr + d); + } + if (maximize) grad_vec = grad_vec * Vec(scalar_t(-1.0)); + if (weight_decay != 0.f){ + if constexpr (adam_mode == ADAM_MODE::ORIGINAL) { + grad_vec += param_vec * Vec(scalar_t(weight_decay)); + } else if constexpr 
(adam_mode == ADAM_MODE::ADAMW) { + param_vec = param_vec * Vec(scalar_t(1 - lr * weight_decay)); + } + } + Vec exp_avg_vec = Vec::loadu(exp_avg_ptr + d); + // exp_avg.lerp_(grad, 1 - beta1) + const Vec lerp_weight = Vec(scalar_t(exp_avg_grad_coefficient)); + auto mask = lerp_weight.abs() < Vec(0.5); + auto coeff = Vec::blendv(lerp_weight - Vec(1), lerp_weight, mask); + auto base = Vec::blendv(grad_vec, exp_avg_vec, mask); + exp_avg_vec = vec::fmadd(coeff, grad_vec - exp_avg_vec, base); + + Vec exp_avg_sq_vec = Vec::loadu(exp_avg_sq_ptr + d) * Vec(scalar_t(beta2)) + + Vec(scalar_t(exp_avg_sq_grad_coefficient)) * grad_vec * grad_vec; + exp_avg_vec.store(exp_avg_ptr + d); + exp_avg_sq_vec.store(exp_avg_sq_ptr + d); + + Vec denom_vec; + if (amsgrad) { + Vec max_exp_avg_sq_vec = + maximum(Vec::loadu(max_exp_avg_sq_ptr + d), exp_avg_sq_vec); + max_exp_avg_sq_vec.store(max_exp_avg_sq_ptr + d); + denom_vec = + (max_exp_avg_sq_vec.sqrt() / Vec(scalar_t(bias_correction2_sqrt))) + Vec(scalar_t(eps)); + } else { + denom_vec = + (exp_avg_sq_vec.sqrt() / Vec(scalar_t(bias_correction2_sqrt))) + Vec(scalar_t(eps)); + } + param_vec = param_vec + Vec(scalar_t(-step_size)) * exp_avg_vec / denom_vec; + param_vec.store(param_ptr + d); + } + scalar_t grad_val_to_store; + for (; d < size; d++) { + scalar_t grad_val = grad_ptr[d]; + if (grad_scale_ptr) { + grad_val = grad_ptr[d] / scalar_t(*grad_scale_ptr); + grad_val_to_store = grad_val; + grad_ptr[d] = grad_val_to_store; + } + if (maximize) grad_val = -grad_val; + if (weight_decay != 0.f){ + if constexpr (adam_mode == ADAM_MODE::ORIGINAL) { + grad_val += param_ptr[d] * scalar_t(weight_decay); + } else if constexpr (adam_mode == ADAM_MODE::ADAMW) { + param_ptr[d] = param_ptr[d] * scalar_t(1 - lr * weight_decay); + } + } + // exp_avg.lerp_(grad, 1 - beta1) + auto is_lerp_weight_small = std::abs(scalar_t(exp_avg_grad_coefficient)) < scalar_t(0.5); + if (is_lerp_weight_small) { + exp_avg_ptr[d] = exp_avg_ptr[d] + scalar_t(exp_avg_grad_coefficient) * (grad_val - exp_avg_ptr[d]); + } else { + exp_avg_ptr[d] = grad_val - (grad_val - exp_avg_ptr[d]) * (scalar_t(1) - scalar_t(exp_avg_grad_coefficient)); + } + exp_avg_sq_ptr[d] = exp_avg_sq_ptr[d] * scalar_t(beta2); + exp_avg_sq_ptr[d] = exp_avg_sq_ptr[d] + + scalar_t(exp_avg_sq_grad_coefficient) * grad_val * grad_val; + scalar_t demon_val; + if (amsgrad) { + max_exp_avg_sq_ptr[d] = + std::max(max_exp_avg_sq_ptr[d], exp_avg_sq_ptr[d]); + demon_val = + std::sqrt(max_exp_avg_sq_ptr[d]) / scalar_t(bias_correction2_sqrt) + scalar_t(eps); + } else { + demon_val = std::sqrt(exp_avg_sq_ptr[d]) / scalar_t(bias_correction2_sqrt) + scalar_t(eps); + } + param_ptr[d] = param_ptr[d] - scalar_t(step_size) * exp_avg_ptr[d] / demon_val; + } +} + + +template <typename scalar_t, ADAM_MODE adam_mode> +void adam_fused_step_impl( + const at::Tensor& param, + const at::Tensor& grad, + const at::Tensor& exp_avg, + const at::Tensor& exp_avg_sq, + const at::Tensor& max_exp_avg_sq, + const at::Tensor& state_step, + const double lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool amsgrad, + const bool maximize, + const float* grad_scale_ptr) { + using opmath_t = at::opmath_type<scalar_t>; + double step = state_step.item<float>(); + scalar_t* param_data = param.data_ptr<scalar_t>(); + scalar_t* exp_avg_data = exp_avg.data_ptr<scalar_t>(); + scalar_t* exp_avg_sq_data = exp_avg_sq.data_ptr<scalar_t>(); + scalar_t* max_exp_avg_sq_data = amsgrad ? 
max_exp_avg_sq.data_ptr<scalar_t>() : nullptr; + scalar_t* grad_data = grad.data_ptr<scalar_t>(); + + // need to use double here to align with non-fused adam + double bias_correction1 = 1 - std::pow(beta1, step); + double bias_correction2 = 1 - std::pow(beta2, step); + double exp_avg_grad_coefficient = 1 - beta1; + double exp_avg_sq_grad_coefficient = 1 - beta2; + double bias_correction2_sqrt = std::sqrt(bias_correction2); + + + constexpr size_t cache_line_size = 64; + constexpr int64_t cache_line_aligned_task_unit = cache_line_size / sizeof(scalar_t); + size_t num_units = divup(param.numel(), cache_line_aligned_task_unit); + + auto adam_fn = [&](int64_t begin, int64_t end) { + // local pointers + begin *= cache_line_aligned_task_unit; + end = std::min(end * cache_line_aligned_task_unit, param.numel()); + scalar_t* param_ptr = param_data + begin; + scalar_t* exp_avg_ptr = exp_avg_data + begin; + scalar_t* exp_avg_sq_ptr = exp_avg_sq_data + begin; + scalar_t* grad_ptr = grad_data + begin; + scalar_t* max_exp_avg_sq_ptr = amsgrad ? max_exp_avg_sq_data + begin : nullptr; + + const int64_t size = end - begin; + adam_math<scalar_t, opmath_t, adam_mode>( + param_ptr, + exp_avg_ptr, + exp_avg_sq_ptr, + grad_ptr, + max_exp_avg_sq_ptr, + lr, + bias_correction1, + bias_correction2, + exp_avg_grad_coefficient, + exp_avg_sq_grad_coefficient, + bias_correction2_sqrt, + eps, + weight_decay, + beta2, + amsgrad, + maximize, + grad_scale_ptr, + size + ); + }; + at::parallel_for( + 0, num_units, 0, adam_fn); +} + +void fused_adam_kernel( + const at::Tensor& param, + const at::Tensor& grad, + const at::Tensor& exp_avg, + const at::Tensor& exp_avg_sq, + const at::Tensor& max_exp_avg_sq, + const at::Tensor& state_step, + const double lr, + const double beta1, + const double beta2, + const double weight_decay, + const double eps, + const bool amsgrad, + const bool maximize, + const float* grad_scale_ptr, + const ADAM_MODE adam_mode + ) { + Tensor grad_contiguous = grad.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, param.scalar_type(), "fused_adam_kernel", [&] { + if(adam_mode == ADAM_MODE::ORIGINAL){ + adam_fused_step_impl<scalar_t, ADAM_MODE::ORIGINAL>(param, grad, exp_avg, exp_avg_sq, max_exp_avg_sq, state_step, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_ptr); + } else { + adam_fused_step_impl<scalar_t, ADAM_MODE::ADAMW>(param, grad, exp_avg, exp_avg_sq, max_exp_avg_sq, state_step, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_ptr); + } + + }); +} + +} + +REGISTER_DISPATCH(fused_adam_stub, &fused_adam_kernel); +} // namespace at::native diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml index 6e96a8a6aa..f3f5683350 100644 --- a/aten/src/ATen/native/native_functions.yaml +++ b/aten/src/ATen/native/native_functions.yaml @@ -15517,6 +15517,7 @@ # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now). variants: function dispatch: + CPU: _fused_adam_kernel_cpu_ CUDA: _fused_adam_kernel_cuda_ autogen: _fused_adam, _fused_adam.out @@ -15526,6 +15527,7 @@ device_check: NoCheck variants: function dispatch: + CPU: _fused_adam_kernel_cpu_ CUDA: _fused_adam_kernel_cuda_ autogen: _fused_adam.tensor_lr, _fused_adam.tensor_lr_out @@ -15533,6 +15535,7 @@ # Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now). 
variants: function dispatch: + CPU: _fused_adamw_kernel_cpu_ CUDA: _fused_adamw_kernel_cuda_ autogen: _fused_adamw, _fused_adamw.out @@ -15542,6 +15545,7 @@ device_check: NoCheck variants: function dispatch: + CPU: _fused_adamw_kernel_cpu_ CUDA: _fused_adamw_kernel_cuda_ autogen: _fused_adamw.tensor_lr, _fused_adamw.tensor_lr_out diff --git a/build_variables.bzl b/build_variables.bzl index 6a152fb909..36e54ffda4 100644 --- a/build_variables.bzl +++ b/build_variables.bzl @@ -1168,6 +1168,7 @@ aten_native_source_codegen_list = [ "aten/src/ATen/native/cpu/SpmmReduceKernel.cpp", "aten/src/ATen/native/cpu/SparseFactories.cpp", "aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp", + "aten/src/ATen/native/cpu/FusedAdamKernel.cpp", ] # This aten native source file list will not go through aten codegen process @@ -1402,6 +1403,7 @@ aten_native_source_non_codegen_list = [ "aten/src/ATen/native/xnnpack/OpContext.cpp", "aten/src/ATen/native/xnnpack/RegisterOpContextClass.cpp", "aten/src/ATen/native/xnnpack/Shim.cpp", + "aten/src/ATen/native/FusedAdam.cpp", # Files not in native, but depends on native symbols # "aten/src/ATen/TensorIndexing.cpp", "aten/src/ATen/TensorIterator.cpp", diff --git a/test/test_optim.py b/test/test_optim.py index 680d967a26..9eea11ffda 100644 --- a/test/test_optim.py +++ b/test/test_optim.py @@ -21,9 +21,10 @@ from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_optimizers import ( optim_db, optims, OptimizerErrorEnum, _get_optim_inputs_including_global_cliquey_kwargs, TensorTracker) from torch.testing._internal.common_device_type import ( - instantiate_device_type_tests, largeTensorTest, onlyCPU, onlyCUDA, skipMPS, TEST_WITH_ROCM) + instantiate_device_type_tests, largeTensorTest, onlyCPU, onlyCUDA, skipMPS, TEST_WITH_ROCM, onlyNativeDeviceTypes) from torch.testing._internal.common_utils import markDynamoStrictTest, parametrize, run_tests, TestCase - +from torch.testing._internal.common_cuda import _create_scaling_case +from torch.testing._internal.common_dtype import floating_types_and FP16_REDUCED_PRECISION = {'atol': 1e-5, 'rtol': 1e-4} @@ -581,6 +582,49 @@ class TestOptimRenewed(TestCase): self.assertTrue(a1_grad_imags.all_popped()) self.assertTrue(losses.all_popped()) + def _compare_between(self, inputs, models, optimizers, assert_eq_kwargs=None, assert_step_dtype=None): + # why 7? iteration 7 is where we start to see differences for RAdam + # params interacting with the small eps value, because that's right + # after rho_t becomes greater than 5 in step 6. 
+ if assert_eq_kwargs is None: + assert_eq_kwargs = {} + kIterations = 7 + tracker = TensorTracker(assert_eq_kwargs) + for i in range(kIterations): + state, updated_params = [], [] + if not isinstance(inputs, list): + inputs = [inputs, inputs] + for input, model, optimizer in zip(inputs, models, optimizers): + optimizer.zero_grad() + + # Test that step behaves as expected (a no-op) when grads are set to None + if i != 3: + output = model(input) + loss = output.sum() + loss.backward() + + optimizer.step() + state.append(optimizer.state) + updated_params.append(model.parameters()) + + og_state, new_state = state + for og_p, new_p in zip(updated_params[0], updated_params[1]): + tracker.add(og_p) + tracker.pop_check_set(new_p, self) + + # check that optimizer states are the same + og_p_state = og_state[og_p] + new_p_state = new_state[new_p] + if assert_step_dtype is not None: + if torch.is_tensor(og_p_state.get("step", None)): + self.assertEqual(og_p_state["step"].dtype, assert_step_dtype) + if torch.is_tensor(new_p_state.get("step", None)): + self.assertEqual(new_p_state["step"].dtype, assert_step_dtype) + for k in og_p_state: + tracker.add(og_p_state[k]) + tracker.pop_check_set(new_p_state[k], self) + + self.assertTrue(tracker.all_popped()) def _test_derived_optimizers(self, device, dtype, optim_info, flag, reduced_precision=False, assert_step_dtype=None): """ @@ -589,16 +633,12 @@ class TestOptimRenewed(TestCase): for provided optimizer configurations. """ assert flag in ("foreach", "fused") + assert_eq_kwargs = {} if not reduced_precision else FP16_REDUCED_PRECISION - # why 7? iteration 7 is where we start to see differences for RAdam - # params interacting with the small eps value, because that's right - # after rho_t becomes greater than 5 in step 6. 
- kIterations = 7 - - optim_inputs = optim_info.optim_inputs_func(device=device) + optim_inputs = optim_info.optim_inputs_func(device=device, dtype=dtype) optim_cls = optim_info.optim_cls for optim_input in optim_inputs: - updated_params, state = [], [] + models, optimizers = [], [] kwargs = deepcopy(optim_input.kwargs) if kwargs.get("capturable", False) and str(device) == "cpu": # capturable is not supported on CPU @@ -626,39 +666,10 @@ class TestOptimRenewed(TestCase): params = list(model.parameters()) + [empty_param] optimizer = optim_cls(params, **kwargs) + models.append(model) + optimizers.append(optimizer) - for i in range(kIterations): - optimizer.zero_grad() - - # Test that step behaves as expected (a no-op) when grads are set to None - if i != 3: - output = model(input) - loss = output.sum() - loss.backward() - - optimizer.step() - - if assert_step_dtype is not None: - p_state = optimizer.state[params[0]] - if torch.is_tensor(p_state.get("step", None)): - self.assertEqual(p_state["step"].dtype, assert_step_dtype) - - state.append(optimizer.state) - updated_params.append(model.parameters()) - - assert_eq_kwargs = {} if not reduced_precision else FP16_REDUCED_PRECISION - - og_state, new_state = state - for og_p, new_p in zip(updated_params[0], updated_params[1]): - self.assertEqual(og_p, new_p, **assert_eq_kwargs) - - # check that optimizer states are the same - og_p_state = og_state[og_p] - new_p_state = new_state[new_p] - - for k in og_p_state: - self.assertEqual(og_p_state[k], new_p_state[k], **assert_eq_kwargs) - + self._compare_between(input, models, optimizers, assert_eq_kwargs, assert_step_dtype) @skipMPS # MPS doesn't support torch.float64, see https://github.com/pytorch/pytorch/issues/115350 @optims([optim for optim in optim_db if "foreach" in optim.supported_impls], dtypes=[torch.float64]) @@ -847,16 +858,23 @@ class TestOptimRenewed(TestCase): self.assertLessEqual(mt_max_mem, expected_max_mem) - @onlyCUDA - @optims([optim for optim in optim_db if "fused" in optim.supported_impls], dtypes=[torch.float64]) + @onlyNativeDeviceTypes + @optims( + [optim for optim in optim_db if "fused" in optim.supported_impls], + dtypes=floating_types_and(torch.bfloat16, torch.float16, ) + ) def test_fused_matches_forloop(self, device, dtype, optim_info): + if device not in optim_info.supports_fused_on: + self.skipTest(f"{device} is not supported for fused on {optim_info.optim_cls.__name__}") self._test_derived_optimizers(device, dtype, optim_info, "fused") - @onlyCUDA - @largeTensorTest("64GB", "cuda") + @onlyNativeDeviceTypes + @largeTensorTest("64GB") @optims([optim for optim in optim_db if "fused" in optim.supported_impls], dtypes=[torch.float16]) def test_fused_large_tensor(self, device, dtype, optim_info): + if device not in optim_info.supports_fused_on: + self.skipTest(f"{device} is not supported for fused on {optim_info.optim_cls.__name__}") optim_cls = optim_info.optim_cls optim_inputs = optim_info.optim_inputs_func(device=device) for optim_input in optim_inputs: @@ -1304,10 +1322,11 @@ class TestOptimRenewed(TestCase): # Make sure that device of state['step'] is still CPU _unless_ torch.compile() added a capturable! 
capturable = state_dict_cpu["param_groups"][0].get("capturable", False) + fused = state_dict_cpu["param_groups"][0].get("fused", False) new_state_dict = optimizer_cuda.state_dict() for state_cpu, state_cuda in zip(state_dict_cpu["state"].values(), new_state_dict["state"].values()): if "step" in state_cpu and torch.is_tensor(state_cpu["step"]): - self.assertEqual(state_cuda["step"].device.type, "cuda" if capturable else "cpu") + self.assertEqual(state_cuda["step"].device.type, "cuda" if capturable or fused else "cpu") for _ in range(5): optimizer.step(closure) @@ -1615,6 +1634,104 @@ class TestOptimRenewed(TestCase): res2 = optim_neg_inf.step(closure) self.assertEqual(type(res1), type(res2)) + @onlyCUDA + @optims( + [optim for optim in optim_db if "cpu" in optim.supports_fused_on and "cuda" in optim.supports_fused_on], + dtypes=floating_types_and(torch.bfloat16, torch.float16,) + ) + def test_fused_cpu_matches_cuda(self, device, dtype, optim_info): + optim_cls = optim_info.optim_cls + optim_inputs = optim_info.optim_inputs_func(device="cpu") + for optim_input in optim_inputs: + inpts, models, optimizers = [], [], [] + for dev in ('cpu', 'cuda'): + kwargs = optim_input.kwargs + kwargs["fused"] = True + inpt = torch.tensor( + [0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=dtype, device=dev + ).reshape(3, 2) + + torch.manual_seed(1) + model = torch.nn.Sequential( + torch.nn.Linear(2, 3), + torch.nn.Sigmoid(), + torch.nn.Linear(3, 1), + torch.nn.Sigmoid(), + ) + model.to(dtype=dtype, device=dev) + + # foreach/fused optimizers should be tested with a + # zero_size tensor as its last param. + # ref: https://github.com/pytorch/pytorch/issues/100701 + empty_param = torch.empty((), device=dev, dtype=dtype, requires_grad=True) + empty_param.grad = torch.rand_like(empty_param) + params = list(model.parameters()) + [empty_param] + + optimizer = optim_cls(params, **kwargs) + inpts.append(inpt) + models.append(model) + optimizers.append(optimizer) + self._compare_between(inpts, models, optimizers) + + @onlyCPU + @optims([optim for optim in optim_db if "fused" in optim.supported_impls], dtypes=[torch.float32]) + def test_grad_scaling_autocast_fused_optimizers(self, device, dtype, optim_info): + # This ut is from test_cuda.py test_grad_scaling_autocast_fused_optimizers + # but only test Adam/AdamW on CPU + # TODO: haozhe, support SGD and unified this ut with the CUDA only one + if device not in optim_info.supports_fused_on: + self.skipTest(f"{device} is not supported for fused on {optim_info.optim_cls.__name__}") + optim_inputs = optim_info.optim_inputs_func(device=device) + optim_cls = optim_info.optim_cls + for optim_input in optim_inputs: + kwargs = optim_input.kwargs + for _separate_unscale in (True, False): + self._grad_scaling_autocast_fused_optimizers( + optimizer_ctor=optim_cls, optimizer_kwargs=kwargs, separate_unscale=_separate_unscale) + + def _grad_scaling_autocast_fused_optimizers(self, optimizer_ctor, optimizer_kwargs, separate_unscale): + ( + mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, _, + ) = _create_scaling_case(optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs, device='cpu') + kwargs = deepcopy(optimizer_kwargs) + kwargs["fused"] = False + if 'lr' not in optimizer_kwargs: + # _create_scaling_case will set lr = 1.0 if optimizer_kwargs do not set lr + kwargs['lr'] = 1.0 + opt_control = optimizer_ctor(mod_control.parameters(), **kwargs) + + scaler = torch.cpu.amp.GradScaler(init_scale=128.0) + for input, target in data: + opt_control.zero_grad() + with 
torch.autocast('cpu', dtype=torch.half): + output_control = mod_control(input) + loss_control = loss_fn(output_control, target) + scaler.scale(loss_control).backward() + scaler.step(opt_control) + scaler.update() + + opt_scaling.zero_grad() + with torch.autocast('cpu', dtype=torch.half): + output_scaling = mod_scaling(input) + loss_scaling = loss_fn(output_scaling, target) + scaler.scale(loss_scaling).backward() + if separate_unscale: + scaler.unscale_(opt_scaling) + scaler.step(opt_scaling) + scaler.update() + + self.assertEqual(loss_control, loss_scaling,) + for param_control, param_scaling in zip(mod_control.parameters(), mod_scaling.parameters()): + self.assertEqual(param_control.grad, param_scaling.grad,) + self.assertEqual(param_control, param_scaling,) + + state_control, state_scaling = opt_control.state[param_control], opt_scaling.state[param_scaling] + + for k in state_control: + actual = state_scaling[k] + if k == "step": + actual = actual.squeeze() + self.assertEqual(state_control[k], actual,) @onlyCUDA @optims([o for o in optim_db if "foreach" in o.supported_impls], dtypes=[torch.float32]) diff --git a/test/test_torch.py b/test/test_torch.py index 9a1b619903..a11919328b 100644 --- a/test/test_torch.py +++ b/test/test_torch.py @@ -47,8 +47,7 @@ from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast, - skipMeta, - PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes, + skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes, get_all_device_types, skipXLA) from typing import Tuple import torch.backends.quantized @@ -5932,7 +5931,7 @@ else: for optimizer_ctor in (torch.optim.SGD, torch.optim.Adam, torch.optim.AdamW): self._grad_scaling_autocast_test(device=device.type, optimizer_ctor=optimizer_ctor, optimizer_kwargs={"foreach": True}) - @onlyCUDA + @onlyNativeDeviceTypes def test_grad_scaling_autocast_fused(self, device): device = torch.device(device) for optimizer_ctor in (torch.optim.Adam, torch.optim.AdamW): @@ -5952,8 +5951,6 @@ else: {"foreach": False, "fused": True}, ), ): - if device.type != "cuda": - optimizer_kwargs['fused'] = False with self.subTest(optimizer=optimizer_ctor, optimizer_kwargs=optimizer_kwargs): self._test_grads_invalidated_between_unscale_and_step(device.type, optimizer_ctor, optimizer_kwargs) diff --git a/torch/optim/adam.py b/torch/optim/adam.py index cd45a197b3..e74ad4e1ab 100644 --- a/torch/optim/adam.py +++ b/torch/optim/adam.py @@ -76,7 +76,7 @@ class Adam(Optimizer): # Support AMP with FP16/BF16 model params which would need # higher prec copy of params to do update math in higher prec to # alleviate the loss of information. - fused_supported_devices = _get_fused_kernels_supported_devices() + fused_supported_devices = _get_fused_kernels_supported_devices() + ["cpu"] if not all( p.device.type in fused_supported_devices and torch.is_floating_point(p) for pg in self.param_groups diff --git a/torch/optim/adamw.py b/torch/optim/adamw.py index bbe03c1ce5..89e776558d 100644 --- a/torch/optim/adamw.py +++ b/torch/optim/adamw.py @@ -75,7 +75,7 @@ class AdamW(Optimizer): # Suppor AMP with FP16/BF16 model params which would need # higher prec copy of params to do update math in higher prec to # alleviate the loss of information. 
- fused_supported_devices = _get_fused_kernels_supported_devices() + fused_supported_devices = _get_fused_kernels_supported_devices() + ["cpu"] if not all( p.device.type in fused_supported_devices and torch.is_floating_point(p) for pg in self.param_groups diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py index 6cf83218de..a1bea634ff 100644 --- a/torch/testing/_internal/common_optimizers.py +++ b/torch/testing/_internal/common_optimizers.py @@ -44,10 +44,7 @@ from torch.testing._internal.common_utils import ( skipIfTorchDynamo, TEST_WITH_TORCHDYNAMO, ) -from torch.utils._foreach_utils import ( - _get_foreach_kernels_supported_devices, - _get_fused_kernels_supported_devices, -) +from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices class OptimizerInput: @@ -143,6 +140,7 @@ class OptimizerInfo: skips=(), # Indicates which tests to skip decorators=None, # Additional decorators to apply to generated tests optim_error_inputs_func=None, # Function to generate optim inputs that error + supports_fused_on: Tuple[str] = (), ): self.optim_cls = optim_cls self.optim_inputs_func = optim_inputs_func @@ -160,6 +158,7 @@ class OptimizerInfo: *(skips if skips else []), ) self.optim_error_inputs_func = optim_error_inputs_func + self.supports_fused_on = supports_fused_on def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): result = [set_single_threaded_if_parallel_tbb] @@ -291,7 +290,7 @@ def get_error_inputs_for_all_optims(device, dtype): # global-cliquey flags to individual tests and fully expect tests to edit OptimizerInput.kwargs. -def optim_inputs_func_adadelta(device): +def optim_inputs_func_adadelta(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( @@ -340,7 +339,7 @@ def optim_error_inputs_func_adadelta(device, dtype): return error_inputs -def optim_inputs_func_adagrad(device): +def optim_inputs_func_adagrad(device, dtype=None): return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput( @@ -384,7 +383,7 @@ def optim_error_inputs_func_adagrad(device, dtype): # TODO: consider tensor LR! See multi_tensor_optimizer_configs in test_optim.py --> tensor LR should work # with all implementation code paths... 
-def optim_inputs_func_adam(device): +def optim_inputs_func_adam(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( @@ -399,7 +398,7 @@ def optim_inputs_func_adam(device): ), ] - return [ + total = [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), OptimizerInput( @@ -414,6 +413,19 @@ def optim_inputs_func_adam(device): params=None, kwargs={"weight_decay": 0.1, "amsgrad": True}, desc="amsgrad" ), ] + (cuda_supported_configs if "cuda" in str(device) else []) + if dtype in (torch.float16,): + for input in total: + """ + Too small eps will make denom to be zero for low precision dtype + denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) + For example, + >>> a + tensor([0.], dtype=torch.float16) + >>> a + 1e-8 + tensor([0.], dtype=torch.float16) + """ + input.kwargs["eps"] = 0.1 + return total def optim_error_inputs_func_adam(device, dtype): @@ -473,7 +485,7 @@ def optim_error_inputs_func_adam(device, dtype): return error_inputs -def optim_inputs_func_adamax(device): +def optim_inputs_func_adamax(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( @@ -524,15 +536,15 @@ def optim_error_inputs_func_adamax(device, dtype): return error_inputs -def optim_inputs_func_adamw(device): - return optim_inputs_func_adam(device) +def optim_inputs_func_adamw(device, dtype=None): + return optim_inputs_func_adam(device, dtype) def optim_error_inputs_func_adamw(device, dtype): return optim_error_inputs_func_adam(device, dtype) -def optim_inputs_func_asgd(device): +def optim_inputs_func_asgd(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( @@ -584,7 +596,7 @@ def optim_error_inputs_func_asgd(device, dtype): return error_inputs -def optim_inputs_func_lbfgs(device): +def optim_inputs_func_lbfgs(device, dtype=None): return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), @@ -605,7 +617,7 @@ def optim_error_inputs_func_lbfgs(device, dtype): # Weird story bro, NAdam and RAdam do not have maximize. -def optim_inputs_func_nadam(device): +def optim_inputs_func_nadam(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( @@ -676,7 +688,7 @@ def optim_error_inputs_func_nadam(device, dtype): # Weird story bro, NAdam and RAdam do not have maximize. 
-def optim_inputs_func_radam(device=None): +def optim_inputs_func_radam(device=None, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( @@ -738,7 +750,7 @@ def optim_error_inputs_func_radam(device, dtype): return error_inputs -def optim_inputs_func_rmsprop(device): +def optim_inputs_func_rmsprop(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( @@ -799,7 +811,7 @@ def optim_error_inputs_func_rmsprop(device, dtype): return error_inputs -def optim_inputs_func_rprop(device): +def optim_inputs_func_rprop(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( @@ -841,7 +853,7 @@ def optim_error_inputs_func_rprop(device, dtype): return error_inputs -def optim_inputs_func_sgd(device): +def optim_inputs_func_sgd(device, dtype=None): return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 1e-2}, desc="non-default lr"), @@ -886,7 +898,7 @@ def optim_error_inputs_func_sgd(device, dtype): return error_inputs -def optim_inputs_func_sparseadam(device): +def optim_inputs_func_sparseadam(device, dtype=None): return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput( @@ -995,10 +1007,7 @@ def _get_optim_inputs_including_global_cliquey_kwargs( x for x in optim_info.supported_impls if x not in skip - and ( - _get_device_type(device) in _get_fused_kernels_supported_devices() - or x != "fused" - ) + and (_get_device_type(device) in optim_info.supports_fused_on or x != "fused") and ( _get_device_type(device) in _get_foreach_kernels_supported_devices() or x != "foreach" @@ -1196,6 +1205,7 @@ optim_db: List[OptimizerInfo] = [ ), optim_error_inputs_func=optim_error_inputs_func_adam, supported_impls=("foreach", "differentiable", "fused"), + supports_fused_on=("cpu", "cuda"), decorators=( # Expected floating point error between fused and compiled forloop DecorateInfo( @@ -1205,6 +1215,21 @@ optim_db: List[OptimizerInfo] = [ active_if=lambda kwargs: TEST_WITH_TORCHDYNAMO and kwargs["dtype"] == torch.float64, ), + DecorateInfo( + # Note on tolerances: + # difference comes from the fact that the non fused kernel have + # more dtype cast operations. We have another test test_fused_cpu_matches_cuda + # to make sure there is no discrepancies between cuda fused kernel + # and cpu fused kernel + toleranceOverride( + { + torch.bfloat16: tol(atol=5e-3, rtol=5e-3), + torch.float16: tol(atol=5e-3, rtol=5e-3), + } + ), + "TestOptimRenewed", + "test_fused_matches_forloop", + ), ), skips=( DecorateInfo( @@ -1364,6 +1389,7 @@ optim_db: List[OptimizerInfo] = [ optim_inputs_func=optim_inputs_func_adamw, optim_error_inputs_func=optim_error_inputs_func_adamw, supported_impls=("foreach", "differentiable", "fused"), + supports_fused_on=("cpu", "cuda"), decorators=( # Expected error between compiled forloop and fused optimizers DecorateInfo( @@ -1373,6 +1399,21 @@ optim_db: List[OptimizerInfo] = [ active_if=lambda kwargs: TEST_WITH_TORCHDYNAMO and kwargs["dtype"] == torch.float64, ), + DecorateInfo( + toleranceOverride( + # Note on tolerances: + # difference comes from the fact that the non fused kernel have + # more dtype cast operations. 
We have another test test_fused_cpu_matches_cuda + # to make sure there is no discrepancies between cuda fused kernel + # and cpu fused kernel + { + torch.bfloat16: tol(atol=5e-3, rtol=5e-3), + torch.float16: tol(atol=5e-3, rtol=5e-3), + } + ), + "TestOptimRenewed", + "test_fused_matches_forloop", + ), ), skips=( DecorateInfo( @@ -1865,6 +1906,7 @@ optim_db: List[OptimizerInfo] = [ }, [lambda opt: StepLR(opt, gamma=0.99999, step_size=300)], ), + supports_fused_on=("cuda",), skips=( DecorateInfo( skipIfTorchDynamo( @@ -2060,7 +2102,10 @@ class TensorTracker: numerical discrepancies, and so when the test fails, it is likely a real problem. """ - def __init__(self): + def __init__(self, assert_eq_kwargs=None): + if assert_eq_kwargs is None: + assert_eq_kwargs = {} + self.assert_eq_kwargs = assert_eq_kwargs self.tensors = [] def add(self, tensor): @@ -2080,7 +2125,7 @@ class TensorTracker: ref = self.tensors.pop(0) testcase.assertTrue(isinstance(ref, Tensor), f"{type(ref)=}") - testcase.assertEqual(tensor_to_set, ref) + testcase.assertEqual(tensor_to_set, ref, **self.assert_eq_kwargs) with torch.no_grad(): tensor_to_set.copy_(ref)
2.41.0
e280862ffa0ff34e73f8763f84e4b77925a3570
Thu, 18 Apr 2024 22:45:31 -0700
[PATCH 0368/1000] Add custom joint graph passes (#124443)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124443 Approved by: https://github.com/aorenste, https://github.com/malfet
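A minimal usage sketch of the new knobs (based on the `joint_custom_pre_pass` / `joint_custom_post_pass` config entries and the test added in the diff below; the pass body here is illustrative, not part of the commit):

```python
import torch
import torch._inductor.config as inductor_config

def swap_cos_for_sin(graph: torch.fx.Graph) -> None:
    # Runs on the joint forward/backward FX graph before (or after) the
    # built-in joint-graph passes, mirroring the change_cos_pass test helper.
    for node in graph.nodes:
        if node.op == "call_function" and node.target == torch.ops.aten.cos.default:
            node.target = torch.ops.aten.sin.default

def f(x):
    return x.cos().cos()

x = torch.randn(8)
with inductor_config.patch(joint_custom_pre_pass=swap_cos_for_sin):
    out = torch.compile(f)(x)  # now computes x.sin().sin()
```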
diff --git a/test/inductor/test_custom_post_grad_passes.py b/test/inductor/test_custom_post_grad_passes.py index acb11d4d93..5b1d3031c9 100644 --- a/test/inductor/test_custom_post_grad_passes.py +++ b/test/inductor/test_custom_post_grad_passes.py @@ -57,6 +57,12 @@ aten = torch.ops.aten mkldnn = torch.ops.mkldnn +def change_cos_pass(graph): + for node in graph.nodes: + if node.op == "call_function" and node.target == aten.cos.default: + node.target = aten.sin.default + + class TestPostGradCustomPrePostPass(TestCustomPassBase): # mkldnn fusion's pattern_matcher # (torch/_inductor/fx_passes/mkldnn_fusion.py), @@ -121,6 +127,30 @@ class TestPostGradCustomPrePostPass(TestCustomPassBase): x1 = self.conv(x) return x1.relu() + def test_custom_joint_pass_pre(self): + with config.patch(joint_custom_pre_pass=change_cos_pass): + + def g(x): + return x.sin().sin().sin() + + def f(x): + return x.cos().cos().cos() + + x = torch.randn(8, dtype=torch.float32) + torch.testing.assert_close(torch.compile(f)(x), g(x)) + + def test_custom_joint_pass_post(self): + with config.patch(joint_custom_post_pass=change_cos_pass): + + def g(x): + return x.sin().sin().sin() + + def f(x): + return x.cos().cos().cos() + + x = torch.randn(8, dtype=torch.float32) + torch.testing.assert_close(torch.compile(f)(x), g(x)) + def test_custom_pre_pass(self): with config.patch( # leave custom pass only in post_grad_passes() diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py index d7141639d0..f96e95ab5d 100644 --- a/torch/_inductor/config.py +++ b/torch/_inductor/config.py @@ -99,6 +99,10 @@ pattern_matcher = True post_grad_custom_pre_pass: Optional[Callable[[torch.fx.graph.Graph], None]] = None post_grad_custom_post_pass: Optional[Callable[[torch.fx.graph.Graph], None]] = None +# Registers a custom joint graph pass. +joint_custom_pre_pass: Optional[Callable[[torch.fx.Graph], None]] = None +joint_custom_post_pass: Optional[Callable[[torch.fx.Graph], None]] = None + # Registers a custom pregrad pass. Note that the pre-grad IR is 1. # non-functional, 2. non-normalized, and 3. prone to change. Ideally we should # use post-grad passes. diff --git a/torch/_inductor/fx_passes/joint_graph.py b/torch/_inductor/fx_passes/joint_graph.py index ab9352714f..3713583e69 100644 --- a/torch/_inductor/fx_passes/joint_graph.py +++ b/torch/_inductor/fx_passes/joint_graph.py @@ -300,6 +300,9 @@ def joint_graph_passes(graph: torch.fx.GraphModule): """ lazy_init() count = 0 + if config.joint_custom_pre_pass is not None: + config.joint_custom_pre_pass(graph.graph) + count += 1 if config.joint_graph_constant_folding: constant_fold_uniform_value(graph) @@ -310,6 +313,10 @@ def joint_graph_passes(graph: torch.fx.GraphModule): if not config.fallback_random: count += replace_random_passes(graph) + if config.joint_custom_post_pass is not None: + config.joint_custom_post_pass(graph.graph) + count += 1 + if count: stable_topological_sort(graph.graph) graph.graph.lint()
2.41.0
8fa843e58712c60e5d95fc638f45ce8f1033e23
Fri, 19 Apr 2024 12:34:00 +0000
[PATCH 0369/1000] Add vectorized norm fill for ppc64le (#113351)
This patch adds a vectorized normal-distribution fill (`normal_fill`) for ppc64le using VSX intrinsics. Pull Request resolved: https://github.com/pytorch/pytorch/pull/113351 Approved by: https://github.com/jgong5
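The VSX path below vectorizes the same Box–Muller math as the scalar `normal_fill_16` helper. For reference only, a rough NumPy sketch of that math (not the patch's code; the kernel applies it per 16-element chunk with `Vectorized<float>`):

```python
import numpy as np

def box_muller_normals(n, mean=0.0, std=1.0, seed=0):
    # n is assumed even: each pair of uniforms (u1, u2) yields two normal samples.
    rng = np.random.default_rng(seed)
    u1 = rng.random(n // 2, dtype=np.float32)
    u2 = rng.random(n // 2, dtype=np.float32)
    radius = np.sqrt(-2.0 * np.log(1.0 - u1))  # kernel computes sqrt(-2 * log(1 - uniform))
    theta = 2.0 * np.pi * u2                   # kernel: two_pi * u2
    return np.concatenate([radius * np.cos(theta) * std + mean,
                           radius * np.sin(theta) * std + mean]).astype(np.float32)
```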
diff --git a/aten/src/ATen/native/cpu/DistributionTemplates.h b/aten/src/ATen/native/cpu/DistributionTemplates.h index f4890c2f7e..93a9b33b29 100644 --- a/aten/src/ATen/native/cpu/DistributionTemplates.h +++ b/aten/src/ATen/native/cpu/DistributionTemplates.h @@ -15,7 +15,6 @@ #include <c10/util/irange.h> #endif - namespace at { namespace native { namespace templates { @@ -149,6 +148,62 @@ static void normal_fill_16(scalar_t *data, const scalar_t mean, const scalar_t s } } +#if defined(__VSX__) || defined(CPU_CAPABILITY_VSX) +static void normal_fill_16_VSX(float *data,const Vectorized<float> &two_pi,const Vectorized<float> &one,const Vectorized<float> &minus_two,const Vectorized<float> &mean,const Vectorized<float> &std) { + using Vec = Vectorized<float>; + Vec u1=one-Vec::loadu(data); + Vec u2=Vec::loadu(data+8); + Vec radius=(minus_two * u1.log()); + radius=radius.sqrt(); + Vec theta=two_pi * u2; + Vec output_vec=radius * theta.cos() * std + mean; + Vec output_vec2=radius * theta.sin() * std + mean; + output_vec.store(data); + output_vec2.store(data+8); +} + +template <typename scalar_t, typename RNG> +void normal_fill_VSX(const TensorBase &self, const scalar_t mean, const scalar_t std, RNG generator) { + float *data = self.data_ptr<float>(); + auto size = self.numel(); + std::lock_guard<std::mutex> lock(generator->mutex_); + for (const auto i : c10::irange(size)) { + at::uniform_real_distribution<scalar_t> uniform(0, 1); + data[i] = uniform(generator); + } + + using Vec = Vectorized<float>; + const Vec two_pi = Vec(2.0f * c10::pi<double>); + const Vec one = Vec(1.0f); + const Vec minus_two = Vec(-2.0f); + const Vec var_vec = Vec(std); + const Vec mean_vec = Vec(mean); + + for (int64_t i = 0; i < size - 15; i += 16) { + if(Vec::size()==8) { + normal_fill_16_VSX(data + i, two_pi, one, minus_two, mean_vec, var_vec); + } + else{ + normal_fill_16<scalar_t>(data + i, mean, std); + } + } + if (size % 16 != 0) { + // Recompute the last 16 values. + data = data + size - 16; + for (const auto i : c10::irange(16)) { + at::uniform_real_distribution<scalar_t> uniform(0, 1); + data[i] = uniform(generator); + } + if(Vec::size()==8){ + normal_fill_16_VSX(data, two_pi, one, minus_two, mean_vec, var_vec); + } + else{ + normal_fill_16<scalar_t>(data, mean, std); + } + } +} +#endif //VSX + template <typename scalar_t, typename RNG> void normal_fill(const TensorBase &self, const scalar_t mean, const scalar_t std, RNG generator) { scalar_t *data = self.data_ptr<scalar_t>(); @@ -179,6 +234,8 @@ void normal_kernel(const TensorBase &self, double mean, double std, RNG generato if (self.scalar_type() == ScalarType::Float && size >= 16 && self.is_contiguous()) { #ifdef CPU_CAPABILITY_AVX2 normal_fill_AVX2(self, static_cast<float>(mean), static_cast<float>(std), generator); +#elif defined(__VSX__) || defined(CPU_CAPABILITY_VSX) + normal_fill_VSX(self, static_cast<float>(mean), static_cast<float>(std), generator); #else normal_fill(self, static_cast<float>(mean), static_cast<float>(std), generator); #endif
2.41.0
6724a769b88fdc0714a9ae94f0f1ff076590025
Fri, 19 Apr 2024 13:12:42 +0000
[PATCH 0370/1000] [ptd] drop ncclGroupStart/end for ncclCommInit (#124363) (#124416)
Summary:
```
ncclGroupStart()
ncclCommInit(..)
ncclGroupEnd()
```
The pattern above is only needed when a *single thread* manages multiple GPUs. In our case we always have one process managing one GPU, so the group operations are not needed. Test Plan: CI Differential Revision: D56274975 Co-authored-by: Cen Zhao <cenzhao@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124416 Approved by: https://github.com/shuqiangzhang
diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp index a84647cfc6..bf21fc0dc6 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp @@ -1953,10 +1953,8 @@ std::shared_ptr<NCCLComm> ProcessGroupNCCL::getNCCLComm( // example: Using the batch_isend_irecv to send a tensor to a target process. // On the sender side, the corresponding underlying NCCL calls will look like // ncclGroupStart() // This is in batch_isend_irecv - // ncclGroupStart() // This is [Note 1] // ncclCommInitRank() // Inside NCCLComm::create // ncclSend() - // ncclGroupEnd() // This is [Note 2] // ncclGroupEnd() // This is in batch_isend_irecv // With this pattern, the nccl communicator will be created in the last // ncclGroupEnd which means when ncclSend is processed, the passed @@ -1970,9 +1968,6 @@ std::shared_ptr<NCCLComm> ProcessGroupNCCL::getNCCLComm( C10D_NCCL_CHECK(ncclGroupEnd(), c10::nullopt); } - // [Note 1] Create the NCCL communicators for each GPU - C10D_NCCL_CHECK(ncclGroupStart(), c10::nullopt); - // GPU world size and GPU rank int numRanks, rank; @@ -2033,19 +2028,6 @@ std::shared_ptr<NCCLComm> ProcessGroupNCCL::getNCCLComm( inInitializationCommMap_.emplace(deviceKey, ncclComm); } - // [Note 2 ] -#ifndef NCCL_HAS_COMM_NONBLOCKING - C10D_NCCL_CHECK(ncclGroupEnd(), c10::nullopt); -#else - if (nccl_use_nonblocking()) { - // If we use nonblocking mode, allow communicators to be - // uninitialized/ncclInProgress until the first communication - C10D_NCCL_CHECK_NONBLOCKING(ncclGroupEnd(), c10::nullopt); - } else { - C10D_NCCL_CHECK(ncclGroupEnd(), c10::nullopt); - } -#endif - NCCLTraceBuffer::get()->record_pg_ranks( std::make_tuple(pg_name_, pg_desc_), groupRanks());
2.41.0
9db59e9e4d425f9d4e9f55247888c24b0d638e8
Thu, 18 Apr 2024 11:48:41 -0700
[PATCH 0371/1000] [sparse] Add fast semi-structured sparsification kernels (#122350)
This PR adds fast semi-structured sparsification kernels to PyTorch. These kernels provide accelerated semi-structured (2:4) sparsification and have been added as ATen native functions. In particular, three new functions have been added:
* `torch._sparse_semi_structured_tile` This function returns the packed representation and metadata for both X and X', as well as the thread masks. Note that this applies 2:4 sparsity in a 4x4 tile instead of a 1x4 strip as usual.
* `torch._sparse_semi_structured_apply` This function takes in an input tensor and thread masks from the above function and returns a packed representation and metadata from applying the thread masks to the input tensor.
* `torch._sparse_semi_structured_apply_dense` This function does the same thing as above, but instead of returning the tensor in the sparse representation it returns it in the dense representation.
The subclasses have also been updated to add a new `prune_dense_static_sort` classmethod to create sparse tensors with this format. I've added some additional documentation on how to calculate the compressed tensors needed to create a SparseSemiStructuredTensor oneself. To this end, there are two new helper functions added: `sparse_semi_structured_tile`, `compute_compressed_swizzled_bitmask`. Differential Revision: [D56190801](https://our.internmc.facebook.com/intern/diff/D56190801) Pull Request resolved: https://github.com/pytorch/pytorch/pull/122350 Approved by: https://github.com/cpuhrsch
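A rough usage sketch of the new ops, assuming the signatures declared in `native_functions.yaml` below; the shapes, dtype, and exact ordering of the five outputs of `_sparse_semi_structured_tile` are illustrative rather than taken from the commit:

```python
import torch

# fp16 matrix on CUDA (the new kernels are CUDA-only).
x = torch.randn(128, 128, dtype=torch.float16, device="cuda")

# Tile-wise 2:4 sparsification: packed values/metadata for x and its transpose,
# plus the per-thread masks describing which elements were kept.
packed, meta, packed_t, meta_t, threads_masks = torch._sparse_semi_structured_tile(x)

# Re-apply the same masks to another tensor of the same shape,
# getting back its packed representation and metadata ...
y = torch.randn_like(x)
y_packed, y_meta = torch._sparse_semi_structured_apply(y, threads_masks)

# ... or the masked tensor in dense form, with pruned entries zeroed out.
y_dense = torch._sparse_semi_structured_apply_dense(y, threads_masks)
```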
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml index f3f5683350..cbdb998c81 100644 --- a/aten/src/ATen/native/native_functions.yaml +++ b/aten/src/ATen/native/native_functions.yaml @@ -3342,6 +3342,18 @@ dispatch: CUDA: _cslt_sparse_mm_search +- func: _sparse_semi_structured_tile(Tensor input, str algorithm="", bool use_cutlass=True) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + dispatch: + CUDA: _sparse_semi_structured_tile + +- func: _sparse_semi_structured_apply(Tensor input, Tensor thread_masks) -> (Tensor, Tensor) + dispatch: + CUDA: _sparse_semi_structured_apply + +- func: _sparse_semi_structured_apply_dense(Tensor input, Tensor thread_masks) -> Tensor + dispatch: + CUDA: _sparse_semi_structured_apply_dense + # DEPRECATED: Use torch.__sparse_semi_structured_mm/torch._sparse_semi_structured_addmm instead - func: _sparse_semi_structured_linear(Tensor input, Tensor weight, Tensor meta, *, Tensor? bias=None, str? activation=None, ScalarType? out_dtype=None) -> Tensor dispatch: diff --git a/aten/src/ATen/native/sparse/cuda/ComputeSparseTile.h b/aten/src/ATen/native/sparse/cuda/ComputeSparseTile.h new file mode 100644 index 0000000000..3d6b14224d --- /dev/null +++ b/aten/src/ATen/native/sparse/cuda/ComputeSparseTile.h @@ -0,0 +1,184 @@ +#pragma once + +#include <ATen/native/sparse/cuda/SparseSemiStructuredPack.h> +#include <ATen/native/sparse/cuda/StaticSort.h> +#include <cutlass/bfloat16.h> +#include <cutlass/half.h> + +// Given 4x4 values, computes the selected indices that will remain after 2:4 +// sparsification, as a bitmask. +// NOTE: Algorithms might select LESS than 8 values in total in some cases. + +namespace platform { +template <> +struct numeric_limits<cutlass::bfloat16_t> { + CUTLASS_HOST_DEVICE + static cutlass::bfloat16_t infinity() { + return cutlass::bfloat16_t::bitcast(0x7f80); + } +}; +} // namespace platform + +namespace at::native{ + +template <typename Element, typename Pointwise> +struct TileValueOrderedT { + union { + struct { + Element value; + uint2b_t col; + uint2b_t row; + } parts; + uint32_t raw; + }; + CUTLASS_DEVICE bool operator<( + TileValueOrderedT<Element, Pointwise> const& other) const { + return Pointwise::apply(parts.value) < Pointwise::apply(other.parts.value); + } + CUTLASS_DEVICE TileValueOrderedT() {} +}; + +// Operations that we can apply to rank the values +struct IdentityOp { + template <typename T> + static T CUTLASS_HOST_DEVICE apply(T const& x) { + return x; + } +}; +// Can be applied to rank based on absolute value +struct AbsOp { + template <typename T> + static T CUTLASS_HOST_DEVICE apply(T const& x) { + return cutlass::abs(x); + } +}; + +// Given 4x4 values, computes the selected indices that will remain after 2:4 +// sparsification, as a bitmask. We have 2 constraints: +// (1) At most 2 values per line +// (2) At most 2 values per column +// This means we can select at most 8 values in total. +// ALGO: We use a greedy algorithm, where we take values in the 4x4 +// tile in descending order. If a value fits (because the line/col is not +// already full), we select it. Then we move on to the next one. +// NOTE: This algorithm might select LESS than 8 values in total in some cases. +// NOTE (2): RF are not indexable, so we shouldn't rely on indexing +// values at any point, otherwise they will be stored in local memory. 
+template <typename Op = IdentityOp> +struct LargestValuesGreedy { + template <typename T> + static CUTLASS_DEVICE T outOfBoundsFillValue() { + return -platform::numeric_limits<T>::infinity(); + } + + template <typename Tile4x4Accessor> + CUTLASS_DEVICE Indices4x4 operator()(Tile4x4Accessor values) { + using TileValueOrdered = + TileValueOrderedT<typename Tile4x4Accessor::Element, Op>; + using TileValuesFragment = cutlass::Array<TileValueOrdered, 4 * 4>; + Indices4x4 indices; + TileValuesFragment values_ordered; + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < 4; ++i) { + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < 4; ++j) { + TileValueOrdered& v = values_ordered[i * 4 + j]; + v.parts.value = values.at(i, j).get(); + v.parts.col = j; + v.parts.row = i; + } + } + // Use a sorting network (aka without branches) to avoid + // warp divergence + StaticSort<TileValuesFragment::kElements> sorter; + sorter(values_ordered); + + // bitmask to store how many we have selected on a given row/col + // 0 selected: (numPerRow >> 2*row) = 00 (0) + // 1 selected: (numPerRow >> 2*row) = 01 (1) + // 2 selected: (numPerRow >> 2*row) = 11 (3) + uint32_t numPerRow = 0; + uint32_t numPerCol = 0; + indices = 0; + + // Take as many as we can, starting with the largest values + CUTLASS_PRAGMA_UNROLL + for (int i = values_ordered.size() - 1; i >= 0; i--) { + auto& e = values_ordered[i]; + + uint32_t rcount = uint2b_t(numPerRow >> 2 * e.parts.row); + uint32_t ccount = uint2b_t(numPerCol >> 2 * e.parts.col); + // NOTE: This is more efficient (yet equivalent) to: + // `rcount != 3 && ccount != 3` + bool selected = (rcount + ccount) <= 2; + indices |= selected << (e.parts.col + 4 * e.parts.row); + + numPerRow |= (rcount + selected) << 2 * e.parts.row; + numPerCol |= (ccount + selected) << 2 * e.parts.col; + } + return indices; + } +}; + +// We consider each rows independantly in order +// This is to ensure that a row's sparsity pattern is only determined +// by its values and the rows before (but never the rows after) +// This enforces causality strictly +template <typename Op = IdentityOp> +struct Causal1122 { + template <typename T> + static CUTLASS_DEVICE T outOfBoundsFillValue() { + return -platform::numeric_limits<T>::infinity(); + } + + template <typename Tile4x4Accessor> + CUTLASS_DEVICE Indices4x4 operator()(Tile4x4Accessor values) { + static constexpr int kMaxValuesPerRow[] = {1, 1, 2, 2}; + using TileValueOrdered = + TileValueOrderedT<typename Tile4x4Accessor::Element, Op>; + using TileValuesFragment = cutlass::Array<TileValueOrdered, 4>; + Indices4x4 indices = 0; + + uint32_t numPerCol = 0; // <- see doc in `LargestValuesGreedy` + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < 4; ++row) { + int row_count = 0; + TileValuesFragment values_ordered; + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < 4; ++col) { + TileValueOrdered& v = values_ordered[col]; + v.parts.value = values.at(row, col).get(); + v.parts.col = col; + } + // Use a sorting network (aka without branches) to avoid + // warp divergence + StaticSort<TileValuesFragment::kElements> sorter; + sorter(values_ordered); + + // Take as many as we can, starting with the largest values + CUTLASS_PRAGMA_UNROLL + for (int i = values_ordered.size() - 1; i >= 0; i--) { + auto& e = values_ordered[i]; + + uint32_t ccount = uint2b_t(numPerCol >> 2 * e.parts.col); + bool selected = ccount != 3 && (row_count < kMaxValuesPerRow[row]); + indices |= selected << (e.parts.col + 4 * row); + numPerCol |= (ccount + selected) << 2 * e.parts.col; + row_count += selected; + } + 
} + return indices; + } +}; + +template <typename T> +void named_algorithms(T callback) { + callback(LargestValuesGreedy<IdentityOp>(), "largest_values_greedy"); + callback(Causal1122<IdentityOp>(), "causal1122"); + callback(LargestValuesGreedy<AbsOp>(), "largest_abs_values_greedy"); + // default one + callback(LargestValuesGreedy<IdentityOp>(), ""); +} + +} // namespace diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredApplyDense.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredApplyDense.cu new file mode 100644 index 0000000000..8b4d6be5aa --- /dev/null +++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredApplyDense.cu @@ -0,0 +1,186 @@ +#include <ATen/ScalarOps.h> +#include <ATen/Tensor.h> +#include <ATen/Functions.h> +#include <ATen/autocast_mode.h> +#include <c10/cuda/CUDAGuard.h> + +#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080) +#else +#include <ATen/native/sparse/cuda/ComputeSparseTile.h> +#include <ATen/native/sparse/cuda/SparseSemiStructuredPack.h> +#endif + +namespace at::native { + +#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080) +#else +struct Params { + uint64_t const* threads_masks; + + uint16_t const* input; + int64_t input_stride; + int64_t input_dim0; + int64_t input_dim1; + + uint16_t* output; + int64_t output_stride; + + __host__ dim3 getBlocksGrid() const { + return dim3( + cutlass::ceil_div(input_dim0, kWarpX), + cutlass::ceil_div(input_dim1, kWarpY), + 1); + } + + static CUTLASS_HOST_DEVICE dim3 getThreadsGrid() { + return dim3(kWarpX / kThreadX, kWarpY / kThreadY, 1); + } + + CUTLASS_DEVICE Tile8x8Masks* getCurrentThreadIndices() const { + Tile8x8Masks* gmem_threads_masks = (Tile8x8Masks*)threads_masks; + gmem_threads_masks += blockIdx.y * getThreadsGrid().y + threadIdx.y; + int64_t strideX = gridDim.y * getThreadsGrid().y; + gmem_threads_masks += + (blockIdx.x * getThreadsGrid().x + threadIdx.x) * strideX; + return gmem_threads_masks; + } +}; + +template <bool kInputRowMajor = true, bool kOutputRowMajor = true> +__global__ void __launch_bounds__(32 /* num_threads */, 32) sparse_semi_structured_apply_dense_k(Params p) { + using Fragment = cutlass::Array<uint16_t, 8>; + + // Top-left of the 8x8 tile we own + int warp_x = blockIdx.x * kWarpX; + int warp_y = blockIdx.y * kWarpY; + int x = warp_x + threadIdx.x * kThreadX; + int y = warp_y + threadIdx.y * kThreadY; + + uint16_t* output = p.output + x * p.output_stride + y; + Tile8x8Masks indices = *p.getCurrentThreadIndices(); + + // Load dense + Fragment lines[8]; + if (kInputRowMajor) { + uint16_t const* input = p.input + x * p.input_stride + y; + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < 8; ++i) { + cutlass::arch::global_load<Fragment, sizeof(Fragment)>( + lines[i], input + i * p.input_stride, true); + } + } else { + uint16_t const* input = p.input + x + y * p.input_stride; + Fragment columns[8]; + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < 8; ++i) { + cutlass::arch::global_load<Fragment, sizeof(Fragment)>( + columns[i], input + i * p.input_stride, true); + } + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < 8; ++i) { + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < 8; ++j) { + lines[i][j] = columns[j][i].get(); + } + } + } + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < 2; ++row) { + Indices4x4 masks[2]; + if (row == 0) { + masks[0] = indices.a; + masks[1] = indices.b; + } else { + masks[0] = indices.c; + masks[1] = indices.d; + } + + // Apply mask + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < 
2; ++m) { + CUTLASS_PRAGMA_UNROLL + for (int r = 0; r < 4; ++r) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < 4; ++c) { + lines[4 * row + r][4 * m + c] = lines[4 * row + r][4 * m + c] * + int((masks[m] >> (4 * r + c)) & 1); + } + } + } + } + static_assert(kOutputRowMajor, "Transpose here for ColMajor output"); + // Save dense with zeros + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < 8; ++i) { + cutlass::arch::global_store<Fragment, sizeof(Fragment)>( + lines[i], output + i * p.output_stride, true); + } +} +#endif + +Tensor _sparse_semi_structured_apply_dense( + const Tensor& input, + const Tensor& threads_masks) { + +#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080) + AT_ERROR("_sparse_semi_structured_apply_dense: not supported"); + return Tensor{}; +#else + TORCH_CHECK( + input.scalar_type() == at::ScalarType::Half || + input.scalar_type() == at::ScalarType::BFloat16, + "Unsupported `input` dtype"); + TORCH_CHECK( + input.stride(0) == 1 || input.stride(1) == 1, + "`input` should be either RowMajor or ColMajor. Invalid memory layout - try .contiguous()?"); + + auto roundedx = cutlass::round_up(input.size(0), kWarpX); + auto roundedy = cutlass::round_up(input.size(1), kWarpY); + + Params p; + p.input = (uint16_t const*)input.data_ptr(); + p.input_dim0 = input.size(0); + p.input_dim1 = input.size(1); + p.threads_masks = (uint64_t const*)threads_masks.data_ptr(); + + TORCH_CHECK(threads_masks.dim() == 3); + TORCH_CHECK(threads_masks.size(0) == p.getBlocksGrid().x * p.getThreadsGrid().x); + TORCH_CHECK(threads_masks.size(1) == p.getBlocksGrid().y * p.getThreadsGrid().y); + TORCH_CHECK(threads_masks.stride(1) == sizeof(p.threads_masks[0])); + TORCH_CHECK(threads_masks.size(2) == sizeof(p.threads_masks[0])); + TORCH_CHECK(threads_masks.stride(2) == 1); + TORCH_CHECK(threads_masks.scalar_type() == at::ScalarType::Byte); + + at::Tensor output = at::empty({p.input_dim0, p.input_dim1}, input.options()); + TORCH_INTERNAL_ASSERT(output.stride(-1) == 1, "expected RowMajor?"); + p.output = (uint16_t*)output.data_ptr(); + + bool inputRowMajor = input.stride(-1) == 1; + bool outputRowMajor = output.stride(-1) == 1; + p.input_stride = input.stride(inputRowMajor ? 0 : 1); + p.output_stride = output.stride(outputRowMajor ? 0 : 1); + at::cuda::CUDAGuard device_guard(input.device()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + size_t smem_bytes = 0; + if (inputRowMajor && outputRowMajor) { + sparse_semi_structured_apply_dense_k<true, true> + <<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p); + } else if (!inputRowMajor && outputRowMajor) { + sparse_semi_structured_apply_dense_k<false, true> + <<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p); + } else { + TORCH_CHECK( + false, + "Unsupported configuration: `input` is ", + inputRowMajor ? "RowMajor" : "ColMajor", + ", and `output` is ", + outputRowMajor ? 
"RowMajor" : "ColMajor"); + } + C10_CUDA_KERNEL_LAUNCH_CHECK(); + return output; +#endif +} + +} // namespace diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredPack.h b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredPack.h new file mode 100644 index 0000000000..95cf466a76 --- /dev/null +++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredPack.h @@ -0,0 +1,520 @@ +#pragma once + +#include <ATen/native/sparse/cuda/StaticSort.h> +#include <cutlass/arch/memory.h> +#include <cutlass/array.h> +#include <cutlass/bfloat16.h> +#include <cutlass/fast_math.h> +#include <cutlass/half.h> +#include <cutlass/integer_subbyte.h> + +namespace at::native { + +using cutlass::uint1b_t; +using cutlass::uint2b_t; +using cutlass::uint4b_t; +using uint8b_t = cutlass::integer_subbyte<8, false>; +using ReorderedLayoutInputE = cutlass::layout::ColumnMajorInterleaved<2>; +using ElementInputE = uint16_t; +constexpr int kWarpX = 32; +constexpr int kWarpY = 64; +constexpr int kThreadX = 8; +constexpr int kThreadY = 8; + +// bitmask of selected values, in col-major storage +// eg: indices & (1 << (col + 4 * row)) +using Indices4x4 = uint16_t; + +struct Tile8x8Masks { + Indices4x4 a, b, c, d; + CUTLASS_DEVICE Tile8x8Masks() { + a = b = c = d = 0; + } +}; + +static_assert(sizeof(Tile8x8Masks) == 8, "should be exactly uint64_t"); + +// Each thread has data for an 8x8 area of the input tensor +// Due to the very specific format of the metadata, 32 consecutive bits +// of the metadata tensor will live in 4 different threads. +// This functions does the required warp shuffling to send data to the +// right threads. +// This took some time to write (and get right), hopefully these slides +// can help +// https://docs.google.com/presentation/d/1DtmKThv8S5QAyBktuLRYzZhRzCvS1qSkBbrqNCjMPeA/edit#slide=id.g249eb2e2f2e_0_28 +CUTLASS_DEVICE uint32_t +warp_shuffle_meta(uint32_t meta_ab, bool transposed = false) { + // The required format is + // (one line = 32 bits) + // a[ 0, 0:16] a[ 8, 0:16] <- T0 [left] + // a[ 0, 16:32] a[ 8, 16:32] + // a[16, 0:16] a[24, 0:16] + // a[16, 16:32] a[24, 16:32] + // a[ 1, 0:16] a[ 9, 0:16] <- T4 + // a[ 1, 16:32] a[ 9, 16:32] + // a[17, 0:16] a[25, 0:16] + // a[17, 16:32] a[25, 16:32] + // a[ 2, 0:16] a[10, 0:16] <- T1 [left, bottom] + // a[ 2, 16:32] a[10, 16:32] + // a[18, 0:16] a[26, 0:16] + // a[18, 16:32] a[26, 16:32] + // a[ 3, 0:16] a[11, 0:16] <- T5 [bottom] + // a[ 3, 16:32] a[11, 16:32] + // a[19, 0:16] a[27, 0:16] + // a[19, 16:32] a[27, 16:32] + // ... + // Use warp-shuffles to send data around threads + bool thread_left = (threadIdx.y % 2) == 0; + bool thread_bottom = threadIdx.x % 2; + + if (transposed) { + thread_left = (threadIdx.x % 2) == 0; + thread_bottom = threadIdx.y % 2; + } + + uint8b_t stage0_data[2] = { + uint8b_t(meta_ab >> (8 * thread_left)), + uint8b_t(meta_ab >> (8 * (thread_left + 2)))}; + // shfl t0-t4 / t1-t5 + stage0_data[0] = + __shfl_xor_sync(0xffffffff, stage0_data[0], transposed ? 1 : 4); + stage0_data[1] = + __shfl_xor_sync(0xffffffff, stage0_data[1], transposed ? 1 : 4); + + uint16_t line0 = int(uint8b_t(meta_ab >> (8 * (1 - thread_left)))) + << ((1 - thread_left) * 8); + line0 |= int(stage0_data[0]) << (thread_left * 8); + uint16_t line1 = int(uint8b_t(meta_ab >> (8 * (1 - thread_left + 2)))) + << ((1 - thread_left) * 8); + line1 |= int(stage0_data[1]) << (thread_left * 8); + + uint16_t stage1_data = thread_bottom ? line0 : line1; + stage1_data = __shfl_xor_sync(0xffffffff, stage1_data, transposed ? 
4 : 1); + + uint32_t final_metadata; + if (thread_bottom) { + final_metadata = uint32_t(stage1_data) | uint32_t(line1) << 16; + } else { + final_metadata = uint32_t(stage1_data) << 16 | uint32_t(line0); + } + return final_metadata; +} + +CUTLASS_DEVICE void warp_shuffle_and_write_meta( + ElementInputE* metadata_quad, + uint32_t meta_ab, + bool transposed = false) { + bool thread_left = (threadIdx.y % 2) == 0; + bool thread_bottom = threadIdx.x % 2; + + if (transposed) { + thread_left = (threadIdx.x % 2) == 0; + thread_bottom = threadIdx.y % 2; + } + + uint32_t final_metadata = warp_shuffle_meta(meta_ab, transposed); + + int index = (!thread_left + 2 * thread_bottom) * 4; + ((uint32_t*)metadata_quad)[index] = final_metadata; +} + +template <typename Element_> +struct KernelTypes { + using Element = Element_; + using Fragment = + cutlass::Array<Element, 8>; // always read from gmem in chunks of 128bits + using Fragment4 = cutlass::Array<Element, 4>; + using ValuesPacked = cutlass::Array<Element, 8>; // 4 first col, 4 second col + + struct Params { + /// inputs + Element const* input; + int64_t input_s0; + int64_t input_dim0; + int64_t input_dim1; + + /// outputs + Element* packed; + int64_t packed_stride; + + Element* packed_trans; + int64_t packed_trans_stride; + + uint64_t* threads_masks; + + __host__ dim3 getBlocksGrid() const { + return dim3( + cutlass::ceil_div(input_dim0, kWarpX), + cutlass::ceil_div(input_dim1, kWarpY), + 1); + } + + static CUTLASS_HOST_DEVICE dim3 getThreadsGrid() { + return dim3(kWarpX / kThreadX, kWarpY / kThreadY, 1); + } + + CUTLASS_DEVICE Tile8x8Masks* getCurrentThreadIndices() const { + Tile8x8Masks* gmem_threads_masks = (Tile8x8Masks*)threads_masks; + gmem_threads_masks += blockIdx.y * getThreadsGrid().y + threadIdx.y; + int64_t strideX = gridDim.y * getThreadsGrid().y; + gmem_threads_masks += + (blockIdx.x * getThreadsGrid().x + threadIdx.x) * strideX; + return gmem_threads_masks; + } + }; + + struct Tile4x4Accessor { + using Element = Element_; + + Fragment (&_lines)[8]; + int _start_row; + int _start_col; + + CUTLASS_DEVICE Tile4x4Accessor( + Fragment (&lines)[8], + int start_row, + int start_col) + : _lines(lines), _start_row(start_row), _start_col(start_col) {} + + CUTLASS_DEVICE typename Fragment::reference at(int r, int c) { + return _lines[r + _start_row][c + _start_col]; + } + }; + + struct Tile4x4Packed { + Fragment4 values[2]; + CUTLASS_DEVICE Tile4x4Packed() { + values[0].clear(); + values[1].clear(); + } + }; + + // Returns a packed 4x4 tile (eg 2x4 values) which correspond to the values + // that are in `indices`. Also fills the `meta` array in the right format + // for consumption in the TensorCores. + // Example: + // indices: 0011 + // 1001 + // 1001 + // 0100 (<- note, only 1 value on the last line) + // packed: values[0][2] values[1][0] values[2][0] values[3][1] + // values[0][3] values[1][3] values[2][3] Element(0) + CUTLASS_DEVICE static Tile4x4Packed pack_4x4( + Indices4x4 indices, + Tile4x4Accessor tile, + uint32_t& meta, + int meta_pos, + bool transpose = false) { + Tile4x4Packed packed; + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < 4; ++row) { + uint2b_t col0_from, col1_from; + auto packValue = [&](uint2b_t col_to, uint2b_t col_from) { + auto value = transpose ? 
tile.at(col_from, row).get() + : tile.at(row, col_from).get(); + packed.values[col_to][row] = value; + if (col_to == uint2b_t(0)) { + col0_from = col_from; + } else { + col1_from = col_from; + } + }; + auto isSelected = [&](int col) { + if (transpose) { + return indices & (1 << (row + 4 * col)); + } + return indices & (1 << (col + 4 * row)); + }; + // Process cols 0/1 + // We know that col0 is always packed to position 0 if it's there + // and col1 is packed to pos 0 or 1 (depending if col0 is selected) + if (isSelected(1)) { + packValue(0, 1); + } + if (isSelected(0)) { + packValue(0, 0); + } + if (isSelected(0) && isSelected(1)) { + packValue(1, 1); + } + // Process cols 2/3 + // same sort of heuristic + if (isSelected(2)) { + packValue(1, 2); + } + if (isSelected(3)) { + packValue(1, 3); + } + if (isSelected(2) && isSelected(3)) { + packValue(0, 2); + } + int add_mask = (col0_from | (col1_from << 2)) << (8 * row + meta_pos); + meta |= add_mask; + } + return packed; + } + + struct Tile8x8Meta { + // meta_ab[row] |= (real_col << (8*row + 2*pos)) + uint32_t meta_ab; + uint32_t meta_cd; + + // meta_ac_trans[col] |= (real_row << (8*col + 2*pos)) + uint32_t meta_ac_trans; + uint32_t meta_bd_trans; + + CUTLASS_DEVICE Tile8x8Meta() { + meta_ab = meta_cd = meta_ac_trans = meta_bd_trans = 0; + } + }; + + CUTLASS_DEVICE static void writePacked( + Element* ptr, + Fragment4 packed0, + Fragment4 packed1) { + Fragment write; + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < 4; ++i) { + write[i] = packed0[i].get(); + write[i + 4] = packed1[i].get(); + } + cutlass::arch::global_store<Fragment, sizeof(Fragment)>(write, ptr, true); + } + + CUTLASS_DEVICE static void writePackedT( + Element* ptr, + int64_t stride, + Tile4x4Packed a, + Tile4x4Packed b) { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < 4; ++i) { + Fragment4 write; + write[0] = a.values[0][i].get(); + write[1] = a.values[1][i].get(); + write[2] = b.values[0][i].get(); + write[3] = b.values[1][i].get(); + cutlass::arch::global_store<Fragment4, sizeof(Fragment4)>( + write, ptr + i * stride, true); + } + } + + template <typename Algorithm, typename MetadataStore> + CUTLASS_DEVICE static void sparse_semi_structured_tile_kernel( + Params p, + MetadataStore metadata_gmem, + Algorithm compute_tile_indices) { + // Each thread is responsible for an 8x8 tile, which contains 4 4x4 tiles: + // A, B, C and D, as displayed in the following schema: + // +---+---+ + // | A | B | + // +---+---+ + // | C | D | + // +---+---+ + // Each warp (32 threads) will then be responsible for a 32x64 tile of the + // input. + // This configuration allows to read/write data in 128bits chunks. These + // memory accesses are coalesced at the warp-level into 128bytes. 
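For illustration (these figures follow from the constants above, kThreadX = kThreadY = 8 and a 32x64 warp tile):
 +    // each of the 32 threads issues 8 row-loads of 8 half/bfloat16 values, i.e. 16 bytes (128 bits) per load,
 +    // and the 8 threads sharing the same threadIdx.x cover one full 64-column row of the warp tile,
 +    // 8 * 16 = 128 contiguous bytes.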
See also: + // https://docs.google.com/presentation/d/1DtmKThv8S5QAyBktuLRYzZhRzCvS1qSkBbrqNCjMPeA/edit#slide=id.g2494f30c7cf_0_0 + + // Top-left of the 8x8 tile we own + int warp_x = blockIdx.x * kWarpX; + int warp_y = blockIdx.y * kWarpY; + int x = warp_x + threadIdx.x * kThreadX; + int y = warp_y + threadIdx.y * kThreadY; + + Element const* input = p.input + x * p.input_s0 + y; + Element* packed = p.packed + x * p.packed_stride + (y / 2); + Element* packed_trans = + p.packed_trans + (x / 2) + y * p.packed_trans_stride; + + Fragment lines[8]; // Contains all values from the 8x8 tile + + Tile8x8Meta metadata; + Tile8x8Masks indices; + + // Load/process tiles `A` and `B` + Element fillValue = Algorithm::template outOfBoundsFillValue<Element>(); + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < 4; ++i) { + lines[i].fill(fillValue); + cutlass::arch::global_load<Fragment, sizeof(Fragment)>( + lines[i], input + i * p.input_s0, x + i < p.input_dim0); + } + indices.a = compute_tile_indices(Tile4x4Accessor(lines, 0, 0)); + indices.b = compute_tile_indices(Tile4x4Accessor(lines, 0, 4)); + + // Compute packed tiles A & B + { + Tile4x4Packed packed_a = pack_4x4( + indices.a, Tile4x4Accessor(lines, 0, 0), metadata.meta_ab, 0); + Tile4x4Packed packed_b = pack_4x4( + indices.b, Tile4x4Accessor(lines, 0, 4), metadata.meta_ab, 4); + writePackedT(packed, p.packed_stride, packed_a, packed_b); + } + + // Compute/store packed tiles A & B in transpose output + Tile4x4Packed packed_trans_a = pack_4x4( + indices.a, + Tile4x4Accessor(lines, 0, 0), + metadata.meta_ac_trans, + 0, + true); + Tile4x4Packed packed_trans_b = pack_4x4( + indices.b, + Tile4x4Accessor(lines, 0, 4), + metadata.meta_bd_trans, + 0, + true); + // (NOTE) Now we no longer need A & B (`lines[0:4]`) + + // Load/process tiles `C` and `D` + CUTLASS_PRAGMA_UNROLL + for (int i = 4; i < 8; ++i) { + lines[i].fill(fillValue); + cutlass::arch::global_load<Fragment, sizeof(Fragment)>( + lines[i], input + i * p.input_s0, x + i < p.input_dim0); + } + indices.c = compute_tile_indices(Tile4x4Accessor(lines, 4, 0)); + indices.d = compute_tile_indices(Tile4x4Accessor(lines, 4, 4)); + + // Compute packed tiles C & D + { + Tile4x4Packed packed_c = pack_4x4( + indices.c, Tile4x4Accessor(lines, 4, 0), metadata.meta_cd, 0); + Tile4x4Packed packed_d = pack_4x4( + indices.d, Tile4x4Accessor(lines, 4, 4), metadata.meta_cd, 4); + writePackedT( + packed + 4 * p.packed_stride, p.packed_stride, packed_c, packed_d); + } + + // Compute/store packed tiles C & D in transpose output + Tile4x4Packed packed_trans_c = pack_4x4( + indices.c, + Tile4x4Accessor(lines, 4, 0), + metadata.meta_ac_trans, + 4, + true); + Tile4x4Packed packed_trans_d = pack_4x4( + indices.d, + Tile4x4Accessor(lines, 4, 4), + metadata.meta_bd_trans, + 4, + true); + + // Dump the metadata in a nice format + *p.getCurrentThreadIndices() = indices; + + // Store packed A, B, C & D for transposed matrix + writePackedT( + packed_trans, p.packed_trans_stride, packed_trans_a, packed_trans_c); + packed_trans += 4 * p.packed_trans_stride; + writePackedT( + packed_trans, p.packed_trans_stride, packed_trans_b, packed_trans_d); + + // Writing meta non-transposed + { + ElementInputE* packed_meta_reordered = metadata_gmem.get_metaN( + warp_x, threadIdx.x * kThreadX, warp_y, threadIdx.y * kThreadY); + warp_shuffle_and_write_meta(packed_meta_reordered, metadata.meta_ab); + warp_shuffle_and_write_meta(packed_meta_reordered + 32, metadata.meta_cd); + } + + // Writing meta transposed + { + ElementInputE* 
packed_trans_meta_reordered = metadata_gmem.get_metaT( + warp_x, threadIdx.x * kThreadX, warp_y, threadIdx.y * kThreadY); + warp_shuffle_and_write_meta( + packed_trans_meta_reordered, metadata.meta_ac_trans, true); + warp_shuffle_and_write_meta( + packed_trans_meta_reordered + 32, metadata.meta_bd_trans, true); + } + } + + CUTLASS_DEVICE static void sparse_semi_structured_apply_kernel(Params p) { + // See `sparse24_sparsify_both_ways_kernel` + // It's basically the same, just that we skip + // the part where compute the indices we keep + + // Top-left of the 8x8 tile we own + int warp_x = blockIdx.x * kWarpX; + int warp_y = blockIdx.y * kWarpY; + int x = warp_x + threadIdx.x * kThreadX; + int y = warp_y + threadIdx.y * kThreadY; + + Element const* input = p.input + x * p.input_s0 + y; + Element* packed = p.packed + x * p.packed_stride + (y / 2); + Element* packed_trans = + p.packed_trans + (x / 2) + y * p.packed_trans_stride; + + Fragment lines[8]; // Contains all values from the 8x8 tile + + Tile8x8Meta metadata; + Tile8x8Masks indices = *p.getCurrentThreadIndices(); + + // Load/process tiles `A` and `B` + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < 8; ++i) { + // NB: Values outside bounds is undefined, but shouldn't + // be used anywhere + cutlass::arch::global_load<Fragment, sizeof(Fragment)>( + lines[i], input + i * p.input_s0, x + i < p.input_dim0); + } + + // Compute packed tiles A & B + { + Tile4x4Packed packed_a = pack_4x4( + indices.a, Tile4x4Accessor(lines, 0, 0), metadata.meta_ab, 0); + Tile4x4Packed packed_b = pack_4x4( + indices.b, Tile4x4Accessor(lines, 0, 4), metadata.meta_ab, 4); + writePackedT(packed, p.packed_stride, packed_a, packed_b); + } + + // Compute/store packed tiles A & B in transpose output + Tile4x4Packed packed_trans_a = pack_4x4( + indices.a, + Tile4x4Accessor(lines, 0, 0), + metadata.meta_ac_trans, + 0, + true); + Tile4x4Packed packed_trans_b = pack_4x4( + indices.b, + Tile4x4Accessor(lines, 0, 4), + metadata.meta_bd_trans, + 0, + true); + // (NOTE) Now we no longer need A & B (`lines[0:4]`) + + // Compute packed tiles C & D + { + Tile4x4Packed packed_c = pack_4x4( + indices.c, Tile4x4Accessor(lines, 4, 0), metadata.meta_cd, 0); + Tile4x4Packed packed_d = pack_4x4( + indices.d, Tile4x4Accessor(lines, 4, 4), metadata.meta_cd, 4); + writePackedT( + packed + 4 * p.packed_stride, p.packed_stride, packed_c, packed_d); + } + + // Compute/store packed tiles C & D in transpose output + Tile4x4Packed packed_trans_c = pack_4x4( + indices.c, + Tile4x4Accessor(lines, 4, 0), + metadata.meta_ac_trans, + 4, + true); + Tile4x4Packed packed_trans_d = pack_4x4( + indices.d, + Tile4x4Accessor(lines, 4, 4), + metadata.meta_bd_trans, + 4, + true); + + // Store packed A, B, C & D for transposed matrix + writePackedT( + packed_trans, p.packed_trans_stride, packed_trans_a, packed_trans_c); + packed_trans += 4 * p.packed_trans_stride; + writePackedT( + packed_trans, p.packed_trans_stride, packed_trans_b, packed_trans_d); + } +}; + +} // namespace at::native diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredTile.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredTile.cu new file mode 100644 index 0000000000..fd5a04fa61 --- /dev/null +++ b/aten/src/ATen/native/sparse/cuda/SparseSemiStructuredTile.cu @@ -0,0 +1,312 @@ +#include <ATen/ScalarOps.h> +#include <ATen/Functions.h> +#include <ATen/Tensor.h> +#include <ATen/autocast_mode.h> +#include <c10/cuda/CUDAGuard.h> +#include <ATen/ATen.h> +#include <ATen/core/Tensor.h> +#include <ATen/cuda/CUDAUtils.h> +#include 
<ATen/Dispatch.h> + +#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080) +#else +#include <ATen/native/sparse/cuda/ComputeSparseTile.h> +#include <ATen/native/sparse/cuda/SparseSemiStructuredPack.h> +#include <cuda_runtime.h> +#endif + +namespace at::native { + +#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080) +#else +struct MetadataCuSparseLt { + // Format used by cuSparseLt + // This is based on reverse-engineering, for a visual illustration: + // https://docs.google.com/presentation/d/1DtmKThv8S5QAyBktuLRYzZhRzCvS1qSkBbrqNCjMPeA/edit#slide=id.g29afe95bda8_0_0 + static constexpr int kStrideBlock32x32 = (32 * 32) / (sizeof(ElementInputE) * 8); + + ElementInputE* _meta; + ElementInputE* _meta_trans; + int64_t _rows; + int64_t _cols; + + static int64_t getMetadataSize(int rows, int cols) + { + TORCH_CHECK(rows % 128 == 0 && cols % 128 == 0, "Only supports rows/cols multiples of 128"); + // 1 bit per dense value + return (rows * cols) / (8 * sizeof(ElementInputE)); + } + + // < return value of the function, packed, packed_meta > + static std::tuple<Tensor, Tensor, Tensor> create_compressed_representation(int rows, int cols, at::Tensor const& like) + { + TORCH_CHECK( + like.scalar_type() == at::ScalarType::Half || + like.scalar_type() == at::ScalarType::BFloat16); + constexpr int kBytesPerScalar = 2; + int64_t data_scalars = rows * cutlass::ceil_div(cols, 2); + int64_t meta_scalars = getMetadataSize(rows, cols); + + at::Tensor storage = at::empty( + {(data_scalars + meta_scalars)}, + at::TensorOptions().device(like.device()).dtype(like.dtype())); + + using at::indexing::Slice; + using at::indexing::None; + at::Tensor packed = storage.index({Slice(None, data_scalars)}) + .view({rows, cutlass::ceil_div(cols, 2)}); + at::Tensor metadata = storage.index({Slice(data_scalars, None)}); + // TODO: Cast metadata to Short + static_assert(kBytesPerScalar == 2, "or modify the last dim below"); + metadata = metadata.view({rows / 128, cols / 32, 256}); + return std::make_tuple(storage, packed, metadata); + } + + MetadataCuSparseLt(at::Tensor metaN, at::Tensor metaT, int rows, int cols) { + _meta = (ElementInputE*)metaN.data_ptr(); + _meta_trans = (ElementInputE*)metaT.data_ptr(); + _rows = rows; + _cols = cols; + } + CUTLASS_HOST_DEVICE + static int64_t _get_meta_offset( + int warp_row, + int thread_row, + int warp_col, + int thread_col, + int totalRows) { + int64_t offset = 0; + // warp-level: Find the 128x64 tile + offset += (warp_row / 128) * (kStrideBlock32x32 * 8); + offset += (warp_col / 64) * (kStrideBlock32x32 * 8) * (totalRows / 128); + // Find the 32x32 tile inside + offset += (((warp_row + thread_row) % 128) / 32) * kStrideBlock32x32; + offset += (((warp_col + thread_col) % 64) / 32) * (kStrideBlock32x32 * 4); + // Inside the 32x32 tile + offset += (warp_row % 32) * 2; + // Top/bottom 16x16 tile + offset += ((thread_row % 32) / 16) * 4; + // Left/right 16x16 tile + offset += ((thread_col % 32) / 16) * 2; + return offset; + } + CUTLASS_HOST_DEVICE + ElementInputE* get_metaN( + int warp_row, + int thread_row, + int warp_col, + int thread_col) const { + return _meta + + _get_meta_offset(warp_row, thread_row, warp_col, thread_col, _rows); + } + CUTLASS_HOST_DEVICE + ElementInputE* get_metaT( + int warp_row, + int thread_row, + int warp_col, + int thread_col) const { + return _meta_trans + + _get_meta_offset(warp_col, thread_col, warp_row, thread_row, _cols); + } +}; + +struct MetadataCutlass { + // Layout needed 
to run 2:4 gemms in CUTLASS + // There is basically a hardware specific value for every + // 32x32 dense tile (1024 bits). Then these tiles are + // stored in a Column-Major fashion + ElementInputE* _meta; + ElementInputE* _meta_trans; + int64_t _meta_reordered_sy; + int64_t _meta_trans_reordered_sx; + + static std::tuple< + at::Tensor, // return value of the function + at::Tensor, // packed + at::Tensor // packed_meta + > + create_compressed_representation(int rows, int cols, at::Tensor const& like) { + TORCH_CHECK( + like.scalar_type() == at::ScalarType::Half || + like.scalar_type() == at::ScalarType::BFloat16); + auto roundedx = cutlass::round_up(rows, kWarpX); + auto roundedy = cutlass::round_up(cols, kWarpY); + + // NB: Writing to `packed` tensors in transposed manner + at::Tensor packed = + at::empty({roundedx, cutlass::ceil_div(roundedy, 2)}, like.options()); + at::Tensor packed_meta = at::empty( + {roundedx * roundedy / 16}, + like.options().dtype(at::ScalarType::Short)) + .view({roundedy / 32, roundedx, 2}) + .permute({1, 2, 0}); + return std::make_tuple(packed, packed, packed_meta); + } + MetadataCutlass(at::Tensor metaN, at::Tensor metaT, int rows, int cols) { + _meta = (ElementInputE*)metaN.data_ptr(); + _meta_reordered_sy = metaN.stride(2); + _meta_trans = (ElementInputE*)metaT.data_ptr(); + _meta_trans_reordered_sx = metaT.stride(2); + } + CUTLASS_HOST_DEVICE + int64_t _get_meta_offset( + int warp_row, + int thread_row, + int warp_col, + int thread_col, + int64_t stride) const { + int64_t offset = 0; + offset += warp_row * 2 + (warp_col / 32) * stride; + // A single warp is 32x64. The right 32x32 tile is at a different position + offset += 64 * (thread_row / 32); + offset += (thread_col / 32) * stride; + // Top/bottom 16x16 tile + offset += ((thread_row % 32) / 16) * 4; + // Left/right 16x16 tile + offset += ((thread_col % 32) / 16) * 2; + return offset; + } + CUTLASS_HOST_DEVICE + ElementInputE* get_metaN( + int warp_row, + int thread_row, + int warp_col, + int thread_col) const { + return _meta + + _get_meta_offset( + warp_row, thread_row, warp_col, thread_col, _meta_reordered_sy); + } + CUTLASS_HOST_DEVICE + ElementInputE* get_metaT( + int warp_row, + int thread_row, + int warp_col, + int thread_col) const { + return _meta_trans + + _get_meta_offset( + warp_col, + thread_col, + warp_row, + thread_row, + _meta_trans_reordered_sx); + } +}; + +template <typename KT, typename Metadata, typename Algorithm> +__global__ void __launch_bounds__(32 /* num_threads */, 20) + sparse_semi_structured_tile_kernel( + typename KT::Params p, + Metadata metadata, + Algorithm algo) { + KT::sparse_semi_structured_tile_kernel(p, metadata, algo); +} + +template <typename Element, typename MetadataFormat> +std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> sparse_semi_structured_tile_typed( + const at::Tensor input, + std::string algorithm) +{ + using KT = KernelTypes<Element>; + c10::optional<at::cuda::CUDAGuard> device_guard; + if (!input.is_meta()) { + device_guard.emplace(input.device()); + } + + TORCH_CHECK(input.dim() == 2, "Can only sparsify 2d tensors"); + TORCH_CHECK( + input.stride(1) == 1, + "Can only sparsify contiguous tensors. 
Sparsify the transpose otherwise."); + + auto rows = input.size(0); + auto cols = input.size(1); + + auto [compressed, packed, packed_meta_reordered] = + MetadataFormat::create_compressed_representation(rows, cols, input); + auto [compressed_trans, packed_trans, packed_trans_meta_reordered] = + MetadataFormat::create_compressed_representation(cols, rows, input); + TORCH_CHECK( + input.size(1) % 32 == 0, "Number of cols should be multiple of 32"); + + typename KT::Params p; + p.input = (Element const*)input.data_ptr(); + p.input_s0 = input.stride(0); + p.input_dim0 = input.size(0); + p.input_dim1 = input.size(1); + + p.packed = (Element*)packed.data_ptr(); + p.packed_stride = packed.stride(0); + p.packed_trans = (Element*)packed_trans.data_ptr(); + p.packed_trans_stride = packed_trans.stride(0); + + MetadataFormat metadata = MetadataFormat( + packed_meta_reordered, packed_trans_meta_reordered, rows, cols); + at::Tensor threads_masks = at::empty( + {p.getBlocksGrid().x * p.getThreadsGrid().x, + p.getBlocksGrid().y * p.getThreadsGrid().y, + sizeof(p.threads_masks[0])}, + input.options().dtype(at::ScalarType::Byte)); + p.threads_masks = (uint64_t*)threads_masks.data_ptr(); + + bool kernel_launched = false; + auto launchKernel = [&](auto algo, std::string const& algo_name) { + if (algo_name == algorithm) { + kernel_launched = true; + if (input.is_meta()) { + return; + } + size_t smem_bytes = 0; + sparse_semi_structured_tile_kernel<KT> + <<<p.getBlocksGrid(), + p.getThreadsGrid(), + smem_bytes, + at::cuda::getCurrentCUDAStream()>>>(p, metadata, algo); + } + }; + named_algorithms(launchKernel); + TORCH_CHECK(kernel_launched, "Unknown algorithm \"", algorithm, "\""); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + return std::make_tuple( + compressed, + packed_meta_reordered, + compressed_trans, + packed_trans_meta_reordered, + threads_masks); +} +#endif + +// <packed, packed_meta_reordered, packed_trans, packed_trans_meta_reorderd, threads_masks> +std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _sparse_semi_structured_tile( + const Tensor& input, + c10::string_view algorithm, + bool use_cutlass) +{ +#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080) + AT_ERROR("_sparse_semi_structured_tile: not supported"); + return std::make_tuple(Tensor{}, Tensor{}, Tensor{}, Tensor{}, Tensor{}); +#else + std::string algo(algorithm.data(), algorithm.size()); + + auto runTyped = [&](auto type) + { + using ElementT = decltype(type); + if (use_cutlass) { + return sparse_semi_structured_tile_typed<ElementT, MetadataCutlass>(input, algo); + } + else { + return sparse_semi_structured_tile_typed<ElementT, MetadataCuSparseLt>(input, algo); + } + }; + + if (input.scalar_type() == at::ScalarType::Half) + { + return runTyped(cutlass::half_t()); + } else { + TORCH_CHECK( + input.scalar_type() == at::ScalarType::Half || + input.scalar_type() == at::ScalarType::BFloat16, input.scalar_type()); + return runTyped(cutlass::bfloat16_t()); + } +#endif +} + +} // namespace at::native diff --git a/aten/src/ATen/native/sparse/cuda/SparseSemiSturcturedApply.cu b/aten/src/ATen/native/sparse/cuda/SparseSemiSturcturedApply.cu new file mode 100644 index 0000000000..023e8f7393 --- /dev/null +++ b/aten/src/ATen/native/sparse/cuda/SparseSemiSturcturedApply.cu @@ -0,0 +1,107 @@ +#include <ATen/ScalarOps.h> +#include <ATen/Tensor.h> +#include <ATen/Functions.h> +#include <ATen/Utils.h> +#include <c10/cuda/CUDAGuard.h> +#include <c10/util/accumulate.h> + +#if defined(USE_ROCM) || defined(_MSC_VER) || 
(defined(CUDA_VERSION) && CUDA_VERSION < 11080) +#else +#include <ATen/native/sparse/cuda/SparseSemiStructuredPack.h> +#endif + +namespace at::native { + +#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080) +#else +template <typename KT> +__global__ void __launch_bounds__(32 /* num_threads */) + sparse_semi_structured_apply_kernel(typename KT::Params p) +{ + KT::sparse_semi_structured_apply_kernel(p); +} + +// Apply a 2:4 sparsify pattern computed with +// `_sparse_semi_structured_tile` to another Tensor +template <bool kIsMeta, typename Element> +std::tuple<Tensor, Tensor> _sparse_semi_structured_apply_typed(Tensor input, Tensor threads_masks) +{ + using KT = KernelTypes<Element>; + // TODO: Technically we should be able to deal with that + // by running on the transpose of `input` and swapping + // `packed` & `packed_t`. + // This would require to adapt the `threads_masks` a bit tho. + if (input.stride(1) != 1) { + input = input.contiguous(); + } + c10::optional<at::cuda::CUDAGuard> device_guard; + if (!kIsMeta) { + device_guard.emplace(input.device()); + } + + TORCH_CHECK(input.dim() == 2); + TORCH_CHECK(input.stride(1) == 1); + TORCH_CHECK(input.stride(0) % 8 == 0); + TORCH_CHECK(input.size(1) % 32 == 0, "Wrong alignment shape[1]"); + + auto roundedx = cutlass::round_up(input.size(0), kWarpX); + auto roundedy = cutlass::round_up(input.size(1), kWarpY); + at::Tensor packed = + at::empty({roundedx, cutlass::ceil_div(roundedy, 2)}, input.options()); + at::Tensor packed_trans = + at::empty({roundedy, cutlass::ceil_div(roundedx, 2)}, input.options()); + + typename KT::Params p; + p.input = (Element const*)input.data_ptr(); + p.input_s0 = input.stride(0); + p.input_dim0 = input.size(0); + p.input_dim1 = input.size(1); + + p.packed = (Element*)packed.data_ptr(); + p.packed_stride = packed.stride(0); + p.packed_trans = (Element*)packed_trans.data_ptr(); + p.packed_trans_stride = packed_trans.stride(0); + + p.threads_masks = (uint64_t*)threads_masks.data_ptr(); + + TORCH_CHECK(threads_masks.dim() == 3); + TORCH_CHECK( + threads_masks.size(0) == p.getBlocksGrid().x * p.getThreadsGrid().x); + TORCH_CHECK( + threads_masks.size(1) == p.getBlocksGrid().y * p.getThreadsGrid().y); + TORCH_CHECK(threads_masks.stride(1) == sizeof(p.threads_masks[0])); + TORCH_CHECK(threads_masks.size(2) == sizeof(p.threads_masks[0])); + TORCH_CHECK(threads_masks.stride(2) == 1); + TORCH_CHECK(threads_masks.scalar_type() == at::ScalarType::Byte); + + if (!kIsMeta) { + size_t smem_bytes = 0; + sparse_semi_structured_apply_kernel<KT> + <<<p.getBlocksGrid(), + p.getThreadsGrid(), + smem_bytes, + at::cuda::getCurrentCUDAStream()>>>(p); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + return std::make_tuple(packed, packed_trans); +} +#endif + +std::tuple<Tensor, Tensor> _sparse_semi_structured_apply(const Tensor& input, const Tensor& threads_masks) // Returned by `_sparse_semi_structured_tile` +{ +#if defined(USE_ROCM) || defined(_MSC_VER) || (defined(CUDA_VERSION) && CUDA_VERSION < 11080) + AT_ERROR("_sparse_semi_structured_apply: not supported"); + return std::make_tuple(Tensor{}, Tensor{}); +#else + TORCH_CHECK( + input.scalar_type() == at::ScalarType::Half || input.scalar_type() == at::ScalarType::BFloat16, + "Unsupported dtype - only `float16` and `bfloat16` are supported currently" + ); + auto result = (input.scalar_type() == at::ScalarType::Half) + ? 
_sparse_semi_structured_apply_typed<false, cutlass::half_t>(input, threads_masks) + : _sparse_semi_structured_apply_typed<false, cutlass::bfloat16_t>(input, threads_masks); + return result; +#endif +} + +} // namespace diff --git a/aten/src/ATen/native/sparse/cuda/StaticSort.h b/aten/src/ATen/native/sparse/cuda/StaticSort.h new file mode 100644 index 0000000000..e2fc5675f6 --- /dev/null +++ b/aten/src/ATen/native/sparse/cuda/StaticSort.h @@ -0,0 +1,100 @@ +#pragma once +#include <cutlass/cutlass.h> + +/** + * A Functor class to create a sort for fixed sized arrays/containers with a + * compile time generated Bose-Nelson sorting network. + * \tparam NumElements The number of elements in the array or container to + * sort. \tparam T The element type. \tparam Compare A + * comparator functor class that returns true if lhs < rhs. + */ +template <unsigned NumElements> +class StaticSort { + template <class A> + struct Swap { + template <class T> + CUTLASS_HOST_DEVICE void s(T& v0, T& v1) { + // Explicitly code out the Min and Max to nudge the compiler + // to generate branchless code. + T t = v0 < v1 ? v0 : v1; // Min + v1 = v0 < v1 ? v1 : v0; // Max + v0 = t; + } + + CUTLASS_HOST_DEVICE Swap(A& a, const int& i0, const int& i1) { + s(a[i0], a[i1]); + } + }; + + template <class A, int I, int J, int X, int Y> + struct PB { + CUTLASS_HOST_DEVICE PB(A& a) { + enum { + L = X >> 1, + M = (X & 1 ? Y : Y + 1) >> 1, + IAddL = I + L, + XSubL = X - L + }; + PB<A, I, J, L, M> p0(a); + PB<A, IAddL, J + M, XSubL, Y - M> p1(a); + PB<A, IAddL, J, XSubL, M> p2(a); + } + }; + + template <class A, int I, int J> + struct PB<A, I, J, 1, 1> { + CUTLASS_HOST_DEVICE PB(A& a) { + Swap<A> s(a, I - 1, J - 1); + } + }; + + template <class A, int I, int J> + struct PB<A, I, J, 1, 2> { + CUTLASS_HOST_DEVICE PB(A& a) { + Swap<A> s0(a, I - 1, J); + Swap<A> s1(a, I - 1, J - 1); + } + }; + + template <class A, int I, int J> + struct PB<A, I, J, 2, 1> { + CUTLASS_HOST_DEVICE PB(A& a) { + Swap<A> s0(a, I - 1, J - 1); + Swap<A> s1(a, I, J - 1); + } + }; + + template <class A, int I, int M, bool Stop = false> + struct PS { + CUTLASS_HOST_DEVICE PS(A& a) { + enum { L = M >> 1, IAddL = I + L, MSubL = M - L }; + PS<A, I, L, (L <= 1)> ps0(a); + PS<A, IAddL, MSubL, (MSubL <= 1)> ps1(a); + PB<A, I, IAddL, L, MSubL> pb(a); + } + }; + + template <class A, int I, int M> + struct PS<A, I, M, true> { + CUTLASS_HOST_DEVICE PS(A& a) {} + }; + + public: + /** + * Sorts the array/container arr. + * \param arr The array/container to be sorted. + */ + template <class Container> + CUTLASS_HOST_DEVICE void operator()(Container& arr) const { + PS<Container, 1, NumElements, (NumElements <= 1)> ps(arr); + }; + + /** + * Sorts the array arr. + * \param arr The array to be sorted. 
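+   * Illustrative usage (example values, not from the original):
 +   *   float vals[4] = {3.f, 1.f, 4.f, 2.f};
 +   *   StaticSort<4>{}(vals);  // sorts in place; intended to be branchless,
 +   *                           // the generated network for 4 elements performs 5 compare-and-swaps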
+ */ + template <class T> + CUTLASS_HOST_DEVICE void operator()(T* arr) const { + PS<T*, 1, NumElements, (NumElements <= 1)> ps(arr); + }; +}; diff --git a/test/expect/HasDecompTest.test_has_decomposition.expect b/test/expect/HasDecompTest.test_has_decomposition.expect index 79a3455713..8fbdc431f4 100644 --- a/test/expect/HasDecompTest.test_has_decomposition.expect +++ b/test/expect/HasDecompTest.test_has_decomposition.expect @@ -524,8 +524,11 @@ aten::_sparse_mask_projection.out aten::_sparse_mm_reduce_impl aten::_sparse_mm_reduce_impl_backward aten::_sparse_semi_structured_addmm +aten::_sparse_semi_structured_apply +aten::_sparse_semi_structured_apply_dense aten::_sparse_semi_structured_linear aten::_sparse_semi_structured_mm +aten::_sparse_semi_structured_tile aten::_sparse_softmax aten::_sparse_softmax.out aten::_sparse_softmax_backward_data diff --git a/test/test_sparse_semi_structured.py b/test/test_sparse_semi_structured.py index a09e2647eb..625f067da4 100644 --- a/test/test_sparse_semi_structured.py +++ b/test/test_sparse_semi_structured.py @@ -5,6 +5,7 @@ import unittest import torch from torch import nn +import torch.nn.functional as F from torch.sparse import ( SparseSemiStructuredTensor, @@ -13,6 +14,12 @@ from torch.sparse import ( to_sparse_semi_structured, ) +from torch.sparse._semi_structured_conversions import ( + sparse_semi_structured_from_dense_cutlass, + _sparse_semi_structured_tile, + _compute_compressed_swizzled_bitmask, +) + from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( @@ -32,28 +39,48 @@ from torch.testing._internal.common_utils import ( IS_WINDOWS, ) -from torch.utils._triton import has_triton +import pytest -CUSPARSELT_NUM_ALG_IDS = 4 -CUSPARSELT_MIXED_DTYPE_SUPPORT = [torch.float16, torch.bfloat16, torch.int32] +from torch.utils._triton import has_triton SEMI_STRUCTURED_SUPPORTED_DTYPES = [torch.float16, torch.bfloat16, torch.float32, torch.int8] -SEMI_STRUCTURED_SUPPORTED_BACKENDS = [] +SEMI_STRUCTURED_SUPPORTED_BACKENDS = {} _IS_SM8X = False + if torch.cuda.is_available(): _IS_SM8X = torch.cuda.get_device_capability(0)[0] == 8 - SEMI_STRUCTURED_SUPPORTED_BACKENDS.append("cutlass") + SEMI_STRUCTURED_SUPPORTED_BACKENDS["cutlass"] = SparseSemiStructuredTensorCUTLASS # check if cslt is available for now using this: # TODO when we add cusparselt as a backend, we can update this to be use torch.cusparselt.is_available() try: torch._cslt_compress(torch.ones(128, 256).cuda()) - SEMI_STRUCTURED_SUPPORTED_BACKENDS.append("cusparselt") + SEMI_STRUCTURED_SUPPORTED_BACKENDS["cusparselt"] = SparseSemiStructuredTensorCUSPARSELT except Exception: pass +inference_dtypes = dtypes(torch.float16, torch.bfloat16, torch.float32, torch.int8) +training_dtypes = dtypes(torch.float16, torch.bfloat16) +parametrize_backends = parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + +atol_rtol_kw = { + torch.float16: { + "rtol": 1e-3, + "atol": 1e-3, + }, + torch.bfloat16: { + "rtol": 1e-1, + "atol": 1e-1, + }, +} +def sparse24_largest_mask_2d(original): + sparse = SparseSemiStructuredTensorCUTLASS.prune_dense_static_sort(original) + return sparse.to_dense().bool() + +def sparsify24_dense(original): + return sparse24_largest_mask_2d(original) * original def rand_sparse_semi_structured_mask( r, c, dtype=torch.float16, device="cuda", choice=None @@ -97,6 +124,7 @@ def rand_sparse_semi_structured(r, c, dtype, device, choice=None): dense = dense.masked_fill(~mask, 0) return dense + def rand_sparse_semi_structured_all_patterns(r, c, dtype, 
device): pattern = '2by4' if dtype != torch.float32 else '1by2' if pattern == '1by2': @@ -171,8 +199,6 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase): x = x.contiguous() return torch.nn.functional.relu(x) - SparseSemiStructuredTensor._FORCE_CUTLASS = backend == "cutlass" - input = torch.rand(dense_input_shape, device="cuda").half() model = Model().eval().cuda().half() mod_linear = model.linear @@ -182,7 +208,7 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase): mod_linear.weight = nn.Parameter(mod_linear.weight * mask) dense_result = model(input) - mod_linear.weight = nn.Parameter(to_sparse_semi_structured(mod_linear.weight)) + mod_linear.weight = nn.Parameter(SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend].from_dense(mod_linear.weight)) sparse_result = model(input) model = torch.compile(model, backend="inductor", fullgraph=True) @@ -213,20 +239,36 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase): SparseSemiStructuredTensorCompileTest._test_mlp_contiguous_relu_compile("cutlass", dense_input_shape) + @unittest.skipIf(IS_WINDOWS, "torch.compile not supported on windows") + @unittest.skipIf("cusparselt" not in SEMI_STRUCTURED_SUPPORTED_BACKENDS, "cusparselt not supported on this machine") + def test_sp24_compile(self) -> None: + x = torch.randn([1024, 512], device="cuda", dtype=torch.float16, requires_grad=True) + e = torch.eye(x.shape[0], x.shape[0], device="cuda", dtype=torch.float16) + + def fn(x, e): + y = SparseSemiStructuredTensorCUSPARSELT.prune_dense_static_sort(x) + y = y.t() + return x @ y + + # Eager + output = fn(x, e) + output.backward(output) + # Torch compile + output = torch.compile(fn)(x, e) + output.backward(output) + class TestSparseSemiStructured(TestCase): def setUp(self): if not _IS_SM8X: self.skipTest('Only runs on SM80') + if IS_WINDOWS: + self.skipTest("torch.compile not supported on windows") - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @inference_dtypes + @parametrize_backends def test_to_sparse_semi_structured(self, dtype, backend): SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") - - if backend == "cutlass" and IS_WINDOWS: - self.skipTest("CUTLASS not supported on Windows") - A = rand_sparse_semi_structured_mask(128, 256, dtype=dtype) A_sparse = to_sparse_semi_structured(A) @@ -237,18 +279,14 @@ class TestSparseSemiStructured(TestCase): assert isinstance(A, torch.Tensor) assert isinstance(A_sparse, SparseSemiStructuredTensor) - - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) + @inference_dtypes + @parametrize_backends @parametrize("dense_input_shape", [(128, 1), (128, 64), (128, 128)]) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) def test_mm_sparse_first_NN(self, dense_input_shape, dtype, device, backend): """ Ensure torch.mm(A_sparse, B) is correct for float16 and will throw error for int8 """ SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") - if backend == "cutlass" and IS_WINDOWS: - self.skipTest("CUTLASS not supported on Windows") - A = rand_sparse_semi_structured_mask(256, 128, dtype=dtype) A_sparse = to_sparse_semi_structured(A) @@ -256,7 +294,6 @@ class TestSparseSemiStructured(TestCase): # Currently we don't support int matmul on GPU, so evaluate on CPU and copy over if dtype is torch.int8: - # This should fail if backend == "cutlass": with self.assertRaisesRegex(RuntimeError, "spgemm_cutlass_dispatch_layouts"): sparse_result = torch.mm(A_sparse, B) @@ 
-269,18 +306,15 @@ class TestSparseSemiStructured(TestCase): sparse_result = torch.mm(A_sparse, B) assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) + @inference_dtypes + @parametrize_backends @parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)]) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) def test_mm_sparse_first_NT(self, dense_input_shape, dtype, device, backend): """ Ensure torch.mm(A_sparse, B.t()) is correct for float16/bfloat16 and will throw an error for int8 + padding """ SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") - if backend == "cutlass" and IS_WINDOWS: - self.skipTest("CUTLASS not supported on Windows") - A = rand_sparse_semi_structured_mask(256, 128, dtype=dtype) A_sparse = to_sparse_semi_structured(A) @@ -308,9 +342,9 @@ class TestSparseSemiStructured(TestCase): sparse_result = torch.mm(A_sparse, B.t()) assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) + @inference_dtypes @parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)]) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @parametrize_backends def test_mm_sparse_first_TN(self, dtype, dense_input_shape, device, backend): """ Ensure torch.mm(A_sparse.t(), B) throws error @@ -329,9 +363,9 @@ class TestSparseSemiStructured(TestCase): ): torch.mm(A_sparse.t(), B) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) + @inference_dtypes @parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)]) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @parametrize_backends def test_mm_sparse_second_NT(self, dense_input_shape, dtype, device, backend): """ Ensure torch.mm(A, B_sparse.t()) is correct @@ -354,9 +388,9 @@ class TestSparseSemiStructured(TestCase): assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) + @inference_dtypes @parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128)]) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @parametrize_backends def test_mm_sparse_second_NN(self, dense_input_shape, dtype, device, backend): """ Ensure torch.mm(A, B_sparse) throws error @@ -377,7 +411,7 @@ class TestSparseSemiStructured(TestCase): @parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128), (64, 128, 128)]) @parametrize("inference_mode", [subtest(True), subtest(False)]) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @parametrize_backends def test_linear(self, dense_input_shape, inference_mode, device, backend): """ Test nn.Linear has the same numerics @@ -405,11 +439,9 @@ class TestSparseSemiStructured(TestCase): assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3) @parametrize("dense_input_shape", [(1, 128), (64, 128), (128, 128), (64, 128, 128)]) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @parametrize_backends def test_mlp(self, device, dense_input_shape, backend): - SparseSemiStructuredTensor._FORCE_CUTLASS = backend == "cutlass" - if backend == "cutlass" and IS_WINDOWS: - self.skipTest("CUTLASS not supported on Windows") + SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") input = torch.rand(dense_input_shape, device=device).half() model = ( nn.Sequential( @@ -437,7 +469,7 @@ class TestSparseSemiStructured(TestCase): assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3) - @parametrize("backend", 
SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @parametrize_backends def test_values(self, backend): SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") if backend == "cutlass" and IS_WINDOWS: @@ -447,7 +479,7 @@ class TestSparseSemiStructured(TestCase): assert A_sparse.values().shape == (128, 64) assert (A_sparse.values() == 1).all() - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @parametrize_backends def test_indices(self, backend): SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") if backend == "cutlass" and IS_WINDOWS: @@ -456,16 +488,11 @@ class TestSparseSemiStructured(TestCase): A_sparse = to_sparse_semi_structured(A) assert A_sparse.indices().shape == (128, 8) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @inference_dtypes + @parametrize_backends def test_min_sparse_shape(self, dtype, device, backend): SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") - if backend == "cutlass" and IS_WINDOWS: - self.skipTest("CUTLASS not supported on Windows") - if backend == "cutlass": - config = SparseSemiStructuredTensorCUTLASS._DTYPE_SHAPE_CONSTRAINTS[dtype] - elif backend == "cusparselt": - config = SparseSemiStructuredTensorCUSPARSELT._DTYPE_SHAPE_CONSTRAINTS[dtype] + config = SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend]._DTYPE_SHAPE_CONSTRAINTS[dtype] A = rand_sparse_semi_structured_mask(config.sparse_min_rows, config.sparse_min_cols, dtype=dtype, device=device) A_sparse = to_sparse_semi_structured(A) B = torch.rand((config.sparse_min_cols, config.dense_min_cols), device=device).to(dtype) @@ -479,8 +506,8 @@ class TestSparseSemiStructured(TestCase): sparse_res = torch.mm(A_sparse, B) assert torch.allclose(sparse_res, dense_res, rtol=1e-3, atol=1e-3) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @inference_dtypes + @parametrize_backends def test_unsupported_shape(self, dtype, device, backend): SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") if backend == "cutlass" and IS_WINDOWS: @@ -490,7 +517,7 @@ class TestSparseSemiStructured(TestCase): A_sparse = to_sparse_semi_structured(A) @dtypes(*all_types_and_complex()) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @parametrize_backends def test_unsupported_dtype(self, dtype, device, backend): SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") if backend == "cutlass" and IS_WINDOWS: @@ -503,7 +530,7 @@ class TestSparseSemiStructured(TestCase): else: A_sparse = to_sparse_semi_structured(A) - @parametrize("backend", SEMI_STRUCTURED_SUPPORTED_BACKENDS) + @parametrize_backends def test_unsupported_dim(self, device, backend): SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") if backend == "cutlass" and IS_WINDOWS: @@ -513,13 +540,325 @@ class TestSparseSemiStructured(TestCase): with self.assertRaisesRegex(RuntimeError, "Error original_tensor.dim"): A_sparse = to_sparse_semi_structured(A) + +def create_random_mask(shape) -> torch.Tensor: + r = random.Random(0) + mask = torch.zeros(shape, dtype=torch.bool) + for line in range(mask.shape[0]): + for col in range(0, mask.shape[1], 4): + sparsity = r.choice( + [ + [False, False, True, True], + [False, True, False, True], + [True, False, False, True], + [False, True, True, False], + [True, False, True, False], + [True, True, False, False], + ] + ) + mask[line, col : col + 4] = torch.tensor(sparsity, dtype=torch.bool) + return mask + +class 
TestSparseSemiStructuredTraining(TestCase): + + def setUp(self): + if not _IS_SM8X: + self.skipTest('Only runs on SM80') + if IS_WINDOWS: + self.skipTest('CUTLASS not supported on windows') + + + @training_dtypes + def test_prune_dense_static_sort(self, dtype) -> None: + # Ideally we would like to clone and compare, but that won't work because the sorting order will be different + # instead we pass the pruned matrix to the CUDA implementation and preserve the sparsity pattern. + dense = torch.randn(128, 128, device="cuda", dtype=dtype) + pruned = _sparse_semi_structured_tile(dense) + + # CUTLASS + reference_cutlass = SparseSemiStructuredTensorCUTLASS.prune_dense_static_sort(pruned, algorithm="largest_abs_values_greedy") + assert torch.allclose(pruned, reference_cutlass.to_dense()) + + packed_cutlass, meta_cutlass = sparse_semi_structured_from_dense_cutlass(pruned) + packed_t_cutlass, meta_t_cutlass = sparse_semi_structured_from_dense_cutlass(pruned.t().contiguous()) + meta_cutlass = meta_cutlass.as_strided(reference_cutlass.meta.shape, reference_cutlass.meta.stride()) + meta_t_cutlass = meta_t_cutlass.as_strided(reference_cutlass.meta_t.shape, reference_cutlass.meta_t.stride()) + compressed_swizzled_bitmask = _compute_compressed_swizzled_bitmask(pruned) + compressed_swizzled_bitmask = compressed_swizzled_bitmask.as_strided(reference_cutlass.compressed_swizzled_bitmask.shape, + reference_cutlass.compressed_swizzled_bitmask.stride()) + cutlass = SparseSemiStructuredTensorCUTLASS(dense.shape, + packed_cutlass, + meta_cutlass, + packed_t_cutlass, + meta_t_cutlass, + compressed_swizzled_bitmask) + assert torch.allclose(reference_cutlass.to_dense(), cutlass.to_dense()) + + # CUSPARSELT + reference_cusparselt = SparseSemiStructuredTensorCUSPARSELT.prune_dense_static_sort(pruned, + algorithm="largest_abs_values_greedy") + assert torch.allclose(pruned, reference_cusparselt.to_dense()) + + packed_cusparselt = torch._cslt_compress(pruned) + packed_t_cusparselt = torch._cslt_compress(pruned.t().contiguous()) + cusparselt = SparseSemiStructuredTensorCUSPARSELT(dense.shape, + packed_cusparselt, + None, + packed_t_cusparselt, + None, + compressed_swizzled_bitmask) + assert torch.allclose(reference_cusparselt.to_dense(), cusparselt.to_dense()) + + + + @training_dtypes + @parametrize_backends + def test_pruning_algo_largest_abs_values_greedy(self, dtype, backend) -> None: + inp = torch.tensor( + [[4, 3, 2, 1], [-1, -3, 0.6, 0.5], [1, 2, 3, 4], [10, 2, -1, 5]], + device="cuda", + dtype=dtype, + ) + inp = F.pad(inp, (0, 128 - 4, 0, 128 - 4), "constant", 1) + sInp = SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend].prune_dense_static_sort(inp, algorithm="largest_abs_values_greedy") + + mask = sInp.to_dense() / inp + assert mask[:4, :4].int().tolist() == [ + [1, 1, 0, 0], + [0, 1, 1, 0], + [0, 0, 1, 1], + [1, 0, 0, 1], + ] + + @training_dtypes + def test_gemm(self, dtype) -> None: + M, N, K = 32, 32, 64 + a = torch.randn([M, K], device="cuda", dtype=dtype) + b = torch.randn([K, N], device="cuda", dtype=dtype) + mask = rand_sparse_semi_structured_mask(M, K, dtype=torch.bool) + + a.masked_fill_(~mask, 0) + + a_sparse = to_sparse_semi_structured(a) + + masked_a = a * mask + ref_out = masked_a @ b + sp24_out = a_sparse @ b + assert torch.allclose(ref_out, sp24_out, **atol_rtol_kw[dtype]) + + + @training_dtypes + @parametrize_backends + def test_pack_both_ways_meta_correctness(self, dtype, backend) -> None: + M, N = 128, 256 + # Construct x to make sure we always have exactly 8 elements per 4x4 tile + a = (4 * 
torch.arange(8))[:, None] + torch.arange(8)[None, :] + a = a.repeat(M // 8, N // 8) + assert a.shape == (M, N) + a = a.cuda().to(dtype) + b = torch.randn([a.shape[1], 128], device="cuda", dtype=dtype) + + a_sparse = SEMI_STRUCTURED_SUPPORTED_BACKENDS[backend].prune_dense_static_sort(a) + + mask_dense = sparse24_largest_mask_2d(a).to(dtype) + + if backend == "cutlass": + assert isinstance(a_sparse, SparseSemiStructuredTensorCUTLASS) + (packed, meta, packed_t, meta_t, bitmask) = torch._sparse_semi_structured_tile( + mask_dense, use_cutlass=True) + + sparse_mask = SparseSemiStructuredTensorCUTLASS( + mask_dense.shape, + packed=packed, + meta=meta, + packed_t=packed_t, + meta_t=meta_t, + compressed_swizzled_bitmask=bitmask, + ) + assert torch.allclose(a_sparse.meta.view(torch.short), sparse_mask.meta) + + ref_gemm = (mask_dense * a) @ b + pack_gemm = a_sparse @ b + assert torch.allclose(ref_gemm, pack_gemm, **atol_rtol_kw[dtype]) + + @training_dtypes + def test_pack_both_ways_id(self, dtype) -> None: + N = 512 + torch.manual_seed(0) + a = torch.randn([N, N], dtype=dtype, device="cuda") + b = torch.eye(N, dtype=dtype, device="cuda") + + packed, meta, packed_t, meta_t = torch._sparse_semi_structured_tile(a)[ + :4 + ] + # Heuristic to ensure we pack the same values + assert torch.allclose( + packed.to(torch.float64).sum(), packed_t.to(torch.float64).sum() + ) + + mask_dense = sparse24_largest_mask_2d(a.to(dtype)) + + ref_gemm = mask_dense * a + # Test A@B + pack_gemm = torch._sparse_semi_structured_linear(b.t(), packed, meta).t() + max_diff = (ref_gemm - pack_gemm).abs().argmax() + assert torch.allclose( + ref_gemm, pack_gemm, + **atol_rtol_kw[dtype] + ), f"packed is wrong at pos: ({max_diff // N}, {max_diff % N})" + # Test A.t@B + pack_gemm = torch._sparse_semi_structured_linear(b.t(), packed_t, meta_t) + max_diff = (ref_gemm - pack_gemm).abs().argmax() + + assert torch.allclose( + ref_gemm, pack_gemm, + **atol_rtol_kw[dtype] + ), f"packed_t is wrong at pos: ({max_diff // N}, {max_diff % N})" + + @training_dtypes + def test_pack_both_ways_edge_case1(self, dtype) -> None: + # In this case, the heuristic will keep 7 values out of 16 + # instead of 8. 
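+        # the missing eighth value is expected to come back as an explicit 0 in `packed` (see the assertions below);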
let's see how the kernel handles this + quad = torch.tensor( + [ + [2, -1, -2, -3], # Should be packed as `2 <null>` + [-1, 8, -1, 6], + [-1, -1, 4, 5], + [-1, 3, 7, -1], + ], + dtype=dtype, + device="cuda", + ) + a = torch.randn([32, 64], dtype=dtype, device="cuda") + a[:4, :4] = quad + packed, meta, packed_t, meta_t = torch._sparse_semi_structured_tile(a)[:4] + # Check first line in A + assert packed[0, 0].item() == 2 + assert packed[0, 1].item() == 0 + # And first column in A.t + assert packed_t[0, 0].item() == 2 + assert packed_t[0, 1].item() == 0 + + @training_dtypes + def test_sp24_apply(self, dtype) -> None: + M, N = 256, 1024 + x = torch.randn([M, N], dtype=dtype, device="cuda") + ( + packed, + meta, + packed_t, + meta_t, + bitmask, + ) = torch._sparse_semi_structured_tile(x) + packed2, packed_t2 = torch._sparse_semi_structured_apply(x, bitmask) + assert torch.allclose(packed, packed2) + assert torch.allclose(packed_t, packed_t2) + + @training_dtypes + def test_sp24_apply_dense(self, dtype) -> None: + M, N = 256, 1024 + x = torch.randn([M, N], dtype=dtype, device="cuda") + ( + packed, + meta, + packed_t, + meta_t, + bitmask, + ) = torch._sparse_semi_structured_tile(x) + + expected = SparseSemiStructuredTensorCUTLASS( + x.shape, + packed=packed, + meta=meta, + packed_t=packed_t, + meta_t=meta_t, + compressed_swizzled_bitmask=bitmask, + ).to_dense() + + packed2, packed_t2 = torch._sparse_semi_structured_apply(x, bitmask) + sparse = SparseSemiStructuredTensorCUTLASS( + x.shape, + packed=packed2, + meta=meta, + packed_t=packed_t2, + meta_t=meta_t, + compressed_swizzled_bitmask=bitmask, + ) + + dense = torch._sparse_semi_structured_apply_dense(x, bitmask) + + assert torch.allclose(dense, expected) + assert torch.allclose(sparse.to_dense(), expected) + + + @training_dtypes + def test_sp24_matmuls(self, dtype) -> None: + M, N, K = 64, 256, 1024 + a = torch.randn([M, K], device="cuda", dtype=dtype) + b = torch.randn([K, N], device="cuda", dtype=dtype) + a_m = sparse24_largest_mask_2d(a) + b_m = sparse24_largest_mask_2d(b) + (packed, meta, packed_t, meta_t, bitmask) = torch._sparse_semi_structured_tile(a) + a_s = SparseSemiStructuredTensorCUTLASS( + a.shape, + packed=packed, + meta=meta, + packed_t=packed_t, + meta_t=meta_t, + compressed_swizzled_bitmask=bitmask, + ) + (packed, meta, packed_t, meta_t, bitmask) = torch._sparse_semi_structured_tile(b) + b_s = SparseSemiStructuredTensorCUTLASS( + b.shape, + packed=packed, + meta=meta, + packed_t=packed_t, + meta_t=meta_t, + compressed_swizzled_bitmask=bitmask, + ) + + assert torch.allclose(a_s @ b, (a * a_m) @ b, rtol=1e-1, atol=1e-1) + assert torch.allclose(a @ b_s, a @ (b * b_m), rtol=1e-1, atol=1e-1) + assert torch.allclose( + a @ a_s.t(), a @ (a * a_m).t(), rtol=1e-1, atol=1e-1 + ) + assert torch.allclose( + a_s.t() @ a, (a * a_m).t() @ a, rtol=1e-1, atol=1e-1 + ) + + def test_sp24_matmuls_mat_vec(self) -> None: + a = torch.randn([64, 128], device="cuda", dtype=torch.float16) + b = torch.randn([128], device="cuda", dtype=torch.float16) + a_m = sparse24_largest_mask_2d(a) + a_s = to_sparse_semi_structured(a) + + with pytest.raises(NotImplementedError): + assert torch.allclose(a_s @ b, (a * a_m) @ b, **atol_rtol_kw[a.dtype]) + + + def test_sp24_matmuls_bmm(self) -> None: + a = torch.randn([64, 128], device="cuda", dtype=torch.float16) + b = torch.randn([5, 6, 128], device="cuda", dtype=torch.float16) + a_m = sparse24_largest_mask_2d(a) + a_s = to_sparse_semi_structured(a) + + with pytest.raises(NotImplementedError): + assert 
torch.allclose(a_s @ b, (a * a_m) @ b, **atol_rtol_kw[a.dtype]) + +class TestSparseSemiStructuredCUTLASS(TestCase): + """ + This contains CUTLASS specific tests for + - torch._sparse_semi_structured_linear + """ + def setUp(self): + if not _IS_SM8X: + self.skipTest('Only runs on SM80') + if "cutlass" not in SEMI_STRUCTURED_SUPPORTED_BACKENDS: + self.skipTest('CUTLASS not enabled') + @unittest.skipIf(TEST_WITH_ROCM or IS_WINDOWS, "ROCm and Windows doesn't support CUTLASS") - @parametrize("backend", ["cutlass"]) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) - def test_linear_cutlass(self, device, dtype, backend): - SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") - if backend == "cutlass" and IS_WINDOWS: - self.skipTest("CUTLASS not supported on Windows") + @inference_dtypes + def test_linear_cutlass(self, device, dtype): def run_test(batch_shape, m, n, k, device, dtype, dtype_out, add_bias, activation, rtol, atol): weight = rand_sparse_semi_structured(m, k, dtype, device) @@ -643,12 +982,8 @@ class TestSparseSemiStructured(TestCase): @unittest.skipIf(not has_triton(), "Test needs triton and recent GPU arch") - @parametrize("backend", ["cutlass"]) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) - def test_conversions(self, device, dtype, backend): - SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") - if backend == "cutlass" and IS_WINDOWS: - self.skipTest("CUTLASS not supported on Windows") + @inference_dtypes + def test_conversions(self, device, dtype): def run_test(r, c, device, dtype): dense_ref = rand_sparse_semi_structured(r, c, dtype, device) @@ -675,12 +1010,8 @@ class TestSparseSemiStructured(TestCase): run_test(r, c, device, dtype) @unittest.skipIf(not has_triton(), "Test needs triton and recent GPU arch") - @parametrize("backend", ["cutlass"]) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) - def test_conversions_all_patterns(self, device, dtype, backend): - SparseSemiStructuredTensor._FORCE_CUTLASS = (backend == "cutlass") - if backend == "cutlass" and IS_WINDOWS: - self.skipTest("CUTLASS not supported on Windows") + @inference_dtypes + def test_conversions_all_patterns(self, device, dtype): r, c = 32, 128 dense_inv, dense_val = rand_sparse_semi_structured_all_patterns(r, c, dtype, device) @@ -690,18 +1021,23 @@ class TestSparseSemiStructured(TestCase): torch.testing.assert_close(dense, dense_val, rtol=0, atol=0) -class TestCUSPARSELT(TestCase): + + +CUSPARSELT_NUM_ALG_IDS = 4 +CUSPARSELT_MIXED_DTYPE_SUPPORT = [torch.float16, torch.bfloat16, torch.int32] + + +class TestSparseSemiStructuredCUSPARSELT(TestCase): """ - This contains cuSPARSELt specific tests. 
+ This contains cuSPARSELt specific tests for + torch._cslt_compress + torch._cslt_sparse_mm """ - def setUp(self): if not _IS_SM8X: self.skipTest('Only runs on SM80') if "cusparselt" not in SEMI_STRUCTURED_SUPPORTED_BACKENDS: self.skipTest('cuSPARSELt not enabled') - else: - SparseSemiStructuredTensor._FORCE_CUTLASS = False @parametrize("out_dtype", CUSPARSELT_MIXED_DTYPE_SUPPORT) @parametrize("dense_input_shape", [(128, 128)]) @@ -715,7 +1051,7 @@ class TestCUSPARSELT(TestCase): sparse_result = torch._cslt_sparse_mm(A_compressed, B.t(), out_dtype=out_dtype) assert torch.allclose(dense_result, sparse_result, rtol=1e-3, atol=1e-3) - @dtypes(torch.float16, torch.bfloat16) + @training_dtypes def test_cslt_sparse_mm_alpha(self, dtype, device): A = torch.Tensor([0, 0, 1, 1]).tile((128, 64)).to(dtype).cuda() B = torch.ones((256, 128), device=device).to(dtype) @@ -747,7 +1083,7 @@ class TestCUSPARSELT(TestCase): assert torch.allclose(sparse_result, dense_result, rtol=1e-3, atol=1e-3) @parametrize("alg_id", range(CUSPARSELT_NUM_ALG_IDS)) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) + @inference_dtypes def test_cslt_sparse_mm_alg_id(self, device, dtype, alg_id): # alg_id=3 not supported for float32 dtype if dtype == torch.float32 and alg_id == 3: @@ -764,7 +1100,7 @@ class TestCUSPARSELT(TestCase): assert torch.allclose(sparse_result, dense_result, rtol=1e-3, atol=1e-3) - @dtypes(*SEMI_STRUCTURED_SUPPORTED_DTYPES) + @inference_dtypes def test_cslt_sparse_mm_search(self, device, dtype): A = rand_sparse_semi_structured_mask(128, 128, dtype=dtype) A_compressed = torch._cslt_compress(A) @@ -777,9 +1113,10 @@ class TestCUSPARSELT(TestCase): # in cuSPARSELt v0.5.0 there are only 4 alg_ids total, so we should remove the +1 here when we update. assert alg_id in range(CUSPARSELT_NUM_ALG_IDS + 1) - instantiate_device_type_tests(TestSparseSemiStructured, globals(), only_for="cuda") -instantiate_device_type_tests(TestCUSPARSELT, globals(), only_for="cuda") +instantiate_device_type_tests(TestSparseSemiStructuredCUTLASS, globals(), only_for="cuda") +instantiate_device_type_tests(TestSparseSemiStructuredCUSPARSELT, globals(), only_for="cuda") +instantiate_device_type_tests(TestSparseSemiStructuredTraining, globals(), only_for="cuda") if __name__ == "__main__": run_tests() diff --git a/torch/sparse/_semi_structured_conversions.py b/torch/sparse/_semi_structured_conversions.py index c487b15149..5203ad245b 100644 --- a/torch/sparse/_semi_structured_conversions.py +++ b/torch/sparse/_semi_structured_conversions.py @@ -1,20 +1,22 @@ import torch -# This is PyTorch implementation of main part of reorder_meta() -# function, from tools/util/include/cutlass/util/host_reorder.h file -# of CUTLASS source tree. Furthermore, CUTLASS template for sparse -# GEMM decides upon layout of this matrix, and at the moment for the -# sparse GEMM executed on tensor cores, this is layout described by -# ColumnMajorInterleaved<2> data structure, in -# include/cutlass/layout/matrix.h of CUTLASS source tree. The -# reordering of meta matrix into meta_reordered matrix calculated -# according to these segments of CUTLASS code is re-implemented here. -# Note that this calculation produces offsets for scattering metadata -# matrix elements into reordered metadata matrix elements (or, -# equivalently, for gathering reordered metadata matrix element back -# into metadata matrix elements). 
def _calculate_meta_reordering_scatter_offsets(m, meta_ncols, meta_dtype, device): + """ + This is PyTorch implementation of main part of reorder_meta() + function, from tools/util/include/cutlass/util/host_reorder.h file + of CUTLASS source tree. Furthermore, CUTLASS template for sparse + GEMM decides upon layout of this matrix, and at the moment for the + sparse GEMM executed on tensor cores, this is layout described by + ColumnMajorInterleaved<2> data structure, in + include/cutlass/layout/matrix.h of CUTLASS source tree. The + reordering of meta matrix into meta_reordered matrix calculated + according to these segments of CUTLASS code is re-implemented here. + Note that this calculation produces offsets for scattering metadata + matrix elements into reordered metadata matrix elements (or, + equivalently, for gathering reordered metadata matrix element back + into metadata matrix elements). + """ dst_rows = torch.arange(0, m, device=device)[:, None].repeat(1, meta_ncols) dst_cols = torch.arange(0, meta_ncols, device=device).repeat(m, 1) @@ -41,10 +43,12 @@ def _calculate_meta_reordering_scatter_offsets(m, meta_ncols, meta_dtype, device return (cols_maj * m * interleave + dst_rows * interleave + cols_min).view(-1) -# This function converts dense matrix into sparse semi-structured -# representation, producing "compressed" matrix, in the layout used by -# CUTLASS backend, and corresponding metadata matrix. def sparse_semi_structured_from_dense_cutlass(dense): + """ + This function converts dense matrix into sparse semi-structured + representation, producing "compressed" matrix, in the layout used by + CUTLASS backend, and corresponding metadata matrix. + """ if dense.dim() != 2: raise RuntimeError( f"Expected 2-dimensional dense tensor, got {dense.dim()}-dimensional tensor" @@ -172,11 +176,13 @@ def sparse_semi_structured_from_dense_cutlass(dense): return (sparse, meta_reordered.view(m, meta_ncols)) -# This function performs reverse of the function above - it -# reconstructs dense matrix from a pair of "compressed" matrix, given -# in the layout used by CUTLASS backend, and accompanying metadata -# matrix. def sparse_semi_structured_to_dense_cutlass(sparse, meta_reordered): + """ + This function performs reverse of the function above - it + reconstructs dense matrix from a pair of "compressed" matrix, given + in the layout used by CUTLASS backend, and accompanying metadata + matrix. + """ if sparse.dim() != 2: raise RuntimeError( f"Expected 2-dimensional sparse tensor, got {sparse.dim()}-dimensional tensor" @@ -273,3 +279,73 @@ def sparse_semi_structured_to_dense_cutlass(sparse, meta_reordered): ) return dense.view(m, 2 * k) + + +def _sparse_semi_structured_tile(dense): + """ + This function computes a 2:4 sparse tile by greedily taking the largest values. + + Since we take the largest values greedily, how the sorting algorithm handles duplicates affects + the ultimate sparsity pattern. + + Note that this function does not have the same sorting semantics as our CUDA backend, + which is exposed via `torch._sparse_semi_structured_tile` and thus returns a different pattern. 
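+    Worked example (derived from the greedy rule below, values chosen for illustration): for the 4x4 tile
 +    [[4, 3, 2, 1], [8, 7, 6, 5], [12, 11, 10, 9], [16, 15, 14, 13]], visiting entries in descending order and
 +    keeping each one while its row and its column both still have fewer than two kept entries produces the
 +    mask [[0, 0, 1, 1], [0, 0, 1, 1], [1, 1, 0, 0], [1, 1, 0, 0]], i.e. exactly two survivors per row and per
 +    column of the tile.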
+ """ + + def greedy_prune_tile(tile): + num_kept_row = [0, 0, 0, 0] + num_kept_col = [0, 0, 0, 0] + + for x in tile.flatten().sort(descending=True, stable=True).indices: + r, c = x // 4, x % 4 + if num_kept_row[r] < 2 and num_kept_col[c] < 2: + num_kept_row[r] += 1 + num_kept_col[c] += 1 + else: + tile[r, c] = 0 + + for batch in dense.unfold(0, 4, 4).unfold(1, 4, 4): + for tile in batch: + greedy_prune_tile(tile) + + return dense + + +def _compute_compressed_swizzled_bitmask(dense): + """ + Calculates the compressed swizzled bitmask from a dense tensor + """ + + # first we need to convert the dense tensor to a bitmask + int_bitmask = dense.bool().to(torch.uint8) + + # Each thread is responsible for an 8x8 tile, which contains 4 4x4 tiles: + # A, B, C and D, as displayed in the following schema: + # +---+---+ + # | A | B | + # +---+---+ + # | C | D | + # +---+---+ + + # we first need to split into the 8x8 tiles + bitmask_8x8_chunks = int_bitmask.unfold(0, 8, 8).unfold(1, 8, 8) + + # then we unfold again to get our indivdual 4x4 tiles + bitmask_4x4_chunks = bitmask_8x8_chunks.unfold(2, 4, 4).unfold(3, 4, 4) + + # Each 4x4 bitmask defines two 8-bit integers, which encode the sparsity pattern + # of that tile. Note that the least siginificant bit is stored first. + # [1 1 0 0] + # [1 1 0 0] -> 0011 0011 -> 51 + # [0 0 1 1] 1100 1100 204 + # [0 0 1 1] + + # reshape tensor to expand tiles into 8-bit vectors + bitmask_binary_representation = bitmask_4x4_chunks.reshape(*bitmask_4x4_chunks.shape[:2], 4, 2, 8) + + # to convert from binary representaiton, we can do a matmul with powers of two + powers_of_two = 2**torch.arange(8, dtype=torch.float, device="cuda") + # To run on GPU: cast to float to do matmul and then cast back + compressed_swizzled_bitmask = (bitmask_binary_representation.to(torch.float) @ powers_of_two).to(torch.uint8) + + return compressed_swizzled_bitmask diff --git a/torch/sparse/_semi_structured_ops.py b/torch/sparse/_semi_structured_ops.py index eaa609b342..551111b429 100644 --- a/torch/sparse/_semi_structured_ops.py +++ b/torch/sparse/_semi_structured_ops.py @@ -70,8 +70,8 @@ def semi_sparse_t(func, types, args=(), kwargs=None) -> torch.Tensor: meta=self.meta_t, packed_t=self.packed, meta_t=self.meta, - threads_masks=self.threads_masks.transpose(0, 1) - if self.threads_masks is not None + compressed_swizzled_bitmask=self.compressed_swizzled_bitmask.transpose(0, 1) + if self.compressed_swizzled_bitmask is not None else None, fuse_transpose_cusparselt=args[0].fuse_transpose_cusparselt, alg_id_cusparselt=args[0].alg_id_cusparselt, @@ -97,7 +97,7 @@ def semi_sparse_detach(func, types, args, kwargs) -> torch.Tensor: meta=self.meta, packed_t=self.packed_t, meta_t=self.meta_t, - threads_masks=self.threads_masks, + compressed_swizzled_bitmask=self.compressed_swizzled_bitmask, requires_grad=False, ) diff --git a/torch/sparse/semi_structured.py b/torch/sparse/semi_structured.py index 7c86b0d43b..587fcc0d72 100644 --- a/torch/sparse/semi_structured.py +++ b/torch/sparse/semi_structured.py @@ -5,7 +5,7 @@ from typing import Any, Optional, Tuple, List, Callable, Dict import torch from torch.sparse._semi_structured_conversions import ( sparse_semi_structured_from_dense_cutlass, - sparse_semi_structured_to_dense_cutlass, + sparse_semi_structured_to_dense_cutlass ) from torch.sparse._semi_structured_ops import ( fallback_dispatcher, @@ -56,17 +56,18 @@ class SparseSemiStructuredTensor(torch.Tensor): _FUSE_TRANSPOSE: bool = False _PROTOTYPE_WARNING_SHOWN: bool = False + BACKEND: str 
SPARSE_DISPATCH: Dict[Callable, Callable] packed: Optional[torch.Tensor] meta: Optional[torch.Tensor] packed_t: Optional[torch.Tensor] meta_t: Optional[torch.Tensor] - threads_masks: Optional[torch.Tensor] + compressed_swizzled_bitmask: Optional[torch.Tensor] fuse_transpose_cusparselt: bool alg_id_cusparselt: int - __slots__ = ["packed", "meta", "packed_t", "meta_t", "threads_masks"] + __slots__ = ["packed", "meta", "packed_t", "meta_t", "compressed_swizzled_bitmask"] @staticmethod def __new__( # noqa: PYI034 @@ -76,7 +77,7 @@ class SparseSemiStructuredTensor(torch.Tensor): meta: Optional[torch.Tensor], packed_t: Optional[torch.Tensor], meta_t: Optional[torch.Tensor], - threads_masks: Optional[torch.Tensor], + compressed_swizzled_bitmask: Optional[torch.Tensor], fuse_transpose_cusparselt: bool = False, alg_id_cusparselt: int = 0, requires_grad: bool = False, @@ -95,8 +96,8 @@ class SparseSemiStructuredTensor(torch.Tensor): meta: The metadata of the original dense tensor, if it is stored separately packed_t: The compressed representation of the transposed original dense tensor meta_t: The metadata of the transposed original dense tensor, if it is stored separately - threads_masks: The masks used by the CUTLASS backend to determine which threads should participate in the computation. - Used for pointwise ops. + compressed_swizzled_bitmask: The masks used by the CUTLASS backend to determine which threads should + participate in the computation. Used for pointwise ops. fuse_transpose_cusparselt: When running with cuSPARSELt, we have the option to fuse a transposition with a matmul, which is useful in the case of 2:4 sparse training. alg_id_cusparselt: The algorithm id to use when using cuSPARSELT, will have effect on performance @@ -124,6 +125,9 @@ class SparseSemiStructuredTensor(torch.Tensor): # But this is useful since it allows users to overload the dispatch table for debugging / testing. cls._load_dispatch_table() + # we can also register the classes with dynamo when the warning is shown. + torch._dynamo.allow_in_graph(cls) + if packed is not None: previous_tensor = packed elif packed_t is not None: @@ -143,7 +147,7 @@ class SparseSemiStructuredTensor(torch.Tensor): tensor.meta = meta tensor.packed_t = packed_t tensor.meta_t = meta_t - tensor.threads_masks = threads_masks + tensor.compressed_swizzled_bitmask = compressed_swizzled_bitmask tensor.fuse_transpose_cusparselt = fuse_transpose_cusparselt tensor.alg_id_cusparselt = alg_id_cusparselt return tensor @@ -181,7 +185,7 @@ class SparseSemiStructuredTensor(torch.Tensor): meta=inner_tensors.get("meta", None), packed_t=inner_tensors.get("packed_t", None), meta_t=inner_tensors.get("meta_t", None), - threads_masks=inner_tensors.get("threads_masks", None), + compressed_swizzled_bitmask=inner_tensors.get("compressed_swizzled_bitmask", None), fuse_transpose_cusparselt=fuse_transpose_cusparselt, alg_id_cusparselt=alg_id_cusparselt, requires_grad=requires_grad, @@ -216,6 +220,7 @@ class SparseSemiStructuredTensor(torch.Tensor): torch.ops.aten.matmul: semi_sparse_mm, torch.ops.aten.addmm: semi_sparse_addmm, torch.ops.aten.linear: semi_sparse_linear, + torch.ops.aten._to_copy: fallback_dispatcher, } if custom_dispatch_table is not None: cls.SPARSE_DISPATCH.update(custom_dispatch_table) @@ -359,13 +364,14 @@ def to_sparse_semi_structured( "SparseSemiStructuredTensor only support contiguous input tensors. 
" ) - sparse_subclass = ( + # set from _FORCE_CUTLASS flag + SPARSE_SUBCLASS = ( torch.sparse.SparseSemiStructuredTensorCUTLASS if SparseSemiStructuredTensor._FORCE_CUTLASS else torch.sparse.SparseSemiStructuredTensorCUSPARSELT ) - return sparse_subclass.from_dense(original_tensor) + return SPARSE_SUBCLASS.from_dense(original_tensor) class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor): """ @@ -378,7 +384,7 @@ class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor): When _FORCE_CUTLASS is set, or when cuSPARSELt is not available, this subclass calls into _sparse_semi_structured_(mm|addmm) and sparse_semi_structured_from_dense for conversion to the compressed format. """ - + BACKEND = "cutlass" _DTYPE_SHAPE_CONSTRAINTS = { torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 128, 16, 16), torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 64, 8, 8), @@ -401,19 +407,71 @@ class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor): meta=meta_tensor_cutlass, packed_t=None, meta_t=None, - threads_masks=None, + compressed_swizzled_bitmask=None, requires_grad=original_tensor.requires_grad, ) def to_dense(self): assert self.meta is not None and self.packed is not None - return ( - sparse_semi_structured_to_dense_cutlass( - self.packed, - self.meta, - ) - if self.meta.ndim == 2 - else super().to_dense() + return sparse_semi_structured_to_dense_cutlass( + self.packed, + self.meta, + ) if self.meta.ndim == 2 else super().to_dense() + + @classmethod + def prune_dense_static_sort(cls, original_tensor : torch.Tensor, algorithm="") -> "SparseSemiStructuredTensor": + """ + This function takes in a unpruned dense tensor and runs a (branchless) static sort across a 4x4 tile. + + It greedily picks the largest values in the tile, upholding the 2:4 sparsity constraint across both rows and columns. + The algorithm used to prune the matrix is implemented in `_sparse_semi_structured_tile`. + + Then it creates the packed and meta tensors for the compressed sparse representation of the pruned dense tensor. + It also calculates the packed_t and meta_t tensors for the compressed sparse representation of the transposed + pruned dense tensor. + Since we cannot transpose the compressed representations, we store both for the fw/bw pass respectively. + + Finally, this function also computes a compressed swizzled bitmask that encodes the sparsity pattern + This can be used in the backward pass to mask the gradients. 
+ + [9 1 7 4] [9 0 7 0] + [1 2 3 0] [0 2 0 0] + [8 3 5 4] -> prune 4x4 tile -> [8 0 0 4] -> pack to CUTLASS semi-structured -> packed + [1 2 6 2] [0 0 6 2] -> metadata + + -> pack to transposed CUTLASS -> packed_t + semi-structured representation -> metadata_t + + -> compute swizzled bitmask -> compressed_swizzled_bitmask + + + The equivalent PyTorch code to create the same five outputs from the dense tensor can be found below: + ``` + from torch.sparse import SparseSemiStructuredTensorCUTLASS + from torch.sparse._semi_structured_conversions import _sparse_semi_structured_tile, _compute_compressed_swizzled_bitmask + + pruned = _sparse_semi_structured_tile(dense) + packed_cutlass, meta_cutlass = sparse_semi_structured_from_dense_cutlass(pruned) + packed_t_cutlass, meta_t_cutlass = sparse_semi_structured_from_dense_cutlass(pruned.t().contiguous()) + bitmask = _compute_compressed_swizzled_bitmask(pruned) + + SparseSemiStructuredTensorCUTLASS(dense.shape, packed_cutlass, meta_cutlass, packed_t_cutlass, meta_t_cutlass, bitmask) + ``` + """ + # We can either pack to the CUTLASS or cuSPARSELt representation, depending on the use_cutlass flag. + (packed, meta, packed_t, meta_t, compressed_swizzled_bitmask) = torch._sparse_semi_structured_tile( + original_tensor, + algorithm=algorithm, + use_cutlass=True) + + return cls( + original_tensor.shape, + packed=packed, + meta=meta, + packed_t=packed_t, + meta_t=meta_t, + compressed_swizzled_bitmask=compressed_swizzled_bitmask, + requires_grad=False, ) def _mm( @@ -459,7 +517,7 @@ class SparseSemiStructuredTensorCUSPARSELT(SparseSemiStructuredTensor): cuSPARSELt also supports transposition fusion, which is necessary for performant 2:4 sparse training, as well as specifying alg_id, a config that affects the performance of the matmul depending on matmul sizes. """ - + BACKEND = "cusparselt" _DTYPE_SHAPE_CONSTRAINTS = { torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 16, 16), torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 16, 8, 8), @@ -476,12 +534,59 @@ class SparseSemiStructuredTensorCUSPARSELT(SparseSemiStructuredTensor): meta=None, packed_t=None, meta_t=None, - threads_masks=None, + compressed_swizzled_bitmask=None, fuse_transpose_cusparselt=SparseSemiStructuredTensor._FUSE_TRANSPOSE, alg_id_cusparselt=SparseSemiStructuredTensor._DEFAULT_ALG_ID, requires_grad=original_tensor.requires_grad, ) + @classmethod + def prune_dense_static_sort(cls, original_tensor : torch.Tensor, algorithm="") -> "SparseSemiStructuredTensor": + """ + This function does the same thing as described in SparseSemiStructuredCUTLASS, but uses the cuSPASRELt metadata + layout and sparse matmul. + + The only functional difference is that cuSPARSELt stores `metadata` and `packed` together into a single tensor. 
+ + [9 1 7 4] [9 0 7 0] + [1 2 3 0] [0 2 0 0] + [8 3 5 4] -> prune 4x4 tile -> [8 0 0 4] -> pack to cuSPARSELT semi-structured -> packed + [1 2 6 2] [0 0 6 2] + + -> pack to transposed cuSPARSELt -> packed_t + semi-structured representation + + -> compute swizzled bitmask -> compressed_swizzled_bitmask + + + The equivalent PyTorch code to create the same three outputs from the dense tensor can be found below: + ``` + from torch.sparse import SparseSemiStructuredTensorCUSPARSELT + from torch.sparse._semi_structured_conversions import _sparse_semi_structured_tile, _compute_compressed_swizzled_bitmask + + pruned = _sparse_semi_structured_tile(dense) + packed_cusparselt = torch._cslt_compress(pruned) + packed_t_cusparselt = torch._cslt_compress(pruned.t().contiguous()) + bitmask = _compute_compressed_swizzled_bitmask(pruned) + + SparseSemiStructuredTensorCUSPARSELT(dense.shape, packed_cutlass, None, packed_t_cutlass, None, bitmask) + ``` + """ + (packed, meta, packed_t, meta_t, compressed_swizzled_bitmask) = torch._sparse_semi_structured_tile( + original_tensor, + algorithm=algorithm, + use_cutlass=False) + + return cls( + original_tensor.shape, + packed=packed, + meta=meta, + packed_t=packed_t, + meta_t=meta_t, + compressed_swizzled_bitmask=compressed_swizzled_bitmask, + requires_grad=False, + ) + def _mm( self, B: torch.Tensor,
2.41.0
56e057814565b2ae33b2106b4d0136179aa18f8
Fri, 19 Apr 2024 13:39:38 +0000
[PATCH 0373/1000] [Environment Variable][1/N] Use thread-safe env variable API in c10 (#119449)
This PR is the beginning of attempts to wrap thread-unsafe getenv and set_env functions inside a RW mutex. Pull Request resolved: https://github.com/pytorch/pytorch/pull/119449 Approved by: https://github.com/malfet, https://github.com/albanD
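The change itself is C++ (new c10::utils::get_env / set_env / check_env helpers guarded by a std::shared_mutex). Purely as an illustration of the pattern, a minimal Python sketch of the same idea might look as follows; Python's standard library has no shared reader/writer lock, so a plain mutex stands in, and none of these names are part of the PR.
```
import os
import threading

# A single lock serializes all reads and writes of the process environment,
# mirroring the role of the shared_mutex in the C++ helpers.
_env_lock = threading.Lock()

def get_env(name):
    """Return the value of an environment variable, or None if it is unset."""
    with _env_lock:
        return os.environ.get(name)

def set_env(name, value, overwrite=True):
    """Set an environment variable, optionally keeping an existing value."""
    with _env_lock:
        if not overwrite and name in os.environ:
            return
        os.environ[name] = value

def check_env(name):
    """Interpret a variable as a boolean flag: "1" -> True, "0" -> False, else None."""
    value = get_env(name)
    if value == "1":
        return True
    if value == "0":
        return False
    return None
```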
diff --git a/c10/core/impl/alloc_cpu.cpp b/c10/core/impl/alloc_cpu.cpp index 9b7ae22f9f..def4c3a3a9 100644 --- a/c10/core/impl/alloc_cpu.cpp +++ b/c10/core/impl/alloc_cpu.cpp @@ -3,6 +3,7 @@ #include <c10/core/alignment.h> #include <c10/util/Flags.h> #include <c10/util/Logging.h> +#include <c10/util/env.h> #include <c10/util/irange.h> #include <c10/util/numa.h> @@ -53,8 +54,8 @@ void memset_junk(void* data, size_t num) { #if defined(__linux__) && !defined(__ANDROID__) static inline bool is_thp_alloc_enabled() { static bool value = [&] { - const char* ptr = std::getenv("THP_MEM_ALLOC_ENABLE"); - return ptr != nullptr ? std::atoi(ptr) : 0; + auto env = c10::utils::check_env("THP_MEM_ALLOC_ENABLE"); + return env.has_value() ? env.value() : 0; }(); return value; } diff --git a/c10/cuda/CUDAAllocatorConfig.cpp b/c10/cuda/CUDAAllocatorConfig.cpp index 1f81ed47b6..ca38dfd6a4 100644 --- a/c10/cuda/CUDAAllocatorConfig.cpp +++ b/c10/cuda/CUDAAllocatorConfig.cpp @@ -234,7 +234,7 @@ size_t CUDAAllocatorConfig::parseAllocatorConfig( return i; } -void CUDAAllocatorConfig::parseArgs(const char* env) { +void CUDAAllocatorConfig::parseArgs(const std::optional<std::string>& env) { // If empty, set the default values m_max_split_size = std::numeric_limits<size_t>::max(); m_roundup_power2_divisions.assign(kRoundUpPowerOfTwoIntervals, 0); @@ -242,16 +242,16 @@ void CUDAAllocatorConfig::parseArgs(const char* env) { bool used_cudaMallocAsync = false; bool used_native_specific_option = false; - if (env == nullptr) { + if (!env.has_value()) { return; } { std::lock_guard<std::mutex> lock(m_last_allocator_settings_mutex); - m_last_allocator_settings = env; + m_last_allocator_settings = env.value(); } std::vector<std::string> config; - lexArgs(env, config); + lexArgs(env.value().c_str(), config); for (size_t i = 0; i < config.size(); i++) { std::string_view config_item_view(config[i]); diff --git a/c10/cuda/CUDAAllocatorConfig.h b/c10/cuda/CUDAAllocatorConfig.h index 3106fc1b46..db5c9e1c8f 100644 --- a/c10/cuda/CUDAAllocatorConfig.h +++ b/c10/cuda/CUDAAllocatorConfig.h @@ -2,6 +2,7 @@ #include <c10/cuda/CUDAMacros.h> #include <c10/util/Exception.h> +#include <c10/util/env.h> #include <atomic> #include <cstddef> @@ -72,14 +73,13 @@ class C10_CUDA_API CUDAAllocatorConfig { static CUDAAllocatorConfig& instance() { static CUDAAllocatorConfig* s_instance = ([]() { auto inst = new CUDAAllocatorConfig(); - const char* env = getenv("PYTORCH_CUDA_ALLOC_CONF"); - inst->parseArgs(env); + inst->parseArgs(c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF")); return inst; })(); return *s_instance; } - void parseArgs(const char* env); + void parseArgs(const std::optional<std::string>& env); private: CUDAAllocatorConfig(); diff --git a/c10/cuda/CUDACachingAllocator.cpp b/c10/cuda/CUDACachingAllocator.cpp index c472e82ce2..afac5272b6 100644 --- a/c10/cuda/CUDACachingAllocator.cpp +++ b/c10/cuda/CUDACachingAllocator.cpp @@ -8,6 +8,7 @@ #include <c10/util/CallOnce.h> #include <c10/util/ScopeExit.h> #include <c10/util/UniqueVoidPtr.h> +#include <c10/util/env.h> #include <c10/util/flat_hash_map.h> #include <c10/util/hash.h> #include <c10/util/irange.h> @@ -2831,7 +2832,7 @@ class DeviceCachingAllocator { // errors, since the caching allocator foils cuda-memcheck. 
bool forceUncachedAllocator() { static bool force_uncached = - getenv("PYTORCH_NO_CUDA_MEMORY_CACHING") != nullptr; + c10::utils::has_env("PYTORCH_NO_CUDA_MEMORY_CACHING"); return force_uncached; } @@ -3363,9 +3364,9 @@ struct BackendStaticInitializer { // version checks, to CUDAAllocatorConfig's runtime doublecheck. If this // works, maybe we should move all of CUDAAllocatorConfig here? CUDAAllocator* parseEnvForBackend() { - const char* val = getenv("PYTORCH_CUDA_ALLOC_CONF"); - if (val != nullptr) { - const std::string config(val); + const auto val = c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF"); + if (val.has_value()) { + const std::string& config = val.value(); std::regex exp("[\\s,]+"); std::sregex_token_iterator it(config.begin(), config.end(), exp, -1); diff --git a/c10/cuda/CUDADeviceAssertionHost.cpp b/c10/cuda/CUDADeviceAssertionHost.cpp index 1d52af7812..ec41e6230f 100644 --- a/c10/cuda/CUDADeviceAssertionHost.cpp +++ b/c10/cuda/CUDADeviceAssertionHost.cpp @@ -3,6 +3,7 @@ #include <c10/cuda/CUDAFunctions.h> #include <c10/util/Backtrace.h> #include <c10/util/Exception.h> +#include <c10/util/env.h> #include <c10/util/irange.h> #include <cuda_runtime.h> @@ -80,8 +81,8 @@ bool dsa_check_if_all_devices_support_managed_memory() { } bool env_flag_set(const char* env_var_name) { - const char* const env_string = std::getenv(env_var_name); - return (env_string == nullptr) ? false : std::strcmp(env_string, "0"); + const auto env_flag = c10::utils::check_env(env_var_name); + return env_flag.has_value() && env_flag.value(); } /// Deleter for UVM/managed memory pointers diff --git a/c10/cuda/CUDAMiscFunctions.cpp b/c10/cuda/CUDAMiscFunctions.cpp index 11ea775366..9ef724813e 100644 --- a/c10/cuda/CUDAMiscFunctions.cpp +++ b/c10/cuda/CUDAMiscFunctions.cpp @@ -1,12 +1,14 @@ #include <c10/cuda/CUDAMiscFunctions.h> -#include <cstdlib> +#include <c10/util/env.h> namespace c10::cuda { +// NOLINTNEXTLINE(bugprone-exception-escape,-warnings-as-errors) const char* get_cuda_check_suffix() noexcept { - static char* device_blocking_flag = getenv("CUDA_LAUNCH_BLOCKING"); + static auto device_blocking_flag = + c10::utils::check_env("CUDA_LAUNCH_BLOCKING"); static bool blocking_enabled = - (device_blocking_flag && atoi(device_blocking_flag)); + (device_blocking_flag.has_value() && device_blocking_flag.value()); if (blocking_enabled) { return ""; } else { diff --git a/c10/test/util/DeadlockDetection_test.cpp b/c10/test/util/DeadlockDetection_test.cpp index 35c4953f6d..05ae154e22 100644 --- a/c10/test/util/DeadlockDetection_test.cpp +++ b/c10/test/util/DeadlockDetection_test.cpp @@ -1,9 +1,8 @@ #include <c10/util/DeadlockDetection.h> +#include <c10/util/env.h> #include <gtest/gtest.h> -#include <cstdlib> - using namespace ::testing; using namespace c10::impl; @@ -23,7 +22,7 @@ TEST(DeadlockDetection, basic) { #ifndef _WIN32 TEST(DeadlockDetection, disable) { - setenv("TORCH_DISABLE_DEADLOCK_DETECTION", "1", 1); + c10::utils::set_env("TORCH_DISABLE_DEADLOCK_DETECTION", "1"); DummyPythonGILHooks hooks; SetPythonGILHooks(&hooks); SetPythonGILHooks(&hooks); diff --git a/c10/util/DeadlockDetection.cpp b/c10/util/DeadlockDetection.cpp index 320fa7873c..4b00d24534 100644 --- a/c10/util/DeadlockDetection.cpp +++ b/c10/util/DeadlockDetection.cpp @@ -1,6 +1,5 @@ #include <c10/util/DeadlockDetection.h> - -#include <cstdlib> +#include <c10/util/env.h> namespace c10::impl { @@ -8,7 +7,7 @@ namespace { PythonGILHooks* python_gil_hooks = nullptr; bool disable_detection() { - return 
std::getenv("TORCH_DISABLE_DEADLOCK_DETECTION") != nullptr; + return c10::utils::has_env("TORCH_DISABLE_DEADLOCK_DETECTION"); } } // namespace diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp index e9c9e9c2f3..17459f69fa 100644 --- a/c10/util/Logging.cpp +++ b/c10/util/Logging.cpp @@ -1,6 +1,7 @@ #include <c10/util/Backtrace.h> #include <c10/util/Flags.h> #include <c10/util/Logging.h> +#include <c10/util/env.h> #ifdef FBCODE_CAFFE2 #include <folly/synchronization/SanitizeThread.h> #endif @@ -10,7 +11,6 @@ #endif #include <algorithm> -#include <cstdlib> #include <iostream> // Common code that we use regardless of whether we use glog or not. @@ -94,8 +94,8 @@ using DDPUsageLoggerType = std::function<void(const DDPLoggingData&)>; namespace { bool IsAPIUsageDebugMode() { - const char* val = getenv("PYTORCH_API_USAGE_STDERR"); - return val && *val; // any non-empty value + auto val = c10::utils::get_env("PYTORCH_API_USAGE_STDERR"); + return val.has_value() && !val.value().empty(); // any non-empty value } void APIUsageDebug(const string& event) { @@ -438,10 +438,10 @@ namespace c10::detail { namespace { void setLogLevelFlagFromEnv() { - const char* level_str = std::getenv("TORCH_CPP_LOG_LEVEL"); + auto level_env = c10::utils::get_env("TORCH_CPP_LOG_LEVEL"); // Not set, fallback to the default level (i.e. WARNING). - std::string level{level_str != nullptr ? level_str : ""}; + std::string level{level_env.has_value() ? level_env.value() : ""}; if (level.empty()) { return; } diff --git a/c10/util/env.cpp b/c10/util/env.cpp new file mode 100644 index 0000000000..865c6b9497 --- /dev/null +++ b/c10/util/env.cpp @@ -0,0 +1,108 @@ +#include <c10/util/Exception.h> +#include <c10/util/env.h> +#include <fmt/format.h> +#include <cstdlib> +#include <shared_mutex> + +namespace c10::utils { + +static std::shared_mutex env_mutex; + +// Set an environment variable. +void set_env(const char* name, const char* value, bool overwrite) { + std::lock_guard lk(env_mutex); +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif +#ifdef _MSC_VER + if (!overwrite) { + // NOLINTNEXTLINE(concurrency-mt-unsafe) + if (std::getenv(name) != nullptr) { + return; + } + } + auto full_env_variable = fmt::format("{}={}", name, value); + // NOLINTNEXTLINE(concurrency-mt-unsafe) + auto err = putenv(full_env_variable.c_str()); + TORCH_INTERNAL_ASSERT( + err == 0, + "putenv failed for environment \"", + name, + "\", the error is: ", + err); +#else + // NOLINTNEXTLINE(concurrency-mt-unsafe) + auto err = setenv(name, value, static_cast<int>(overwrite)); + TORCH_INTERNAL_ASSERT( + err == 0, + "setenv failed for environment \"", + name, + "\", the error is: ", + err); +#endif +#ifdef _MSC_VER +#pragma warning(pop) +#endif + return; +} + +// Checks an environment variable is set. 
+bool has_env(const char* name) noexcept { + std::shared_lock lk(env_mutex); +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + // NOLINTNEXTLINE(concurrency-mt-unsafe) + auto envar = std::getenv(name); +#ifdef _MSC_VER +#pragma warning(pop) +#endif + return envar != nullptr; +} + +// Reads an environment variable and returns the content if it is set +std::optional<std::string> get_env(const char* name) noexcept { + std::shared_lock lk(env_mutex); +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + // NOLINTNEXTLINE(concurrency-mt-unsafe) + auto envar = std::getenv(name); +#ifdef _MSC_VER +#pragma warning(pop) +#endif + if (envar != nullptr) { + return std::string(envar); + } + return std::nullopt; +} + +// Reads an environment variable and returns +// - optional<true>, if set equal to "1" +// - optional<false>, if set equal to "0" +// - nullopt, otherwise +// +// NB: +// Issues a warning if the value of the environment variable is not 0 or 1. +std::optional<bool> check_env(const char* name) { + auto env_opt = get_env(name); + if (env_opt.has_value()) { + if (*env_opt == "0") { + return false; + } + if (*env_opt == "1") { + return true; + } + TORCH_WARN( + "Ignoring invalid value for boolean flag ", + name, + ": ", + *env_opt, + "valid values are 0 or 1."); + } + return std::nullopt; +} +} // namespace c10::utils diff --git a/c10/util/env.h b/c10/util/env.h index 3db116c7db..04b7585861 100644 --- a/c10/util/env.h +++ b/c10/util/env.h @@ -1,11 +1,20 @@ #pragma once -#include <c10/util/Exception.h> -#include <cstdlib> -#include <cstring> +#include <c10/macros/Export.h> #include <optional> +#include <string> namespace c10::utils { + +// Set an environment variable. +C10_API void set_env( + const char* name, + const char* value, + bool overwrite = true); + +// Checks an environment variable is set. +C10_API bool has_env(const char* name) noexcept; + // Reads an environment variable and returns // - optional<true>, if set equal to "1" // - optional<false>, if set equal to "0" @@ -13,29 +22,10 @@ namespace c10::utils { // // NB: // Issues a warning if the value of the environment variable is not 0 or 1. -inline std::optional<bool> check_env(const char* name) { -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif - auto envar = std::getenv(name); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - if (envar) { - if (strcmp(envar, "0") == 0) { - return false; - } - if (strcmp(envar, "1") == 0) { - return true; - } - TORCH_WARN( - "Ignoring invalid value for boolean flag ", - name, - ": ", - envar, - "valid values are 0 or 1."); - } - return std::nullopt; -} +C10_API std::optional<bool> check_env(const char* name); + +// Reads the value of an environment variable if it is set. +// However, check_env should be used if the value is assumed to be a flag. +C10_API std::optional<std::string> get_env(const char* name) noexcept; + } // namespace c10::utils diff --git a/c10/util/tempfile.cpp b/c10/util/tempfile.cpp index 28c3c7f14f..f106885a88 100644 --- a/c10/util/tempfile.cpp +++ b/c10/util/tempfile.cpp @@ -1,4 +1,5 @@ #include <c10/util/Exception.h> +#include <c10/util/env.h> #include <c10/util/tempfile.h> #include <fmt/format.h> @@ -22,10 +23,11 @@ static std::string make_filename(std::string_view name_prefix) { // We see if any of these environment variables is set and use their value, or // else default the temporary directory to `/tmp`. 
- const char* tmp_directory = "/tmp"; + std::string tmp_directory = "/tmp"; for (const char* variable : {"TMPDIR", "TMP", "TEMP", "TEMPDIR"}) { - if (const char* path = getenv(variable)) { - tmp_directory = path; + auto path_opt = c10::utils::get_env(variable); + if (path_opt.has_value()) { + tmp_directory = path_opt.value(); break; } }
2.41.0
2a2f676c38367b12ddb94b019921acacece5bba
Thu, 18 Apr 2024 19:44:44 -0700
[PATCH 0374/1000] [custom_op] add ability to provide manual schema (#124180)
Test Plan: - new tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/124180 Approved by: https://github.com/albanD
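For reference, the usage exercised by the new tests looks like the following (condensed from the diff; requires a PyTorch build that includes this change). The schema string replaces inference from type annotations, and its alias annotations must agree with `mutates_args`, otherwise a ValueError is raised.
```
import numpy as np
import torch

# Schema supplied explicitly, so the function needs no type annotations.
@torch.library.custom_op(
    "_torch_testing::add",
    mutates_args=(),
    schema="(Tensor x, float y) -> Tensor",
)
def add(x, y):
    out_np = x.numpy(force=True) + y
    return torch.from_numpy(out_np).to(x.device)

# For a mutating op, the (a!) alias annotation must match mutates_args.
@torch.library.custom_op(
    "_torch_testing::sin_",
    mutates_args=["x"],
    schema="(Tensor(a!) x) -> ()",
)
def sin_(x):
    x_np = x.numpy()
    np.sin(x_np, out=x_np)

print(add(torch.randn(3), 3.14))
```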
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index f06fea8b0a..4fc708ad33 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -2135,6 +2135,50 @@ class TestCustomOpAPI(TestCase): self.assertEqual(z, x + y) self.assertTrue(cpu_called) + @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") + def test_manual_schema(self): + @torch.library.custom_op( + "_torch_testing::add", + mutates_args=(), + schema="(Tensor x, float y) -> Tensor", + ) + def add(x, y): + x_np = x.numpy(force=True) + out_np = x_np + y + return torch.from_numpy(out_np).to(x.device) + + x = torch.randn(3) + y = 3.14 + z = add(x, y) + self.assertEqual(z, x + y) + + @torch.library.custom_op( + "_torch_testing::sin_", + mutates_args=["x"], + schema="(Tensor(a!) x) -> ()", + ) + def sin_(x): + x_np = x.numpy() + np.sin(x_np, out=x_np) + + x = torch.randn(3) + expected = x.sin() + sin_(x) + self.assertEqual(x, expected) + + @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") + def test_manual_schema_error(self): + with self.assertRaisesRegex(ValueError, "the op mutates {'x'}"): + + @torch.library.custom_op( + "_torch_testing::sin_", + mutates_args=(), + schema="(Tensor(a!) x) -> ()", + ) + def sin_(x): + x_np = x.numpy() + np.sin(x_np, out=x_np) + def test_supports_tensorlist(self): @torch._library.autograd.supports_tensorlist class Stack(torch.autograd.Function): diff --git a/torch/_library/custom_ops.py b/torch/_library/custom_ops.py index f36d9e3393..cd98309602 100644 --- a/torch/_library/custom_ops.py +++ b/torch/_library/custom_ops.py @@ -28,6 +28,7 @@ def custom_op( *, mutates_args: Iterable[str], device_types: device_types_t = None, + schema: Optional[str] = None, ) -> Callable: """Wraps a function into custom operator. @@ -52,6 +53,19 @@ def custom_op( is valid for. If no device type is provided, then the function is used as the default implementation for all device types. Examples: "cpu", "cuda". + schema (None | str): A schema string for the operator. If None + (recommended) we'll infer a schema for the operator from its type + annotations. We recommend letting us infer a schema unless you + have a specific reason not to. + Example: "(Tensor x, int y) -> (Tensor, Tensor)". + + .. note:: + We recommend not passing in a ``schema`` arg and instead letting us infer + it from the type annotations. It is error-prone to write your own schema. + You may wish to provide your own schema if our interpretation of + the type annotation is not what you want. + For more info on how to write a schema string, see + `here <https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/README.md#func>`_ Examples:: >>> import torch @@ -96,9 +110,27 @@ def custom_op( def inner(fn): import torch - schema = torch._custom_op.impl.infer_schema(fn, mutates_args) + if schema is None: + import torch._custom_op.impl + + schema_str = torch._custom_op.impl.infer_schema(fn, mutates_args) + else: + schema_str = schema namespace, opname = name.split("::") - result = CustomOpDef(namespace, opname, schema, fn) + result = CustomOpDef(namespace, opname, schema_str, fn) + if schema is not None: + # Check that schema's alias annotations match those of `mutates_args`. 
+ expected = set() + for arg in result._opoverload._schema.arguments: + if arg.alias_info is not None and arg.alias_info.is_write: + expected.add(arg.name) + if expected != set(mutates_args): + raise ValueError( + f"Attempted to create a custom op with `mutates_args={mutates_args}` " + f"and `schema={schema}. The schema suggests that the op mutates {expected}" + f"which is different from what was provided to us in `mutates_args`. " + f"Please make these consistent." + ) result.register_impl(device_types)(fn) return result
2.41.0
918dfedc5b1f33ea8951fe2aaa22d13a05b3704
Thu, 18 Apr 2024 19:44:44 -0700
[PATCH 0375/1000] [custom_op] Rename register_impl to register_kernel (#124200)
Motivation: - The API is used for registering an implementation for a specific device type. - "impl" is ambiguous and can be confused with Library.impl. Test Plan: - existing tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/124200 Approved by: https://github.com/albanD ghstack dependencies: #124180
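In user code the rename is a one-line change; the sketch below is a condensed restatement of the updated test from the diff (the CPU kernel registration), with the old spelling noted in a comment.
```
import torch
from torch import Tensor

@torch.library.custom_op("_torch_testing::add", mutates_args=())
def add(x: Tensor, y: float) -> Tensor:
    out_np = x.numpy(force=True) + y
    return torch.from_numpy(out_np).to(x.device)

# Previously spelled @add.register_impl("cpu"); the behavior is unchanged.
@add.register_kernel("cpu")
def _(x, y):
    return torch.from_numpy(x.numpy() + y)
```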
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index 4fc708ad33..2fd7ecf99d 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -2123,7 +2123,7 @@ class TestCustomOpAPI(TestCase): cpu_called = False - @add.register_impl("cpu") + @add.register_kernel("cpu") def _(x, y): nonlocal cpu_called cpu_called = True @@ -2579,7 +2579,7 @@ Please use `add.register_fake` to add an fake impl.""", out_np = np.sin(x_np) return torch.from_numpy(out_np) - @f.register_impl("cuda") + @f.register_kernel("cuda") def _(x: Tensor) -> Tensor: nonlocal cuda_call_count cuda_call_count += 1 diff --git a/torch/_library/custom_ops.py b/torch/_library/custom_ops.py index cd98309602..c2b7e475a8 100644 --- a/torch/_library/custom_ops.py +++ b/torch/_library/custom_ops.py @@ -131,7 +131,7 @@ def custom_op( f"which is different from what was provided to us in `mutates_args`. " f"Please make these consistent." ) - result.register_impl(device_types)(fn) + result.register_kernel(device_types)(fn) return result return inner @@ -171,7 +171,7 @@ class CustomOpDef: def __repr__(self) -> str: return f"<CustomOpDef({self._qualname})>" - def register_impl( + def register_kernel( self, device_types: device_types_t, fn: Optional[Callable] = None, / ) -> Callable: """Register an implementation for a device type for this operator. @@ -199,7 +199,7 @@ class CustomOpDef: >>> return torch.from_numpy(y_np) >>> >>> # Add implementations for the cuda device - >>> @numpy_sin.register_impl("cuda") + >>> @numpy_sin.register_kernel("cuda") >>> def _(x): >>> x_np = x.cpu().numpy() >>> y_np = np.sin(x_np) @@ -469,17 +469,17 @@ class CustomOpDef: # >>> return x.sin() # >>> # >>> # Usage 1: not as a decorator -# >>> numpy_sin.register_impl("cuda", fn) +# >>> numpy_sin.register_kernel("cuda", fn) # >>> # >>> # Usage 2: as a decorator -# >>> @numpy_sin.register_impl("cuda") +# >>> @numpy_sin.register_kernel("cuda") # >>> def fn2(x): # >>> return x.sin # -# The way we support this is that `register_impl` accepts an optional `fn`. +# The way we support this is that `register_kernel` accepts an optional `fn`. # If `fn` is provided (Usage 1), then we know that the user is using it not # as a decorator. -# If `fn` is not provided (Usage 2), then `register_impl` needs to return a +# If `fn` is not provided (Usage 2), then `register_kernel` needs to return a # decorator.
2.41.0
ad8d25881d850eaf0b326f6ce5c78305e38c001
Thu, 18 Apr 2024 19:44:45 -0700
[PATCH 0376/1000] Add torch.library.register_kernel (#124299)
This mirrors the .register_kernel method on the object produced by the custom_op decorator. Test Plan: - new tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/124299 Approved by: https://github.com/albanD ghstack dependencies: #124180, #124200
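Condensed from the docstring added in this diff: the module-level torch.library.register_kernel accepts a qualname string (as here), an OpOverload, or the CustomOpDef itself, and mirrors the method form shown in the previous commit.
```
import numpy as np
import torch
from torch import Tensor

# CPU-only custom op...
@torch.library.custom_op("mylib::numpy_sin", mutates_args=(), device_types="cpu")
def numpy_sin(x: Tensor) -> Tensor:
    return torch.from_numpy(np.sin(x.numpy()))

# ...with a CUDA kernel added through the new module-level API.
@torch.library.register_kernel("mylib::numpy_sin", "cuda")
def _(x):
    y_np = np.sin(x.cpu().numpy())
    return torch.from_numpy(y_np).to(device=x.device)
```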
diff --git a/docs/source/library.rst b/docs/source/library.rst index 2b09e5c9ea..b2ae235414 100644 --- a/docs/source/library.rst +++ b/docs/source/library.rst @@ -21,12 +21,12 @@ Use :func:`torch.library.custom_op` to create new custom ops. Extending custom ops (created from Python or C++) ------------------------------------------------- -Use the impl methods, such as :func:`torch.library.impl` and -func:`torch.library.impl_abstract`, to add implementations +Use the register.* methods, such as :func:`torch.library.register_kernel` and +func:`torch.library.register_fake`, to add implementations for any operators (they may have been created using :func:`torch.library.custom_op` or via PyTorch's C++ operator registration APIs). -.. autofunction:: impl +.. autofunction:: register_kernel .. autofunction:: register_autograd .. autofunction:: register_fake .. autofunction:: impl_abstract @@ -53,3 +53,5 @@ A tutorial that walks you through some examples on how to use this API is availa .. autofunction:: fallthrough_kernel .. autofunction:: define + +.. autofunction:: impl diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index 2fd7ecf99d..b9fe9bed81 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -2359,6 +2359,110 @@ class TestCustomOpAPI(TestCase): self.assertEqual(z.shape, x.shape) self.assertTrue(called) + @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") + def test_library_register_kernel(self): + modes = ["function", "qualname", "opoverload"] + calls = ["decorator", "function"] + device_types_options = ["cpu", None] + + for mode, call, device_types in itertools.product( + modes, calls, device_types_options + ): + + @torch.library.custom_op( + "_torch_testing::add", mutates_args=(), device_types="cuda" + ) + def add(x: Tensor, y: float) -> Tensor: + x_np = x.cpu().numpy() + out_np = x_np + y + return torch.from_numpy(out_np).to(x.device) + + if mode == "function": + op = add + elif mode == "qualname": + op = "_torch_testing::add" + else: + assert mode == "opoverload" + op = torch.ops._torch_testing.add.default + + called = False + + if call == "decorator": + + @torch.library.register_kernel(op, device_types) + def _(x, y): + nonlocal called + called = True + x_np = x.numpy() + out_np = x_np + y + return torch.from_numpy(out_np) + + else: + assert call == "function" + + def add_cpu(x, y): + nonlocal called + called = True + x_np = x.numpy() + out_np = x_np + y + return torch.from_numpy(out_np) + + torch.library.register_kernel(op, device_types, add_cpu) + + x = torch.randn(3) + y = 3.14 + z = add(x, y) + self.assertEqual(z, x + y) + self.assertTrue(called) + + @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") + def test_library_register_kernel_low_level(self): + modes = ["qualname", "opoverload"] + calls = ["decorator", "function"] + device_types_options = [("cpu", "cuda"), "cpu", None] + + for mode, call, device_types in itertools.product( + modes, calls, device_types_options + ): + with torch.library._scoped_library("_torch_testing", "FRAGMENT") as lib: + lib.define("add9(Tensor x, float y) -> Tensor") + + if mode == "qualname": + op = "_torch_testing::add9" + else: + assert mode == "opoverload" + op = torch.ops._torch_testing.add9.default + + called = False + + if call == "decorator": + + @torch.library.register_kernel(op, device_types, lib=lib) + def _(x, y): + nonlocal called + called = True + x_np = x.numpy() + out_np = x_np + y + return torch.from_numpy(out_np) + + else: + assert call == 
"function" + + def add_cpu(x, y): + nonlocal called + called = True + x_np = x.numpy() + out_np = x_np + y + return torch.from_numpy(out_np) + + torch.library.register_kernel(op, device_types, add_cpu, lib=lib) + + x = torch.randn(3) + y = 3.14 + z = torch.ops._torch_testing.add9.default(x, y) + self.assertEqual(z, x + y) + self.assertTrue(called) + @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug") def test_library_register_autograd(self): for mode in ["function", "qualname", "opoverload"]: diff --git a/torch/_library/custom_ops.py b/torch/_library/custom_ops.py index c2b7e475a8..77ea4d8b47 100644 --- a/torch/_library/custom_ops.py +++ b/torch/_library/custom_ops.py @@ -191,7 +191,7 @@ class CustomOpDef: >>> from torch.library import custom_op >>> import numpy as np >>> - >>> # Example of split cpu and cuda definitions + >>> # Create a custom op that works on cpu >>> @custom_op("mylib::numpy_sin", mutates_args=(), device_types="cpu") >>> def numpy_sin(x: Tensor) -> Tensor: >>> x_np = x.numpy() diff --git a/torch/library.py b/torch/library.py index ce6c75c261..a03500b63e 100644 --- a/torch/library.py +++ b/torch/library.py @@ -9,7 +9,7 @@ import re import contextlib import sys import warnings -from torch._library.custom_ops import custom_op, _maybe_get_opdef +from torch._library.custom_ops import custom_op, _maybe_get_opdef, device_types_t import torch._library as _library @@ -424,6 +424,65 @@ def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1): _op_identifier = Union[str, "torch._ops.OpOverload", "torch._library.custom_ops.CustomOpDef"] +def register_kernel( + op: _op_identifier, + device_types: device_types_t, + func: Optional[Callable] = None, + /, + *, + lib: Optional[Library] = None): + """Register an implementation for a device type for this operator. + + Some valid device_types are: "cpu", "cuda", "xla", "mps", "ipu", "xpu". + This API may be used as a decorator. + + Args: + fn (Callable): The function to register as the implementation for + the given device types. + device_types (None | str | Sequence[str]): The device_types to register an impl to. + If None, we will register to all device types -- please only use + this option if your implementation is truly device-type-agnostic. 
+ + Examples:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> import torch + >>> from torch import Tensor + >>> from torch.library import custom_op + >>> import numpy as np + >>> + >>> # Create a custom op that works on cpu + >>> @custom_op("mylib::numpy_sin", mutates_args=(), device_types="cpu") + >>> def numpy_sin(x: Tensor) -> Tensor: + >>> x_np = x.numpy() + >>> y_np = np.sin(x_np) + >>> return torch.from_numpy(y_np) + >>> + >>> # Add implementations for the cuda device + >>> @torch.library.register_kernel("mylib::numpy_sin", "cuda") + >>> def _(x): + >>> x_np = x.cpu().numpy() + >>> y_np = np.sin(x_np) + >>> return torch.from_numpy(y_np).to(device=x.device) + >>> + >>> x_cpu = torch.randn(3) + >>> x_cuda = x_cpu.cuda() + >>> assert torch.allclose(numpy_sin(x_cpu), x_cpu.sin()) + >>> assert torch.allclose(numpy_sin(x_cuda), x_cuda.sin()) + + """ + + if not isinstance(op, (str, torch._ops.OpOverload, torch._library.custom_ops.CustomOpDef)): + raise ValueError("register_kernel(op): got unexpected type for op: {type(op)}") + if isinstance(op, torch._ops.OpOverload): + op = op._name + opdef = _maybe_get_opdef(op) + if opdef is not None: + return opdef.register_kernel(device_types, func) + assert isinstance(op, str) + if device_types is None: + device_types = "CompositeExplicitAutograd" + return impl(op, device_types, func, lib=lib) + def register_fake( op: _op_identifier,
2.41.0
36f8378e1b5a8cb7127977b8d068fbf9c3e1247
Thu, 18 Apr 2024 21:38:18 -0700
[PATCH 0377/1000] Re-land precompile triton templates (#124030)
Re-land precompile triton templates. This got reverted because we were precompiling templates without checking the cache. I have since added logic and a test to ensure we do not precompile if there is a cache hit. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124030 Approved by: https://github.com/shunting314, https://github.com/nmacchioni, https://github.com/yoyoyocmu
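A heavily simplified sketch of the re-landed control flow: consult the benchmark cache with benchmark=None (lookup only) and skip precompilation entirely on a hit, otherwise farm the per-choice precompile() calls out to a thread pool. Apart from no_op and precompile, the names below are stand-ins for this sketch, not the real Inductor internals.
```
from concurrent.futures import ThreadPoolExecutor

def make_precompile_fn(choices, lookup_cached_timings, num_workers):
    """Return a callable that precompiles `choices`, or a no-op on a cache hit."""
    def no_op():
        return None

    if num_workers <= 0:
        return no_op

    # The fix: a read-only cache lookup (benchmark=None in the real code) runs
    # first; any cached timings mean precompilation can be skipped entirely.
    if lookup_cached_timings(choices):
        return no_op

    executor = ThreadPoolExecutor(max_workers=num_workers)
    futures = [executor.submit(choice.precompile) for choice in choices]

    def wait_for_precompile():
        for future in futures:
            future.result()
        executor.shutdown(wait=True)

    return wait_for_precompile
```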
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py index d1f074de51..beb1b22df8 100644 --- a/test/inductor/test_max_autotune.py +++ b/test/inductor/test_max_autotune.py @@ -294,6 +294,7 @@ class TestMaxAutotune(TestCase): self.assertEqual(num_get, 3) self.assertEqual(num_put, 1) + @skipIfRocm def test_precompilation_threads(self): import threading from typing import Any, Dict @@ -328,7 +329,8 @@ class TestMaxAutotune(TestCase): inputs: str, benchmark: Callable[[Any], Dict[ChoiceCaller, float]], ) -> Dict[ChoiceCaller, float]: - return benchmark(choices) + if benchmark is not None: + return benchmark(choices) asc = AlgorithmSelectorCache() @@ -426,6 +428,25 @@ class TestMaxAutotune(TestCase): FileCheck().check_not("extern_kernels.convolution").run(code[0]) self.assertEqual(conv1x1(input_tensor), out, atol=1e-2, rtol=0) + @skipIfRocm + def test_filled_cache_precompile(self): + def fn(a, b, c): + a = (a @ b) @ c + a, b, c = (t.to(torch.float16) for t in [a, b, c]) + return (a @ b) @ c + + fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn) + inputs = [torch.rand([256, 256], device="cuda") for _ in range(3)] + from torch._dynamo.utils import counters + + self.assertEqual(fn(*inputs), fn_c(*inputs), atol=1e-2, rtol=1e-2) + + torch._dynamo.reset() + counters.clear() + + fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn) + self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 0) + def test_cat_addmm(self): def fn(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor): return torch.cat( diff --git a/test/inductor/test_select_algorithm.py b/test/inductor/test_select_algorithm.py index 3b76651fcc..48713bb63e 100644 --- a/test/inductor/test_select_algorithm.py +++ b/test/inductor/test_select_algorithm.py @@ -19,8 +19,10 @@ aten = torch.ops.aten def patches(fn): - def skip_cache(self, choices, name, key, generate): - return generate(choices) + def skip_cache(self, choices, name, key, benchmark): + if benchmark is None: + return {} + return benchmark(choices) for patcher in [ dynamo_config.patch(verbose=True), diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py index 52c923e353..5c2606d81c 100644 --- a/test/inductor/test_templated_attention.py +++ b/test/inductor/test_templated_attention.py @@ -16,7 +16,8 @@ from torch.utils._triton import has_triton # Skip tests if Triton is not available supported_platform = skipUnless( - torch.cuda.is_available() and has_triton(), "Requires CUDA and Triton" + torch.cuda.is_available() and has_triton() and torch.version.hip is None, + "Requires CUDA and Triton", ) Tolerances = namedtuple("Tolerances", ["atol", "rtol"]) diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py index ba1976745c..35beb6fb06 100644 --- a/torch/_inductor/autotune_process.py +++ b/torch/_inductor/autotune_process.py @@ -502,7 +502,6 @@ class TestBenchmarkRequest(BenchmarkRequest): class TritonBenchmarkRequest(BenchmarkRequest): # Important: Instances of this class have to be serializable # across process boundaries. Do not put CUDA Tensors in here! 
- def __init__( self, kernel_name: str, @@ -545,6 +544,8 @@ class TritonBenchmarkRequest(BenchmarkRequest): if "warmup" in inspect.signature(run_method).parameters: warmup_arg["warmup"] = False + from torch._C import _cuda_getCurrentRawStream as get_raw_stream + if torch.version.hip and self.matrix_instr_nonkdim != 0: return functools.partial( run_method, @@ -553,9 +554,7 @@ class TritonBenchmarkRequest(BenchmarkRequest): *self.extra_args, grid=self.grid, **warmup_arg, - num_stages=self.num_stages, - num_warps=self.num_warps, - matrix_instr_nonkdim=self.matrix_instr_nonkdim, + stream=get_raw_stream(self.output_tensor_meta.device.index), ) else: return functools.partial( @@ -565,10 +564,13 @@ class TritonBenchmarkRequest(BenchmarkRequest): *self.extra_args, grid=self.grid, **warmup_arg, - num_stages=self.num_stages, - num_warps=self.num_warps, + stream=get_raw_stream(self.output_tensor_meta.device.index), ) + def precompile(self): + mod = PyCodeCache.load_by_key_path(self.module_cache_key, self.module_path) + getattr(mod, self.kernel_name).precompile() + def __str__(self) -> str: return f"{self.kernel_name=}, {self.module_path=}, {self.module_cache_key=}" diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index e120e6446a..465891fc7f 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -156,7 +156,7 @@ class CacheBase: try: import triton - triton_version = triton.__version__ + triton_version = triton.__version__ # type: ignore[attr-defined] except ModuleNotFoundError: triton_version = None @@ -262,7 +262,7 @@ class PersistentCache(CacheBase): choices: List[ChoiceCaller], op: str, inputs: str, - benchmark: Callable[[Any], Dict[ChoiceCaller, float]], + benchmark: Optional[Callable[[Any], Dict[ChoiceCaller, float]]], ) -> Dict[ChoiceCaller, float]: """ Check to see if we have benchmarked the given choice callers. For each @@ -270,7 +270,7 @@ class PersistentCache(CacheBase): 1. Check global_cache[op][inputs][choice][precision], return benchmark if cached. 2. Check local_cache[op][inputs][choice][precision], return benchmark if cached. - 3. + 3. If benchmark is not None: a. `max_autotune_gemm=True`: benchmark the choice, update local_cache[op][inputs][choice], and return the benchmark. b. `max_autotune_gemm=False`: don't benchmark the choice, return nothing. 
@@ -303,9 +303,13 @@ class PersistentCache(CacheBase): if config.max_autotune or config.max_autotune_gemm: local_cache = self.get_local_cache() # check local cache first since it is data specific to the current machine - if not check_cache(local_cache) and not ( - use_global_cache() - and check_cache(self.get_global_cache(), callback=log_stats) + if ( + not check_cache(local_cache) + and not ( + use_global_cache() + and check_cache(self.get_global_cache(), callback=log_stats) + ) + and benchmark is not None ): try: # re-benchmark everything to try to get consistent numbers from the same machine diff --git a/torch/_inductor/codegen/triton_utils.py b/torch/_inductor/codegen/triton_utils.py index c95e699bcd..c8a7d92e3c 100644 --- a/torch/_inductor/codegen/triton_utils.py +++ b/torch/_inductor/codegen/triton_utils.py @@ -65,6 +65,32 @@ def signature_to_meta( } +def is_unaligned_buffer(arg: TensorArg): + buf_name = arg.buffer + if buf_name in V.graph.graph_inputs: + return not config.assume_aligned_inputs + + if buf_name in V.graph.constants: + # all constants are assumed to be aligned + return False + + if V.graph.scheduler: + layout = V.graph.scheduler.get_buffer_layout(buf_name) + else: + buffer = V.graph.get_buffer(buf_name) + # output arg + if not buffer: + assert buf_name == V.kernel.output_node.name + layout = V.kernel.output_node.layout + else: + layout = buffer.get_layout() + + if isinstance(layout, torch._inductor.ir.NonOwningLayout): + return not layout.maybe_guard_aligned() + else: + return False + + def config_of( args: List[KernelArgType], *, @@ -83,9 +109,7 @@ def config_of( offset_aligned = V.graph.sizevars.statically_known_multiple_of( x.offset * x.dtype.itemsize, alignment # type: ignore[arg-type] ) - return offset_aligned and not V.graph.scheduler.is_unaligned_buffer( - x.buffer - ) + return offset_aligned and not is_unaligned_buffer(x) else: return False if isinstance(x, SizeArg): diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py index 573e79acd6..049a77a4ef 100644 --- a/torch/_inductor/scheduler.py +++ b/torch/_inductor/scheduler.py @@ -2495,18 +2495,9 @@ class Scheduler: self.flush() - def is_unaligned_buffer(self, buf_name): - if buf_name in V.graph.graph_inputs: - return not config.assume_aligned_inputs - if buf_name in V.graph.constants: - # all constants are assumed to be aligned - return False + def get_buffer_layout(self, buf_name: str) -> ir.Layout: node = self.name_to_node[buf_name] - layout = node.node.get_layout() - if isinstance(layout, ir.NonOwningLayout): - return not layout.maybe_guard_aligned() - else: - return False + return node.node.get_layout() class BaseScheduling: diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index 75deeaf5e3..3261909d2b 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -94,7 +94,7 @@ class TritonTemplateKernel(TritonKernel): grid_fn, meta, call_sizes, - use_jit=True, + use_jit=False, prefix_args=0, suffix_args=0, epilogue_fn=identity, @@ -153,8 +153,8 @@ class TritonTemplateKernel(TritonKernel): argdefs, _, signature = self.args.python_argdefs() triton_meta = { "signature": signature_to_meta(signature, size_dtype=self.index_dtype), - "device": V.graph.scheduler.current_device.index, - "device_type": V.graph.scheduler.current_device.type, + "device": self.output_node.get_device().index, + "device_type": self.output_node.get_device().type, "constants": {}, } triton_meta["configs"] = [config_of(signature)] @@ -554,7 +554,7 @@ class 
TritonTemplate(KernelTemplate): ), TritonTemplateKernel( kernel_name=kernel_name, output_node=fake_out, - use_jit=True, + use_jit=False, **kernel_options, ) as kernel: try: @@ -740,6 +740,10 @@ class TritonTemplateCaller(ir.TritonTemplateCallerBase): assert self.bmreq is not None return self.bmreq.benchmark(*args, output_tensor=out) + def precompile(self): + assert self.bmreq is not None + self.bmreq.precompile() + def __str__(self): return f"TritonTemplateCaller({self.bmreq.module_path}, {self.debug_extra})" @@ -881,6 +885,7 @@ class AlgorithmSelectorCache(PersistentCache): # TODO(nmacchioni): remove once CI tests are fixed choices = [choice for choice in choices if choice is not None] + if len(choices) == 0: raise RuntimeError( "No choices to select, please consider adding ATEN into max_autotune_gemm_backends " @@ -897,19 +902,38 @@ class AlgorithmSelectorCache(PersistentCache): def make_benchmark_fn(): return self.make_benchmark_fn(choices, input_nodes, layout, input_gen_fns) - def precompile(choices): + def precompile(choices) -> Callable[[], None]: + def no_op(*args, **kwargs): + return + if ( precompilation_timeout_seconds is None or precompilation_timeout_seconds <= 0 ): - return + return no_op num_workers = min( config.compile_threads, torch.get_num_threads(), len(choices), ) if num_workers <= 0: - return + return no_op + + # TODO - debug issue + if torch.version.hip: + return no_op + + # check local and global cache before precompiling + timings = self.lookup( + choices, + name, + repr([self.key_of(x) for x in input_nodes]), + benchmark=None, + ) + + if timings: + return no_op + log.info( "Multithreaded precompilation for %d choices using %d worker threads", len(choices),
2.41.0
62169a8fa84246babe98410bb5600693db62a14
Thu, 18 Apr 2024 13:15:03 -0700
[PATCH 0378/1000] Support torchbind op dispatch in python (#123367)
We override the `__call__` method and register default fake, functional, and proxy dispatch mode implementations in the op's python_key_mode_table. The idea is: 1. when the inputs contain a FakeScriptObject, we dispatch through the _get_dispatch mechanism; the dispatch mode keys are implemented automatically in the operator's constructor. 2. when the inputs are not fakified, we dispatch through the original C++ dispatcher. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123367 Approved by: https://github.com/zou3519
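A schematic restatement of that dispatch rule in Python. This is an illustration only: the FakeScriptObject import path and the handler-table lookup are assumptions made for the sketch, and the real logic lives inside the `__call__` override described above rather than in a free function.
```
import torch
from torch._library.fake_class_registry import FakeScriptObject
from torch.utils._pytree import tree_flatten
from torch.utils._python_dispatch import _get_current_dispatch_mode

def call_torchbind_op(op_overload, python_key_mode_table, *args, **kwargs):
    """Dispatch rule: fakified script-object inputs go through Python dispatch modes."""
    flat_args, _ = tree_flatten((args, kwargs))

    if any(isinstance(a, FakeScriptObject) for a in flat_args):
        # 1. Fakified inputs: use the implementation registered for the active
        #    Python dispatch mode (fake / functional / proxy); these entries are
        #    installed automatically when the operator is constructed.
        mode = _get_current_dispatch_mode()
        handler = python_key_mode_table[type(mode)]
        return handler(mode, *args, **kwargs)

    # 2. No fakified inputs: fall back to the original C++ dispatcher.
    return op_overload(*args, **kwargs)
```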
diff --git a/aten/src/ATen/core/dispatch/Dispatcher.h b/aten/src/ATen/core/dispatch/Dispatcher.h index c6d336510c..caf73d7ceb 100644 --- a/aten/src/ATen/core/dispatch/Dispatcher.h +++ b/aten/src/ATen/core/dispatch/Dispatcher.h @@ -403,6 +403,10 @@ public: return operatorDef_->op.hasKernelForDispatchKey(k); } + bool isKernelFallthroughKernel(DispatchKey k) const { + return operatorDef_->op.kernelForDispatchKey(k).isFallthrough(); + } + bool hasKernelForAnyDispatchKey(DispatchKeySet k) const { return operatorDef_->op.hasKernelForAnyDispatchKey(k); } diff --git a/test/export/test_torchbind.py b/test/export/test_torchbind.py index 7523af9f13..b5312fec19 100644 --- a/test/export/test_torchbind.py +++ b/test/export/test_torchbind.py @@ -85,6 +85,19 @@ class TestExportTorchbind(TestCase): test.tq_size_counter += 1 return len(self.queue) + self.torch_bind_ops = [ + torch.ops._TorchScriptTesting.takes_foo, + torch.ops._TorchScriptTesting.takes_foo_python_meta, + torch.ops._TorchScriptTesting.takes_foo_list_return, + torch.ops._TorchScriptTesting.takes_foo_tuple_return, + torch.ops._TorchScriptTesting.take_an_instance, + torch.ops._TorchScriptTesting.take_an_instance_inferred, + torch.ops._TorchScriptTesting.takes_foo_cia, + torch.ops._TorchScriptTesting.queue_pop, + torch.ops._TorchScriptTesting.queue_push, + torch.ops._TorchScriptTesting.queue_size, + ] + def tearDown(self): torch._library.fake_class_registry.deregister_fake_class( "_TorchScriptTesting::_Foo" @@ -555,6 +568,181 @@ def forward(self, arg0_1, arg1_1): self.assertEqual(tq.size(), 0) self.assertEqual(tq1.size(), 0) + def test_identifying_torchbind_ops(self): + for op in self.torch_bind_ops: + self.assertTrue(op._has_torchbind_op_overload) + + for op in [ + torch.ops.aten.add, + torch.ops.aten.cos, + ]: + self.assertFalse(op._has_torchbind_op_overload) + + def test_torchbind_op_register_fallthrough(self): + TEST_DISPATCH_KEY = torch._C.DispatchKey.AutocastCPU + TEST_DISPATCH_KEY_STR = "AutocastCPU" + + for op_packet in self.torch_bind_ops: + op = op_packet.default + ns, _ = torch._library.utils.parse_namespace(op_packet._qualified_op_name) + with torch.library._scoped_library(ns, "FRAGMENT") as lib: + lib.impl( + op.name(), torch.library.fallthrough_kernel, TEST_DISPATCH_KEY_STR + ) + self.assertTrue( + torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough( + op.name(), TEST_DISPATCH_KEY + ) + ) + + def test_torchbind_op_fallthrough_keys_respects_lib_impl(self): + TEST_DISPATCH_KEY = torch._C.DispatchKey.AutogradCPU + TEST_DISPATCH_KEY_STR = "AutogradCPU" + + tested = 0 + for op_packet in self.torch_bind_ops: + op = op_packet.default + ns, _ = torch._library.utils.parse_namespace(op_packet._qualified_op_name) + if ( + not torch._C._dispatch_has_kernel_for_dispatch_key( + op.name(), TEST_DISPATCH_KEY + ) + and TEST_DISPATCH_KEY not in op.py_kernels + ): + tested += 1 + with torch.library._scoped_library(ns, "FRAGMENT") as lib: + lib.impl( + op.name(), lambda *args, **kwargs: args, TEST_DISPATCH_KEY_STR + ) + self.assertTrue(TEST_DISPATCH_KEY not in op._fallthrough_keys()) + + with torch.library._scoped_library(ns, "FRAGMENT") as lib: + lib.impl( + op.name(), + torch.library.fallthrough_kernel, + TEST_DISPATCH_KEY_STR, + ) + self.assertTrue(TEST_DISPATCH_KEY in op._fallthrough_keys()) + self.assertTrue(tested > 0) + + def test_make_fx_schema_checking_script_object(self): + class Model(torch.nn.Module): + def forward(self, tq, x, foo): + torch.ops._TorchScriptTesting.queue_push(foo, x.cos()) + return tq + + class 
ModelCallByKW(torch.nn.Module): + def forward(self, tq, x, foo): + torch.ops._TorchScriptTesting.queue_push(x=x.cos(), foo=foo) + return tq + + mod = Model() + modkw = ModelCallByKW() + + foo = torch.classes._TorchScriptTesting._Foo(10, 20) + x = torch.ones(3, 3) + tq = torch.classes._TorchScriptTesting._TensorQueue( + torch.empty( + 0, + ).fill_(-1) + ) + ns = "_TorchScriptTesting" + with torch.library._scoped_library(ns, "FRAGMENT") as lib: + op = torch.ops._TorchScriptTesting.queue_push + lib.impl(op.__name__, torch.library.fallthrough_kernel, "AutogradCPU") + lib.impl(op.__name__, torch.library.fallthrough_kernel, "ADInplaceOrView") + lib.impl( + op.__name__, + torch.library.fallthrough_kernel, + "PythonTLSSnapshot", + ) + + with self.assertRaisesRegex( + RuntimeError, "is expected to be a FakeScriptObject" + ): + _ = make_fx(mod, tracing_mode="fake")(tq, x, foo) + + with self.assertRaisesRegex( + RuntimeError, "is expected to be a FakeScriptObject" + ): + _ = make_fx(modkw, tracing_mode="fake")(tq, x, foo) + + @parametrize("fallthrough_via", ["lib_impl", "py_impl"]) + def test_make_fx_tensor_queue_operators(self, fallthrough_via): + class Model(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, tq, x): + with torch.autocast("cuda", dtype=torch.bfloat16): + torch.ops._TorchScriptTesting.queue_push(tq, x.cos()) + torch.ops._TorchScriptTesting.queue_push(tq, x.sin()) + x_sin = torch.ops._TorchScriptTesting.queue_pop( + tq + ) - torch.ops._TorchScriptTesting.queue_size(tq) + x_cos = torch.ops._TorchScriptTesting.queue_pop( + tq + ) + torch.ops._TorchScriptTesting.queue_size(tq) + return x_sin, x_cos, tq + + mod = Model() + + tq1 = torch.classes._TorchScriptTesting._TensorQueue( + torch.empty( + 0, + ).fill_(-1) + ) + tq2 = torch.classes._TorchScriptTesting._TensorQueue( + torch.empty( + 0, + ).fill_(-1) + ) + x = torch.ones(2, 3) + + mod(tq1, x) + + ops = [ + torch.ops._TorchScriptTesting.queue_push, + torch.ops._TorchScriptTesting.queue_pop, + torch.ops._TorchScriptTesting.queue_size, + ] + if fallthrough_via == "lib_impl": + ns = "_TorchScriptTesting" + with torch.library._scoped_library(ns, "FRAGMENT") as lib: + for op in ops: + lib.impl( + op.__name__, torch.library.fallthrough_kernel, "AutocastCUDA" + ) + + gm = make_fx(mod, tracing_mode="fake")(tq1, x) + else: + for op in ops: + op.default.py_impl(torch._C.DispatchKey.AutocastCUDA)( + torch.library.fallthrough_kernel + ) + gm = make_fx(mod, tracing_mode="fake")(tq1, x) + for op in ops: + op.default._dispatch_cache.clear() + del op.default.py_kernels[torch._C.DispatchKey.AutocastCUDA] + + self.assertExpectedInline( + gm.code.strip(), + """\ +def forward(self, arg0_1, arg1_1): + cos = torch.ops.aten.cos.default(arg1_1) + queue_push = torch.ops._TorchScriptTesting.queue_push.default(arg0_1, cos); cos = None + sin = torch.ops.aten.sin.default(arg1_1); arg1_1 = None + queue_push_1 = torch.ops._TorchScriptTesting.queue_push.default(arg0_1, sin); sin = None + queue_pop = torch.ops._TorchScriptTesting.queue_pop.default(arg0_1) + queue_size = torch.ops._TorchScriptTesting.queue_size.default(arg0_1) + sub = torch.ops.aten.sub.Tensor(queue_pop, 1); queue_pop = None + queue_pop_1 = torch.ops._TorchScriptTesting.queue_pop.default(arg0_1) + queue_size_1 = torch.ops._TorchScriptTesting.queue_size.default(arg0_1) + add = torch.ops.aten.add.Tensor(queue_pop_1, 0); queue_pop_1 = None + return (sub, add, arg0_1)""", + ) + self._assertEqualSkipScriptObject(gm(tq1, x), mod(tq2, x)) + @skipIfTorchDynamo("torchbind not 
supported with dynamo yet") class TestRegisterFakeClass(TestCase): diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in index 20aba9464e..583bd384ed 100644 --- a/torch/_C/__init__.pyi.in +++ b/torch/_C/__init__.pyi.in @@ -428,6 +428,16 @@ ResolutionCallback = Callable[[str], Callable[..., Any]] # Defined in torch/csrc/jit/python/script_init.cpp # and torch/csrc/jit/python/init.cpp +def _maybe_call_torch_function_for_op_packet( + op_overload_packet: Any, + args: Any, + kwargs: Any, +) -> Any: ... +def _check_schema_allow_fake_script_object( + schema: FunctionSchema, + args: Any, + kwargs: Any, +) -> _bool: ... def _create_function_from_graph(qualname: str, graph: Graph) -> ScriptFunction: ... def _debug_set_autodiff_subgraph_inlining(disabled: _bool) -> None: ... def _ivalue_tags_match(lhs: ScriptModule, rhs: ScriptModule) -> _bool: ... @@ -1493,6 +1503,10 @@ def _dispatch_has_kernel_for_any_dispatch_key( name: str, dispatch_key_set: DispatchKeySet, ) -> _bool: ... +def _dispatch_kernel_for_dispatch_key_is_fallthrough( + name: str, + dispatch: _dispatchkey, +) -> _bool: ... def _dispatch_has_computed_kernel_for_dispatch_key( name: str, dispatch: _dispatchkey, diff --git a/torch/_dynamo/tensor_version_op.py b/torch/_dynamo/tensor_version_op.py index f12ed95b58..4c4246474c 100644 --- a/torch/_dynamo/tensor_version_op.py +++ b/torch/_dynamo/tensor_version_op.py @@ -13,13 +13,13 @@ _tensor_version = _make_prim( @_tensor_version.py_impl(FakeTensorMode) -def _tensor_version_fake(self): +def _tensor_version_fake(fake_mode, self_tensor): """ The initial dynamo capture of _tensor_version + _unsafe_set_version_counter turns the `._version` into an unbacked SymInt so that we don't need to specialize on the `._version` of input tensors to the graph. """ - return self.fake_mode.shape_env.create_unbacked_symint() + return fake_mode.shape_env.create_unbacked_symint() _unsafe_set_version_counter = _make_prim( @@ -48,10 +48,10 @@ Note this is similar to how no_grad is handled. @_tensor_version.py_impl(FunctionalTensorMode) -def _tensor_version_functional(self): +def _tensor_version_functional(mode, self): return self._version @_unsafe_set_version_counter.py_impl(FunctionalTensorMode) -def _unsafe_set_version_counter_functional(self, version): +def _unsafe_set_version_counter_functional(ctx, self, version): torch._C._autograd._unsafe_set_version_counter(self, version) diff --git a/torch/_library/utils.py b/torch/_library/utils.py index e2d0110409..2bab3d6dd8 100644 --- a/torch/_library/utils.py +++ b/torch/_library/utils.py @@ -199,3 +199,21 @@ def requires_set_python_module() -> bool: the C++ op with a python module. """ return getattr(_utils_internal, "REQUIRES_SET_PYTHON_MODULE", True) + + +def handle_dispatch_mode(curr_mode, op_overload, *args, **kwargs): + assert isinstance(curr_mode, torch.utils._python_dispatch.TorchDispatchMode) + overload_types = [] + args_flattened, _ = torch.utils._pytree.tree_flatten((args, kwargs.values())) + for a in args_flattened: + # TODO: need to double check the semantics of the "types" argument to torch_dispatch. + # It's generated in PyInterpreter.cpp, but seems to be generated in two places, + # where in one case we only include tensors with the python key, and in another + # we include **all** tensors. + if isinstance(a, torch.Tensor) and torch._C._dispatch_keys(a).has( + torch._C.DispatchKey.Python + ): + overload_types.append(type(a)) + # TODO: check that I got these args correct (in C++, we pass in "0000"??) 
+ + return curr_mode.__torch_dispatch__(op_overload, overload_types, args, kwargs) diff --git a/torch/_ops.py b/torch/_ops.py index 8bfacf83c2..0809d7cbf1 100644 --- a/torch/_ops.py +++ b/torch/_ops.py @@ -4,7 +4,7 @@ import importlib import inspect import sys import types -from typing import Any, Callable, Dict, Set, Type, Union +from typing import Any, Callable, Dict, List, Set, Type, Union import torch._C import torch.utils._pytree as pytree @@ -261,6 +261,7 @@ class HigherOrderOperator(OperatorBase): if self.__class__ is HigherOrderOperator: self_name_space = "." + self.namespace if self.namespace else "" self.__module__ = self.__module__ + self_name_space + self.non_fallthrough_keys = torch._C._dispatch_keyset_full() for dispatch_key in _HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS: @@ -684,7 +685,10 @@ class OpOverload(OperatorBase): assert key not in self._dispatch_cache, f"{self} {key}" if key == torch._C.DispatchKey.Python: - if not self.python_key_mode_table: + if ( + not isinstance(self, TorchBindOpOverload) + and not self.python_key_mode_table + ): self._dispatch_cache[key] = key add_cached_op(self) return key @@ -698,12 +702,18 @@ class OpOverload(OperatorBase): assert ( curr_mode is not None ), "Illegal invocation of dispatch on torch._C.DispatchKey.Python without a mode." + if curr_mode not in self.python_key_mode_table: - # TODO: This path is slow, should generally encourage this - # case to not happen - return self._op_dk(key, *args, **kwargs) - # TODO(voz): The idea behind this is that we do not yet support dispatch by key + mode, only key. - return self.python_key_mode_table[curr_mode](*args, **kwargs) + if isinstance(self, TorchBindOpOverload): + with torch.utils._python_dispatch._pop_mode_temporarily() as mode: + return torch._library.utils.handle_dispatch_mode( + mode, self, *args, **kwargs + ) + else: + return self._op_dk(key, *args, **kwargs) + + with torch.utils._python_dispatch._pop_mode_temporarily() as mode: + return self.python_key_mode_table[curr_mode](mode, *args, **kwargs) self._dispatch_cache[key] = handler add_cached_op(self) @@ -731,24 +741,8 @@ class OpOverload(OperatorBase): _set_mode_pre_dispatch(top_mode) with _temporarily_pop_modes_from_pre_dispatch() as curr_mode: - assert isinstance(curr_mode, TorchDispatchMode) - overload_types = [] - args_flattened, _ = torch.utils._pytree.tree_flatten( - (args, kwargs.values()) - ) - for a in args_flattened: - # TODO: need to double check the semantics of the "types" argument to torch_dispatch. - # It's generated in PyInterpreter.cpp, but seems to be generated in two places, - # where in one case we only include tensors with the python key, and in another - # we include **all** tensors. - if isinstance(a, torch.Tensor) and torch._C._dispatch_keys( - a - ).has(torch._C.DispatchKey.Python): - overload_types.append(type(a)) - # TODO: check that I got these args correct (in C++, we pass in "0000"??) 
- - return curr_mode.__torch_dispatch__( - self, overload_types, args, kwargs + return torch._library.utils.handle_dispatch_mode( + curr_mode, self, *args, **kwargs ) # Note [Not Caching Per-Dispatch-Key Mode Handlers] @@ -776,7 +770,6 @@ class OpOverload(OperatorBase): add_cached_op(self) return handler - # print(self, key, final_key) r = self.py_kernels.get(final_key, final_key) if cache_result: self._dispatch_cache[key] = r @@ -801,6 +794,98 @@ class OpOverload(OperatorBase): # TODO: add more methods to expose information about input and output arguments +# TorchBindOpOverload are those custom ops which have at least one overload's +# schema consists of torch.ScriptObject (i.e. custom class) input. +# TorchBindOpOverload will skip C++ dispatcher and purely dispatched in python +# when its inputs contain FakeScriptObject in a similar way as higher order ops. +class TorchBindOpOverload(OpOverload): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def _fallthrough_keys(self) -> List[DispatchKey]: + # TODO: we should be calling the fallback for these, but a fallthrough is almost close + # enough to the fallback in most cases that we care about. + _DEFAULT_FALLTHROUGH_KEYS = [ + DispatchKey.Autograd, + DispatchKey.AutogradCPU, + DispatchKey.AutogradCUDA, + DispatchKey.ADInplaceOrView, + DispatchKey.PythonTLSSnapshot, + ] + + def _may_use_fallthrough_instead_of_fallback(key: DispatchKey): + if torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), key): + return torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough( + self.name(), key + ) + + return ( + key not in self.py_kernels + or self.py_kernels[key] is torch.library.fallthrough_kernel + ) + + return [ + key + for key in _DEFAULT_FALLTHROUGH_KEYS + if _may_use_fallthrough_instead_of_fallback(key) + ] + + # use `self_` to avoid naming collide with arguments that + # are named "self". This way, they can be called by kwargs. + def __call__(self_, *args, **kwargs): # noqa: B902 + if _must_dispatch_in_python(args, kwargs): + # When any inputs are FakeScriptObject, we need to + # skip c++ dispatcher and dispatch in python through _get_dispatch of python_dispatcher. + return self_._dispatch_in_python(args, kwargs, self_._fallthrough_keys()) + + return self_._op(*args, **kwargs) + + def _dispatch_in_python(self, args, kwargs, fallthrough_keys): + non_fallthrough_keys = torch._C._dispatch_keyset_full() + for key in fallthrough_keys: + non_fallthrough_keys = non_fallthrough_keys.remove(key) + + dispatch_key_set = _compute_keyset(args, kwargs, non_fallthrough_keys) + dispatch_key = dispatch_key_set.highestPriorityTypeId() + + handler = ( + self._get_dispatch(dispatch_key) + if dispatch_key not in self._dispatch_cache + else self._dispatch_cache[dispatch_key] + ) + + if isinstance(handler, DispatchKey): + # fallthrough keys can be registered at runtime via torch.library.impl + # so need to add it to fallthrough_keys and re-dispatch. + if torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough( + self.name(), dispatch_key + ): + return self._dispatch_in_python( + args, kwargs, fallthrough_keys + [dispatch_key] + ) + + raise RuntimeError( + f"Cannot handle FakeScriptObject with python dispatcher with dispatch key {handler}." + f"Please implement it by annotating a python callable with py_impl({handler})." 
+ ) + + assert isinstance(handler, Callable) # type: ignore[arg-type] + return handler(*args, **kwargs) + + +def _must_dispatch_in_python(args, kwargs): + return pytree.tree_any( + lambda obj: isinstance( + obj, torch._library.fake_class_registry.FakeScriptObject + ), + (args, kwargs), + ) + + +def _has_script_object_arg(schema: torch.FunctionSchema) -> bool: + return any(isinstance(arg.type, torch.ClassType) for arg in schema.arguments) + + # OpOverloadPacket class contains pointer to a base unresolved operator that doesn't correspond to a specific operator # You can obtain an OpOverload object through attribute query. class OpOverloadPacket: @@ -812,6 +897,9 @@ class OpOverloadPacket: self._op = op self._overload_names = overload_names self._dir = [] + self._has_torchbind_op_overload = any( + _has_script_object_arg(schema) for schema in self._schemas.values() + ) # it's a no-op since OpOverloadPacket object is immutable and must be unique for a given op. def __deepcopy__(self, memo=None): @@ -832,6 +920,13 @@ class OpOverloadPacket: def op(self): return self._op + @property + def _schemas(self): + return { + overload_name: torch._C._get_schema(self._qualified_op_name, overload_name) + for overload_name in self._overload_names + } + def __getattr__(self, key): # It is not a valid op_name when __file__ is passed in if key == "__file__": @@ -865,7 +960,11 @@ class OpOverloadPacket: self._qualified_op_name, use_key ) schema = torch._C._get_schema(self._qualified_op_name, use_key) - overload = OpOverload(self, op_, op_dk_, schema, tags) + overload = ( + OpOverload(self, op_, op_dk_, schema, tags) + if not _has_script_object_arg(schema) + else TorchBindOpOverload(self, op_, op_dk_, schema, tags) + ) # cache the overload object setattr(self, key, overload) self._dir.append(key) @@ -886,6 +985,12 @@ class OpOverloadPacket: # is still callable from JIT # We save the function ptr as the `op` attribute on # OpOverloadPacket to access it here. + + # Directly calling OverloadPacket goes into C++, which will check + # the schema and cause an error for torchbind op when inputs consist of FakeScriptObject so we + # intercept it here and call TorchBindOpverload instead. + if self_._has_torchbind_op_overload and _must_dispatch_in_python(args, kwargs): + return _call_overload_packet_from_python(self_, args, kwargs) return self_._op(*args, **(kwargs or {})) # TODO: use this to make a __dir__ @@ -893,6 +998,46 @@ class OpOverloadPacket: return [n if n else "default" for n in self._overload_names] +# Note - this mirrors the logic of the cpp_function defined in jit/python/init.cpp +# _jit_get_operations, which calls _get_operation_for_overload_or_packet. +def _call_overload_packet_from_python(op: OpOverloadPacket, args, kwargs): + # Re-use the torch function handling logic in cpp + torch_function_called, ret = torch._C._maybe_call_torch_function_for_op_packet( + op, *args, **kwargs + ) + + if torch_function_called: + return ret + + # The following mirrors getOpWithStack. + # In cpp, we do a schema matching for the arguments, and call ToIValue to + # to check whether the arguments are valid. But need to do similar things here + # and check the schema whether the FakeScriptObject is the corresponding fake class + # of the actual class used in schema. 
+ exceptions = {} + found_op = None + for overload_name in op.overloads(): + op_overload = getattr(op, overload_name) + try: + _ = torch._C._check_schema_allow_fake_script_object( + op_overload._schema, *args, **kwargs + ) + found_op = op_overload + break + except RuntimeError as e: + exceptions[overload_name] = e + + if found_op: + return found_op(*args, **kwargs) + + err_msg = ( + f"Fail to match any TorchBindOverload of {op} with following exceptions:\n" + ) + for i, (key, msg) in enumerate(exceptions.items()): + err_msg += f"Overload name {key}:\n {msg}\n" + raise RuntimeError(err_msg) + + # Resolution of torch.fn is different from torch.ops.aten.fn # torch.fn uses the Python argparser, matches with the # appropriate schema, and calls into the unboxed version of the method diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp index 057d0fc20c..5eb4851089 100644 --- a/torch/csrc/jit/python/init.cpp +++ b/torch/csrc/jit/python/init.cpp @@ -1666,6 +1666,14 @@ void initJITBindings(PyObject* module) { } }); + m.def( + "_check_schema_allow_fake_script_object", + [](const FunctionSchema& schema, py::args args, py::kwargs kwargs) { + // checkSchemaAllowFakeScriptObject will throw runtime error if there is + // a schema mismatch. Otherwise, it returns true. + return checkSchemaAllowFakeScriptObject(schema, args, kwargs); + }); + m.def( "_jit_resolve_packet", [](const char* op_name, py::args args, py::kwargs kwargs) { @@ -1734,6 +1742,20 @@ void initJITBindings(PyObject* module) { }, py::arg("qualified_name")); + m.def( + "_maybe_call_torch_function_for_op_packet", + [](py::handle op_overload_packet, py::args args, py::kwargs kwargs) { + py::list ns_method = + op_overload_packet.attr("_qualified_op_name").attr("split")("::"); + return _maybe_handle_torch_function( + py::cast<std::string>(ns_method[0]), + py::cast<std::string>(ns_method[1]), + "", + false, + args, + kwargs); + }); + m.def( "parse_ir", [](const std::string& input, bool parse_tensor_constants) { diff --git a/torch/csrc/jit/python/pybind_utils.cpp b/torch/csrc/jit/python/pybind_utils.cpp index 9360b83823..ba0135a024 100644 --- a/torch/csrc/jit/python/pybind_utils.cpp +++ b/torch/csrc/jit/python/pybind_utils.cpp @@ -757,6 +757,23 @@ std::pair<std::shared_ptr<Operator>, Stack> getOpWithStack( } } +// This function is used to check if the schema is valid for the given args and +// kwargs. It checks script object by checking wether the FakeScriptObject is +// an instance of the corresponding fake class for the actual class used in +// schema. 
+bool checkSchemaAllowFakeScriptObject( + const FunctionSchema& schema, + py::args args, + const py::kwargs& kwargs) { + bool match = false; + try { + match = matchSchemaAllowFakeScriptObject(schema, std::move(args), kwargs); + } catch (schema_match_error& error) { + throw std::runtime_error(error.what()); + } + return match; +} + py::object invokeOperatorFromPython( const std::vector<std::shared_ptr<Operator>>& operations, py::args args, @@ -775,13 +792,13 @@ py::object invokeOperatorFromPython( return createPyObjectForStack(std::move(stack)); } -py::object _get_operation_for_overload_or_packet( - const std::vector<std::shared_ptr<Operator>>& operations, - Symbol symbol, - py::args args, - const py::kwargs& kwargs, +py::tuple _maybe_handle_torch_function( + const std::string& ns, + const std::string& method_name, + const std::string& overload_name, bool is_overload, - c10::optional<c10::DispatchKey> dk) { + py::args args, + const py::kwargs& kwargs) { std::vector<PyObject*> overloaded_args; size_t total_arg_num = args.size() + kwargs.size(); for (const auto i : c10::irange(args.size())) { @@ -807,15 +824,11 @@ py::object _get_operation_for_overload_or_packet( false /* throw_error */); } if (!overloaded_args.empty() || at::impl::torch_function_mode_enabled()) { - py::object ret; - std::string ns = symbol.ns().toUnqualString(); - std::string method_name = symbol.toUnqualString(); auto self_func = py::module::import("torch") .attr("ops") .attr(ns.c_str()) .attr(method_name.c_str()); if (is_overload) { - auto overload_name = operations[0]->schema().overload_name(); if (overload_name.empty()) { self_func = self_func.attr("default"); } else { @@ -824,16 +837,36 @@ py::object _get_operation_for_overload_or_packet( } std::string module_name("torch.ops"); module_name.append(ns); - return pybind11::reinterpret_steal<py::object>( - handle_torch_function_no_python_arg_parser( - overloaded_args, - args.ptr(), - kwargs.ptr(), - method_name.c_str(), - self_func.ptr(), - module_name.c_str())); + return py::make_tuple( + true, + pybind11::reinterpret_steal<py::object>( + handle_torch_function_no_python_arg_parser( + overloaded_args, + args.ptr(), + kwargs.ptr(), + method_name.c_str(), + self_func.ptr(), + module_name.c_str()))); } - return invokeOperatorFromPython(operations, args, kwargs, dk); + return py::make_tuple(false, py::none()); +} + +py::object _get_operation_for_overload_or_packet( + const std::vector<std::shared_ptr<Operator>>& operations, + Symbol symbol, + py::args args, + const py::kwargs& kwargs, + bool is_overload, + c10::optional<c10::DispatchKey> dk) { + std::string ns = symbol.ns().toUnqualString(); + std::string method_name = symbol.toUnqualString(); + std::string overload_name = operations[0]->schema().overload_name(); + auto res = _maybe_handle_torch_function( + ns, method_name, overload_name, is_overload, args, kwargs); + auto torch_function_called = py::cast<bool>(res[0]); + return torch_function_called + ? 
res[1] + : invokeOperatorFromPython(operations, args, kwargs, dk); } } // namespace torch::jit diff --git a/torch/csrc/jit/python/pybind_utils.h b/torch/csrc/jit/python/pybind_utils.h index cbb7791652..a78c3e0c0b 100644 --- a/torch/csrc/jit/python/pybind_utils.h +++ b/torch/csrc/jit/python/pybind_utils.h @@ -873,6 +873,116 @@ struct VISIBILITY_HIDDEN tuple_slice { int64_t e; }; +inline bool validateFakeScriptObjectSchema( + const c10::FunctionSchema& schema, + size_t argumentPosition, + py::handle object) { + auto argument = schema.arguments().at(argumentPosition); + auto class_type = argument.real_type()->expect<c10::ClassType>(); + auto fake_class_registry = + py::module::import("torch._library.fake_class_registry"); + auto fake_class = fake_class_registry.attr("find_fake_class")( + class_type->name().value().qualifiedName()); + if (!py::isinstance(object.attr("wrapped_obj"), fake_class)) { + throw schema_match_error(c10::str( + schema.formatTypeMismatchMsg( + argument, + friendlyTypeName(object), + argumentPosition, + py::repr(object.attr("wrapped_obj"))), + "\nCast error details: ", + argument.name(), + " is expected to be a FakeScriptObject of ", + class_type->name().value().qualifiedName())); + } + return true; +} + +inline bool matchSchemaAllowFakeScriptObject( + const FunctionSchema& schema, + const tuple_slice& args, + const py::kwargs& kwargs) { + size_t all_arguments = args.size() + kwargs.size(); + if (all_arguments > schema.arguments().size()) { + throw schema_match_error(c10::str( + schema.name(), + "() expected at most ", + schema.arguments().size(), + " argument(s) but received ", + all_arguments, + " argument(s). Declaration: ", + schema)); + } + + int64_t arg_idx = 0; + auto fake_class_registry = + py::module::import("torch._library.fake_class_registry"); + + // First push all positional args. + for (const auto& arg : args) { + // ...but refuse to do it if the schema says that this was supposed + // to be keyword only + if (schema.arguments()[arg_idx].kwarg_only()) { + throw schema_match_error(c10::str( + schema.name(), + "() takes ", + arg_idx, + " positional argument(s) but ", + args.size(), + " was/were given. Declaration: ", + schema)); + } + // Use the type information from the schema to convert the PyObject. + const auto& argument = schema.arguments().at(arg_idx); + if (argument.real_type()->kind() == TypeKind::ClassType && + py::isinstance(arg, fake_class_registry.attr("FakeScriptObject"))) { + validateFakeScriptObjectSchema(schema, arg_idx, arg); + } else { + argumentToIValue(schema, arg_idx, arg); + } + + arg_idx++; + } + + // Now for every remaining non-positional argument in the schema, look for it + // in the kwargs dict and push it if found, or use its default value if it + // has one. + size_t consumed_kwargs = 0; + for (size_t i = arg_idx; i < schema.arguments().size(); ++i) { + const auto& arg = schema.arguments()[i]; + if (kwargs.contains(arg.name().c_str())) { + auto cur_kwarg = kwargs[arg.name().c_str()]; + if (arg.real_type()->kind() == TypeKind::ClassType && + py::isinstance( + cur_kwarg, fake_class_registry.attr("FakeScriptObject"))) { + validateFakeScriptObjectSchema(schema, i, cur_kwarg); + } else { + argumentToIValue(schema, i, cur_kwarg); + } + consumed_kwargs += 1; + } else if (arg.default_value()) { + continue; + } else { + throw schema_match_error(c10::str( + schema.name(), + "() is missing value for argument '", + arg.name(), + "'. 
Declaration: ", + schema)); + } + } + + if (consumed_kwargs != kwargs.size()) { + std::vector<std::string> names; + for (const auto& kwarg : kwargs) { + names.emplace_back(py::cast<std::string>(kwarg.first)); + } + throw schema_match_error(schema.findErrorInKwargs(names)); + } + + return true; +} + inline Stack createStackForSchema( const FunctionSchema& schema, const tuple_slice& args, @@ -1147,6 +1257,19 @@ TORCH_PYTHON_API py::object invokeOperatorFromPython( const py::kwargs& kwargs, c10::optional<c10::DispatchKey> dk = c10::nullopt); +TORCH_PYTHON_API py::tuple _maybe_handle_torch_function( + const std::string& ns, + const std::string& method_name, + const std::string& overload_name, + bool is_overload, + py::args args, + const py::kwargs& kwargs); + +TORCH_PYTHON_API bool checkSchemaAllowFakeScriptObject( + const FunctionSchema& schema, + py::args args, + const py::kwargs& kwargs); + TORCH_PYTHON_API py::object _get_operation_for_overload_or_packet( const std::vector<std::shared_ptr<Operator>>& operations, Symbol symbol, diff --git a/torch/csrc/utils/python_dispatch.cpp b/torch/csrc/utils/python_dispatch.cpp index 780f37a675..2d115a8228 100644 --- a/torch/csrc/utils/python_dispatch.cpp +++ b/torch/csrc/utils/python_dispatch.cpp @@ -29,9 +29,7 @@ namespace py = pybind11; -namespace torch { -namespace impl { -namespace dispatch { +namespace torch::impl::dispatch { // NB: I'd like to index this on OperatorHandle, but I can't, as I can't // guarantee that the main interpreter has finish doing all registrations before @@ -518,6 +516,16 @@ void initDispatchBindings(PyObject* module) { return op->hasKernelForDispatchKey(dispatch); }); + m.def( + // Returns whether or not the kernel for this dispatach key is a + // fallthrough kernel + "_dispatch_kernel_for_dispatch_key_is_fallthrough", + [](const char* name, c10::DispatchKey dispatch) -> bool { + auto op = + c10::Dispatcher::singleton().findOp(torch::jit::parseName(name)); + return op->isKernelFallthroughKernel(dispatch); + }); + m.def( "_dispatch_has_kernel_for_any_dispatch_key", [](const char* name, c10::DispatchKeySet ks) -> bool { @@ -938,6 +946,4 @@ void python_op_registration_trampoline_impl( pushPyOutToStack(op, stack, obj, "PythonKernelHolder"); } -} // namespace dispatch -} // namespace impl -} // namespace torch +} // namespace torch::impl::dispatch diff --git a/torch/testing/_internal/torchbind_impls.py b/torch/testing/_internal/torchbind_impls.py index 933de44105..f66388d2ed 100644 --- a/torch/testing/_internal/torchbind_impls.py +++ b/torch/testing/_internal/torchbind_impls.py @@ -3,7 +3,7 @@ import torch def register_if_not(qualname): entry = torch._library.simple_registry.singleton.find(qualname) - if entry.abstract_impl.kernel is not None: + if entry.abstract_impl.kernel is None: return torch.library.impl_abstract(qualname) else: @@ -28,5 +28,5 @@ def register_fake_operators(): return tq.push(x) @register_if_not("_TorchScriptTesting::queue_size") - def fake_queue_size(tq, x): + def fake_queue_size(tq): return tq.size()
2.41.0
93f756cdc1a86a65d9572d050ae62387a84dc60
Thu, 18 Apr 2024 13:15:03 -0700
[PATCH 0379/1000] Support aot_export torchbind op (#123370)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123370 Approved by: https://github.com/zou3519 ghstack dependencies: #123367
diff --git a/test/export/test_torchbind.py b/test/export/test_torchbind.py index b5312fec19..3ec3e7dfab 100644 --- a/test/export/test_torchbind.py +++ b/test/export/test_torchbind.py @@ -4,6 +4,7 @@ import unittest import torch import torch.utils._pytree as pytree +from torch._functorch.aot_autograd import aot_export_module from torch._higher_order_ops.torchbind import enable_torchbind_tracing from torch._library.fake_class_registry import FakeScriptObject from torch.export import export @@ -743,6 +744,63 @@ def forward(self, arg0_1, arg1_1): ) self._assertEqualSkipScriptObject(gm(tq1, x), mod(tq2, x)) + def test_aot_export_tensor_queue_operators(self): + class Model(torch.nn.Module): + def __init__(self): + super().__init__() + + def forward(self, tq, x): + torch.ops._TorchScriptTesting.queue_push(tq, x.cos()) + torch.ops._TorchScriptTesting.queue_push(tq, x.sin()) + x_sin = torch.ops._TorchScriptTesting.queue_pop( + tq + ) - torch.ops._TorchScriptTesting.queue_size(tq) + x_cos = torch.ops._TorchScriptTesting.queue_pop( + tq + ) + torch.ops._TorchScriptTesting.queue_size(tq) + return x_sin, x_cos, tq + + mod = Model() + + tq1 = torch.classes._TorchScriptTesting._TensorQueue( + torch.empty( + 0, + ).fill_(-1) + ) + x = torch.ones(2, 3) + + fake_mode = torch._subclasses.fake_tensor.FakeTensorMode() + fake_tq1 = torch._library.fake_class_registry.to_fake_obj(fake_mode, tq1) + fake_x = fake_mode.from_tensor(x) + gm = aot_export_module(mod, (fake_tq1, fake_x), trace_joint=False)[0] + + # inputs: token, tq, x + # return: token, x_sin, x_cos, tq + self.assertExpectedInline( + gm.code.strip(), + """\ +def forward(self, arg0_1, arg1_1, arg2_1): + cos = torch.ops.aten.cos.default(arg2_1) + with_effects = torch._higher_order_ops.effects.with_effects(arg0_1, torch.ops._TorchScriptTesting.queue_push.default, arg1_1, cos); arg0_1 = cos = None + getitem = with_effects[0]; with_effects = None + sin = torch.ops.aten.sin.default(arg2_1); arg2_1 = None + with_effects_1 = torch._higher_order_ops.effects.with_effects(getitem, torch.ops._TorchScriptTesting.queue_push.default, arg1_1, sin); getitem = sin = None + getitem_2 = with_effects_1[0]; with_effects_1 = None + with_effects_2 = torch._higher_order_ops.effects.with_effects(getitem_2, torch.ops._TorchScriptTesting.queue_pop.default, arg1_1); getitem_2 = None + getitem_4 = with_effects_2[0] + getitem_5 = with_effects_2[1]; with_effects_2 = None + with_effects_3 = torch._higher_order_ops.effects.with_effects(getitem_4, torch.ops._TorchScriptTesting.queue_size.default, arg1_1); getitem_4 = None + getitem_6 = with_effects_3[0]; with_effects_3 = None + sub = torch.ops.aten.sub.Tensor(getitem_5, 1); getitem_5 = None + with_effects_4 = torch._higher_order_ops.effects.with_effects(getitem_6, torch.ops._TorchScriptTesting.queue_pop.default, arg1_1); getitem_6 = None + getitem_8 = with_effects_4[0] + getitem_9 = with_effects_4[1]; with_effects_4 = None + with_effects_5 = torch._higher_order_ops.effects.with_effects(getitem_8, torch.ops._TorchScriptTesting.queue_size.default, arg1_1); getitem_8 = None + getitem_10 = with_effects_5[0]; with_effects_5 = None + add = torch.ops.aten.add.Tensor(getitem_9, 0); getitem_9 = None + return (getitem_10, sub, add, arg1_1)""", # noqa: B950 + ) + @skipIfTorchDynamo("torchbind not supported with dynamo yet") class TestRegisterFakeClass(TestCase): diff --git a/torch/_functorch/_aot_autograd/utils.py b/torch/_functorch/_aot_autograd/utils.py index 172f792826..67b0974147 100644 --- a/torch/_functorch/_aot_autograd/utils.py +++ 
b/torch/_functorch/_aot_autograd/utils.py @@ -11,6 +11,7 @@ from typing import Any, Callable, List, Optional, Tuple, Union import torch import torch.utils._pytree as pytree +from torch._library.fake_class_registry import FakeScriptObject from torch.fx.experimental._backward_state import BackwardState from torch.fx.experimental.proxy_tensor import py_sym_types @@ -23,6 +24,7 @@ KNOWN_TYPES = [ bool, type(None), *py_sym_types, + FakeScriptObject, ] original_zip = zip diff --git a/torch/_ops.py b/torch/_ops.py index 0809d7cbf1..9ada11cd9f 100644 --- a/torch/_ops.py +++ b/torch/_ops.py @@ -799,9 +799,6 @@ class OpOverload(OperatorBase): # TorchBindOpOverload will skip C++ dispatcher and purely dispatched in python # when its inputs contain FakeScriptObject in a similar way as higher order ops. class TorchBindOpOverload(OpOverload): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - def _fallthrough_keys(self) -> List[DispatchKey]: # TODO: we should be calling the fallback for these, but a fallthrough is almost close # enough to the fallback in most cases that we care about. @@ -811,6 +808,7 @@ class TorchBindOpOverload(OpOverload): DispatchKey.AutogradCUDA, DispatchKey.ADInplaceOrView, DispatchKey.PythonTLSSnapshot, + DispatchKey.PythonDispatcher, ] def _may_use_fallthrough_instead_of_fallback(key: DispatchKey): @@ -830,14 +828,40 @@ class TorchBindOpOverload(OpOverload): if _may_use_fallthrough_instead_of_fallback(key) ] + @contextlib.contextmanager + def _register_as_effectful_op_temporarily(self): + from torch._higher_order_ops.effects import ( + _EffectType, + _register_effectful_op, + SIDE_EFFECTS, + ) + + try: + if self not in SIDE_EFFECTS: + _register_effectful_op(self, _EffectType.ORDERED) + yield + finally: + if self in SIDE_EFFECTS: + del SIDE_EFFECTS[self] + # use `self_` to avoid naming collide with arguments that # are named "self". This way, they can be called by kwargs. def __call__(self_, *args, **kwargs): # noqa: B902 if _must_dispatch_in_python(args, kwargs): # When any inputs are FakeScriptObject, we need to - # skip c++ dispatcher and dispatch in python through _get_dispatch of python_dispatcher. - return self_._dispatch_in_python(args, kwargs, self_._fallthrough_keys()) - + # skip c++ dispatcher and dispatch in python through _get_dispatch of python_dispatcher + # because C++ dispatcher will check the schema and cannot recognize FakeScriptObject. + # + # Note: + # 1. We only register the torchbind op temporarily as effectful op because we only want + # the effect token functionalization logic to be applied during tracing. Otherwise, the behavior + # of the eagerly executing the op might change after tracing. + # 2. We don't want to register the op as effectful for all torchbind ops in ctor because this might + # cause unexpected behavior for some autograd.profiler ops e.g. profiler._record_function_exit._RecordFunction. + with self_._register_as_effectful_op_temporarily(): + return self_._dispatch_in_python( + args, kwargs, self_._fallthrough_keys() + ) return self_._op(*args, **kwargs) def _dispatch_in_python(self, args, kwargs, fallthrough_keys):
2.41.0
03d111c54cf7e3c9a5a288d11aa1641d7972a22
Thu, 18 Apr 2024 22:32:03 -0700
[PATCH 0380/1000] Enable dynamo test_forloop_goes_right_direction_multi_gpu (#123324)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123324 Approved by: https://github.com/janeyx99
diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py index a1bea634ff..da6e9c5407 100644 --- a/torch/testing/_internal/common_optimizers.py +++ b/torch/testing/_internal/common_optimizers.py @@ -1127,13 +1127,6 @@ optim_db: List[OptimizerInfo] = [ "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], ), - DecorateInfo( - skipIfTorchDynamo( - "No closure handling, https://github.com/pytorch/pytorch/issues/116494" - ), - "TestOptimRenewed", - "test_forloop_goes_right_direction_multigpu", - ), DecorateInfo( skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), "TestOptimRenewed", @@ -1243,13 +1236,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_param_group_with_lrscheduler_goes_right_direction", ), - DecorateInfo( - skipIfTorchDynamo( - "No closure handling, https://github.com/pytorch/pytorch/issues/116494" - ), - "TestOptimRenewed", - "test_forloop_goes_right_direction_multigpu", - ), DecorateInfo( skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), "TestOptimRenewed", @@ -1305,13 +1291,6 @@ optim_db: List[OptimizerInfo] = [ "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], ), - DecorateInfo( - skipIfTorchDynamo( - "No closure handling, https://github.com/pytorch/pytorch/issues/116494" - ), - "TestOptimRenewed", - "test_forloop_goes_right_direction_multigpu", - ), DecorateInfo( skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), "TestOptimRenewed", @@ -1422,13 +1401,6 @@ optim_db: List[OptimizerInfo] = [ "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], ), - DecorateInfo( - skipIfTorchDynamo( - "No closure handling, https://github.com/pytorch/pytorch/issues/116494" - ), - "TestOptimRenewed", - "test_forloop_goes_right_direction_multigpu", - ), DecorateInfo( skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), "TestOptimRenewed", @@ -1478,13 +1450,6 @@ optim_db: List[OptimizerInfo] = [ optim_error_inputs_func=optim_error_inputs_func_asgd, supported_impls=("foreach", "differentiable"), skips=( - DecorateInfo( - skipIfTorchDynamo( - "No closure handling, https://github.com/pytorch/pytorch/issues/116494" - ), - "TestOptimRenewed", - "test_forloop_goes_right_direction_multigpu", - ), DecorateInfo( skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), "TestOptimRenewed", @@ -1597,13 +1562,6 @@ optim_db: List[OptimizerInfo] = [ "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], ), - DecorateInfo( - skipIfTorchDynamo( - "No closure handling, https://github.com/pytorch/pytorch/issues/116494" - ), - "TestOptimRenewed", - "test_forloop_goes_right_direction_multigpu", - ), DecorateInfo( skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), "TestOptimRenewed", @@ -1675,13 +1633,6 @@ optim_db: List[OptimizerInfo] = [ optim_error_inputs_func=optim_error_inputs_func_radam, supported_impls=("foreach", "differentiable"), skips=( - DecorateInfo( - skipIfTorchDynamo( - "No closure handling, https://github.com/pytorch/pytorch/issues/116494" - ), - "TestOptimRenewed", - "test_forloop_goes_right_direction_multigpu", - ), DecorateInfo( skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), "TestOptimRenewed", @@ -1747,13 +1698,6 @@ optim_db: List[OptimizerInfo] = [ "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], ), - DecorateInfo( - skipIfTorchDynamo( - "No closure 
handling, https://github.com/pytorch/pytorch/issues/116494" - ), - "TestOptimRenewed", - "test_forloop_goes_right_direction_multigpu", - ), DecorateInfo( skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), "TestOptimRenewed", @@ -1817,13 +1761,6 @@ optim_db: List[OptimizerInfo] = [ "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], ), - DecorateInfo( - skipIfTorchDynamo( - "No closure handling, https://github.com/pytorch/pytorch/issues/116494" - ), - "TestOptimRenewed", - "test_forloop_goes_right_direction_multigpu", - ), DecorateInfo( skipIfTorchDynamo("Fails fix point assertion on 3.8, see #97811"), "TestOptimRenewed", @@ -1908,13 +1845,6 @@ optim_db: List[OptimizerInfo] = [ ), supports_fused_on=("cuda",), skips=( - DecorateInfo( - skipIfTorchDynamo( - "No closure handling, https://github.com/pytorch/pytorch/issues/116494" - ), - "TestOptimRenewed", - "test_forloop_goes_right_direction_multigpu", - ), DecorateInfo( skipIfTorchDynamo("initial_value is incorrect in dynamo, see #123202"), "TestOptimRenewed",
2.41.0