commitId      stringlengths   40–40
datetime      stringlengths   30–31
subject       stringlengths   37–266
comment       stringlengths   109–15.2k
diff          stringlengths   238–914k
gitVersion    stringclasses   9 values
06d99e46cba535756835a733bdb65c19c563d44
Thu, 18 Apr 2024 22:32:03 -0700
[PATCH 0381/1000] Fix for 117147 (#123404)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123404 Approved by: https://github.com/Skylion007, https://github.com/janeyx99 ghstack dependencies: #123324
diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py index da6e9c5407..554ce9cd25 100644 --- a/torch/testing/_internal/common_optimizers.py +++ b/torch/testing/_internal/common_optimizers.py @@ -1926,13 +1926,6 @@ optim_db: List[OptimizerInfo] = [ "test_load_nontensor_step", device_type="cpu", ), - DecorateInfo( - skipIfTorchDynamo( - "momentum_buffer inconsistency, https://github.com/pytorch/pytorch/issues/117147" - ), - "TestOptimRenewed", - "test_state_dict_with_cuda_params", - ), DecorateInfo( skipIfTorchDynamo( "fails, https://github.com/pytorch/pytorch/issues/117165"
2.41.0
531a29fb95f37afe9b32425c35a9be8887f370d
Thu, 18 Apr 2024 22:32:04 -0700
[PATCH 0382/1000] Enable tests related to 116061 (#123405)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123405 Approved by: https://github.com/janeyx99 ghstack dependencies: #123324, #123404
diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py index 554ce9cd25..b089e15e8a 100644 --- a/torch/testing/_internal/common_optimizers.py +++ b/torch/testing/_internal/common_optimizers.py @@ -1894,38 +1894,6 @@ optim_db: List[OptimizerInfo] = [ "test_mixed_device_dtype", active_if=TEST_WITH_TORCHDYNAMO, ), - DecorateInfo( - skipIfTorchDynamo( - "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061" - ), - "TestOptimRenewed", - "test_step_is_noop_for_zero_grads", - device_type="cpu", - ), - DecorateInfo( - skipIfTorchDynamo( - "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061" - ), - "TestOptimRenewed", - "test_param_groups_weight_decay", - device_type="cpu", - ), - DecorateInfo( - skipIfTorchDynamo( - "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061" - ), - "TestOptimRenewed", - "test_param_groups_lr", - device_type="cpu", - ), - DecorateInfo( - skipIfTorchDynamo( - "Errors with list out of range, see https://github.com/pytorch/pytorch/issues/116061" - ), - "TestOptimRenewed", - "test_load_nontensor_step", - device_type="cpu", - ), DecorateInfo( skipIfTorchDynamo( "fails, https://github.com/pytorch/pytorch/issues/117165"
2.41.0
050e627dc0b17953a3c212f7d86cd27b9d41d82
Thu, 18 Apr 2024 22:32:04 -0700
[PATCH 0383/1000] Defer marking_static_address (#124309)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124309 Approved by: https://github.com/anijain2305 ghstack dependencies: #123324, #123404, #123405
diff --git a/torch/_dynamo/variables/optimizer.py b/torch/_dynamo/variables/optimizer.py index 594226e3e8..e183f7a5e5 100644 --- a/torch/_dynamo/variables/optimizer.py +++ b/torch/_dynamo/variables/optimizer.py @@ -48,14 +48,7 @@ class OptimizerVariable(UserDefinedObjectVariable): tensor_to_source=None, **kwargs, ): - from ..decorators import mark_static_address - super().__init__(value, **kwargs) - - for group in self.value.param_groups: - for p in group["params"]: - mark_static_address(p) - self.grad_to_source = grad_to_source or {} self.tensor_to_source = tensor_to_source or {} self.static_tensor_names = static_tensor_names or set() @@ -101,6 +94,12 @@ class OptimizerVariable(UserDefinedObjectVariable): return GetAttrVariable(self, name, source=AttrSource(self.source, name)) if name == "param_groups": + from ..decorators import mark_static_address + + for group in self.value.param_groups: + for p in group["params"]: + mark_static_address(p) + self._set_capturable(tx) return super().var_getattr(tx, name)
2.41.0
8a027f144fe60dd09806a2abf214b21dd2f6157
Thu, 18 Apr 2024 22:32:05 -0700
[PATCH 0384/1000] Fixes for 123400 (#123406)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123406 Approved by: https://github.com/janeyx99 ghstack dependencies: #123324, #123404, #123405, #124309
diff --git a/test/test_optim.py b/test/test_optim.py index 9eea11ffda..031f2aa6ca 100644 --- a/test/test_optim.py +++ b/test/test_optim.py @@ -1607,7 +1607,11 @@ class TestOptimRenewed(TestCase): # Make some state for _ in range(3): - optimizer.step(closure) + if optim_info.step_requires_closure: + optimizer.step(closure) + else: + closure() + optimizer.step() self.assertEqual(getPublicAttrs(optimizer), getPublicAttrs(deepcopy(optimizer))) diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py index b089e15e8a..f3663e98f0 100644 --- a/torch/testing/_internal/common_optimizers.py +++ b/torch/testing/_internal/common_optimizers.py @@ -1073,13 +1073,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_complex_2d", ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), # Note on tolerances: # test_correctness_Adadelta_cuda_float32 # Mismatched elements: 10 / 100 (10.0%) @@ -1159,13 +1152,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_complex_2d", ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" @@ -1263,13 +1249,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_complex_2d", ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" @@ -1326,13 +1305,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_foreach_large_tensor", ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( skipIfTorchDynamo( "capturable path no longer called after hitting cache limit, see #121178" @@ -1428,13 +1400,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_complex_2d", ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" @@ -1486,13 +1451,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_step_is_noop_for_zero_grads", ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" @@ -1515,13 +1473,6 @@ optim_db: List[OptimizerInfo] = [ DecorateInfo( skipIfMps, "TestOptimRenewed", "test_can_load_older_state_dict" ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( unittest.skip("Does not support param groups"), "TestOptimRenewed", @@ -1611,13 +1562,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_state_dict_with_cuda_params", ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - 
"test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" @@ -1660,13 +1604,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_complex_2d", ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( toleranceOverride( { @@ -1786,13 +1723,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_complex_2d", ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" @@ -1894,13 +1824,6 @@ optim_db: List[OptimizerInfo] = [ "test_mixed_device_dtype", active_if=TEST_WITH_TORCHDYNAMO, ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support"
2.41.0
ae835eee43ab4583338aca0d9107cb8ed30e72e
Fri, 19 Apr 2024 00:36:51 +0000
[PATCH 0385/1000] Enable SourcelessBuilder to build GraphModule generated by make_fx (#123673)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123673 Approved by: https://github.com/ezyang, https://github.com/anijain2305 ghstack dependencies: #123680
diff --git a/torch/_dynamo/variables/builder.py b/torch/_dynamo/variables/builder.py index af64fb7ee9..c398e99077 100644 --- a/torch/_dynamo/variables/builder.py +++ b/torch/_dynamo/variables/builder.py @@ -167,6 +167,7 @@ from .torch import TorchCtxManagerClassVariable, TorchInGraphFunctionVariable from .torch_function import build_torch_function_fn, TensorWithTFOverrideVariable from .user_defined import ( KeyedJaggedTensorVariable, + SourcelessGraphModuleVariable, UserDefinedClassVariable, UserDefinedObjectVariable, ) @@ -2074,6 +2075,12 @@ class SourcelessBuilder: return UserDefinedClassVariable(value) elif isinstance(value, types.MethodWrapperType): return MethodWrapperVariable(value) + elif isinstance(value, torch.fx.graph_module.GraphModule): + return SourcelessGraphModuleVariable(value) + elif isinstance( + value, (torch.utils._pytree.TreeSpec, torch.utils._pytree.LeafSpec) + ): + return UserDefinedObjectVariable(value) elif PlacementVariable.is_placement(value): return PlacementVariable(value) elif DeviceMeshVariable.is_device_mesh(value): diff --git a/torch/_dynamo/variables/user_defined.py b/torch/_dynamo/variables/user_defined.py index 6d242ce56a..22b12053d4 100644 --- a/torch/_dynamo/variables/user_defined.py +++ b/torch/_dynamo/variables/user_defined.py @@ -901,6 +901,14 @@ class UserDefinedObjectVariable(UserDefinedVariable): return VariableBuilder(tx, source)(subobj) elif ConstantVariable.is_literal(subobj): return ConstantVariable.create(subobj) + elif ( + type(subobj) == torch.utils._pytree.TreeSpec + or type(subobj) == torch.utils._pytree.LeafSpec + or type(value) == torch.utils._pytree.TreeSpec + ): + from .builder import SourcelessBuilder + + return SourcelessBuilder.create(tx, subobj) if ( name not in getattr(value, "__dict__", {}) @@ -987,6 +995,30 @@ class UserDefinedObjectVariable(UserDefinedVariable): )(collections.OrderedDict.__getitem__(self.value, key.as_python_constant())) +class SourcelessGraphModuleVariable(UserDefinedObjectVariable): + def __init__( + self, + value, + **kwargs, + ): + super().__init__(value, **kwargs) + + def call_method( + self, + tx, + name, + args: "List[VariableTracker]", + kwargs: "Dict[str, VariableTracker]", + ) -> "VariableTracker": + fn_variable = variables.UserFunctionVariable(self.value.forward.__func__) + args = [self] + args + return tx.inline_user_function_return( + fn_variable, + args, + kwargs, + ) + + class KeyedJaggedTensorVariable(UserDefinedObjectVariable): @staticmethod def is_matching_object(obj):
2.41.0
9fc280dce82b4f3af9710afd5fc05df4d147dda
Thu, 18 Apr 2024 21:38:18 -0700
[PATCH 0386/1000] Dont precompile already seen keys, limit epilogue choices (#122642)
Two changes: - in epilogue benchmark fusion, only take top 6 choices. There were basically no choices taken after this in HF. - Share a single precompilation function among matmuls with same key. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122642 Approved by: https://github.com/shunting314 ghstack dependencies: #124030
diff --git a/test/hi.py b/test/hi.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py index beb1b22df8..bbcff4f87f 100644 --- a/test/inductor/test_max_autotune.py +++ b/test/inductor/test_max_autotune.py @@ -447,6 +447,22 @@ class TestMaxAutotune(TestCase): fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn) self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 0) + @config.patch(autotune_local_cache=False, autotune_remote_cache=False) + def test_precompilations(self): + def fn(a, b, c): + a = (a @ b) @ c + a, b, c = (t.to(torch.float16) for t in [a, b, c]) + return (a @ b) @ c + + fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn) + inputs = [torch.rand([256, 256], device="cuda") for _ in range(3)] + + self.assertEqual(fn(*inputs), fn_c(*inputs), atol=1e-2, rtol=1e-2) + + from torch._dynamo.utils import counters + + self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 2) + def test_cat_addmm(self): def fn(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor): return torch.cat( diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index 465891fc7f..a2b88cf474 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -301,7 +301,7 @@ class PersistentCache(CacheBase): return hit if config.max_autotune or config.max_autotune_gemm: - local_cache = self.get_local_cache() + local_cache = self.get_local_cache() if config.autotune_local_cache else {} # check local cache first since it is data specific to the current machine if ( not check_cache(local_cache) diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py index f96e95ab5d..6b7c9beec9 100644 --- a/torch/_inductor/config.py +++ b/torch/_inductor/config.py @@ -306,6 +306,9 @@ benchmark_multi_templates = ( os.environ.get("TORCHINDUCTOR_BENCHMARK_MULTI_TEMPLATES", "0") == "1" ) +# Take how many of the top triton kernels to benchmark epilogue +max_epilogue_benchmarked_choices = 3 + # how many nodes to allow into a single fusion max_fusion_size = 64 diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py index 049a77a4ef..e3f7f39514 100644 --- a/torch/_inductor/scheduler.py +++ b/torch/_inductor/scheduler.py @@ -1835,6 +1835,8 @@ class Scheduler: min_ms_fused = float("inf") ms_fused_choice = None + triton_choices = 0 + for choice, unfused_time in choice_timings.items(): if not isinstance(choice, torch._inductor.ir.TritonTemplateCallerBase): continue @@ -1842,6 +1844,10 @@ class Scheduler: if unfused_time >= ms1 + ms2: continue + triton_choices += 1 + if triton_choices > config.max_epilogue_benchmarked_choices: + break + # TODO - parallel compile triton templates # TODO - should prune/skip choices that are not within certain % of best choice with node1.node.swap_as_triton_caller(choice): diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index 3261909d2b..4272d5034d 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -866,6 +866,15 @@ class ErrorFromChoice(RuntimeError): class AlgorithmSelectorCache(PersistentCache): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # the autotuning will get occur in the scheduler, so there is + # no guarantee that the first lowering for a given key will also be the + # first to benchmark it. 
share a single precompilation function for all lowerings + # of a particular key + self.precompile_cache: Dict[str, Callable[[], None]] = {} + def __call__( self, name, @@ -902,6 +911,8 @@ class AlgorithmSelectorCache(PersistentCache): def make_benchmark_fn(): return self.make_benchmark_fn(choices, input_nodes, layout, input_gen_fns) + inputs_key = repr([self.key_of(x) for x in input_nodes]) + def precompile(choices) -> Callable[[], None]: def no_op(*args, **kwargs): return @@ -927,13 +938,19 @@ class AlgorithmSelectorCache(PersistentCache): timings = self.lookup( choices, name, - repr([self.key_of(x) for x in input_nodes]), + inputs_key, benchmark=None, ) if timings: return no_op + precompile_key = ( + f"{name}: {inputs_key} : {torch.get_float32_matmul_precision()}" + ) + if precompile_func := self.precompile_cache.get(precompile_key): + return precompile_func + log.info( "Multithreaded precompilation for %d choices using %d worker threads", len(choices), @@ -947,7 +964,9 @@ class AlgorithmSelectorCache(PersistentCache): timeout=precompilation_timeout_seconds, ) + @functools.lru_cache(None) def wait_on_futures(): + counters["inductor"]["select_algorithm_precompile"] += 1 try: iterator = iter(futures) while True: @@ -963,8 +982,11 @@ class AlgorithmSelectorCache(PersistentCache): ) except StopIteration: pass + executor.shutdown(wait=True) + self.precompile_cache[precompile_key] = wait_on_futures + return wait_on_futures def autotune(choices): @@ -985,7 +1007,7 @@ class AlgorithmSelectorCache(PersistentCache): timings = self.lookup( choices, name, - repr([self.key_of(x) for x in input_nodes]), + inputs_key, autotune, ) autotune_elapse = time.time() - autotune_start_ts
2.41.0
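The precompilation-sharing idea in the commit message above can be illustrated with a small standalone sketch (class and method names are made up for illustration, not Inductor internals): lowerings that produce the same autotuning key reuse one wait function instead of launching duplicate compile work, mirroring the precompile_cache added to AlgorithmSelectorCache in the diff.

```python
import functools
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, Dict, Iterable

# Minimal sketch of key-based precompile sharing, assuming each "choice"
# is a callable that compiles a kernel; not PyTorch's actual implementation.
class PrecompileCache:
    def __init__(self, workers: int = 4) -> None:
        self._cache: Dict[str, Callable[[], None]] = {}
        self._pool = ThreadPoolExecutor(max_workers=workers)

    def precompile(self, key: str, choices: Iterable[Callable[[], None]]) -> Callable[[], None]:
        # A later lowering with the same key reuses the existing waiter
        # instead of submitting duplicate compile jobs.
        if key in self._cache:
            return self._cache[key]

        futures = [self._pool.submit(choice) for choice in choices]

        @functools.lru_cache(None)  # waiting a second time is a no-op
        def wait_on_futures() -> None:
            for fut in futures:
                fut.result()

        self._cache[key] = wait_on_futures
        return wait_on_futures
```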
78450a00b349cf2b3e5a6f53af91280cdfa81c8
Fri, 19 Apr 2024 06:31:55 -0700
[PATCH 0388/1000] Excise uses of the old custom ops APIs (#124134)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124134 Approved by: https://github.com/albanD ghstack dependencies: #124180, #124200, #124299
diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py index c9824fbdd5..3e7e296d4d 100644 --- a/test/inductor/test_torchinductor_dynamic_shapes.py +++ b/test/inductor/test_torchinductor_dynamic_shapes.py @@ -9,7 +9,6 @@ import unittest from functools import partial import torch -import torch._custom_ops as custom_ops import torch.library from torch._dynamo.testing import make_test_cls_with_patches from torch._inductor.codegen.common import device_codegens, register_backend_for_device @@ -280,30 +279,20 @@ class TestInductorDynamic(TestCase): @torch._dynamo.config.patch(capture_scalar_outputs=True) @torch._inductor.config.patch(implicit_fallbacks=True) def test_item_to_inputs_kernel_nobreak(self, device): - with torch.library._scoped_library("test", "DEF") as lib: - try: + @torch.library.custom_op("test::foo", mutates_args=()) + def foo(x: torch.Tensor, y: int) -> torch.Tensor: + return x.clone() - @custom_ops.custom_op("test::foo") - def foo(x: torch.Tensor, y: int) -> torch.Tensor: - raise NotImplementedError + @foo.register_fake + def _(x: torch.Tensor, y: int) -> torch.Tensor: + return x.clone() - @custom_ops.impl("test::foo") - def foo_impl(x: torch.Tensor, y: int) -> torch.Tensor: - return x.clone() - - @torch.library.impl_abstract("test::foo", lib=lib) - def foo_meta(x: torch.Tensor, y: int) -> torch.Tensor: - return x.clone() - - @torch.compile(fullgraph=True) - def f(x, r): - y = x.item() - return torch.ops.test.foo(r, y) - - f(torch.tensor([3], device=device), torch.randn(10, device=device)) + @torch.compile(fullgraph=True) + def f(x, r): + y = x.item() + return torch.ops.test.foo(r, y) - finally: - custom_ops._destroy("test::foo") + f(torch.tensor([3], device=device), torch.randn(10, device=device)) @torch._dynamo.config.patch( capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True @@ -396,34 +385,24 @@ class TestInductorDynamic(TestCase): ) @torch._inductor.config.patch(implicit_fallbacks=True) def test_dynamic_stride_nobreak(self, device): - with torch.library._scoped_library("test", "DEF") as lib: - try: - - @custom_ops.custom_op("test::foo") - def foo(x: torch.Tensor) -> torch.Tensor: - raise NotImplementedError - - @custom_ops.impl("test::foo") - def foo_impl(x: torch.Tensor) -> torch.Tensor: - stride = x.item() - return torch.empty_strided((1,), (stride,), device=x.device) - - @torch.library.impl_abstract("test::foo", lib=lib) - def foo_meta(x: torch.Tensor) -> torch.Tensor: - ctx = torch.library.get_ctx() - stride = ctx.new_dynamic_size() - return torch.empty_strided((1,), (stride,), device=x.device) - - @torch.compile(fullgraph=True) - def f(x): - r = torch.ops.test.foo(x) - y = r.stride(0) - return torch.empty(y, device=x.device) - - f(torch.tensor([3], device=device)) - - finally: - custom_ops._destroy("test::foo") + @torch.library.custom_op("test::foo", mutates_args=()) + def foo(x: torch.Tensor) -> torch.Tensor: + stride = x.item() + return torch.empty_strided((1,), (stride,), device=x.device) + + @foo.register_fake + def _(x: torch.Tensor) -> torch.Tensor: + ctx = torch.library.get_ctx() + stride = ctx.new_dynamic_size() + return torch.empty_strided((1,), (stride,), device=x.device) + + @torch.compile(fullgraph=True) + def f(x): + r = torch.ops.test.foo(x) + y = r.stride(0) + return torch.empty(y, device=x.device) + + f(torch.tensor([3], device=device)) @torch._inductor.config.patch(disable_cpp_codegen=True) def test_floor(self): diff --git a/test/onnx/test_fx_passes.py 
b/test/onnx/test_fx_passes.py index 00fe67b558..9ebbf11646 100644 --- a/test/onnx/test_fx_passes.py +++ b/test/onnx/test_fx_passes.py @@ -3,7 +3,6 @@ import torch import torch._dynamo import torch.fx -from torch._custom_op import impl as custom_op from torch.onnx._internal.fx.passes import _utils as pass_utils from torch.testing._internal import common_utils @@ -58,33 +57,26 @@ class TestFxPasses(common_utils.TestCase): ), f"Expected all names to be unique, got {nodes}" def test_onnx_dynamo_export_raises_when_model_contains_unsupported_fx_nodes(self): - @custom_op.custom_op("mylibrary::foo_op") + @torch.library.custom_op( + "mylibrary::foo_op", device_types="cpu", mutates_args=() + ) def foo_op(x: torch.Tensor) -> torch.Tensor: - ... + return x + 1 - @custom_op.custom_op("mylibrary::bar_op") + @torch.library.custom_op( + "mylibrary::bar_op", device_types="cpu", mutates_args=() + ) def bar_op(x: torch.Tensor) -> torch.Tensor: - ... + return x + 2 - @foo_op.impl_abstract() - def foo_op_impl_abstract(x): + @foo_op.register_fake + def _(x): return torch.empty_like(x) - @foo_op.impl("cpu") - def foo_op_impl(x): - return x + 1 - - @bar_op.impl_abstract() - def bar_op_impl_abstract(x): + @bar_op.register_fake + def _(x): return torch.empty_like(x) - @bar_op.impl("cpu") - def bar_op_impl(x): - return x + 2 - - torch._dynamo.allow_in_graph(foo_op) - torch._dynamo.allow_in_graph(bar_op) - def func(x, y, z): return foo_op(x) + bar_op(y) + z diff --git a/torch/_prims/debug_prims.py b/torch/_prims/debug_prims.py index d4d7a0c999..ea3854d04b 100644 --- a/torch/_prims/debug_prims.py +++ b/torch/_prims/debug_prims.py @@ -1,8 +1,7 @@ import contextlib -from typing import Optional, Sequence +from typing import Optional import torch -from torch._custom_op.impl import custom_op from torch.utils._content_store import ContentStoreReader LOAD_TENSOR_READER: Optional[ContentStoreReader] = None @@ -26,18 +25,12 @@ def load_tensor_reader(loc): def register_debug_prims(): - @custom_op("debugprims::load_tensor") - def load_tensor( # type: ignore[empty-body] - name: str, - size: Sequence[int], - stride: Sequence[int], - *, - dtype: torch.dtype, - device: torch.device, - ) -> torch.Tensor: - ... - - @load_tensor.impl_factory() + torch.library.define( + "debugprims::load_tensor", + "(str name, int[] size, int[] stride, *, ScalarType dtype, Device device) -> Tensor", + ) + + @torch.library.impl("debugprims::load_tensor", "BackendSelect") def load_tensor_factory(name, size, stride, dtype, device): if LOAD_TENSOR_READER is None: from torch._dynamo.testing import rand_strided diff --git a/torch/utils/_python_dispatch.py b/torch/utils/_python_dispatch.py index 9bd6d25f0d..f5f830c2f1 100644 --- a/torch/utils/_python_dispatch.py +++ b/torch/utils/_python_dispatch.py @@ -6,6 +6,7 @@ from typing import Any, Dict, List, Optional, Set, Union import torch import torchgen +import torchgen.model from torch._C import ( _get_dispatch_stack_at, _len_torch_dispatch_stack,
2.41.0
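For reference, the new-style registration that the diff above migrates to looks roughly like this; the op name "mylib::add_one" and its body are illustrative, only the torch.library.custom_op / register_fake API shape is taken from the diff.

```python
import torch

# New-style custom op (replaces the old torch._custom_ops decorators);
# "mylib::add_one" is an illustrative name, not an op PyTorch itself registers.
@torch.library.custom_op("mylib::add_one", mutates_args=())
def add_one(x: torch.Tensor) -> torch.Tensor:
    return x + 1

# Fake/meta implementation so the op traces under torch.compile and FakeTensor.
@add_one.register_fake
def _(x: torch.Tensor) -> torch.Tensor:
    return torch.empty_like(x)

print(add_one(torch.zeros(3)))  # tensor([1., 1., 1.])
```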
8e17b2d4d527e28fb385bf7697b8efeb0385d26
Fri, 19 Apr 2024 06:31:55 -0700
[PATCH 0389/1000] Move schema inference to torch._library (#124199)
After this PR, we can delete torch._custom_op/torch._custom_ops (except that there are external libraries depending on it). Pull Request resolved: https://github.com/pytorch/pytorch/pull/124199 Approved by: https://github.com/albanD ghstack dependencies: #124180, #124200, #124299, #124134
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index b9fe9bed81..914de0a630 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -746,7 +746,7 @@ class TestCustomOp(CustomOpTestCaseBase): ) def test_supported_return_types_single_return(self): - for typ in torch._custom_op.impl.SUPPORTED_RETURN_TYPES: + for typ in torch._library.infer_schema.SUPPORTED_RETURN_TYPES: for example in self._generate_examples(typ): try: @@ -765,7 +765,7 @@ class TestCustomOp(CustomOpTestCaseBase): custom_ops._destroy(f"{self.test_ns}::foo") def test_supported_return_types_multi_return(self): - for typ in torch._custom_op.impl.SUPPORTED_RETURN_TYPES: + for typ in torch._library.infer_schema.SUPPORTED_RETURN_TYPES: for example in self._generate_examples(typ): try: @@ -785,7 +785,7 @@ class TestCustomOp(CustomOpTestCaseBase): custom_ops._destroy(f"{self.test_ns}::foo") def test_supported_param_types(self): - for typ in torch._custom_op.impl.SUPPORTED_PARAM_TYPES: + for typ in torch._library.infer_schema.SUPPORTED_PARAM_TYPES: @custom_ops.custom_op(f"{TestCustomOp.test_ns}::foo") def foo(x: Tensor, y: typ) -> Tensor: diff --git a/torch/_custom_op/impl.py b/torch/_custom_op/impl.py index fefd7cedf9..df83c51bcf 100644 --- a/torch/_custom_op/impl.py +++ b/torch/_custom_op/impl.py @@ -14,6 +14,8 @@ from torch._library.abstract_impl import AbstractImplCtx from torch.library import get_ctx from .autograd import autograd_kernel_indirection, construct_autograd_kernel +import torch._library.infer_schema +from torch._library.infer_schema import infer_schema """ For a detailed guide on custom ops, please see @@ -769,145 +771,6 @@ def validate_function_matches_schema( compare(kwargonly, schema.arguments.flat_kwarg_only) -def infer_schema(prototype_function: typing.Callable, mutates_args=()) -> str: - """Given a function with type hints, parses a schema. - - We make some assumptions to make our lives easier that correspond to how people - write custom ops in real life: - - none of the outputs alias any of the inputs or each other. - - only the args listed in mutates_args are being mutated. - - Callers (e.g. the custom ops API) are responsible for checking these assumptions. - """ - sig = inspect.signature(prototype_function) - - def error_fn(what): - raise ValueError( - f"infer_schema(func): {what} " f"Got func with signature {sig})" - ) - - params = [] - seen_args = set() - for idx, (name, param) in enumerate(sig.parameters.items()): - if not supported_param(param): - error_fn("We do not support positional-only args, varargs, or varkwargs.") - - if param.annotation is inspect.Parameter.empty: - error_fn(f"Parameter {name} must have a type annotation.") - - if param.annotation not in SUPPORTED_PARAM_TYPES.keys(): - error_fn( - f"Parameter {name} has unsupported type {param.annotation}. " - f"The valid types are: {SUPPORTED_PARAM_TYPES.keys()}." - ) - - schema_type = SUPPORTED_PARAM_TYPES[param.annotation] - if name in mutates_args: - if not schema_type.startswith("Tensor"): - error_fn(f"Parameter {name} is in mutable_args but only Tensors or collections of Tensors can be mutated") - schema_type = f"Tensor(a{idx}!){schema_type[len('Tensor'):]}" - seen_args.add(name) - if param.default is inspect.Parameter.empty: - params.append(f"{schema_type} {name}") - else: - if param.default is not None and not isinstance(param.default, (int, float, bool)): - error_fn( - f"Parameter {name} has an unsupported default value (we only support " - f"int, float, bool, None). 
Please file an issue on GitHub so we can " - f"prioritize this." - ) - params.append(f"{schema_type} {name}={param.default}") - mutates_args_not_seen = set(mutates_args) - seen_args - if len(mutates_args_not_seen) > 0: - error_fn(f"{mutates_args_not_seen} in mutates_args were not found in " - f"the custom op's signature. " - f"mutates_args should contain the names of all args that the " - f"custom op mutates.") - ret = parse_return(sig.return_annotation, error_fn) - return f"({', '.join(params)}) -> {ret}" - - -def derived_types( - base_type, cpp_type, list_base, optional_base_list, optional_list_base -): - result = [ - (base_type, cpp_type), - (typing.Optional[base_type], f"{cpp_type}?"), - ] - - def derived_seq_types(typ): - return [ - typing.Sequence[typ], # type: ignore[valid-type] - typing.List[typ], # type: ignore[valid-type] - ] - - if list_base: - for seq_typ in derived_seq_types(base_type): - result.append((seq_typ, f"{cpp_type}[]")) # type: ignore[valid-type] - if optional_base_list: - for seq_typ in derived_seq_types(typing.Optional[base_type]): - result.append((seq_typ, f"{cpp_type}?[]")) # type: ignore[valid-type] - if optional_list_base: - for seq_typ in derived_seq_types(base_type): # type: ignore[valid-type] - result.append((typing.Optional[seq_typ], f"{cpp_type}[]?")) # type: ignore[valid-type] - return result - - -def get_supported_param_types(): - data = [ - # (python type, schema type, type[] variant, type?[] variant, type[]? variant - (torch.Tensor, "Tensor", True, True, False), - (int, "SymInt", True, False, True), - (float, "float", True, False, True), - (bool, "bool", True, False, True), - (str, "str", False, False, False), - (torch.types.Number, "Scalar", True, False, False), - (torch.dtype, "ScalarType", False, False, False), - (torch.device, "Device", False, False, False), - ] - result = [] - for line in data: - result.extend(derived_types(*line)) - return dict(result) - - -SUPPORTED_RETURN_TYPES = { - torch.Tensor: "Tensor", - typing.List[torch.Tensor]: "Tensor[]", - int: "SymInt", - float: "float", - bool: "bool", - torch.types.Number: "Scalar", -} - - -def parse_return(annotation, error_fn): - if annotation is None: - return "()" - - origin = typing.get_origin(annotation) - if origin is not tuple: - if annotation not in SUPPORTED_RETURN_TYPES.keys(): - error_fn( - f"Return has unsupported type {annotation}. " - f"The valid types are: {SUPPORTED_RETURN_TYPES}." - ) - return SUPPORTED_RETURN_TYPES[annotation] - - args = typing.get_args(annotation) - for arg in args: - if arg not in SUPPORTED_RETURN_TYPES: - error_fn( - f"Return has unsupported type {annotation}. " - f"The valid types are: {SUPPORTED_RETURN_TYPES}." - ) - - return "(" + ", ".join([SUPPORTED_RETURN_TYPES[arg] for arg in args]) + ")" - - -SUPPORTED_PARAM_TYPES = get_supported_param_types() - - def report_error_callback(custom_op: typing.Any, key: str) -> None: if key == "Undefined": raise NotImplementedError( diff --git a/torch/_library/infer_schema.py b/torch/_library/infer_schema.py new file mode 100644 index 0000000000..e85803db37 --- /dev/null +++ b/torch/_library/infer_schema.py @@ -0,0 +1,156 @@ +import inspect +import typing + +from .. import device, dtype, Tensor, types + + +def infer_schema(prototype_function: typing.Callable, mutates_args=()) -> str: + """Given a function with type hints, parses a schema. + + We make some assumptions to make our lives easier that correspond to how people + write custom ops in real life: + - none of the outputs alias any of the inputs or each other. 
+ - only the args listed in mutates_args are being mutated. + + Callers (e.g. the custom ops API) are responsible for checking these assumptions. + """ + sig = inspect.signature(prototype_function) + + def error_fn(what): + raise ValueError( + f"infer_schema(func): {what} " f"Got func with signature {sig})" + ) + + params = [] + seen_args = set() + for idx, (name, param) in enumerate(sig.parameters.items()): + if not supported_param(param): + error_fn("We do not support positional-only args, varargs, or varkwargs.") + + if param.annotation is inspect.Parameter.empty: + error_fn(f"Parameter {name} must have a type annotation.") + + if param.annotation not in SUPPORTED_PARAM_TYPES.keys(): + error_fn( + f"Parameter {name} has unsupported type {param.annotation}. " + f"The valid types are: {SUPPORTED_PARAM_TYPES.keys()}." + ) + + schema_type = SUPPORTED_PARAM_TYPES[param.annotation] + if name in mutates_args: + if not schema_type.startswith("Tensor"): + error_fn( + f"Parameter {name} is in mutable_args but only Tensors or collections of Tensors can be mutated" + ) + schema_type = f"Tensor(a{idx}!){schema_type[len('Tensor'):]}" + seen_args.add(name) + if param.default is inspect.Parameter.empty: + params.append(f"{schema_type} {name}") + else: + if param.default is not None and not isinstance( + param.default, (int, float, bool) + ): + error_fn( + f"Parameter {name} has an unsupported default value (we only support " + f"int, float, bool, None). Please file an issue on GitHub so we can " + f"prioritize this." + ) + params.append(f"{schema_type} {name}={param.default}") + mutates_args_not_seen = set(mutates_args) - seen_args + if len(mutates_args_not_seen) > 0: + error_fn( + f"{mutates_args_not_seen} in mutates_args were not found in " + f"the custom op's signature. " + f"mutates_args should contain the names of all args that the " + f"custom op mutates." + ) + ret = parse_return(sig.return_annotation, error_fn) + return f"({', '.join(params)}) -> {ret}" + + +def derived_types( + base_type, cpp_type, list_base, optional_base_list, optional_list_base +): + result = [ + (base_type, cpp_type), + (typing.Optional[base_type], f"{cpp_type}?"), + ] + + def derived_seq_types(typ): + return [ + typing.Sequence[typ], # type: ignore[valid-type] + typing.List[typ], # type: ignore[valid-type] + ] + + if list_base: + for seq_typ in derived_seq_types(base_type): + result.append((seq_typ, f"{cpp_type}[]")) # type: ignore[valid-type] + if optional_base_list: + for seq_typ in derived_seq_types(typing.Optional[base_type]): + result.append((seq_typ, f"{cpp_type}?[]")) # type: ignore[valid-type] + if optional_list_base: + for seq_typ in derived_seq_types(base_type): # type: ignore[valid-type] + result.append((typing.Optional[seq_typ], f"{cpp_type}[]?")) # type: ignore[valid-type] + return result + + +def get_supported_param_types(): + data = [ + # (python type, schema type, type[] variant, type?[] variant, type[]? 
variant + (Tensor, "Tensor", True, True, False), + (int, "SymInt", True, False, True), + (float, "float", True, False, True), + (bool, "bool", True, False, True), + (str, "str", False, False, False), + (types.Number, "Scalar", True, False, False), + (dtype, "ScalarType", False, False, False), + (device, "Device", False, False, False), + ] + result = [] + for line in data: + result.extend(derived_types(*line)) + return dict(result) + + +SUPPORTED_RETURN_TYPES = { + Tensor: "Tensor", + typing.List[Tensor]: "Tensor[]", + int: "SymInt", + float: "float", + bool: "bool", + types.Number: "Scalar", +} + + +def parse_return(annotation, error_fn): + if annotation is None: + return "()" + + origin = typing.get_origin(annotation) + if origin is not tuple: + if annotation not in SUPPORTED_RETURN_TYPES.keys(): + error_fn( + f"Return has unsupported type {annotation}. " + f"The valid types are: {SUPPORTED_RETURN_TYPES}." + ) + return SUPPORTED_RETURN_TYPES[annotation] + + args = typing.get_args(annotation) + for arg in args: + if arg not in SUPPORTED_RETURN_TYPES: + error_fn( + f"Return has unsupported type {annotation}. " + f"The valid types are: {SUPPORTED_RETURN_TYPES}." + ) + + return "(" + ", ".join([SUPPORTED_RETURN_TYPES[arg] for arg in args]) + ")" + + +SUPPORTED_PARAM_TYPES = get_supported_param_types() + + +def supported_param(param: inspect.Parameter) -> bool: + return param.kind in ( + inspect.Parameter.POSITIONAL_OR_KEYWORD, + inspect.Parameter.KEYWORD_ONLY, + )
2.41.0
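A hypothetical usage of the relocated helper, based on the SUPPORTED_PARAM_TYPES / SUPPORTED_RETURN_TYPES tables visible in the diff above (the import path is the private module this commit adds, so it may differ in other releases):

```python
import torch
from torch._library.infer_schema import infer_schema  # private helper added by this commit

def my_op(x: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
    return x * scale

# Per the tables in the diff: Tensor -> "Tensor", float -> "float", defaults
# are inlined, and a Tensor return maps to "Tensor", so this should print:
#   (Tensor x, float scale=1.0) -> Tensor
print(infer_schema(my_op))
```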
0d83726bde98f7a716f8e81d9e38804c0793404
Fri, 19 Apr 2024 18:57:38 +0000
[PATCH 0391/1000] [5/x][AMD][Lowering Enablement] Hipifying aoti code_wrapper (#124241)
Summary: as title Test Plan: CI & unit test patch on top of https://www.internalfb.com/phabricator/paste/view/P1214895953 to test Differential Revision: D56223917 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124241 Approved by: https://github.com/jansel, https://github.com/desertfire
diff --git a/torch/_inductor/codegen/aoti_hipify_utils.py b/torch/_inductor/codegen/aoti_hipify_utils.py new file mode 100644 index 0000000000..a86ef2d297 --- /dev/null +++ b/torch/_inductor/codegen/aoti_hipify_utils.py @@ -0,0 +1,20 @@ +import torch + +from torch.utils.hipify.hipify_python import PYTORCH_MAP, RE_PYTORCH_PREPROCESSOR + +# It is not a good idea to directly apply hipify_torch to codegen, which will be vulnerable to cases like: +# "... +# from ..codecache import CudaKernelParamCache +# ..." +# In such cases, we do not need to hipify_torch the orignial class/file name in codegen/codecache + + +def maybe_hipify_code_wrapper(source_codes: str) -> str: + if torch.version.hip is None: + return source_codes + + def c2_repl(m): + return PYTORCH_MAP[m.group(0)] + + source_codes = RE_PYTORCH_PREPROCESSOR.sub(c2_repl, source_codes) + return source_codes diff --git a/torch/_inductor/codegen/codegen_device_driver.py b/torch/_inductor/codegen/codegen_device_driver.py new file mode 100644 index 0000000000..73fcb7afd5 --- /dev/null +++ b/torch/_inductor/codegen/codegen_device_driver.py @@ -0,0 +1,88 @@ +import torch + +# Provide aoti module launch hip/cuda drivers. This file is also used for unit testing purpose + + +def cuda_kernel_driver() -> str: + source_codes = """ + #define CUDA_DRIVER_CHECK(EXPR) \\ + do { \\ + CUresult code = EXPR; \\ + const char *msg; \\ + cuGetErrorString(code, &msg); \\ + if (code != CUDA_SUCCESS) { \\ + throw std::runtime_error( \\ + std::string("CUDA driver error: ") + \\ + std::string(msg)); \\ + } \\ + } while (0); + + namespace { + + struct Grid { + Grid(uint32_t x, uint32_t y, uint32_t z) + : grid_x(x), grid_y(y), grid_z(z) {} + uint32_t grid_x; + uint32_t grid_y; + uint32_t grid_z; + + bool is_non_zero() { + return grid_x > 0 && grid_y > 0 && grid_z > 0; + } + }; + + } // anonymous namespace + + static inline CUfunction loadKernel( + std::string filePath, + const std::string &funcName, + uint32_t sharedMemBytes, + const std::optional<std::string> &cubinDir = std::nullopt) { + if (cubinDir) { + std::filesystem::path p1{*cubinDir}; + std::filesystem::path p2{filePath}; + filePath = (p1 / p2.filename()).string(); + } + + CUmodule mod; + CUfunction func; + CUDA_DRIVER_CHECK(cuModuleLoad(&mod, filePath.c_str())); + CUDA_DRIVER_CHECK(cuModuleGetFunction(&func, mod, funcName.c_str())); + if (sharedMemBytes > 0) { + CUDA_DRIVER_CHECK(cuFuncSetAttribute( + func, + CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, + sharedMemBytes + )) + } + return func; + } + + static inline void launchKernel( + CUfunction func, + uint32_t gridX, + uint32_t gridY, + uint32_t gridZ, + uint32_t numWarps, + uint32_t sharedMemBytes, + void* args[], + cudaStream_t stream) { + CUDA_DRIVER_CHECK(cuLaunchKernel( + func, gridX, gridY, gridZ, 32*numWarps, 1, 1, sharedMemBytes, stream, args, nullptr + )); + } + """ + if torch.version.hip is not None: + # Replace the warp size from 32 (cuLaunchKernel) to 64 (hipModuleLaunchKernel) + # The warp size on NV GPU is 32, while the wavefront size on AMD GPU is 64 + source_codes = source_codes.replace("32*numWarps", "64*numWarps") + return source_codes + + +def cuda_kernel_header() -> str: + source_codes = """ + #include <c10/cuda/CUDAGuard.h> + #include <c10/cuda/CUDAStream.h> + #include <ATen/cuda/EmptyTensor.h> + """ + return source_codes diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py index a96fadaafa..4c07930045 100644 --- a/torch/_inductor/codegen/cpp_wrapper_cpu.py +++ 
b/torch/_inductor/codegen/cpp_wrapper_cpu.py @@ -15,6 +15,7 @@ from .. import config, ir from ..codecache import CudaKernelParamCache from ..utils import cache_on_self, sympy_product from ..virtualized import V +from .aoti_hipify_utils import maybe_hipify_code_wrapper from .common import IndentedBuffer from .wrapper import EnterSubgraphLine, ExitSubgraphLine, WrapperCodeGen @@ -665,7 +666,9 @@ class CppWrapperCpu(WrapperCodeGen): V.graph.const_module.wrapper_code.src_to_kernel.values() ) for kernel in sorted(declare_kernel): - self.prefix.writeline(f" CUfunction {kernel}{{nullptr}};") + self.prefix.writeline( + maybe_hipify_code_wrapper(f" CUfunction {kernel}{{nullptr}};") + ) self.prefix.writeline("};") self.prefix.writeline("} // namespace") diff --git a/torch/_inductor/codegen/cpp_wrapper_cuda.py b/torch/_inductor/codegen/cpp_wrapper_cuda.py index e0a3baff06..69838dccad 100644 --- a/torch/_inductor/codegen/cpp_wrapper_cuda.py +++ b/torch/_inductor/codegen/cpp_wrapper_cuda.py @@ -11,6 +11,8 @@ from .. import config from ..codecache import CudaKernelParamCache from ..triton_heuristics import grid as default_grid from ..virtualized import V +from .aoti_hipify_utils import maybe_hipify_code_wrapper +from .codegen_device_driver import cuda_kernel_driver, cuda_kernel_header from .cpp_wrapper_cpu import CppWrapperCpu from .wrapper import SymbolicCallArg @@ -64,88 +66,12 @@ class CppWrapperCuda(CppWrapperCpu): "#include <torch/csrc/inductor/aoti_runtime/utils_cuda.h>" ) else: - self.header.splice( - """ - #include <c10/cuda/CUDAGuard.h> - #include <c10/cuda/CUDAStream.h> - #include <ATen/cuda/EmptyTensor.h> - """ - ) - - self.header.splice( - """ - #define CUDA_DRIVER_CHECK(EXPR) \\ - do { \\ - CUresult code = EXPR; \\ - const char *msg; \\ - cuGetErrorString(code, &msg); \\ - if (code != CUDA_SUCCESS) { \\ - throw std::runtime_error( \\ - std::string("CUDA driver error: ") + \\ - std::string(msg)); \\ - } \\ - } while (0); - - namespace { - - struct Grid { - Grid(uint32_t x, uint32_t y, uint32_t z) - : grid_x(x), grid_y(y), grid_z(z) {} - uint32_t grid_x; - uint32_t grid_y; - uint32_t grid_z; - - bool is_non_zero() { - return grid_x > 0 && grid_y > 0 && grid_z > 0; - } - }; - - } // anonymous namespace - - static inline CUfunction loadKernel( - std::string filePath, - const std::string &funcName, - uint32_t sharedMemBytes, - const std::optional<std::string> &cubinDir = std::nullopt) { - if (cubinDir) { - std::filesystem::path p1{*cubinDir}; - std::filesystem::path p2{filePath}; - filePath = (p1 / p2.filename()).string(); - } - - CUmodule mod; - CUfunction func; - CUDA_DRIVER_CHECK(cuModuleLoad(&mod, filePath.c_str())); - CUDA_DRIVER_CHECK(cuModuleGetFunction(&func, mod, funcName.c_str())); - if (sharedMemBytes > 0) { - CUDA_DRIVER_CHECK(cuFuncSetAttribute( - func, - CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, - sharedMemBytes - )) - } - return func; - } - - static inline void launchKernel( - CUfunction func, - uint32_t gridX, - uint32_t gridY, - uint32_t gridZ, - uint32_t numWarps, - uint32_t sharedMemBytes, - void* args[], - cudaStream_t stream) { - CUDA_DRIVER_CHECK(cuLaunchKernel( - func, gridX, gridY, gridZ, 32*numWarps, 1, 1, sharedMemBytes, stream, args, nullptr - )); - } - """ - ) + self.header.splice(maybe_hipify_code_wrapper(cuda_kernel_header())) + self.header.splice(maybe_hipify_code_wrapper(cuda_kernel_driver())) def write_get_raw_stream(self, index, graph=None): name = f"stream{index}" - self.writeline(f"cudaStream_t {name};") + 
self.writeline(maybe_hipify_code_wrapper(f"cudaStream_t {name};")) self.writeline( f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_current_cuda_stream({index}, (void**)&{name}));" ) @@ -164,7 +90,9 @@ class CppWrapperCuda(CppWrapperCpu): sorted(self.src_to_kernel.values()), sorted([entry[0] for entry in self.user_defined_kernel_cache.values()]), ): - self.prefix.writeline(f"static CUfunction {kernel} = nullptr;") + self.prefix.writeline( + maybe_hipify_code_wrapper(f"static CUfunction {kernel} = nullptr;") + ) self.prefix.writeline("\n") return super().generate(is_inference) @@ -214,13 +142,17 @@ class CppWrapperCuda(CppWrapperCpu): self.writeline(f"auto {var_name} = c10::nullopt;") else: if config.abi_compatible: - self.writeline(f"CUdeviceptr {var_name};") + self.writeline( + maybe_hipify_code_wrapper(f"CUdeviceptr {var_name};") + ) self.writeline( f"AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_get_data_ptr({arg}, reinterpret_cast<void**>(&{var_name})));" ) else: self.writeline( - f"CUdeviceptr {var_name} = reinterpret_cast<CUdeviceptr>({arg}.data_ptr());" + maybe_hipify_code_wrapper( + f"CUdeviceptr {var_name} = reinterpret_cast<CUdeviceptr>({arg}.data_ptr());" + ) ) new_args.append(f"&{var_name}") diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py index 1609a6d897..145721d50e 100644 --- a/torch/_inductor/codegen/wrapper.py +++ b/torch/_inductor/codegen/wrapper.py @@ -42,6 +42,7 @@ from ..utils import ( sympy_str, ) from ..virtualized import V +from .aoti_hipify_utils import maybe_hipify_code_wrapper from .common import CodeGen, DeferredLine, IndentedBuffer, PythonPrinter from .triton_utils import config_of, signature_to_meta @@ -264,8 +265,10 @@ class EnterDeviceContextManagerLine(WrapperLine): ) else: code.writeline( - "at::cuda::CUDAStreamGuard stream_guard(" - + "at::cuda::getStreamFromExternal(stream, this->device_idx_));" + maybe_hipify_code_wrapper( + "at::cuda::CUDAStreamGuard stream_guard(" + + "at::cuda::getStreamFromExternal(stream, this->device_idx_));" + ) ) else: assert ( @@ -276,7 +279,9 @@ class EnterDeviceContextManagerLine(WrapperLine): code.writeline( f"AOTICudaGuard device_guard({self.device_idx});" if config.abi_compatible - else f"at::cuda::CUDAGuard device_guard({self.device_idx});" + else maybe_hipify_code_wrapper( + f"at::cuda::CUDAGuard device_guard({self.device_idx});" + ) ) else: code.writeline(f"device_guard.set_index({self.device_idx});")
2.41.0
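The hipification applied by maybe_hipify_code_wrapper is, at its core, a regex rename of CUDA identifiers in generated wrapper source when building for ROCm; a simplified sketch with a hand-picked mapping (PyTorch's real tables are PYTORCH_MAP / RE_PYTORCH_PREPROCESSOR from torch.utils.hipify, not the dictionary below):

```python
import re

# Illustrative subset of CUDA -> HIP renames; not PyTorch's full hipify table.
CUDA_TO_HIP = {
    "CUfunction": "hipFunction_t",
    "cudaStream_t": "hipStream_t",
    "CUdeviceptr": "hipDeviceptr_t",
}
PATTERN = re.compile("|".join(map(re.escape, CUDA_TO_HIP)))

def maybe_hipify(source: str, is_hip_build: bool) -> str:
    # On CUDA builds the generated wrapper source is returned untouched.
    if not is_hip_build:
        return source
    return PATTERN.sub(lambda m: CUDA_TO_HIP[m.group(0)], source)

print(maybe_hipify("static CUfunction kernel = nullptr;", is_hip_build=True))
# static hipFunction_t kernel = nullptr;
```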
61de37ab7fef4ff13a65d1e84b9877361443249
Thu, 18 Apr 2024 15:23:51 -0700
[PATCH 0393/1000] [sym_shape][perf] eval_static: guards, unbacked compute once (#124217)
Differential Revision: [D56212345](https://our.internmc.facebook.com/intern/diff/D56212345) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124217 Approved by: https://github.com/ezyang
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py index 1493eb35fe..4d2f19bee9 100644 --- a/torch/fx/experimental/symbolic_shapes.py +++ b/torch/fx/experimental/symbolic_shapes.py @@ -3617,12 +3617,28 @@ class ShapeEnv: symbols = list(expr.free_symbols) # Apply known runtime asserts - for s in symbols: - # Unbacked symints only - if s in self.var_to_val: - continue + guards_exprs = [] + for g in self.guards: + e = self.simplify(g.expr) + if compute_hint: + e = canonicalize_bool_expr(e.xreplace(self.var_to_val)) + guards_exprs.append(e) + + symbols_unbacked = symbols - self.var_to_val.keys() + defra_exprs = {} + for s in symbols_unbacked: + defras = self.deferred_runtime_asserts.get(s, ()) + l = [] + for defra in defras: + e = self.simplify(defra.expr) + if compute_hint: + e = canonicalize_bool_expr(e.xreplace(self.var_to_val)) + l.append(e) + defra_exprs[s] = l - subst = {} + + subst = {} + for s in symbols_unbacked: def add_expr(expr): # Expr and negation @@ -3634,10 +3650,7 @@ class ShapeEnv: subst[canonicalize_bool_expr(dual)] = sympy.true subst[canonicalize_bool_expr(sympy.Not(dual))] = sympy.false - for e in itertools.chain(self.guards, self.deferred_runtime_asserts.get(s, ())): - e = e.expr - if compute_hint: - e = canonicalize_bool_expr(e.xreplace(self.var_to_val)) + for e in itertools.chain(guards_exprs, defra_exprs[s]): add_expr(e) # Other relational expressions this expression implies if isinstance(e, sympy.Eq): @@ -3647,8 +3660,8 @@ class ShapeEnv: add_expr(sympy.Le(e.lhs, e.rhs)) add_expr(sympy.Ne(e.lhs, e.rhs)) - # NB: this helps us deal with And/Or connectives - expr = expr.xreplace(subst) + # NB: this helps us deal with And/Or connectives + expr = expr.xreplace(subst) # Simplify making use of value range lower bound new_shape_env = {}
2.41.0
87c788a3476110d184ffddcda873ae216f3e9aa
Fri, 19 Apr 2024 19:05:44 +0000
[PATCH 0394/1000] Revert "Capture triton kernel in execution trace (#124140)"
This reverts commit 89407eca3b0be3c0272b5c583f8e77b9108a71f8. Reverted https://github.com/pytorch/pytorch/pull/124140 on behalf of https://github.com/facebook-github-bot due to Diff reverted internally ([comment](https://github.com/pytorch/pytorch/pull/124140#issuecomment-2067137104))
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py index d9012d0e89..8e4e31718d 100644 --- a/test/profiler/test_profiler.py +++ b/test/profiler/test_profiler.py @@ -21,7 +21,6 @@ import torch.nn as nn import torch.optim import torch.utils.data import torch.utils.data.datapipes as dp -from torch import _dynamo as torchdynamo from torch._C._profiler import _TensorMetadata from torch.autograd import ( _record_function_with_args_enter, @@ -53,9 +52,7 @@ from torch.profiler._pattern_matcher import ( report_all_anti_patterns, SynchronizedDataLoaderPattern, ) - -from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU - +from torch.testing._internal.common_cuda import TEST_MULTIGPU from torch.testing._internal.common_device_type import skipCUDAVersionIn from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, @@ -73,8 +70,6 @@ from torch.testing._internal.common_utils import ( TestCase, ) -from torch.utils._triton import has_triton - Json = Dict[str, Any] try: @@ -518,52 +513,41 @@ class TestExecutionTrace(TestCase): assert loop_count == expected_loop_events @unittest.skipIf(IS_WINDOWS, "torch.compile does not support WINDOWS") - @unittest.skipIf( - sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+" - ) - @unittest.skipIf(not TEST_CUDA or not has_triton(), "need CUDA and triton to run") def test_execution_trace_with_pt2(self): - @torchdynamo.optimize("inductor") - def fn(a, b, c): - x = torch.nn.functional.linear(a, b) - x = x + c - return x.cos() - - a, b, c = (torch.randn(4, 4, requires_grad=True).to("cuda") for _ in range(3)) + class ConvAndRelu(nn.Module): + def __init__(self) -> None: + super().__init__() + self.linear = nn.Linear(4096, 4096) + self.relu = nn.ReLU(inplace=True) - inputs = [a, b, c] - fn(*inputs) + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.linear(x) + x = self.relu(x) + return x # Create a temp file to save execution trace data. 
fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False) fp.close() - et_file = fp.name - et = ExecutionTraceObserver() - et.register_callback(et_file) - with profile( - activities=torch.profiler.supported_activities(), record_shapes=True - ): - et.start() - fn(*inputs) - et.stop() + test_module = torch.compile(ConvAndRelu()) + + x = torch.rand(128, 4096) + et = ExecutionTraceObserver().register_callback(fp.name) + et.start() + test_module.forward(x) + et.stop() assert fp.name == et.get_output_file_path() et.unregister_callback() - nodes = self.get_execution_trace_root(fp.name) - found_captured_triton_kernel_node = False + found_root_node = False for n in nodes: assert "name" in n - if "triton_" in n["name"]: - for attr in n["attrs"]: - if attr["name"] == "kernel_file" and attr["value"] != "": - found_captured_triton_kernel_node = True - assert len(n["inputs"]["values"]) > 0 - assert len(n["outputs"]["values"]) == 0 - - assert found_captured_triton_kernel_node + if "[pytorch|profiler|execution_trace|process]" in n["name"]: + found_root_node = True + + assert found_root_node def test_execution_trace_start_stop(self): use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities() diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/triton_heuristics.py index 1371608f62..cf68fb020c 100644 --- a/torch/_inductor/triton_heuristics.py +++ b/torch/_inductor/triton_heuristics.py @@ -807,7 +807,7 @@ class CachingAutotuner(KernelInterface): args, { "kernel_file": self.filename, - "kernel_backend": "triton", + "kernel_type": "triton", "grid": grid_info, "stream": stream, }, diff --git a/torch/csrc/profiler/standalone/execution_trace_observer.cpp b/torch/csrc/profiler/standalone/execution_trace_observer.cpp index 80b6f2221d..763f449c23 100644 --- a/torch/csrc/profiler/standalone/execution_trace_observer.cpp +++ b/torch/csrc/profiler/standalone/execution_trace_observer.cpp @@ -236,8 +236,6 @@ const ExecutionTraceObserver::ID root_id{1}; struct FunctionCallContext : public ObserverContext { std::string name; - std::string kernel_backend; - std::string kernel_file; ExecutionTraceObserver::ID op_id{uninitialized_id}; ExecutionTraceObserver::ID parent_id{uninitialized_id}; ExecutionTraceObserver::ID fw_parent_id{uninitialized_id}; @@ -275,24 +273,14 @@ static void writeJsonNode( const std::string& outputs = "[]", const std::string& output_shapes = "[]", const std::string& output_types = "[]", - const std::string& operator_schema = "", - const std::string& kernel_backend = "", - const std::string& kernel_file = "") { + const std::string& operator_schema = "") { out << fmt::format( R"JSON( {{ "id": {}, "name": "{}", "ctrl_deps": {}, "inputs": {{"values": {}, "shapes": {}, "types": {}}}, "outputs": {{"values": {}, "shapes": {}, "types": {}}}, - "attrs": [{{"name": "rf_id", "type": "uint64", "value": {}}}, - {{"name": "fw_parent", "type": "uint64", "value": {}}}, - {{"name": "seq_id", "type": "int64", "value": {}}}, - {{"name": "scope", "type": "uint64", "value": {}}}, - {{"name": "tid", "type": "uint64", "value": {}}}, - {{"name": "fw_tid", "type": "uint64", "value": {}}}, - {{"name": "op_schema", "type": "string", "value": "{}"}}, - {{"name": "kernel_backend", "type": "string", "value": "{}"}}, - {{"name": "kernel_file", "type": "string", "value": "{}"}}] + "attrs": [{{"name": "rf_id", "type": "uint64", "value": {}}}, {{"name": "fw_parent", "type": "uint64", "value": {}}}, {{"name": "seq_id", "type": "int64", "value": {}}}, {{"name": "scope", "type": "uint64", 
"value": {}}}, {{"name": "tid", "type": "uint64", "value": {}}}, {{"name": "fw_tid", "type": "uint64", "value": {}}}, {{"name": "op_schema", "type": "string", "value": "{}"}}] }})JSON", id, name, @@ -309,9 +297,7 @@ static void writeJsonNode( scope, tid, fw_tid, - operator_schema, - kernel_backend, - kernel_file); + operator_schema); } inline std::string timeString(const std::time_t timepoint) { @@ -456,44 +442,6 @@ inline void appendValueInfo( shapes.push_back(getValueShape(val)); } -inline void handleKernelBackendInfo( - FunctionCallContext& fc, - const RecordFunction& fn) { - // triton kernel related information are in kwinputs - const auto& kwinputs = fn.kwinputs(); - if (kwinputs.find("kernel_backend") != kwinputs.end()) { - fc.kernel_backend = kwinputs.at("kernel_backend").toStringRef(); - if (fc.kernel_backend == "triton") { - fc.kernel_file = kwinputs.at("kernel_file").toStringRef(); - TORCH_INTERNAL_ASSERT( - kwinputs.find("kernel_file") != kwinputs.end(), - "kernel file is missing in triton kernel"); - // Remove the path of the file name - if (fc.kernel_file.find_last_of('/') != std::string::npos) - fc.kernel_file = - fc.kernel_file.substr(fc.kernel_file.find_last_of('/') + 1); - - // get grid information - TORCH_INTERNAL_ASSERT( - kwinputs.find("grid") != kwinputs.end(), - "grid is missing in triton kernel"); - fc.input_values.emplace_back( - "\"" + kwinputs.at("grid").toStringRef() + "\""); - fc.input_types.emplace_back("\"String\""); - fc.input_shapes.emplace_back("[]"); - - // get stream information - TORCH_INTERNAL_ASSERT( - kwinputs.find("stream") != kwinputs.end(), - "stream is missing in triton kernel"); - fc.input_values.emplace_back( - std::to_string(kwinputs.at("stream").toInt())); - fc.input_types.emplace_back("\"Int\""); - fc.input_shapes.emplace_back("[]"); - } - } -} - static void recordOperatorStart( ExecutionTraceObserver& ob, FunctionCallContext& fc, @@ -543,9 +491,6 @@ static void recordOperatorStart( appendValueInfo( ob, inputs[i], fc.input_values, fc.input_types, fc.input_shapes); } - - handleKernelBackendInfo(fc, fn); - fc.parent_id = ob.op_stack[tid].top(); // get parent id from the forward stack, this can be different for // autograd ops, which may execute on a different thread than the original @@ -670,9 +615,7 @@ static void onFunctionExit(const RecordFunction& fn, ObserverContext* ctx_ptr) { vectorToString(output_values), vectorToString(output_shapes), vectorToString(output_types), - op_schema_str, - fc.kernel_backend, - fc.kernel_file); + op_schema_str); ob->out << ","; } catch (const std::exception& e) { LOG(WARNING) << "Exception in execution trace observer: [" << fc.name diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py index bfc725700a..fc7a61bf45 100644 --- a/torch/profiler/profiler.py +++ b/torch/profiler/profiler.py @@ -1,7 +1,6 @@ import gzip import json import os -import shutil import tempfile from abc import ABC, abstractmethod from enum import Enum @@ -785,33 +784,8 @@ class ExecutionTraceObserver(_ITraceObserver): """ Removes ET observer from record function callbacks. 
""" - - def _save_triton_kernels(): - # Save the kernel paths for the generated kernels - from torch._inductor.codecache import PyCodeCache as PyCodeCache - - kernel_files = [ - v.__file__ - for v in PyCodeCache.cache.values() - if getattr(v, "__file__", None) is not None - ] - work_dir, file_name = os.path.split(self._output_file_path) - resource_dir = os.path.join( - work_dir, os.path.splitext(file_name)[0] + "_resources" - ) - if not os.path.exists(resource_dir): - os.mkdir(resource_dir) - - for kernel_file in kernel_files: - if kernel_file is None: - continue - path, name = os.path.split(kernel_file) - dst = os.path.join(resource_dir, name) - shutil.copyfile(kernel_file, dst) - if self._registered: self.stop() - _save_triton_kernels() _remove_execution_trace_observer() self._registered = False
2.41.0
61fd23640741055851c8d6f95a29529238061e3
Fri, 19 Apr 2024 19:08:06 +0000
[PATCH 0395/1000] [AMD] TunableOp take priority over DISABLE_ADDMM_HIP_LT (#124161)
Summary: It is confusing that when both DISABLE_ADDMM_HIP_LT and PYTORCH_TUNABLEOP_ENABLED are set, the former takes priority. This is because the former routes through the gemm_and_bias path, while TunableOp is integrated with the gemm path. Until TunableOp is integrated with gemm_and_bias, let TunableOp take priority. Test Plan: Ran a simple linear program and verified the behavior. Differential Revision: D56183954 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124161 Approved by: https://github.com/jeffdaily, https://github.com/nmacchioni
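As a minimal sketch of how the two knobs interact after this change (assuming a ROCm build with at least one GPU; the env var names come from the patch and the summary above, while the layer sizes are arbitrary):

```python
import os

# Setting these before importing torch in a fresh process is the safe way to
# make sure the backend picks them up.
os.environ["PYTORCH_TUNABLEOP_ENABLED"] = "1"
os.environ["DISABLE_ADDMM_CUDA_LT"] = "1"  # the HIP alias DISABLE_ADDMM_HIP_LT is also accepted on ROCm

import torch

if torch.cuda.is_available():
    lin = torch.nn.Linear(16, 4, device="cuda")
    x = torch.randn(8, 16, device="cuda")
    y = lin(x)  # dispatches to addmm; with this patch the tunable gemm path wins
    print(y.shape)
```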
diff --git a/aten/src/ATen/native/cuda/Blas.cpp b/aten/src/ATen/native/cuda/Blas.cpp index be8aa363a9..7195f939f7 100644 --- a/aten/src/ATen/native/cuda/Blas.cpp +++ b/aten/src/ATen/native/cuda/Blas.cpp @@ -6,6 +6,7 @@ #include <ATen/OpMathType.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDABlas.h> +#include <ATen/cuda/tunable/Tunable.h> #include <ATen/native/Resize.h> #include <c10/util/MaybeOwned.h> @@ -174,6 +175,12 @@ cuda::blas::GEMMAndBiasActivationEpilogue activation_to_gemm_and_blas_arg(Activa static bool getDisableAddmmCudaLt() { static const char* env_value = std::getenv("DISABLE_ADDMM_CUDA_LT"); #ifdef USE_ROCM + // if we enable tunable op, it'll take priority over just hipblaslt (heuristics) + // note the current tunable op is not the hipblaslt path (gemm_and_bias) + auto tuning_ctx = at::cuda::tunable::getTuningContext(); + if (tuning_ctx->IsTunableOpEnabled()) { + return true; + } // allow both CUDA and HIP env var names for ROCm builds // also, current default for ROCm builds is disable by default if (env_value == nullptr) {
2.41.0
3f56e1e81d2842bacd58d3be340bd099f45b575
Fri, 19 Apr 2024 05:01:05 -0700
[PATCH 0396/1000] [sym_shapes][perf] Do not calculate hint in advise_is_size (#124472)
Differential Revision: [D56352412](https://our.internmc.facebook.com/intern/diff/D56352412) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124472 Approved by: https://github.com/ezyang
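For context, `_advise_is_size` is the hook that tells the ShapeEnv a data-dependent value should be treated as a size. A rough sketch of code that exercises that path (this assumes `torch._check_is_size` routes into `_advise_is_size` and that `capture_scalar_outputs` is needed to keep `.item()` in the graph; treat both as assumptions rather than guarantees):

```python
import torch
import torch._dynamo

# Keep .item() in the compiled graph so it becomes an unbacked SymInt.
torch._dynamo.config.capture_scalar_outputs = True

@torch.compile(fullgraph=True)
def f(x):
    n = x.item()              # data-dependent scalar -> unbacked SymInt
    torch._check_is_size(n)   # advises the ShapeEnv that n is a valid size
    return torch.zeros(n)

print(f(torch.tensor(5)).shape)  # torch.Size([5])
```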
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py index 4d2f19bee9..c400596ee0 100644 --- a/torch/fx/experimental/symbolic_shapes.py +++ b/torch/fx/experimental/symbolic_shapes.py @@ -556,8 +556,8 @@ def _advise_is_size(a): if ( isinstance(a, SymInt) and isinstance(a.node, SymNode) - and not a.node.has_hint() and isinstance(a.node.expr, sympy.Symbol) + and a.node.shape_env.is_unbacked_symint(a.node.expr) ): _constrain_range_for_size(a)
2.41.0
79108f14d67accac3b5852bac314f8aced520d0
Thu, 18 Apr 2024 21:38:19 -0700
[PATCH 0397/1000] Use separate flags for MultiTemplates from BenchmarkFusion (#122825)
Two changes: - Make the flag for multi-template buffers independent of benchmark fusion. While benchmark fusion can be useful, its compilation time/performance trade-offs are different from those for just the templates, which we'd like to enable by default. - Don't do MultiTemplateBuffers/benchmark fusion for templates that have custom input gen fns (which currently only exist internally). Threading the custom input gen fns through to benchmark fusion is NYI. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122825 Approved by: https://github.com/shunting314 ghstack dependencies: #124030, #122642, #123229
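A sketch of what this decoupling allows, assuming the config names shown in the diff (`benchmark_multi_templates`, `benchmark_fusion`) plus `max_autotune`; the toy function and shapes are arbitrary:

```python
import torch
import torch._inductor.config as inductor_config

# Multi-template buffers can now be turned on without benchmark fusion.
inductor_config.benchmark_multi_templates = True
inductor_config.benchmark_fusion = False
inductor_config.max_autotune = True  # autotuning is what produces the template choices

@torch.compile
def fused_mm(a, b):
    return (a @ b).relu()

if torch.cuda.is_available():
    a = torch.randn(256, 256, device="cuda")
    b = torch.randn(256, 256, device="cuda")
    fused_mm(a, b)
```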
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py index e3f7f39514..0373216cac 100644 --- a/torch/_inductor/scheduler.py +++ b/torch/_inductor/scheduler.py @@ -1769,7 +1769,11 @@ class Scheduler: If config.benchmark_fusion is False, always return True. Otherwise, return True if fusion can brings speedup. """ - if not config.benchmark_fusion: + + is_multi_template = node1.is_template() and isinstance( + node1.get_template_node(), ir.MultiTemplateBuffer + ) + if not config.benchmark_fusion and not is_multi_template: return True if ( diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index 5d135175c6..d8fdb09748 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -892,6 +892,13 @@ class AlgorithmSelectorCache(PersistentCache): ): from .codegen.cuda.cuda_kernel import CUDATemplateCaller + # Templates selected with input_gen_fns require specific input data to avoid IMA + # Passing custom input gen fns to benchmark_fusion NYI, so skip deferred template selection + if input_gen_fns is not None: + return_multi_template = False + + # TODO - assert that we have not mutating kernels here + # TODO(nmacchioni): remove once CI tests are fixed choices = [choice for choice in choices if choice is not None] @@ -1315,11 +1322,9 @@ def autotune_select_algorithm(*args, **kwargs): _ALGORITHM_SELECTOR_CACHE = AlgorithmSelectorCache() if "return_multi_template" not in kwargs: - # TODO - enable multi templates even if benchmark_fusion not enabled - kwargs["return_multi_template"] = ( - torch._inductor.config.benchmark_multi_templates - and torch._inductor.config.benchmark_fusion - ) + kwargs[ + "return_multi_template" + ] = torch._inductor.config.benchmark_multi_templates return _ALGORITHM_SELECTOR_CACHE(*args, **kwargs)
2.41.0
6a788ac26cdb3c3056528e47e3f60a572ecfe9f
Fri, 19 Apr 2024 19:53:19 +0000
[PATCH 0398/1000] Fix compilation on aarch64 with gcc (#124511)
gcc is more stringent than clang when equivalently sized NEON registers are cast to each other. In particular, at one point a `uint16x4_t` was cast to an `int16x4_t`, which gcc does not allow. Added `vreinterpret_s16_u16` (which is a no-op) to solve this; tested in https://godbolt.org/z/sYb4ThM6M Test plan: Build aarch64 wheels Pull Request resolved: https://github.com/pytorch/pytorch/pull/124511 Approved by: https://github.com/mikekgfb
diff --git a/aten/src/ATen/native/cpu/int4mm_kernel.cpp b/aten/src/ATen/native/cpu/int4mm_kernel.cpp index f4fcfbae0c..7e5d14bb23 100644 --- a/aten/src/ATen/native/cpu/int4mm_kernel.cpp +++ b/aten/src/ATen/native/cpu/int4mm_kernel.cpp @@ -354,8 +354,8 @@ inline void tinygemm_kernel( int BLOCK_K) { int16_t shift_vals[4] = {0, -4, -8, -12}; int16x4_t shifts = vld1_s16(shift_vals); - int16x4_t mask = vdup_n_s16(0x0F); int16x4_t offs = vdup_n_s16(8); + uint16x4_t mask = vdup_n_u16(0x0F); for (const auto m : c10::irange(BLOCK_M)) { for (int n = 0; n < BLOCK_N; n+= 16) { float32x4_t c_val[4]; @@ -375,7 +375,8 @@ inline void tinygemm_kernel( } c10::ForcedUnroll<4>{}([&](auto i) { uint16_t b_pack = reinterpret_cast<const uint16_t*>(B + k * ldb + n / 2)[i]; - int16x4_t b_ints = vsub_s16(vand_u16(vshl_u16(vdup_n_u16(b_pack), shifts), mask), offs); + uint16x4_t b_masked = vand_u16(vshl_u16(vdup_n_u16(b_pack), shifts), mask); + int16x4_t b_ints = vsub_s16(vreinterpret_s16_u16(b_masked), offs); float32x4_t b_vals = vcvtq_f32_s32(vmovl_s16(b_ints)); b_vals = vaddq_f32(zeros[i], vmulq_f32(scales[i], b_vals)); c_val[i] = vfmaq_f32(c_val[i], b_vals, a_val);
2.41.0
74dfca5e7bff4b1821c1aaa5f95389574505062
Fri, 19 Apr 2024 21:17:12 +0000
[PATCH 0400/1000] Int4MM: Unswizzle for different dtypes (#124448)
If the dtype is not the one this platform is optimized for, it might need a different unswizzling pattern. Implement such patterns for the non-vectorized flavor of the kernel, so that int4mm can be used with float32 and float16 dtypes. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124448 Approved by: https://github.com/jgong5, https://github.com/mikekgfb
diff --git a/aten/src/ATen/native/cpu/int4mm_kernel.cpp b/aten/src/ATen/native/cpu/int4mm_kernel.cpp index 7e5d14bb23..57e485ab02 100644 --- a/aten/src/ATen/native/cpu/int4mm_kernel.cpp +++ b/aten/src/ATen/native/cpu/int4mm_kernel.cpp @@ -390,15 +390,45 @@ inline void tinygemm_kernel( } #endif -inline float convert_int4_to_float(uint8_t a, bool is_even) { +template<int BLOCK_N> +inline float convert_int4_to_float(const uint8_t* b, int n) { static constexpr float lut[16] = { -8.0f, -7.0f, -6.0f, -5.0f, -4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f }; - - int index = is_even ? (a & 0x0F) : (a >> 4); + int index; +#if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) + if constexpr (BLOCK_N == 64) { + const int nb = n/BLOCK_N; + n -= nb*BLOCK_N; + if (n < 32) { + auto val = b[nb * BLOCK_N / 2 + n]; + index = val & 0x0f; + } else { + auto val = b[nb * BLOCK_N / 2 + (n - 32)]; + index = val >> 4; + } + } else +#elif defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) + if constexpr (BLOCK_N == 32) { + const int nb = n/BLOCK_N; + n -= nb*BLOCK_N; + if (n < 16) { + auto val = b[nb * BLOCK_N / 2 + n]; + index = val & 0x0f; + } else { + auto val = b[nb * BLOCK_N / 2 + (n - 16)]; + index = val >> 4; + } + } else +#endif + { + const auto is_even = (n & 1) == 0; + auto val = b[n/2]; + index = is_even ? (val & 0x0F) : (val >> 4); + } return lut[index]; } @@ -423,9 +453,7 @@ inline void tinygemm_kernel( const auto scale = static_cast<float>(ScaleAndZeros[kb * ldc * 2 + n * 2]); const auto zero = static_cast<float>(ScaleAndZeros[kb * ldc * 2 + n * 2 + 1]); const auto a_val = static_cast<float>(A[m * lda + k]); - uint8_t b_pack = B[k * ldb + n / 2]; - // range [-8, 7]: B_val = (bf16(B_int4_val) * scale) + zero - float b_val = convert_int4_to_float(b_pack, n % 2 == 0); + float b_val = convert_int4_to_float<BLOCK_N>(B + k *ldb, n); b_val = b_val * scale + zero; c_val += a_val * b_val; @@ -506,7 +534,7 @@ void weight_to_int4pack_kernel( auto weight_packed_data = reinterpret_cast<uint8_t*>(weight_packed.data_ptr()); const auto weight_data = weight.data_ptr<int32_t>(); - // 64 for avx512 and 64 for avx2/non-vectorized + // 64 for avx512 and 32 for avx2/non-vectorized constexpr int BLOCK_N = vec::Vectorized<float>::size() * 4; const int NB = (N + BLOCK_N - 1) / BLOCK_N; diff --git a/test/test_linalg.py b/test/test_linalg.py index 0eb5d953fa..1216094118 100644 --- a/test/test_linalg.py +++ b/test/test_linalg.py @@ -5988,8 +5988,8 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2: inner_k_tiles = 2 torch.manual_seed(1) - a = torch.rand((m, k), dtype=torch.bfloat16, device=device) - b = torch.rand((k, n), dtype=torch.bfloat16, device=device) + a_bf16 = torch.rand((m, k), dtype=torch.bfloat16, device=device) + b_bf16 = torch.rand((k, n), dtype=torch.bfloat16, device=device) def convert_weight_to_int4pack(b): b_int32, b_scales_and_zeros = self._group_quantize_tensor( @@ -6006,12 +6006,18 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2: a, b_int4pack, q_group, b_scales_and_zeros ) - b_int4pack, b_scales_and_zeros = convert_weight_to_int4pack(b) - res = weight_int4pack_mm(a, b_int4pack, b_scales_and_zeros) - ref = torch.mm(a, b) + b_int4pack, b_scales_and_zeros_bf16 = convert_weight_to_int4pack(b_bf16) + + for dtype in [torch.bfloat16] + ([torch.float16, torch.float32] if device == "cpu" else []): + a = a_bf16.to(dtype=dtype) + b = b_bf16.to(dtype=dtype) + b_scales_and_zeros = b_scales_and_zeros_bf16.to(dtype=dtype) + ref = 
torch.mm(a, b) + res = weight_int4pack_mm(a, b_int4pack, b_scales_and_zeros) + + mean_err = ((res - ref).abs() / ref).mean() + self.assertTrue(mean_err < 0.05) - mean_err = ((res - ref).abs() / ref).mean() - self.assertTrue(mean_err < 0.05) @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!") @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
2.41.0
0b9d4d19c7c9bb7c12e82ae460a31dc845cf632
Fri, 19 Apr 2024 21:29:36 +0000
[PATCH 0401/1000] [export] handle Dim.lower = 0, 1 for ep.run_decompositions() (#123602)
Summary: With pre-dispatch export and ep.run_decompositions(), range constraints are updated by looking at ShapeEnv.var_to_range. However, the lower bounds on these may be incorrect: analysis on unspecialized symbols is done with a lower bound of 2, which mismatches user-specified bounds (which may be 0 or 1). This updates `_get_updated_range_constraints()` to use the old range constraints when possible. Test Plan: Existing pre-dispatch/dynamic shapes test case. Differential Revision: D55899872 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123602 Approved by: https://github.com/tugsbayasgalan
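A small sketch of the situation the fix targets, mirroring the `test_dim_1_2` case touched in the diff (the module, dim name, and shapes here are illustrative):

```python
import torch
from torch.export import Dim, export

class Foo(torch.nn.Module):
    def forward(self, x):
        return x * x

# User-specified lower bound below 2; previously run_decompositions() could
# bump it to 2 when rebuilding range_constraints from ShapeEnv.var_to_range.
dx = Dim("dx", min=1, max=2)
ep = export(Foo(), (torch.randn(2, 4),), dynamic_shapes=({0: dx},))
ep = ep.run_decompositions()

for vr in ep.range_constraints.values():
    print(vr.lower, vr.upper)  # expect the user's bounds: 1 2
```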
diff --git a/test/export/test_export.py b/test/export/test_export.py index 7a0bbc7eed..f73d93a926 100644 --- a/test/export/test_export.py +++ b/test/export/test_export.py @@ -1056,7 +1056,6 @@ class TestExport(TestCase): ): _ = export(foo, inputs, dynamic_shapes=((dx, 9), (dy, 4), (3, 3))) - @testing.expectedFailurePreDispatchRunDecomp # T183703911 def test_dim_1_2(self): class Foo(torch.nn.Module): def forward(self, x): @@ -1074,7 +1073,6 @@ class TestExport(TestCase): self.assertEqual(vr.lower, 1) self.assertEqual(vr.upper, 2) - @testing.expectedFailurePreDispatchRunDecomp # T183703911 def test_derived_dim_1_2(self): class Bar(torch.nn.Module): def forward(self, x, y): diff --git a/torch/_export/serde/serialize.py b/torch/_export/serde/serialize.py index 9b21fce91c..b71a221a53 100644 --- a/torch/_export/serde/serialize.py +++ b/torch/_export/serde/serialize.py @@ -1768,7 +1768,7 @@ class GraphModuleDeserializer(metaclass=Final): if symbol_name_to_range: for k, vr in symbol_name_to_range.items(): lower = int(vr.lower) - if vr.upper >= 2: # no specialization on 0/1 + if vr.upper >= 2: # max is >= 2, not sym bool range lower = max(2, lower) self.symbol_name_to_range[k] = symbolic_shapes.ValueRanges(_int_to_sympy_int(lower), vr.upper) diff --git a/torch/export/exported_program.py b/torch/export/exported_program.py index 9ebc5f63cb..53b3a0a2ff 100644 --- a/torch/export/exported_program.py +++ b/torch/export/exported_program.py @@ -866,7 +866,7 @@ def _get_updated_range_constraints( # runtime_var_to_range will make a difference compated to var_to_range. # e.g. [2, oo) -> [0, oo) for k, v in shape_env.var_to_range.items(): - if k not in shape_env.replacements: + if k not in shape_env.replacements and k not in range_constraints: range_constraints[k] = v return range_constraints
2.41.0
900f79b728ea94ff34a7743d92f400253983682
Fri, 19 Apr 2024 08:06:40 -0700
[PATCH 0402/1000] [FSDP2] Added `set_reshard_after_backward` (#124319)
This PR adds a `set_reshard_after_backward` method to allow disabling resharding after backward. `reshard_after_backward=False` can be used with `reshard_after_forward=False` to implement "ZeRO-1", where the all-gather happens only on the first microbatch's forward and the reduce-scatter only on the last microbatch's backward. ``` for microbatch_idx, microbatch in enumerate(dataloader): is_last_microbatch = microbatch_idx == num_microbatches - 1 model.set_requires_gradient_sync(is_last_microbatch) model.set_reshard_after_backward(is_last_microbatch) model.set_is_last_backward(is_last_microbatch) microbatch_fwd_bwd(model, microbatch, microbatch_idx) ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/124319 Approved by: https://github.com/weifengpy
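For completeness, a sketch of the same recipe in a runnable form. It assumes the per-module `fully_shard` API from the files in this diff, at least two GPUs, and a launch such as `torchrun --nproc-per-node=2 script.py`; the model, batch sizes, and microbatch count are arbitrary:

```python
import os

import torch
import torch.distributed as dist
from torch.distributed._composable.fsdp import fully_shard

dist.init_process_group("nccl")
torch.cuda.set_device(int(os.environ.get("LOCAL_RANK", 0)))

model = torch.nn.Sequential(*[torch.nn.Linear(64, 64, device="cuda") for _ in range(3)])
for layer in model:
    fully_shard(layer, reshard_after_forward=False)
fully_shard(model, reshard_after_forward=False)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)

num_microbatches = 3
for microbatch_idx in range(num_microbatches):
    is_last_microbatch = microbatch_idx == num_microbatches - 1
    # Only communicate (reduce-scatter) and reshard on the last microbatch.
    model.set_requires_gradient_sync(is_last_microbatch)
    model.set_reshard_after_backward(is_last_microbatch)
    model.set_is_last_backward(is_last_microbatch)
    loss = model(torch.randn(8, 64, device="cuda")).sum()
    loss.backward()
optim.step()
optim.zero_grad()
dist.destroy_process_group()
```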
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_training.py b/test/distributed/_composable/fsdp/test_fully_shard_training.py index d29ccebf86..00dcf4308d 100644 --- a/test/distributed/_composable/fsdp/test_fully_shard_training.py +++ b/test/distributed/_composable/fsdp/test_fully_shard_training.py @@ -616,11 +616,10 @@ class TestFullyShardGradientAccumulation(FSDPTest): return min(2, torch.cuda.device_count()) @skip_if_lt_x_gpu(2) - def test_set_requires_gradient_sync(self): + def test_gradient_accumulation(self): """ - Tests the ``set_requires_gradient_sync`` API to exercise gradient - accumulation without gradient reduction. This test includes mixing with - gradient accumulation *with* gradient reduction. + Tests gradient accumulation with/without gradient reduction and + with/without resharding after backward. """ self.run_subtests( { @@ -629,15 +628,22 @@ class TestFullyShardGradientAccumulation(FSDPTest): # "root_only": disable reduce-scatter for root's linear only # "some_mlps": disable reduce-scatter for some MLPs "mode": ["all", "root_only", "some_mlps"], + "reshard_after_backward": [False, True], }, - self._test_set_requires_gradient_sync, + self._test_gradient_accumulation, ) - def _test_set_requires_gradient_sync( + def _test_gradient_accumulation( self, reshard_after_forward: Union[bool, int], mode: str, + reshard_after_backward: bool, ): + if not reshard_after_backward and ( + reshard_after_forward is not False or mode == "some_mlps" + ): + return # skip since not common + torch.manual_seed(42) local_batch_size, lin_dim, num_mlps, num_microbatches = (2, 32, 3, 3) global_batch_size = local_batch_size * self.world_size @@ -658,9 +664,17 @@ class TestFullyShardGradientAccumulation(FSDPTest): fully_shard_fn(model) # root gets the 1st linear ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2) optim = torch.optim.Adam(model.parameters(), lr=1e-2) + + orig_all_gather = dist.all_gather_into_tensor + all_gather_count = 0 orig_reduce_scatter = dist.reduce_scatter_tensor reduce_scatter_count = 0 + def all_gather_with_count(*args, **kwargs): + nonlocal all_gather_count + all_gather_count += 1 + return orig_all_gather(*args, **kwargs) + def reduce_scatter_with_count(*args, **kwargs): nonlocal reduce_scatter_count reduce_scatter_count += 1 @@ -668,18 +682,29 @@ class TestFullyShardGradientAccumulation(FSDPTest): torch.manual_seed(1) # same on all ranks for iter_idx in range(5): - with patch_reduce_scatter(reduce_scatter_with_count): + with patch_all_gather(all_gather_with_count), patch_reduce_scatter( + reduce_scatter_with_count + ): for microbatch_idx in range(num_microbatches): is_last_microbatch = microbatch_idx == num_microbatches - 1 if mode == "all": model.set_requires_gradient_sync(is_last_microbatch) + if not reshard_after_backward: + model.set_reshard_after_backward(is_last_microbatch) elif mode == "some_mlps": for mlp in model[1 : 1 + num_mlps_to_disable_reduce_scatter]: mlp.set_requires_gradient_sync(is_last_microbatch) + if not reshard_after_backward: + mlp.set_reshard_after_backward(is_last_microbatch) elif mode == "root_only": model.set_requires_gradient_sync( is_last_microbatch, recurse=False ) + if not reshard_after_backward: + model.set_reshard_after_backward( + is_last_microbatch, recurse=False + ) + global_inp = torch.rand((global_batch_size, lin_dim), device="cuda") local_inp = global_inp[ self.rank @@ -695,6 +720,7 @@ class TestFullyShardGradientAccumulation(FSDPTest): losses[-1].backward() dist.all_reduce(losses[1]) # partial -> replicated 
self.assertEqual(losses[0], losses[1]) + # Expect one reduce-scatter per MLP plus one for the root's linear # on the last microbatch expected_reduce_scatter_count = num_mlps + 1 @@ -709,6 +735,32 @@ class TestFullyShardGradientAccumulation(FSDPTest): expected_reduce_scatter_count += (num_mlps) * (num_microbatches - 1) self.assertEqual(reduce_scatter_count, expected_reduce_scatter_count) reduce_scatter_count = 0 + + # Expect one all-gather per MLP plus one for the root's linear in + # the first microbatch's forward + expected_all_gather_count = num_mlps + 1 + if reshard_after_forward is not False: # `True` or `2` + # Add the number of MLPs without the +1 for the backward + # all-gathers since the root does not reshard after forward + expected_all_gather_count += num_mlps + # Multiply by the number of microbatches since these + # all-gathers run every microbatch + expected_all_gather_count *= num_microbatches + elif reshard_after_backward: # `reshard_after_forward=False` + expected_all_gather_count *= num_microbatches + elif mode == "all": # `reshard_after_forward/backward=False` + # Only reshard parameters after the last microbatch's backward, + # so there should not be any more all-gathers + pass + elif mode == "root_only": # `reshard_after_forward/backward=False` + # The MLPs should still contribute all-gathers in each + # microbatch forward + expected_all_gather_count += num_mlps * (num_microbatches - 1) + self.assertEqual(all_gather_count, expected_all_gather_count) + all_gather_count = 0 + + # Average the ref model's gradients over the world size to match + # data parallel semantics for param in ref_model.parameters(): if param.grad is not None: param.grad.div_(self.world_size) @@ -722,11 +774,16 @@ class TestFullyShardGradientAccumulation(FSDPTest): @skip_if_lt_x_gpu(2) def test_1f1b_microbatching(self): self.run_subtests( - {"use_explicit_unshard": [False, True]}, + { + "use_explicit_unshard": [False, True], + "reshard_after_backward": [False, True], + }, self._test_1f1b_microbatching, ) - def _test_1f1b_microbatching(self, use_explicit_unshard: bool): + def _test_1f1b_microbatching( + self, use_explicit_unshard: bool, reshard_after_backward: bool + ): torch.manual_seed(42) model_args = ModelArgs(dropout_p=0.0) model = Transformer(model_args) @@ -764,6 +821,8 @@ class TestFullyShardGradientAccumulation(FSDPTest): is_last_microbatch = inp_idx == num_microbatches - 1 model.set_requires_gradient_sync(is_last_microbatch) model.set_is_last_backward(is_last_microbatch) + if not reshard_after_backward: + model.set_reshard_after_backward(is_last_microbatch) losses.append(model(inp).sum()) losses[-1].backward() ref_losses.append(ref_model(inp).sum()) diff --git a/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/torch/distributed/_composable/fsdp/_fsdp_param_group.py index 7a7addb40a..ab8dfe1aa1 100644 --- a/torch/distributed/_composable/fsdp/_fsdp_param_group.py +++ b/torch/distributed/_composable/fsdp/_fsdp_param_group.py @@ -128,6 +128,9 @@ class FSDPParamGroup: # `self.reduce_grads` is true, in which case setting this to false # means reduce-scatter but no all-reduce self.all_reduce_grads: bool = True + # Whether to reshard parameters after backward (only useful for + # gradient accumulation) + self.reshard_after_backward: bool = True # - CUDA events for stream synchronization # Holds the all-gather output buffer, sync objects, and metadata @@ -309,7 +312,8 @@ class FSDPParamGroup: self._training_state = TrainingState.POST_BACKWARD with 
torch.profiler.record_function("FSDP::post_backward_reshard"): if not self.reduce_grads: - self.reshard() + if self.reshard_after_backward: + self.reshard() return # Save the autograd-computed gradients before resharding to only # access the unsharded parameters when their data is present @@ -320,7 +324,8 @@ class FSDPParamGroup: fsdp_params_with_grad.append(fsdp_param) unsharded_grads.append(fsdp_param.unsharded_grad_data) fsdp_param.unsharded_param.grad = None - self.reshard() + if self.reshard_after_backward: + self.reshard() if len(fsdp_params_with_grad) == 0: return with torch.profiler.record_function("FSDP::post_backward_reduce"): diff --git a/torch/distributed/_composable/fsdp/fully_shard.py b/torch/distributed/_composable/fsdp/fully_shard.py index a4909bbff7..06af8c2f90 100644 --- a/torch/distributed/_composable/fsdp/fully_shard.py +++ b/torch/distributed/_composable/fsdp/fully_shard.py @@ -220,7 +220,9 @@ class FSDP: fsdp_param_group.reduce_grads = requires_gradient_sync fsdp_param_group.all_reduce_grads = requires_gradient_sync - def set_requires_all_reduce(self, requires_all_reduce: bool, recurse: bool = True): + def set_requires_all_reduce( + self, requires_all_reduce: bool, recurse: bool = True + ) -> None: """ Sets if the module should all-reduce gradients. This can be used to implement gradient accumulation with only reduce-scatter but not @@ -237,6 +239,28 @@ class FSDP: if fsdp_param_group := state._fsdp_param_group: fsdp_param_group.all_reduce_grads = requires_all_reduce + def set_reshard_after_backward( + self, reshard_after_backward: bool, recurse: bool = True + ) -> None: + """ + Sets if the module should reshard parameters after backward. This can + be used during gradient accumulation to trade off higher memory for + reduced communication. + + Args: + reshard_after_backward (bool): Whether to reshard parameters after + backward. + recurse (bool): Whether to set for all submodules or just the + passed-in module. + """ + self_module = cast(nn.Module, self) + modules = list(self_module.modules()) if recurse else [self_module] + for module in modules: + if isinstance(module, FSDP): + state = module._get_fsdp_state() + if fsdp_param_group := state._fsdp_param_group: + fsdp_param_group.reshard_after_backward = reshard_after_backward + def _get_fsdp_state(self) -> FSDPState: if (state := _get_module_fsdp_state(cast(nn.Module, self))) is None: raise AssertionError(f"No FSDP state found on {self}")
2.41.0
0560f7b3b27a9f041d16b94744a7054dc2cdf3f
Fri, 19 Apr 2024 08:49:26 -0700
[PATCH 0404/1000] [opcheck] Stop doing test_aot_dispatch_static by default (#124495)
Motivations: - this is pretty redundant with test_aot_dispatch_dynamic. - The user story for opcheck is that a user should use opcheck to see if their operator was "registered correctly". If a user's custom op only supports dynamic shapes, then it's a bit awkward for one of the tests (e.g. `test_aot_dispatch_static`) to fail. - We've already stopped running test_aot_dispatch_static in all of our opcheck tests. Test Plan: - wait for CI Pull Request resolved: https://github.com/pytorch/pytorch/pull/124495 Approved by: https://github.com/williamwen42 ghstack dependencies: #124180, #124200, #124299, #124134, #124199, #124403, #124414
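A small sketch of what the new default looks like from the caller's side. The import path follows the file touched in this diff; the aten op below is just a stand-in for a user's custom op overload:

```python
import torch
from torch.testing._internal.optests import opcheck

op = torch.ops.aten.sin.default  # stand-in; normally your own custom op overload
results = opcheck(op, (torch.randn(3),), {})

# test_aot_dispatch_static is no longer in the default test_utils, so expect
# results only for test_schema, test_autograd_registration, test_faketensor,
# and test_aot_dispatch_dynamic.
print(results)
```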
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py index 8b0cc87613..6038bd3914 100644 --- a/test/test_custom_ops.py +++ b/test/test_custom_ops.py @@ -332,19 +332,11 @@ class TestCustomOpTesting(CustomOpTestCaseBase): ): args = [sample_input.input] + list(sample_input.args) kwargs = sample_input.kwargs - if op.op in ( - numpy_nonzero._opoverload, - torch.ops._torch_testing.numpy_nms, - ): - ctx = self.assertRaisesRegex(optests.OpCheckError, "failed with") - else: - ctx = contextlib.nullcontext() - with ctx: - optests.opcheck( - op.op, - args, - kwargs, - ) + optests.opcheck( + op.op, + args, + kwargs, + ) def test_opcheck_fails_basic(self, device): @custom_op(f"{self.test_ns}::foo") @@ -2777,6 +2769,7 @@ optests.generate_opcheck_tests( additional_decorators={ "test_pt2_compliant_tag_mini_op_test_no_abstract": [unittest.expectedFailure] }, + test_utils=optests.generate_tests.DEPRECATED_DEFAULT_TEST_UTILS, ) optests.generate_opcheck_tests( @@ -2786,6 +2779,7 @@ optests.generate_opcheck_tests( os.path.dirname(__file__), "minioptest_failures_dict.json", ), + test_utils=optests.generate_tests.DEPRECATED_DEFAULT_TEST_UTILS, ) @@ -2878,7 +2872,7 @@ opcheck(op, args, kwargs, test_utils="test_schema") failures = { "mini_op_test::incorrect_schema": { - "MiniOpTest.test_aot_dispatch_static__test_delayed_error": { + "MiniOpTest.test_aot_dispatch_dynamic__test_delayed_error": { "comment": "", "status": "success", } @@ -2908,7 +2902,7 @@ opcheck(op, args, kwargs, test_utils="test_schema") failures = { "mini_op_test::incorrect_schema": { - "MiniOpTest.test_aot_dispatch_static__test_delayed_error_nopenopenope": { + "MiniOpTest.test_aot_dispatch_dynamic__test_delayed_error_nopenopenope": { "comment": "", "status": "xfail", }, @@ -2939,7 +2933,6 @@ opcheck(op, args, kwargs, test_utils="test_schema") "test_schema": "SUCCESS", "test_autograd_registration": "SUCCESS", "test_faketensor": "SUCCESS", - "test_aot_dispatch_static": "SUCCESS", "test_aot_dispatch_dynamic": "SUCCESS", }, ) @@ -2988,7 +2981,6 @@ opcheck(op, args, kwargs, test_utils="test_schema") { "test_autograd_registration": "SUCCESS", "test_faketensor": "SUCCESS", - "test_aot_dispatch_static": "SUCCESS", "test_aot_dispatch_dynamic": "SUCCESS", }, ) diff --git a/torch/testing/_internal/optests/generate_tests.py b/torch/testing/_internal/optests/generate_tests.py index 2fbbd8f6c3..098c2b4cfd 100644 --- a/torch/testing/_internal/optests/generate_tests.py +++ b/torch/testing/_internal/optests/generate_tests.py @@ -146,10 +146,13 @@ DEFAULT_TEST_UTILS = [ "test_schema", "test_autograd_registration", "test_faketensor", - "test_aot_dispatch_static", "test_aot_dispatch_dynamic", ] +DEPRECATED_DEFAULT_TEST_UTILS = DEFAULT_TEST_UTILS + [ + "test_aot_dispatch_static", +] + def generate_opcheck_tests( testcase: Any, @@ -621,7 +624,7 @@ def opcheck( args: Tuple[Any, ...], kwargs: Optional[Dict[str, Any]] = None, *, - test_utils: Union[str, List[str]] = "ALL", + test_utils: Union[str, List[str]] = DEFAULT_TEST_UTILS, raise_exception: bool = True, ) -> Dict[str, str]: """Given an operator and some sample arguments, tests if the operator is
2.41.0
3504af56e89af18133dd55280c3f41b3a151b95
Fri, 19 Apr 2024 22:01:23 +0000
[PATCH 0405/1000] Enable UFMT on `test/scripts` and some files (#124137)
Part of: #123062 Ran lintrunner on: - `test/scripts` - `test/simulate_nccl_errors.py` - `test/test_ao_sparsity.py` - `test/test_autocast.py` - `test/test_binary_ufuncs.py` - `test/test_bundled_images.py` - `test/test_bundled_inputs.py` - `test/test_comparison_utils.py` - `test/test_compile_benchmark_util.py` - `test/test_complex.py` - `test/test_cpp_api_parity.py` - `test/test_cpp_extensions_aot.py` - `test/test_cpp_extensions_jit.py` - `test/test_cpp_extensions_open_device_registration.py` Detail: ```bash $ lintrunner -a --take UFMT --all-files ok No lint issues. Successfully applied all patches. ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/124137 Approved by: https://github.com/soulitzer
diff --git a/.lintrunner.toml b/.lintrunner.toml index 3a161851e3..8942e7fdff 100644 --- a/.lintrunner.toml +++ b/.lintrunner.toml @@ -1051,21 +1051,6 @@ exclude_patterns = [ 'test/quantization/fx/test_numeric_suite_fx.py', 'test/quantization/fx/test_quantize_fx.py', 'test/quantization/fx/test_subgraph_rewriter.py', - 'test/scripts/cuda_memcheck_common.py', - 'test/scripts/run_cuda_memcheck.py', - 'test/simulate_nccl_errors.py', - 'test/test_ao_sparsity.py', - 'test/test_autocast.py', - 'test/test_binary_ufuncs.py', - 'test/test_bundled_images.py', - 'test/test_bundled_inputs.py', - 'test/test_comparison_utils.py', - 'test/test_compile_benchmark_util.py', - 'test/test_complex.py', - 'test/test_cpp_api_parity.py', - 'test/test_cpp_extensions_aot.py', - 'test/test_cpp_extensions_jit.py', - 'test/test_cpp_extensions_open_device_registration.py', 'test/test_cuda.py', 'test/test_cuda_expandable_segments.py', 'test/test_cuda_multigpu.py', diff --git a/test/scripts/cuda_memcheck_common.py b/test/scripts/cuda_memcheck_common.py index aa52ced783..65c91fe687 100644 --- a/test/scripts/cuda_memcheck_common.py +++ b/test/scripts/cuda_memcheck_common.py @@ -1,8 +1,10 @@ # this file contains a simple parser that parses report # from cuda-memcheck + class ParseError(Exception): """Whenever the simple parser is unable to parse the report, this exception will be raised""" + pass @@ -77,25 +79,25 @@ def parse(message): ========= ERROR SUMMARY: 4 errors """ errors = [] - HEAD = '=========' + HEAD = "=========" headlen = len(HEAD) started = False in_message = False message_lines = [] lines = message.splitlines() for l in lines: - if l == HEAD + ' CUDA-MEMCHECK': + if l == HEAD + " CUDA-MEMCHECK": started = True continue if not started or not l.startswith(HEAD): continue - l = l[headlen + 1:] - if l.startswith('ERROR SUMMARY:'): + l = l[headlen + 1 :] + if l.startswith("ERROR SUMMARY:"): return Report(l, errors) if not in_message: in_message = True message_lines = [l] - elif l == '': + elif l == "": errors.append(Error(message_lines)) in_message = False else: diff --git a/test/scripts/run_cuda_memcheck.py b/test/scripts/run_cuda_memcheck.py index 54c0e7581e..924048030e 100755 --- a/test/scripts/run_cuda_memcheck.py +++ b/test/scripts/run_cuda_memcheck.py @@ -12,39 +12,62 @@ Example usage: Note that running cuda-memcheck could be very slow. """ +import argparse import asyncio -import torch import multiprocessing -import argparse -import subprocess -import tqdm import os +import subprocess import sys + import cuda_memcheck_common as cmc +import torch +import tqdm ALL_TESTS = [] GPUS = torch.cuda.device_count() # parse arguments parser = argparse.ArgumentParser(description="Run isolated cuda-memcheck on unit tests") -parser.add_argument('filename', help="the python file for a test, such as test_torch.py") -parser.add_argument('timeout', type=int, help='kill the test if it does not terminate in a certain amount of seconds') -parser.add_argument('--strict', action='store_true', - help='Whether to show cublas/cudnn errors. 
These errors are ignored by default because' - 'cublas/cudnn does not run error-free under cuda-memcheck, and ignoring these errors') -parser.add_argument('--nproc', type=int, default=multiprocessing.cpu_count(), - help='Number of processes running tests, default to number of cores in the system') -parser.add_argument('--gpus', default='all', - help='GPU assignments for each process, it could be "all", or : separated list like "1,2:3,4:5,6"') -parser.add_argument('--ci', action='store_true', - help='Whether this script is executed in CI. When executed inside a CI, this script fails when ' - 'an error is detected. Also, it will not show tqdm progress bar, but directly print the error' - 'to stdout instead.') -parser.add_argument('--nohang', action='store_true', help='Treat timeout as success') -parser.add_argument('--split', type=int, default=1, help='Split the job into pieces') -parser.add_argument('--rank', type=int, default=0, help='Which piece this process should pick') +parser.add_argument( + "filename", help="the python file for a test, such as test_torch.py" +) +parser.add_argument( + "timeout", + type=int, + help="kill the test if it does not terminate in a certain amount of seconds", +) +parser.add_argument( + "--strict", + action="store_true", + help="Whether to show cublas/cudnn errors. These errors are ignored by default because" + "cublas/cudnn does not run error-free under cuda-memcheck, and ignoring these errors", +) +parser.add_argument( + "--nproc", + type=int, + default=multiprocessing.cpu_count(), + help="Number of processes running tests, default to number of cores in the system", +) +parser.add_argument( + "--gpus", + default="all", + help='GPU assignments for each process, it could be "all", or : separated list like "1,2:3,4:5,6"', +) +parser.add_argument( + "--ci", + action="store_true", + help="Whether this script is executed in CI. When executed inside a CI, this script fails when " + "an error is detected. Also, it will not show tqdm progress bar, but directly print the error" + "to stdout instead.", +) +parser.add_argument("--nohang", action="store_true", help="Treat timeout as success") +parser.add_argument("--split", type=int, default=1, help="Split the job into pieces") +parser.add_argument( + "--rank", type=int, default=0, help="Which piece this process should pick" +) args = parser.parse_args() + # Filters that ignores cublas/cudnn errors # TODO (@zasdfgbnm): When can we remove this? Will cublas/cudnn run error-free under cuda-memcheck? 
def is_ignored_only(output): @@ -56,32 +79,43 @@ def is_ignored_only(output): return False count_ignored_errors = 0 for e in report.errors: - if 'libcublas' in ''.join(e.stack) or 'libcudnn' in ''.join(e.stack) or 'libcufft' in ''.join(e.stack): + if ( + "libcublas" in "".join(e.stack) + or "libcudnn" in "".join(e.stack) + or "libcufft" in "".join(e.stack) + ): count_ignored_errors += 1 return count_ignored_errors == report.num_errors + # Set environment PYTORCH_CUDA_MEMCHECK=1 to allow skipping some tests -os.environ['PYTORCH_CUDA_MEMCHECK'] = '1' +os.environ["PYTORCH_CUDA_MEMCHECK"] = "1" # Discover tests: # To get a list of tests, run: # pytest --setup-only test/test_torch.py # and then parse the output -proc = subprocess.Popen(['pytest', '--setup-only', args.filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE) +proc = subprocess.Popen( + ["pytest", "--setup-only", args.filename], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, +) stdout, stderr = proc.communicate() lines = stdout.decode().strip().splitlines() for line in lines: - if '(fixtures used:' in line: + if "(fixtures used:" in line: line = line.strip().split()[0] - line = line[line.find('::') + 2:] - line = line.replace('::', '.') + line = line[line.find("::") + 2 :] + line = line.replace("::", ".") ALL_TESTS.append(line) + # Do a simple filtering: # if 'cpu' or 'CPU' is in the name and 'cuda' or 'CUDA' is not in the name, then skip it def is_cpu_only(name): name = name.lower() - return ('cpu' in name) and "cuda" not in name + return ("cpu" in name) and "cuda" not in name + ALL_TESTS = [x for x in ALL_TESTS if not is_cpu_only(x)] @@ -101,7 +135,7 @@ ALL_TESTS = ALL_TESTS[start:end] # or as specified by the user progress = 0 if not args.ci: - logfile = open('result.log', 'w') + logfile = open("result.log", "w") progressbar = tqdm.tqdm(total=len(ALL_TESTS)) else: logfile = sys.stdout @@ -110,53 +144,61 @@ else: class ProgressbarStub: def update(self, *args): return + progressbar = ProgressbarStub() + async def run1(coroutine_id): global progress - if args.gpus == 'all': + if args.gpus == "all": gpuid = coroutine_id % GPUS else: - gpu_assignments = args.gpus.split(':') - assert args.nproc == len(gpu_assignments), 'Please specify GPU assignment for each process, separated by :' + gpu_assignments = args.gpus.split(":") + assert args.nproc == len( + gpu_assignments + ), "Please specify GPU assignment for each process, separated by :" gpuid = gpu_assignments[coroutine_id] while progress < len(ALL_TESTS): test = ALL_TESTS[progress] progress += 1 - cmd = f'CUDA_VISIBLE_DEVICES={gpuid} cuda-memcheck --error-exitcode 1 python {args.filename} {test}' - proc = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE) + cmd = f"CUDA_VISIBLE_DEVICES={gpuid} cuda-memcheck --error-exitcode 1 python {args.filename} {test}" + proc = await asyncio.create_subprocess_shell( + cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE + ) try: stdout, stderr = await asyncio.wait_for(proc.communicate(), args.timeout) except asyncio.TimeoutError: - print('Timeout:', test, file=logfile) + print("Timeout:", test, file=logfile) proc.kill() if args.ci and not args.nohang: sys.exit("Hang detected on cuda-memcheck") else: if proc.returncode == 0: - print('Success:', test, file=logfile) + print("Success:", test, file=logfile) else: stdout = stdout.decode() stderr = stderr.decode() should_display = args.strict or not is_ignored_only(stdout) if should_display: - print('Fail:', test, 
file=logfile) + print("Fail:", test, file=logfile) print(stdout, file=logfile) print(stderr, file=logfile) if args.ci: sys.exit("Failure detected on cuda-memcheck") else: - print('Ignored:', test, file=logfile) + print("Ignored:", test, file=logfile) del proc progressbar.update(1) + async def main(): tasks = [asyncio.ensure_future(run1(i)) for i in range(args.nproc)] for t in tasks: await t -if __name__ == '__main__': + +if __name__ == "__main__": loop = asyncio.get_event_loop() loop.run_until_complete(main()) diff --git a/test/simulate_nccl_errors.py b/test/simulate_nccl_errors.py index 6b7d3cec1b..d3275893f7 100644 --- a/test/simulate_nccl_errors.py +++ b/test/simulate_nccl_errors.py @@ -1,22 +1,26 @@ - -import torch.distributed as c10d -import torch import argparse -import os import logging -logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) +import os + +import torch +import torch.distributed as c10d + +logging.basicConfig( + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO +) if __name__ == "__main__": parser = argparse.ArgumentParser( - description='Simple script to simulate NCCL errors. The script is ' - 'supposed to be run on multiple different nodes simultaneously with ' - 'appropriate rank and world_size. The script run an allreduce() on ' - 'the rank 0 node and aborts all the other nodes to simulate an error ' - 'in NCCL') - parser.add_argument('addr', help='address of the master node to connect to.') - parser.add_argument('port', help='port of the master node to connect to.') - parser.add_argument('rank', help='rank of this node') - parser.add_argument('world_size', help='number of nodes in process group') + description="Simple script to simulate NCCL errors. The script is " + "supposed to be run on multiple different nodes simultaneously with " + "appropriate rank and world_size. 
The script run an allreduce() on " + "the rank 0 node and aborts all the other nodes to simulate an error " + "in NCCL" + ) + parser.add_argument("addr", help="address of the master node to connect to.") + parser.add_argument("port", help="port of the master node to connect to.") + parser.add_argument("rank", help="rank of this node") + parser.add_argument("world_size", help="number of nodes in process group") args = parser.parse_args() rank = int(args.rank) world_size = int(args.world_size) @@ -24,14 +28,14 @@ if __name__ == "__main__": store = c10d.TCPStore(args.addr, port, world_size, rank == 0) process_group = c10d.ProcessGroupNCCL(store, rank, world_size) - logging.info('Running first allreduce') + logging.info("Running first allreduce") process_group.allreduce(torch.rand(10).cuda(rank)).wait() if rank == 0: - logging.info('Running second allreduce only on rank 0') + logging.info("Running second allreduce only on rank 0") work = process_group.allreduce(torch.rand(10).cuda(rank)) - logging.info('Waiting for allreduce to complete...') + logging.info("Waiting for allreduce to complete...") work.wait() - logging.info('Second allreduce successful: %s', work.is_success()) + logging.info("Second allreduce successful: %s", work.is_success()) else: - logging.info('Aborting all other ranks.') + logging.info("Aborting all other ranks.") os.abort() diff --git a/test/test_ao_sparsity.py b/test/test_ao_sparsity.py index cfee70b0cb..37a13ede6d 100644 --- a/test/test_ao_sparsity.py +++ b/test/test_ao_sparsity.py @@ -1,46 +1,59 @@ # Owner(s): ["module: unknown"] -from torch.testing._internal.common_utils import run_tests, IS_ARM64 - # Kernels -from ao.sparsity.test_kernels import TestQuantizedSparseKernels # noqa: F401 -from ao.sparsity.test_kernels import TestQuantizedSparseLayers # noqa: F401 +from ao.sparsity.test_kernels import ( # noqa: F401 # noqa: F401 + TestQuantizedSparseKernels, + TestQuantizedSparseLayers, +) # Parametrizations from ao.sparsity.test_parametrization import TestFakeSparsity # noqa: F401 +# Scheduler +from ao.sparsity.test_scheduler import ( # noqa: F401 # noqa: F401 + TestCubicScheduler, + TestScheduler, +) + # Sparsifier -from ao.sparsity.test_sparsifier import TestBaseSparsifier # noqa: F401 -from ao.sparsity.test_sparsifier import TestWeightNormSparsifier # noqa: F401 -from ao.sparsity.test_sparsifier import TestNearlyDiagonalSparsifier # noqa: F401 +from ao.sparsity.test_sparsifier import ( # noqa: F401 # noqa: F401 # noqa: F401 + TestBaseSparsifier, + TestNearlyDiagonalSparsifier, + TestWeightNormSparsifier, +) # Structured Pruning -from ao.sparsity.test_structured_sparsifier import TestBaseStructuredSparsifier # noqa: F401 -from ao.sparsity.test_structured_sparsifier import TestSaliencyPruner # noqa: F401 -from ao.sparsity.test_structured_sparsifier import TestFPGMPruner # noqa: F401 - -# Scheduler -from ao.sparsity.test_scheduler import TestScheduler # noqa: F401 -from ao.sparsity.test_scheduler import TestCubicScheduler # noqa: F401 +from ao.sparsity.test_structured_sparsifier import ( # noqa: F401 # noqa: F401 # noqa: F401 + TestBaseStructuredSparsifier, + TestFPGMPruner, + TestSaliencyPruner, +) +from torch.testing._internal.common_utils import IS_ARM64, run_tests # Composability if not IS_ARM64: - from ao.sparsity.test_composability import TestComposability # noqa: F401 - from ao.sparsity.test_composability import TestFxComposability # noqa: F401 - -# Utilities -from ao.sparsity.test_sparsity_utils import TestSparsityUtilFunctions # noqa: F401 + from 
ao.sparsity.test_composability import ( # noqa: F401 # noqa: F401 + TestComposability, + TestFxComposability, + ) -# Data Sparsifier -from ao.sparsity.test_data_sparsifier import TestBaseDataSparsifier # noqa: F401 -from ao.sparsity.test_data_sparsifier import TestNormDataSparsifiers # noqa: F401 -from ao.sparsity.test_data_sparsifier import TestQuantizationUtils # noqa: F401 +# Activation Sparsifier +from ao.sparsity.test_activation_sparsifier import ( # noqa: F401 + TestActivationSparsifier, +) # Data Scheduler from ao.sparsity.test_data_scheduler import TestBaseDataScheduler # noqa: F401 -# Activation Sparsifier -from ao.sparsity.test_activation_sparsifier import TestActivationSparsifier # noqa: F401 +# Data Sparsifier +from ao.sparsity.test_data_sparsifier import ( # noqa: F401 # noqa: F401 # noqa: F401 + TestBaseDataSparsifier, + TestNormDataSparsifiers, + TestQuantizationUtils, +) + +# Utilities +from ao.sparsity.test_sparsity_utils import TestSparsityUtilFunctions # noqa: F401 if __name__ == "__main__": run_tests() diff --git a/test/test_autocast.py b/test/test_autocast.py index c82005fd19..2f788b7f65 100644 --- a/test/test_autocast.py +++ b/test/test_autocast.py @@ -4,14 +4,20 @@ import collections import unittest import torch -from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfTorchDynamo from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists +from torch.testing._internal.common_utils import ( + IS_WINDOWS, + run_tests, + skipIfTorchDynamo, + TestCase, +) from torch.utils._python_dispatch import TorchDispatchMode + class TestAutocastCPU(TestCase): def setUp(self): super().setUp() - self.autocast_lists = AutocastCPUTestLists(torch.device('cpu')) + self.autocast_lists = AutocastCPUTestLists(torch.device("cpu")) def tearDown(self): del self.autocast_lists @@ -49,18 +55,23 @@ class TestAutocastCPU(TestCase): if module is not None and hasattr(module, op): output = getattr(module, op)(*args, **add_kwargs) if isinstance(output, torch.Tensor): - self.assertTrue(out_type == output.dtype, - f"autocast for torch.{op} produced {output.dtype}, should produce {out_type}") + self.assertTrue( + out_type == output.dtype, + f"autocast for torch.{op} produced {output.dtype}, should produce {out_type}", + ) # Try Tensor.* variant: if hasattr(torch.Tensor, op): output_method = getattr(args[0], op)(*args[1:], **add_kwargs) if isinstance(output_method, torch.Tensor): - self.assertTrue(out_type == output_method.dtype, - "autocast for torch.{} produced {}, should produce torch.{}" - .format(op, output_method.dtype, out_type)) - - self.assertTrue((output is not None) or (output_method is not None), - f"{op} not found as an attribute on either Tensor or the requested module {module}") + self.assertTrue( + out_type == output_method.dtype, + f"autocast for torch.{op} produced {output_method.dtype}, should produce torch.{out_type}", + ) + + self.assertTrue( + (output is not None) or (output_method is not None), + f"{op} not found as an attribute on either Tensor or the requested module {module}", + ) # Accounts for ops that return Tensors, iterables, and other non-Tensors. # For example, lstm_cell returns a tuple and equal returns bool. 
@@ -76,7 +87,9 @@ class TestAutocastCPU(TestCase): if (output is not None) and (output_method is not None): self.assertTrue(type(output) == type(output_method)) comparison = compare(output, output_method) - self.assertTrue(comparison, f"torch.{op} result did not match Tensor.{op} result") + self.assertTrue( + comparison, f"torch.{op} result did not match Tensor.{op} result" + ) # Compare numerics to Python-side "autocasting" that (we expect) does the same thing # as the C++-side autocasting, and should be bitwise accurate. @@ -85,9 +98,13 @@ class TestAutocastCPU(TestCase): self.assertFalse(torch.is_autocast_cpu_enabled()) if module is not None and hasattr(module, op): - control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs) + control = getattr(module, op)( + *cast(args, run_as_type), **add_kwargs + ) else: - control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs) + control = getattr(args[0].to(run_as_type), op)( + *cast(args[1:], run_as_type), **add_kwargs + ) self.assertTrue(type(output_to_compare) == type(control)) comparison = compare(output_to_compare, control) self.assertTrue(comparison, f"torch.{op} result did not match control") @@ -102,22 +119,51 @@ class TestAutocastCPU(TestCase): @skipIfTorchDynamo() def test_autocast_torch_expect_builtin_promote(self): - for op, args1, args2, out_type in self.autocast_lists.torch_expect_builtin_promote: + for ( + op, + args1, + args2, + out_type, + ) in self.autocast_lists.torch_expect_builtin_promote: self._run_autocast_outofplace(op, args1, torch.float32, out_type=out_type) - self._run_autocast_outofplace(op, args2, torch.float32, out_type=out_type, amp_dtype=torch.float16) + self._run_autocast_outofplace( + op, args2, torch.float32, out_type=out_type, amp_dtype=torch.float16 + ) @skipIfTorchDynamo() def test_autocast_methods_expect_builtin_promote(self): - for op, args1, args2, out_type in self.autocast_lists.methods_expect_builtin_promote: - self._run_autocast_outofplace(op, args1, torch.float32, module=None, out_type=out_type) - self._run_autocast_outofplace(op, args2, torch.float32, module=None, out_type=out_type, amp_dtype=torch.float16) + for ( + op, + args1, + args2, + out_type, + ) in self.autocast_lists.methods_expect_builtin_promote: + self._run_autocast_outofplace( + op, args1, torch.float32, module=None, out_type=out_type + ) + self._run_autocast_outofplace( + op, + args2, + torch.float32, + module=None, + out_type=out_type, + amp_dtype=torch.float16, + ) @skipIfTorchDynamo() def test_autocast_torch_16(self): for op_with_args in self.autocast_lists.torch_16: op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args) - self._run_autocast_outofplace(op, args, torch.bfloat16, add_kwargs=maybe_kwargs) - self._run_autocast_outofplace(op, args, torch.float16, add_kwargs=maybe_kwargs, amp_dtype=torch.float16) + self._run_autocast_outofplace( + op, args, torch.bfloat16, add_kwargs=maybe_kwargs + ) + self._run_autocast_outofplace( + op, + args, + torch.float16, + add_kwargs=maybe_kwargs, + amp_dtype=torch.float16, + ) @skipIfTorchDynamo() def test_autocast_nn_16(self): @@ -139,8 +185,16 @@ class TestAutocastCPU(TestCase): def test_autocast_torch_fp32(self): for op_with_args in self.autocast_lists.torch_fp32: op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args) - self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs) - self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs, amp_dtype=torch.float16) + self._run_autocast_outofplace( 
+ op, args, torch.float32, add_kwargs=maybe_kwargs + ) + self._run_autocast_outofplace( + op, + args, + torch.float32, + add_kwargs=maybe_kwargs, + amp_dtype=torch.float16, + ) @skipIfTorchDynamo() def test_autocast_nn_fp32(self): @@ -162,11 +216,16 @@ class TestAutocastCPU(TestCase): def test_autocast_torch_need_autocast_promote(self): for op, args1, args2 in self.autocast_lists.torch_need_autocast_promote: self._run_autocast_outofplace(op, args1, torch.float32) - self._run_autocast_outofplace(op, args2, torch.float32, amp_dtype=torch.float16) + self._run_autocast_outofplace( + op, args2, torch.float32, amp_dtype=torch.float16 + ) @unittest.skipIf(IS_WINDOWS, "Limit support for bf16 path") def test_autocast_rnn(self): - if torch.backends.mkldnn.is_available() and torch.ops.mkldnn._is_mkldnn_bf16_supported(): + if ( + torch.backends.mkldnn.is_available() + and torch.ops.mkldnn._is_mkldnn_bf16_supported() + ): x = torch.randn(1, 2, 1) hx = torch.randn(2, 2, 1) cx = torch.randn(2, 2, 1) @@ -182,9 +241,10 @@ class TestAutocastCPU(TestCase): m(x, (hx, cx)) def test_autocast_disabled_with_fp32_dtype(self): - with torch.autocast(device_type='cpu', dtype=torch.float32, enabled=False): + with torch.autocast(device_type="cpu", dtype=torch.float32, enabled=False): _ = torch.ones(10) + class CustomLinear(torch.autograd.Function): @staticmethod def forward(ctx, x, w_t): @@ -194,13 +254,13 @@ class CustomLinear(torch.autograd.Function): @staticmethod def backward(ctx, grad_output): x, w_t = ctx.saved_tensors - with torch.autocast(device_type='cuda'): + with torch.autocast(device_type="cuda"): dL_dX = torch.matmul(grad_output, w_t) dL_dW = torch.matmul(x.transpose(0, 1), grad_output).transpose(0, 1) return dL_dX, dL_dW -class WeightDTypeCastCounterMode(TorchDispatchMode): +class WeightDTypeCastCounterMode(TorchDispatchMode): def __init__(self, weight): super().__init__() self.dtype_cast_counter = 0 @@ -208,9 +268,9 @@ class WeightDTypeCastCounterMode(TorchDispatchMode): def __torch_dispatch__(self, func, types, args=(), kwargs=None): if ( - func is torch.ops.aten._to_copy.default and - args[0] is self.weight and - kwargs['dtype'] is torch.float16 + func is torch.ops.aten._to_copy.default + and args[0] is self.weight + and kwargs["dtype"] is torch.float16 ): self.dtype_cast_counter += 1 return func(*args, **kwargs) @@ -224,6 +284,7 @@ class WeightDTypeCastCounterMode(TorchDispatchMode): torch.clear_autocast_cache = self.old_clear_cache return super().__exit__(exc_type, exc_val, exc_tb) + @unittest.skipIf(not torch.cuda.is_available(), "requires cuda") class TestAutocastGPU(TestCase): def test_cast_cache_is_global(self): @@ -238,7 +299,7 @@ class TestAutocastGPU(TestCase): weight = torch.nn.Parameter(torch.randn(4, 3).cuda()) with WeightDTypeCastCounterMode(weight) as mode: - with torch.autocast(device_type='cuda'): + with torch.autocast(device_type="cuda"): output = CustomLinear.apply(data, weight) s = output.sum() s.backward() @@ -246,7 +307,6 @@ class TestAutocastGPU(TestCase): self.assertEqual(mode.dtype_cast_counter, 1) def test_cache_disabled(self): - data = torch.randn(2, 3).cuda() weight = torch.nn.Parameter(torch.randn(4, 3).cuda()) @@ -255,7 +315,7 @@ class TestAutocastGPU(TestCase): torch._C._add_cached_tensor(weight) with WeightDTypeCastCounterMode(weight) as mode: - with torch.autocast(device_type='cuda'): + with torch.autocast(device_type="cuda"): output = CustomLinear.apply(data, weight) s = output.sum() s.backward() @@ -275,12 +335,12 @@ class TestTorchAutocast(TestCase): 
self.assertEqual(cpu_fast_dtype, torch.bfloat16) def test_invalid_device(self): - dev = 'not a real device' - msg = f'unsupported autocast device_type \'{dev}\'' + dev = "not a real device" + msg = f"unsupported autocast device_type '{dev}'" with self.assertRaisesRegex(RuntimeError, msg): with torch.autocast(device_type=dev): _ = torch.tensor(1) -if __name__ == '__main__': +if __name__ == "__main__": run_tests() diff --git a/test/test_binary_ufuncs.py b/test/test_binary_ufuncs.py index acda078fe0..3ed43e6b06 100644 --- a/test/test_binary_ufuncs.py +++ b/test/test_binary_ufuncs.py @@ -1,75 +1,75 @@ # Owner(s): ["module: tests"] -import torch -import numpy as np - import itertools -from itertools import chain -from itertools import product import math +import operator import random -from numbers import Number import warnings -import operator from functools import partial +from itertools import chain, product +from numbers import Number + +import numpy as np +import torch import torch.autograd.forward_ad as fwAD from torch import inf, nan -from torch.testing._internal.common_utils import ( - TestCase, - slowTest, - iter_indices, - run_tests, - gradcheck, - torch_to_numpy_dtype_dict, - numpy_to_torch_dtype_dict, - TEST_SCIPY, - set_default_dtype, - skipIfTorchDynamo, -) +from torch.testing import make_tensor from torch.testing._internal.common_device_type import ( + deviceCountAtLeast, + dtypes, + dtypesIfCPU, + dtypesIfCUDA, expectedFailureMeta, instantiate_device_type_tests, - onlyCUDA, onlyCPU, - dtypes, - dtypesIfCUDA, - dtypesIfCPU, - deviceCountAtLeast, - precisionOverride, + onlyCUDA, onlyNativeDeviceTypes, - skipIf, - ops, OpDTypes, + ops, + precisionOverride, + skipIf, skipMeta, ) -from torch.testing import make_tensor from torch.testing._internal.common_dtype import ( - all_types_and_complex_and, all_types_and, - integral_types, + all_types_and_complex_and, complex_types, - integral_types_and, - floating_types_and, floating_and_complex_types, - get_all_math_dtypes, + floating_types_and, get_all_int_dtypes, + get_all_math_dtypes, + integral_types, + integral_types_and, ) from torch.testing._internal.common_methods_invocations import ( binary_ufuncs, binary_ufuncs_and_refs, - generate_elementwise_binary_tensors, - generate_elementwise_binary_small_value_tensors, - generate_elementwise_binary_large_value_tensors, - generate_elementwise_binary_extremal_value_tensors, generate_elementwise_binary_broadcasting_tensors, - generate_elementwise_binary_with_scalar_samples, + generate_elementwise_binary_extremal_value_tensors, + generate_elementwise_binary_large_value_tensors, + generate_elementwise_binary_small_value_tensors, + generate_elementwise_binary_tensors, generate_elementwise_binary_with_scalar_and_type_promotion_samples, + generate_elementwise_binary_with_scalar_samples, +) +from torch.testing._internal.common_utils import ( + gradcheck, + iter_indices, + numpy_to_torch_dtype_dict, + run_tests, + set_default_dtype, + skipIfTorchDynamo, + slowTest, + TEST_SCIPY, + TestCase, + torch_to_numpy_dtype_dict, ) if TEST_SCIPY: - import scipy.special import scipy.integrate + import scipy.special + # TODO: update to use opinfos consistently class TestBinaryUfuncs(TestCase): @@ -269,7 +269,6 @@ class TestBinaryUfuncs(TestCase): ) self._test_reference_numerics(dtype, op, gen, equal_nan=True) - @ops(binary_ufuncs) def test_contig_vs_every_other(self, device, dtype, op): lhs = make_tensor( @@ -487,7 +486,7 @@ class TestBinaryUfuncs(TestCase): ) make_rhs_scalar_tensor = partial( - make_tensor, (), 
device='cpu', **op.rhs_make_tensor_kwargs + make_tensor, (), device="cpu", **op.rhs_make_tensor_kwargs ) def _supported(dtypes): @@ -777,10 +776,14 @@ class TestBinaryUfuncs(TestCase): # scalar x scalar # Note: result dtype is default float type if op.supports_two_python_scalars and _supported((torch.long, torch.float32)): - rhs_f_scalar = 2. - for lhs in (1, 1.): + rhs_f_scalar = 2.0 + for lhs in (1, 1.0): result = op(lhs, rhs_f_scalar) - expected_dtype = torch.get_default_dtype() if not op.always_returns_bool else torch.bool + expected_dtype = ( + torch.get_default_dtype() + if not op.always_returns_bool + else torch.bool + ) self.assertEqual(result.dtype, expected_dtype) # TODO: move to error input test @@ -966,7 +969,6 @@ class TestBinaryUfuncs(TestCase): @dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64) def test_div_rounding_nonfinite(self, device, dtype): - # Compare division of special floating point values against NumPy num = torch.tensor( [1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan], @@ -1088,21 +1090,27 @@ class TestBinaryUfuncs(TestCase): # NOTE: the calculation still produces an error if the number is greater than # finfo.max / 2, but hopefully people realized that it's a dangerous region to work with finfo = torch.finfo(dtype) - nom_lst = [complex(finfo.min / 2, finfo.min / 2), - complex(finfo.max / 2, finfo.max / 2), - complex(finfo.tiny, finfo.tiny), - complex(finfo.tiny, 0.0), - complex(0.0, 0.0)] - denom_lst = [complex(finfo.min / 2, finfo.min / 2), - complex(finfo.max / 2, finfo.max / 2), - complex(finfo.tiny, finfo.tiny), - complex(0.0, finfo.tiny), - complex(finfo.tiny, finfo.tiny)] - expected_lst = [complex(1.0, 0.0), - complex(1.0, 0.0), - complex(1.0, 0.0), - complex(0.0, -1.0), - complex(0.0, 0.0)] + nom_lst = [ + complex(finfo.min / 2, finfo.min / 2), + complex(finfo.max / 2, finfo.max / 2), + complex(finfo.tiny, finfo.tiny), + complex(finfo.tiny, 0.0), + complex(0.0, 0.0), + ] + denom_lst = [ + complex(finfo.min / 2, finfo.min / 2), + complex(finfo.max / 2, finfo.max / 2), + complex(finfo.tiny, finfo.tiny), + complex(0.0, finfo.tiny), + complex(finfo.tiny, finfo.tiny), + ] + expected_lst = [ + complex(1.0, 0.0), + complex(1.0, 0.0), + complex(1.0, 0.0), + complex(0.0, -1.0), + complex(0.0, 0.0), + ] nom = torch.tensor(nom_lst, dtype=dtype, device=device) denom = torch.tensor(denom_lst, dtype=dtype, device=device) expected = torch.tensor(expected_lst, dtype=dtype, device=device) @@ -1146,7 +1154,10 @@ class TestBinaryUfuncs(TestCase): # test that multi-d out doesn't trigger segfault arg1 = (torch.ones(2, 1, device=device), torch.ones(1, device=device)) arg2 = (torch.ones(2, device=device), torch.ones(1, 1, device=device)) - outs = (torch.ones(2, 1, 1, 1, device=device), torch.ones(2, 2, 2, 2, device=device)) + outs = ( + torch.ones(2, 1, 1, 1, device=device), + torch.ones(2, 2, 2, 2, device=device), + ) for a1, a2, o in zip(arg1, arg2, outs): with warnings.catch_warnings(record=True) as w: @@ -1360,12 +1371,16 @@ class TestBinaryUfuncs(TestCase): self._do_pow_for_exponents(m1, exponents + complex_exponents, pow, 10e-4) else: self._do_pow_for_exponents(m1, exponents, math.pow, None) - will_raise_error = dtype is torch.half and torch.device(device).type == 'cpu' + will_raise_error = ( + dtype is torch.half and torch.device(device).type == "cpu" + ) if will_raise_error: # On CPU, # Half Tensor with complex exponents leads to computation dtype # of ComplexHalf for which this ops is not supported yet - with 
self.assertRaisesRegex(RuntimeError, "not implemented for 'ComplexHalf'"): + with self.assertRaisesRegex( + RuntimeError, "not implemented for 'ComplexHalf'" + ): self._do_pow_for_exponents(m1, complex_exponents, pow, 10e-4) else: self._do_pow_for_exponents(m1, complex_exponents, pow, 10e-4) @@ -1663,10 +1678,15 @@ class TestBinaryUfuncs(TestCase): # of ComplexHalf for which this ops is not supported yet # NOTE: pow has fast-path when base is 1 which supports # ComplexHalf - will_raise_error = torch.device(device).type == 'cpu' and \ - dtype is torch.half and base != (1 + 0j) + will_raise_error = ( + torch.device(device).type == "cpu" + and dtype is torch.half + and base != (1 + 0j) + ) if will_raise_error: - with self.assertRaisesRegex(RuntimeError, "not implemented for 'ComplexHalf'"): + with self.assertRaisesRegex( + RuntimeError, "not implemented for 'ComplexHalf'" + ): self._test_pow(base, first_exp) self._test_pow(base, second_exp) else: @@ -2028,9 +2048,7 @@ class TestBinaryUfuncs(TestCase): tmp //= b_t self.assertEqual(tmp.item(), expected_ifloordiv) - self.assertEqual( - scripted_floor_divide__scalar(a_t), math.floor(a / 5) - ) + self.assertEqual(scripted_floor_divide__scalar(a_t), math.floor(a / 5)) # Tests binary op equivalence with Python builtin ops # Also tests that reverse operations are equivalent to forward ops @@ -2042,7 +2060,6 @@ class TestBinaryUfuncs(TestCase): (operator.mul, torch.mul), (operator.truediv, torch.div), ): - for a, b in product(range(-10, 10), range(-10, 10)): for op in (lambda x: x * 0.5, lambda x: math.floor(x)): a = op(a) @@ -3143,11 +3160,19 @@ class TestBinaryUfuncs(TestCase): bits = iinfo.bits low = iinfo.min high = iinfo.max - exact_dtype = dtype != torch.uint8 # numpy changes dtype from uint8 to int16 for some out-of-limits shift values + exact_dtype = ( + dtype != torch.uint8 + ) # numpy changes dtype from uint8 to int16 for some out-of-limits shift values for input in ( - torch.tensor([-1, 0, 1], device=device, dtype=dtype), # small for non-vectorized operation - torch.tensor([low, high], device=device, dtype=dtype), # small for non-vectorized operation - make_tensor((64, 64, 64), low=low, high=high, device=device, dtype=dtype), # large for vectorized operation + torch.tensor( + [-1, 0, 1], device=device, dtype=dtype + ), # small for non-vectorized operation + torch.tensor( + [low, high], device=device, dtype=dtype + ), # small for non-vectorized operation + make_tensor( + (64, 64, 64), low=low, high=high, device=device, dtype=dtype + ), # large for vectorized operation ): shift_left_expected = torch.zeros_like(input) shift_right_expected = torch.clamp(input, -1, 0) @@ -3158,7 +3183,8 @@ class TestBinaryUfuncs(TestCase): lambda x: x << shift, lambda x: np.left_shift(x, shift), input, - exact_dtype=exact_dtype, msg=f"<< {shift}" + exact_dtype=exact_dtype, + msg=f"<< {shift}", ) shift_right = input >> shift self.assertEqual(shift_right, shift_right_expected, msg=f">> {shift}") @@ -3166,7 +3192,8 @@ class TestBinaryUfuncs(TestCase): lambda x: x >> shift, lambda x: np.right_shift(x, shift), input, - exact_dtype=exact_dtype, msg=f">> {shift}" + exact_dtype=exact_dtype, + msg=f">> {shift}", ) @onlyNativeDeviceTypes @@ -3448,6 +3475,7 @@ class TestBinaryUfuncs(TestCase): # numpy has not implemented logaddexp for complex def _ref_func(x, y): return scipy.special.logsumexp(np.stack((x, y), axis=0), axis=0) + ref_func = _ref_func our_func = torch.logaddexp else: @@ -3488,9 +3516,11 @@ class TestBinaryUfuncs(TestCase): ) _test_helper(a, b) - 
@skipIfTorchDynamo() # complex infs/nans differ under Dynamo/Inductor + @skipIfTorchDynamo() # complex infs/nans differ under Dynamo/Inductor @dtypesIfCUDA(torch.float32, torch.float64, torch.bfloat16) - @dtypes(torch.float32, torch.float64, torch.bfloat16, torch.complex64, torch.complex128) + @dtypes( + torch.float32, torch.float64, torch.bfloat16, torch.complex64, torch.complex128 + ) def test_logaddexp(self, device, dtype): self._test_logaddexp(device, dtype, base2=False) @@ -3818,7 +3848,13 @@ class TestBinaryUfuncs(TestCase): b_16 = b.to(dtype=lowp_dtype) actual_16 = a_16.atan2(b_16) self.assertEqual(actual_16, actual.to(dtype=lowp_dtype)) - self.assertEqual(expected, actual_16.view(-1), exact_dtype=False, rtol=rtol, atol=atol) + self.assertEqual( + expected, + actual_16.view(-1), + exact_dtype=False, + rtol=rtol, + atol=atol, + ) _test_atan2_with_size((2, 2), device) _test_atan2_with_size((3, 3), device) @@ -3886,7 +3922,6 @@ class TestBinaryUfuncs(TestCase): @skipIf(not TEST_SCIPY, "Scipy required for the test.") def test_cumulative_trapezoid(self, device): - import scipy.integrate if hasattr(scipy.integrate, "cumulative_trapezoid"): @@ -4034,7 +4069,6 @@ class TestBinaryUfuncs(TestCase): torch.Tensor.float_power, torch.Tensor.float_power_, ): - # Case of Tensor x Tensor if op is torch.Tensor.float_power_ and base_dtype != out_dtype: with self.assertRaisesRegex( @@ -4431,6 +4465,7 @@ tensor_binary_ops = [ # '__divmod__', '__rdivmod__', '__idivmod__', ] + # Test that binary math operations return NotImplemented for unknown types. def generate_not_implemented_tests(cls): class UnknownType: diff --git a/test/test_bundled_images.py b/test/test_bundled_images.py index 118e276a30..091bad66c1 100644 --- a/test/test_bundled_images.py +++ b/test/test_bundled_images.py @@ -1,25 +1,29 @@ #!/usr/bin/env python3 # Owner(s): ["oncall: mobile"] -import torch -import torch.utils.bundled_inputs import io + import cv2 +import torch +import torch.utils.bundled_inputs from torch.testing._internal.common_utils import TestCase torch.ops.load_library("//caffe2/torch/fb/operators:decode_bundled_image") + def model_size(sm): buffer = io.BytesIO() torch.jit.save(sm, buffer) return len(buffer.getvalue()) + def save_and_load(sm): buffer = io.BytesIO() torch.jit.save(sm, buffer) buffer.seek(0) return torch.jit.load(buffer) + """Return an InflatableArg that contains a tensor of the compressed image and the way to decode it keyword arguments: @@ -27,6 +31,8 @@ def save_and_load(sm): if in NCHW format, N should be 1 quality -- the quality needed to compress the image """ + + def bundle_jpeg_image(img_tensor, quality): # turn NCHW to HWC if img_tensor.dim() == 4: @@ -37,9 +43,12 @@ def bundle_jpeg_image(img_tensor, quality): _, enc_img = cv2.imencode(".JPEG", pixels, encode_param) enc_img_tensor = torch.from_numpy(enc_img) enc_img_tensor = torch.flatten(enc_img_tensor).byte() - obj = torch.utils.bundled_inputs.InflatableArg(enc_img_tensor, "torch.ops.fb.decode_bundled_image({})") + obj = torch.utils.bundled_inputs.InflatableArg( + enc_img_tensor, "torch.ops.fb.decode_bundled_image({})" + ) return obj + def get_tensor_from_raw_BGR(im) -> torch.Tensor: raw_data = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) raw_data = torch.from_numpy(raw_data).float() @@ -53,6 +62,7 @@ class TestBundledImages(TestCase): class SingleTensorModel(torch.nn.Module): def forward(self, arg): return arg + im = cv2.imread("caffe2/test/test_img/p1.jpg") tensor = torch.from_numpy(im) inflatable_arg = bundle_jpeg_image(tensor, 90) diff --git 
a/test/test_bundled_inputs.py b/test/test_bundled_inputs.py index 1bf938506f..2ba1ee847e 100644 --- a/test/test_bundled_inputs.py +++ b/test/test_bundled_inputs.py @@ -3,11 +3,11 @@ import io import textwrap -from typing import List, Optional, Dict +from typing import Dict, List, Optional import torch import torch.utils.bundled_inputs -from torch.testing._internal.common_utils import TestCase, run_tests +from torch.testing._internal.common_utils import run_tests, TestCase def model_size(sm): @@ -24,7 +24,6 @@ def save_and_load(sm): class TestBundledInputs(TestCase): - def test_single_tensors(self): class SingleTensorModel(torch.nn.Module): def forward(self, arg): @@ -32,7 +31,7 @@ class TestBundledInputs(TestCase): sm = torch.jit.script(SingleTensorModel()) original_size = model_size(sm) - get_expr : List[str] = [] + get_expr: List[str] = [] samples = [ # Tensor with small numel and small storage. (torch.tensor([1]),), @@ -50,7 +49,8 @@ class TestBundledInputs(TestCase): (torch.quantize_per_tensor(torch.zeros(4, 8, 32, 32), 1, 0, torch.qint8),), ] torch.utils.bundled_inputs.augment_model_with_bundled_inputs( - sm, samples, get_expr) + sm, samples, get_expr + ) # print(get_expr[0]) # print(sm._generate_bundled_inputs.code) @@ -80,18 +80,17 @@ class TestBundledInputs(TestCase): self.assertEqual(inflated[5][0].mean().item(), 0, atol=0.025, rtol=0) self.assertEqual(inflated[5][0].std().item(), 1, atol=0.02, rtol=0) - def test_large_tensor_with_inflation(self): class SingleTensorModel(torch.nn.Module): def forward(self, arg): return arg + sm = torch.jit.script(SingleTensorModel()) sample_tensor = torch.randn(1 << 16) # We can store tensors with custom inflation functions regardless # of size, even if inflation is just the identity. sample = torch.utils.bundled_inputs.bundle_large_tensor(sample_tensor) - torch.utils.bundled_inputs.augment_model_with_bundled_inputs( - sm, [(sample,)]) + torch.utils.bundled_inputs.augment_model_with_bundled_inputs(sm, [(sample,)]) loaded = save_and_load(sm) inflated = loaded.get_all_bundled_inputs() @@ -99,17 +98,18 @@ class TestBundledInputs(TestCase): self.assertEqual(inflated[0][0], sample_tensor) - def test_rejected_tensors(self): def check_tensor(sample): # Need to define the class in this scope to get a fresh type for each run. class SingleTensorModel(torch.nn.Module): def forward(self, arg): return arg + sm = torch.jit.script(SingleTensorModel()) with self.assertRaisesRegex(Exception, "Bundled input argument"): torch.utils.bundled_inputs.augment_model_with_bundled_inputs( - sm, [(sample,)]) + sm, [(sample,)] + ) # Plain old big tensor. 
check_tensor(torch.randn(1 << 16)) @@ -120,7 +120,6 @@ class TestBundledInputs(TestCase): self.assertEqual(small_sparse.numel(), 2) check_tensor(small_sparse) - def test_non_tensors(self): class StringAndIntModel(torch.nn.Module): def forward(self, fmt: str, num: int): @@ -131,8 +130,7 @@ class TestBundledInputs(TestCase): ("first {}", 1), ("second {}", 2), ] - torch.utils.bundled_inputs.augment_model_with_bundled_inputs( - sm, samples) + torch.utils.bundled_inputs.augment_model_with_bundled_inputs(sm, samples) loaded = save_and_load(sm) inflated = loaded.get_all_bundled_inputs() @@ -162,23 +160,17 @@ class TestBundledInputs(TestCase): (torch.ones(4, 8, 32, 32).contiguous(memory_format=torch.channels_last),), ] info = [ - 'Tensor with small numel and small storage.', - 'Tensor with large numel and small storage.', - 'Tensor with small numel and large storage.', - 'Large zero tensor.', - 'Large channels-last ones tensor.', - 'Special encoding of random tensor.', + "Tensor with small numel and small storage.", + "Tensor with large numel and small storage.", + "Tensor with small numel and large storage.", + "Large zero tensor.", + "Large channels-last ones tensor.", + "Special encoding of random tensor.", ] torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs( mm, - inputs={ - mm.forward : samples, - mm.foo : samples - }, - info={ - mm.forward : info, - mm.foo : info - } + inputs={mm.forward: samples, mm.foo: samples}, + info={mm.forward: info, mm.foo: info}, ) loaded = save_and_load(mm) inflated = loaded.get_all_bundled_inputs() @@ -194,15 +186,21 @@ class TestBundledInputs(TestCase): # Check helper that work on all functions all_info = loaded.get_bundled_inputs_functions_and_info() - self.assertEqual(set(all_info.keys()), {'forward', 'foo'}) - self.assertEqual(all_info['forward']['get_inputs_function_name'], ['get_all_bundled_inputs_for_forward']) - self.assertEqual(all_info['foo']['get_inputs_function_name'], ['get_all_bundled_inputs_for_foo']) - self.assertEqual(all_info['forward']['info'], info) - self.assertEqual(all_info['foo']['info'], info) + self.assertEqual(set(all_info.keys()), {"forward", "foo"}) + self.assertEqual( + all_info["forward"]["get_inputs_function_name"], + ["get_all_bundled_inputs_for_forward"], + ) + self.assertEqual( + all_info["foo"]["get_inputs_function_name"], + ["get_all_bundled_inputs_for_foo"], + ) + self.assertEqual(all_info["forward"]["info"], info) + self.assertEqual(all_info["foo"]["info"], info) # example of how to turn the 'get_inputs_function_name' into the actual list of bundled inputs for func_name in all_info.keys(): - input_func_name = all_info[func_name]['get_inputs_function_name'][0] + input_func_name = all_info[func_name]["get_inputs_function_name"][0] func_to_run = getattr(loaded, input_func_name) self.assertEqual(func_to_run(), samples) @@ -220,16 +218,18 @@ class TestBundledInputs(TestCase): # inputs defined 2 ways so should fail with self.assertRaises(Exception): mm = torch.jit.script(MultipleMethodModel()) - definition = textwrap.dedent(""" + definition = textwrap.dedent( + """ def _generate_bundled_inputs_for_forward(self): return [] - """) + """ + ) mm.define(definition) torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs( mm, inputs={ - mm.forward : samples, - mm.foo : samples, + mm.forward: samples, + mm.foo: samples, }, ) @@ -251,8 +251,8 @@ class TestBundledInputs(TestCase): torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs( mm, inputs={ - mm.forward : None, - 
mm.foo : samples, + mm.forward: None, + mm.foo: samples, }, ) @@ -265,8 +265,7 @@ class TestBundledInputs(TestCase): with self.assertRaises(TypeError): m = torch.jit.script(SingleTensorModel()) torch.utils.bundled_inputs.augment_model_with_bundled_inputs( - m, - inputs="foo" # type: ignore[arg-type] + m, inputs="foo" # type: ignore[arg-type] ) # List of non tuples. Most common error using the api. @@ -274,7 +273,9 @@ class TestBundledInputs(TestCase): m = torch.jit.script(SingleTensorModel()) torch.utils.bundled_inputs.augment_model_with_bundled_inputs( m, - inputs=[torch.ones(1, 2), ] # type: ignore[list-item] + inputs=[ + torch.ones(1, 2), # type: ignore[list-item] + ], ) def test_double_augment_fail(self): @@ -284,13 +285,13 @@ class TestBundledInputs(TestCase): m = torch.jit.script(SingleTensorModel()) torch.utils.bundled_inputs.augment_model_with_bundled_inputs( - m, - inputs=[(torch.ones(1),)] + m, inputs=[(torch.ones(1),)] ) - with self.assertRaisesRegex(Exception, "Models can only be augmented with bundled inputs once."): + with self.assertRaisesRegex( + Exception, "Models can only be augmented with bundled inputs once." + ): torch.utils.bundled_inputs.augment_model_with_bundled_inputs( - m, - inputs=[(torch.ones(1),)] + m, inputs=[(torch.ones(1),)] ) def test_double_augment_non_mutator(self): @@ -300,8 +301,7 @@ class TestBundledInputs(TestCase): m = torch.jit.script(SingleTensorModel()) bundled_model = torch.utils.bundled_inputs.bundle_inputs( - m, - inputs=[(torch.ones(1),)] + m, inputs=[(torch.ones(1),)] ) with self.assertRaises(AttributeError): m.get_all_bundled_inputs() @@ -315,18 +315,15 @@ class TestBundledInputs(TestCase): m = torch.jit.script(SingleTensorModel()) bundled_model = torch.utils.bundled_inputs.bundle_inputs( - m, - inputs={m.forward : [(torch.ones(1),)]} + m, inputs={m.forward: [(torch.ones(1),)]} ) self.assertEqual(bundled_model.get_all_bundled_inputs(), [(torch.ones(1),)]) bundled_model2 = torch.utils.bundled_inputs.bundle_inputs( - bundled_model, - inputs=[(torch.ones(2),)] + bundled_model, inputs=[(torch.ones(2),)] ) self.assertEqual(bundled_model2.get_all_bundled_inputs(), [(torch.ones(2),)]) - def test_dict_args(self): class MyModel(torch.nn.Module): def forward( @@ -396,7 +393,7 @@ class TestBundledInputs(TestCase): """, ) - out : List[str] = [] + out: List[str] = [] sm = torch.jit.script(MyModel()) original_size = model_size(sm) small_inputs = ( @@ -426,7 +423,10 @@ class TestBundledInputs(TestCase): inflated = loaded.get_all_bundled_inputs() self.assertEqual(len(inflated[0]), len(small_inputs)) - methods, _ = torch.utils.bundled_inputs._get_bundled_inputs_attributes_and_methods( + ( + methods, + _, + ) = torch.utils.bundled_inputs._get_bundled_inputs_attributes_and_methods( loaded ) @@ -439,5 +439,5 @@ class TestBundledInputs(TestCase): ) -if __name__ == '__main__': +if __name__ == "__main__": run_tests() diff --git a/test/test_comparison_utils.py b/test/test_comparison_utils.py index 172e2c4092..6c5c65d1a0 100644 --- a/test/test_comparison_utils.py +++ b/test/test_comparison_utils.py @@ -2,7 +2,8 @@ # Owner(s): ["module: internals"] import torch -from torch.testing._internal.common_utils import TestCase, run_tests +from torch.testing._internal.common_utils import run_tests, TestCase + class TestComparisonUtils(TestCase): def test_all_equal_no_assert(self): @@ -32,5 +33,5 @@ class TestComparisonUtils(TestCase): torch._assert_tensor_metadata(t, [3], [1], torch.float) -if __name__ == '__main__': +if __name__ == "__main__": run_tests() diff --git 
a/test/test_compile_benchmark_util.py b/test/test_compile_benchmark_util.py index 3e0b996ed0..7d43b5727b 100644 --- a/test/test_compile_benchmark_util.py +++ b/test/test_compile_benchmark_util.py @@ -1,17 +1,20 @@ # Owner(s): ["module: dynamo"] +import unittest + import torch import torch._dynamo as torchdynamo -from torch.testing._internal.common_utils import TestCase, run_tests, TEST_CUDA -import unittest +from torch.testing._internal.common_utils import run_tests, TEST_CUDA, TestCase try: import tabulate # noqa: F401 # type: ignore[import] from torch.utils.benchmark.utils.compile import bench_all + HAS_TABULATE = True except ImportError: HAS_TABULATE = False + @unittest.skipIf(not TEST_CUDA, "CUDA unavailable") @unittest.skipIf(not HAS_TABULATE, "tabulate not available") class TestCompileBenchmarkUtil(TestCase): @@ -28,10 +31,24 @@ class TestCompileBenchmarkUtil(TestCase): model = ToyModel().cuda() inference_table = bench_all(model, torch.ones(1024, 2, 2).cuda(), 5) - self.assertTrue("Inference" in inference_table and "Eager" in inference_table and "-" in inference_table) - - training_table = bench_all(model, torch.ones(1024, 2, 2).cuda(), 5, optimizer=torch.optim.SGD(model.parameters(), lr=0.01)) - self.assertTrue("Train" in training_table and "Eager" in training_table and "-" in training_table) - -if __name__ == '__main__': + self.assertTrue( + "Inference" in inference_table + and "Eager" in inference_table + and "-" in inference_table + ) + + training_table = bench_all( + model, + torch.ones(1024, 2, 2).cuda(), + 5, + optimizer=torch.optim.SGD(model.parameters(), lr=0.01), + ) + self.assertTrue( + "Train" in training_table + and "Eager" in training_table + and "-" in training_table + ) + + +if __name__ == "__main__": run_tests() diff --git a/test/test_complex.py b/test/test_complex.py index 3af2701aa0..04fa566bf9 100644 --- a/test/test_complex.py +++ b/test/test_complex.py @@ -2,27 +2,31 @@ import torch from torch.testing._internal.common_device_type import ( - instantiate_device_type_tests, dtypes, + instantiate_device_type_tests, onlyCPU, ) -from torch.testing._internal.common_utils import TestCase, run_tests, set_default_dtype from torch.testing._internal.common_dtype import complex_types +from torch.testing._internal.common_utils import run_tests, set_default_dtype, TestCase + +devices = (torch.device("cpu"), torch.device("cuda:0")) -devices = (torch.device('cpu'), torch.device('cuda:0')) class TestComplexTensor(TestCase): @dtypes(*complex_types()) def test_to_list(self, device, dtype): # test that the complex float tensor has expected values and # there's no garbage value in the resultant list - self.assertEqual(torch.zeros((2, 2), device=device, dtype=dtype).tolist(), [[0j, 0j], [0j, 0j]]) + self.assertEqual( + torch.zeros((2, 2), device=device, dtype=dtype).tolist(), + [[0j, 0j], [0j, 0j]], + ) @dtypes(torch.float32, torch.float64, torch.float16) def test_dtype_inference(self, device, dtype): # issue: https://github.com/pytorch/pytorch/issues/36834 with set_default_dtype(dtype): - x = torch.tensor([3., 3. 
+ 5.j], device=device) + x = torch.tensor([3.0, 3.0 + 5.0j], device=device) if dtype == torch.float16: self.assertEqual(x.dtype, torch.chalf) elif dtype == torch.float32: @@ -47,7 +51,9 @@ class TestComplexTensor(TestCase): @dtypes(*complex_types()) def test_any(self, device, dtype): # issue: https://github.com/pytorch/pytorch/issues/120875 - x = torch.tensor([0, 0j, -0 + 0j, -0 - 0j, 0 + 0j, 0 - 0j], device=device, dtype=dtype) + x = torch.tensor( + [0, 0j, -0 + 0j, -0 - 0j, 0 + 0j, 0 - 0j], device=device, dtype=dtype + ) self.assertFalse(torch.any(x)) @onlyCPU @@ -57,67 +63,179 @@ class TestComplexTensor(TestCase): nan = float("nan") # Non-vectorized operations for a, b in ( - (torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), - torch.tensor([-6.1278 - 8.5019j], device=device, dtype=dtype)), - (torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), - torch.tensor([-6.1278 - 2.1172j], device=device, dtype=dtype)), - (torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), - torch.tensor([-0.0610 - 8.5019j], device=device, dtype=dtype)), + ( + torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), + torch.tensor([-6.1278 - 8.5019j], device=device, dtype=dtype), + ), + ( + torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), + torch.tensor([-6.1278 - 2.1172j], device=device, dtype=dtype), + ), + ( + torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), + torch.tensor([-0.0610 - 8.5019j], device=device, dtype=dtype), + ), ): actual = torch.eq(a, b) expected = torch.tensor([False], device=device, dtype=torch.bool) - self.assertEqual(actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}") + self.assertEqual( + actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}" + ) actual = torch.eq(a, a) expected = torch.tensor([True], device=device, dtype=torch.bool) - self.assertEqual(actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}") + self.assertEqual( + actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}" + ) actual = torch.full_like(b, complex(2, 2)) torch.eq(a, b, out=actual) expected = torch.tensor([complex(0)], device=device, dtype=dtype) - self.assertEqual(actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}") + self.assertEqual( + actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}" + ) actual = torch.full_like(b, complex(2, 2)) torch.eq(a, a, out=actual) expected = torch.tensor([complex(1)], device=device, dtype=dtype) - self.assertEqual(actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}") + self.assertEqual( + actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}" + ) # Vectorized operations for a, b in ( - (torch.tensor([ - -0.0610 - 2.1172j, 5.1576 + 5.4775j, complex(2.8871, nan), -6.6545 - 3.7655j, -2.7036 - 1.4470j, 0.3712 + 7.989j, - -0.0610 - 2.1172j, 5.1576 + 5.4775j, complex(nan, -3.2650), -6.6545 - 3.7655j, -2.7036 - 1.4470j, 0.3712 + 7.989j], - device=device, dtype=dtype), - torch.tensor([ - -6.1278 - 8.5019j, 0.5886 + 8.8816j, complex(2.8871, nan), 6.3505 + 2.2683j, 0.3712 + 7.9659j, 0.3712 + 7.989j, - -6.1278 - 2.1172j, 5.1576 + 8.8816j, complex(nan, -3.2650), 6.3505 + 2.2683j, 0.3712 + 7.9659j, 0.3712 + 7.989j], - device=device, dtype=dtype)), + ( + torch.tensor( + [ + -0.0610 - 2.1172j, + 5.1576 + 5.4775j, + complex(2.8871, nan), + -6.6545 - 3.7655j, + -2.7036 - 1.4470j, + 0.3712 + 7.989j, + -0.0610 - 2.1172j, + 5.1576 + 5.4775j, + complex(nan, -3.2650), + -6.6545 - 3.7655j, + -2.7036 - 
1.4470j, + 0.3712 + 7.989j, + ], + device=device, + dtype=dtype, + ), + torch.tensor( + [ + -6.1278 - 8.5019j, + 0.5886 + 8.8816j, + complex(2.8871, nan), + 6.3505 + 2.2683j, + 0.3712 + 7.9659j, + 0.3712 + 7.989j, + -6.1278 - 2.1172j, + 5.1576 + 8.8816j, + complex(nan, -3.2650), + 6.3505 + 2.2683j, + 0.3712 + 7.9659j, + 0.3712 + 7.989j, + ], + device=device, + dtype=dtype, + ), + ), ): actual = torch.eq(a, b) - expected = torch.tensor([False, False, False, False, False, True, - False, False, False, False, False, True], - device=device, dtype=torch.bool) - self.assertEqual(actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}") + expected = torch.tensor( + [ + False, + False, + False, + False, + False, + True, + False, + False, + False, + False, + False, + True, + ], + device=device, + dtype=torch.bool, + ) + self.assertEqual( + actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}" + ) actual = torch.eq(a, a) - expected = torch.tensor([True, True, False, True, True, True, - True, True, False, True, True, True], - device=device, dtype=torch.bool) - self.assertEqual(actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}") + expected = torch.tensor( + [ + True, + True, + False, + True, + True, + True, + True, + True, + False, + True, + True, + True, + ], + device=device, + dtype=torch.bool, + ) + self.assertEqual( + actual, expected, msg=f"\neq\nactual {actual}\nexpected {expected}" + ) actual = torch.full_like(b, complex(2, 2)) torch.eq(a, b, out=actual) - expected = torch.tensor([complex(0), complex(0), complex(0), complex(0), complex(0), complex(1), - complex(0), complex(0), complex(0), complex(0), complex(0), complex(1)], - device=device, dtype=dtype) - self.assertEqual(actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}") + expected = torch.tensor( + [ + complex(0), + complex(0), + complex(0), + complex(0), + complex(0), + complex(1), + complex(0), + complex(0), + complex(0), + complex(0), + complex(0), + complex(1), + ], + device=device, + dtype=dtype, + ) + self.assertEqual( + actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}" + ) actual = torch.full_like(b, complex(2, 2)) torch.eq(a, a, out=actual) - expected = torch.tensor([complex(1), complex(1), complex(0), complex(1), complex(1), complex(1), - complex(1), complex(1), complex(0), complex(1), complex(1), complex(1)], - device=device, dtype=dtype) - self.assertEqual(actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}") + expected = torch.tensor( + [ + complex(1), + complex(1), + complex(0), + complex(1), + complex(1), + complex(1), + complex(1), + complex(1), + complex(0), + complex(1), + complex(1), + complex(1), + ], + device=device, + dtype=dtype, + ) + self.assertEqual( + actual, expected, msg=f"\neq(out)\nactual {actual}\nexpected {expected}" + ) @onlyCPU @dtypes(*complex_types()) @@ -126,70 +244,183 @@ class TestComplexTensor(TestCase): nan = float("nan") # Non-vectorized operations for a, b in ( - (torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), - torch.tensor([-6.1278 - 8.5019j], device=device, dtype=dtype)), - (torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), - torch.tensor([-6.1278 - 2.1172j], device=device, dtype=dtype)), - (torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), - torch.tensor([-0.0610 - 8.5019j], device=device, dtype=dtype)), + ( + torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), + torch.tensor([-6.1278 - 8.5019j], device=device, dtype=dtype), + ), + ( + 
torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), + torch.tensor([-6.1278 - 2.1172j], device=device, dtype=dtype), + ), + ( + torch.tensor([-0.0610 - 2.1172j], device=device, dtype=dtype), + torch.tensor([-0.0610 - 8.5019j], device=device, dtype=dtype), + ), ): actual = torch.ne(a, b) expected = torch.tensor([True], device=device, dtype=torch.bool) - self.assertEqual(actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}") + self.assertEqual( + actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}" + ) actual = torch.ne(a, a) expected = torch.tensor([False], device=device, dtype=torch.bool) - self.assertEqual(actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}") + self.assertEqual( + actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}" + ) actual = torch.full_like(b, complex(2, 2)) torch.ne(a, b, out=actual) expected = torch.tensor([complex(1)], device=device, dtype=dtype) - self.assertEqual(actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}") + self.assertEqual( + actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}" + ) actual = torch.full_like(b, complex(2, 2)) torch.ne(a, a, out=actual) expected = torch.tensor([complex(0)], device=device, dtype=dtype) - self.assertEqual(actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}") + self.assertEqual( + actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}" + ) # Vectorized operations for a, b in ( - (torch.tensor([ - -0.0610 - 2.1172j, 5.1576 + 5.4775j, complex(2.8871, nan), -6.6545 - 3.7655j, -2.7036 - 1.4470j, 0.3712 + 7.989j, - -0.0610 - 2.1172j, 5.1576 + 5.4775j, complex(nan, -3.2650), -6.6545 - 3.7655j, -2.7036 - 1.4470j, 0.3712 + 7.989j], - device=device, dtype=dtype), - torch.tensor([ - -6.1278 - 8.5019j, 0.5886 + 8.8816j, complex(2.8871, nan), 6.3505 + 2.2683j, 0.3712 + 7.9659j, 0.3712 + 7.989j, - -6.1278 - 2.1172j, 5.1576 + 8.8816j, complex(nan, -3.2650), 6.3505 + 2.2683j, 0.3712 + 7.9659j, 0.3712 + 7.989j], - device=device, dtype=dtype)), + ( + torch.tensor( + [ + -0.0610 - 2.1172j, + 5.1576 + 5.4775j, + complex(2.8871, nan), + -6.6545 - 3.7655j, + -2.7036 - 1.4470j, + 0.3712 + 7.989j, + -0.0610 - 2.1172j, + 5.1576 + 5.4775j, + complex(nan, -3.2650), + -6.6545 - 3.7655j, + -2.7036 - 1.4470j, + 0.3712 + 7.989j, + ], + device=device, + dtype=dtype, + ), + torch.tensor( + [ + -6.1278 - 8.5019j, + 0.5886 + 8.8816j, + complex(2.8871, nan), + 6.3505 + 2.2683j, + 0.3712 + 7.9659j, + 0.3712 + 7.989j, + -6.1278 - 2.1172j, + 5.1576 + 8.8816j, + complex(nan, -3.2650), + 6.3505 + 2.2683j, + 0.3712 + 7.9659j, + 0.3712 + 7.989j, + ], + device=device, + dtype=dtype, + ), + ), ): actual = torch.ne(a, b) - expected = torch.tensor([True, True, True, True, True, False, - True, True, True, True, True, False], - device=device, dtype=torch.bool) - self.assertEqual(actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}") + expected = torch.tensor( + [ + True, + True, + True, + True, + True, + False, + True, + True, + True, + True, + True, + False, + ], + device=device, + dtype=torch.bool, + ) + self.assertEqual( + actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}" + ) actual = torch.ne(a, a) - expected = torch.tensor([False, False, True, False, False, False, - False, False, True, False, False, False], - device=device, dtype=torch.bool) - self.assertEqual(actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}") + expected = torch.tensor( + [ + False, + False, + True, + 
False, + False, + False, + False, + False, + True, + False, + False, + False, + ], + device=device, + dtype=torch.bool, + ) + self.assertEqual( + actual, expected, msg=f"\nne\nactual {actual}\nexpected {expected}" + ) actual = torch.full_like(b, complex(2, 2)) torch.ne(a, b, out=actual) - expected = torch.tensor([complex(1), complex(1), complex(1), complex(1), complex(1), complex(0), - complex(1), complex(1), complex(1), complex(1), complex(1), complex(0)], - device=device, dtype=dtype) - self.assertEqual(actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}") + expected = torch.tensor( + [ + complex(1), + complex(1), + complex(1), + complex(1), + complex(1), + complex(0), + complex(1), + complex(1), + complex(1), + complex(1), + complex(1), + complex(0), + ], + device=device, + dtype=dtype, + ) + self.assertEqual( + actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}" + ) actual = torch.full_like(b, complex(2, 2)) torch.ne(a, a, out=actual) - expected = torch.tensor([complex(0), complex(0), complex(1), complex(0), complex(0), complex(0), - complex(0), complex(0), complex(1), complex(0), complex(0), complex(0)], - device=device, dtype=dtype) - self.assertEqual(actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}") + expected = torch.tensor( + [ + complex(0), + complex(0), + complex(1), + complex(0), + complex(0), + complex(0), + complex(0), + complex(0), + complex(1), + complex(0), + complex(0), + complex(0), + ], + device=device, + dtype=dtype, + ) + self.assertEqual( + actual, expected, msg=f"\nne(out)\nactual {actual}\nexpected {expected}" + ) + instantiate_device_type_tests(TestComplexTensor, globals()) -if __name__ == '__main__': +if __name__ == "__main__": TestCase._default_dtype_check_enabled = True run_tests() diff --git a/test/test_cpp_api_parity.py b/test/test_cpp_api_parity.py index 9f33a51078..3b57b40d62 100644 --- a/test/test_cpp_api_parity.py +++ b/test/test_cpp_api_parity.py @@ -4,26 +4,35 @@ import os import torch -import torch.testing._internal.common_utils as common import torch.testing._internal.common_nn as common_nn +import torch.testing._internal.common_utils as common +from cpp_api_parity import ( + functional_impl_check, + module_impl_check, + sample_functional, + sample_module, +) from cpp_api_parity.parity_table_parser import parse_parity_tracker_table from cpp_api_parity.utils import is_torch_nn_functional_test -from cpp_api_parity import module_impl_check, functional_impl_check, sample_module, sample_functional # NOTE: turn this on if you want to print source code of all C++ tests (e.g. 
for debugging purpose) PRINT_CPP_SOURCE = False -devices = ['cpu', 'cuda'] +devices = ["cpu", "cuda"] -PARITY_TABLE_PATH = os.path.join(os.path.dirname(__file__), 'cpp_api_parity', 'parity-tracker.md') +PARITY_TABLE_PATH = os.path.join( + os.path.dirname(__file__), "cpp_api_parity", "parity-tracker.md" +) parity_table = parse_parity_tracker_table(PARITY_TABLE_PATH) + @torch.testing._internal.common_utils.markDynamoStrictTest class TestCppApiParity(common.TestCase): module_test_params_map = {} functional_test_params_map = {} + expected_test_params_dicts = [] if not common.IS_ARM64: @@ -35,27 +44,47 @@ if not common.IS_ARM64: (common_nn.criterion_tests, common_nn.CriterionTest), ]: for test_params_dict in test_params_dicts: - if test_params_dict.get('test_cpp_api_parity', True): + if test_params_dict.get("test_cpp_api_parity", True): if is_torch_nn_functional_test(test_params_dict): functional_impl_check.write_test_to_test_class( - TestCppApiParity, test_params_dict, test_instance_class, parity_table, devices) + TestCppApiParity, + test_params_dict, + test_instance_class, + parity_table, + devices, + ) else: module_impl_check.write_test_to_test_class( - TestCppApiParity, test_params_dict, test_instance_class, parity_table, devices) + TestCppApiParity, + test_params_dict, + test_instance_class, + parity_table, + devices, + ) expected_test_params_dicts.append(test_params_dict) # Assert that all NN module/functional test dicts appear in the parity test - assert len([name for name in TestCppApiParity.__dict__ if 'test_torch_nn_' in name]) == \ - len(expected_test_params_dicts) * len(devices) + assert len( + [name for name in TestCppApiParity.__dict__ if "test_torch_nn_" in name] + ) == len(expected_test_params_dicts) * len(devices) # Assert that there exists auto-generated tests for `SampleModule` and `sample_functional`. 
# 4 == 2 (number of test dicts that are not skipped) * 2 (number of devices) - assert len([name for name in TestCppApiParity.__dict__ if 'SampleModule' in name]) == 4 + assert ( + len([name for name in TestCppApiParity.__dict__ if "SampleModule" in name]) == 4 + ) # 4 == 2 (number of test dicts that are not skipped) * 2 (number of devices) - assert len([name for name in TestCppApiParity.__dict__ if 'sample_functional' in name]) == 4 + assert ( + len([name for name in TestCppApiParity.__dict__ if "sample_functional" in name]) + == 4 + ) - module_impl_check.build_cpp_tests(TestCppApiParity, print_cpp_source=PRINT_CPP_SOURCE) - functional_impl_check.build_cpp_tests(TestCppApiParity, print_cpp_source=PRINT_CPP_SOURCE) + module_impl_check.build_cpp_tests( + TestCppApiParity, print_cpp_source=PRINT_CPP_SOURCE + ) + functional_impl_check.build_cpp_tests( + TestCppApiParity, print_cpp_source=PRINT_CPP_SOURCE + ) if __name__ == "__main__": common.TestCase._default_dtype_check_enabled = True diff --git a/test/test_cpp_extensions_aot.py b/test/test_cpp_extensions_aot.py index 2d278aa69e..1d5df82a12 100644 --- a/test/test_cpp_extensions_aot.py +++ b/test/test_cpp_extensions_aot.py @@ -1,20 +1,22 @@ # Owner(s): ["module: cpp-extensions"] -from itertools import repeat import os import re -from typing import Union, get_args, get_origin import unittest +from itertools import repeat +from typing import get_args, get_origin, Union -import torch.testing._internal.common_utils as common -from torch.testing._internal.common_utils import IS_WINDOWS, skipIfTorchDynamo -from torch.testing._internal.common_cuda import TEST_CUDA import torch import torch.backends.cudnn + +import torch.testing._internal.common_utils as common import torch.utils.cpp_extension +from torch.testing._internal.common_cuda import TEST_CUDA +from torch.testing._internal.common_utils import IS_WINDOWS, skipIfTorchDynamo try: import pytest + HAS_PYTEST = True except ImportError as e: HAS_PYTEST = False @@ -141,11 +143,15 @@ class TestCppExtensionAOT(common.TestCase): @common.skipIfRocm @unittest.skipIf(common.IS_WINDOWS, "Windows not supported") @unittest.skipIf(not TEST_CUDA, "CUDA not found") - @unittest.skipIf(os.getenv('USE_NINJA', '0') == '0', "cuda extension with dlink requires ninja to build") + @unittest.skipIf( + os.getenv("USE_NINJA", "0") == "0", + "cuda extension with dlink requires ninja to build", + ) def test_cuda_dlink_libs(self): from torch_test_cpp_extension import cuda_dlink - a = torch.randn(8, dtype=torch.float, device='cuda') - b = torch.randn(8, dtype=torch.float, device='cuda') + + a = torch.randn(8, dtype=torch.float, device="cuda") + b = torch.randn(8, dtype=torch.float, device="cuda") ref = a + b test = cuda_dlink.add(a, b) self.assertEqual(test, ref) @@ -164,6 +170,7 @@ class TestPybindTypeCasters(common.TestCase): second argument to `PYBIND11_TYPE_CASTER` should be the type we expect to receive in python, in these tests we verify this at run-time. 
""" + @staticmethod def expected_return_type(func): """ @@ -220,7 +227,9 @@ class TestPybindTypeCasters(common.TestCase): break else: raise AssertionError(f"{val} is not an instance of {expected_types}") - self.assertFalse(expected_types, f"Missing functions for types {expected_types}") + self.assertFalse( + expected_types, f"Missing functions for types {expected_types}" + ) def test_pybind_return_types(self): functions = [ @@ -248,29 +257,29 @@ class TestPybindTypeCasters(common.TestCase): @torch.testing._internal.common_utils.markDynamoStrictTest class TestORTTensor(common.TestCase): def test_unregistered(self): - a = torch.arange(0, 10, device='cpu') + a = torch.arange(0, 10, device="cpu") with self.assertRaisesRegex(RuntimeError, "Could not run"): - b = torch.arange(0, 10, device='ort') + b = torch.arange(0, 10, device="ort") @skipIfTorchDynamo("dynamo cannot model ort device") def test_zeros(self): - a = torch.empty(5, 5, device='cpu') - self.assertEqual(a.device, torch.device('cpu')) + a = torch.empty(5, 5, device="cpu") + self.assertEqual(a.device, torch.device("cpu")) - b = torch.empty(5, 5, device='ort') - self.assertEqual(b.device, torch.device('ort', 0)) + b = torch.empty(5, 5, device="ort") + self.assertEqual(b.device, torch.device("ort", 0)) self.assertEqual(ort_extension.get_test_int(), 0) self.assertEqual(torch.get_default_dtype(), b.dtype) - c = torch.empty((5, 5), dtype=torch.int64, device='ort') + c = torch.empty((5, 5), dtype=torch.int64, device="ort") self.assertEqual(ort_extension.get_test_int(), 0) self.assertEqual(torch.int64, c.dtype) def test_add(self): - a = torch.empty(5, 5, device='ort', requires_grad=True) + a = torch.empty(5, 5, device="ort", requires_grad=True) self.assertEqual(ort_extension.get_test_int(), 0) - b = torch.empty(5, 5, device='ort') + b = torch.empty(5, 5, device="ort") self.assertEqual(ort_extension.get_test_int(), 0) c = a + b @@ -279,9 +288,9 @@ class TestORTTensor(common.TestCase): def test_conv_backend_override(self): # To simplify tests, we use 4d input here to avoid doing view4d( which # needs more overrides) in _convolution. 
- input = torch.empty(2, 4, 10, 2, device='ort', requires_grad=True) - weight = torch.empty(6, 4, 2, 2, device='ort', requires_grad=True) - bias = torch.empty(6, device='ort') + input = torch.empty(2, 4, 10, 2, device="ort", requires_grad=True) + weight = torch.empty(6, 4, 2, 2, device="ort", requires_grad=True) + bias = torch.empty(6, device="ort") # Make sure forward is overriden out = torch.nn.functional.conv2d(input, weight, bias, 2, 0, 1, 1) @@ -299,7 +308,6 @@ class TestORTTensor(common.TestCase): @torch.testing._internal.common_utils.markDynamoStrictTest class TestRNGExtension(common.TestCase): - def setUp(self): super().setUp() @@ -310,7 +318,7 @@ class TestRNGExtension(common.TestCase): t = torch.empty(10, dtype=torch.int64).random_() self.assertNotEqual(t, fourty_two) - gen = torch.Generator(device='cpu') + gen = torch.Generator(device="cpu") t = torch.empty(10, dtype=torch.int64).random_(generator=gen) self.assertNotEqual(t, fourty_two) @@ -337,7 +345,6 @@ class TestRNGExtension(common.TestCase): @torch.testing._internal.common_utils.markDynamoStrictTest @unittest.skipIf(not TEST_CUDA, "CUDA not found") class TestTorchLibrary(common.TestCase): - def test_torch_library(self): import torch_test_cpp_extension.torch_library # noqa: F401 @@ -353,7 +360,7 @@ class TestTorchLibrary(common.TestCase): self.assertFalse(s(True, False)) self.assertFalse(s(False, True)) self.assertFalse(s(False, False)) - self.assertIn('torch_library::logical_and', str(s.graph)) + self.assertIn("torch_library::logical_and", str(s.graph)) if __name__ == "__main__": diff --git a/test/test_cpp_extensions_jit.py b/test/test_cpp_extensions_jit.py index 720ca7fdc1..17c39fb655 100644 --- a/test/test_cpp_extensions_jit.py +++ b/test/test_cpp_extensions_jit.py @@ -1,31 +1,38 @@ # Owner(s): ["module: cpp-extensions"] +import glob import os +import re import shutil +import subprocess import sys +import tempfile import unittest import warnings -import re -import tempfile -import subprocess -import glob -import torch.testing._internal.common_utils as common -from torch.testing._internal.common_cuda import TEST_CUDNN, TEST_CUDA import torch import torch.backends.cudnn +import torch.multiprocessing as mp + +import torch.testing._internal.common_utils as common import torch.utils.cpp_extension -from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME +from torch.testing._internal.common_cuda import TEST_CUDA, TEST_CUDNN from torch.testing._internal.common_utils import gradcheck -import torch.multiprocessing as mp -from torch.utils.cpp_extension import _TORCH_PATH, remove_extension_h_precompiler_headers, get_cxx_compiler, check_compiler_is_gcc +from torch.utils.cpp_extension import ( + _TORCH_PATH, + check_compiler_is_gcc, + CUDA_HOME, + get_cxx_compiler, + remove_extension_h_precompiler_headers, + ROCM_HOME, +) # define TEST_ROCM before changing TEST_CUDA TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None TEST_CUDA = TEST_CUDA and CUDA_HOME is not None TEST_MPS = torch.backends.mps.is_available() IS_WINDOWS = sys.platform == "win32" -IS_LINUX = sys.platform.startswith('linux') +IS_LINUX = sys.platform.startswith("linux") def remove_build_path(): @@ -73,9 +80,11 @@ class TestCppExtensionJIT(common.TestCase): "cpp_extensions/jit_extension.cpp", "cpp_extensions/jit_extension2.cpp", ], - extra_include_paths=["cpp_extensions", - "path / with spaces in it", - "path with quote'"], + extra_include_paths=[ + "cpp_extensions", + "path / with spaces in it", + "path with quote'", + ], 
extra_cflags=["-g"], verbose=True, ) @@ -140,33 +149,39 @@ class TestCppExtensionJIT(common.TestCase): def _run_jit_cuda_archflags(self, flags, expected): # Compile an extension with given `flags` def _check_cuobjdump_output(expected_values, is_ptx=False): - elf_or_ptx = '--list-ptx' if is_ptx else '--list-elf' - lib_ext = '.pyd' if IS_WINDOWS else '.so' + elf_or_ptx = "--list-ptx" if is_ptx else "--list-elf" + lib_ext = ".pyd" if IS_WINDOWS else ".so" # Note, .extension name may include _v1, _v2, so first find exact name - ext_filename = glob.glob(os.path.join(temp_dir, - 'cudaext_archflag*' + lib_ext))[0] - command = ['cuobjdump', elf_or_ptx, ext_filename] - p = subprocess.Popen(command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + ext_filename = glob.glob( + os.path.join(temp_dir, "cudaext_archflag*" + lib_ext) + )[0] + command = ["cuobjdump", elf_or_ptx, ext_filename] + p = subprocess.Popen( + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) output, err = p.communicate() output = output.decode("ascii") err = err.decode("ascii") - if not p.returncode == 0 or not err == '': - raise AssertionError(f"Flags: {flags}\nReturncode: {p.returncode}\nStderr: {err}\n" - f"Output: {output} ") - - actual_arches = sorted(re.findall(r'sm_\d\d', output)) - expected_arches = sorted(['sm_' + xx for xx in expected_values]) - self.assertEqual(actual_arches, expected_arches, - msg=f"Flags: {flags}, Actual: {actual_arches}, Expected: {expected_arches}\n" - f"Stderr: {err}\nOutput: {output}") + if not p.returncode == 0 or not err == "": + raise AssertionError( + f"Flags: {flags}\nReturncode: {p.returncode}\nStderr: {err}\n" + f"Output: {output} " + ) + + actual_arches = sorted(re.findall(r"sm_\d\d", output)) + expected_arches = sorted(["sm_" + xx for xx in expected_values]) + self.assertEqual( + actual_arches, + expected_arches, + msg=f"Flags: {flags}, Actual: {actual_arches}, Expected: {expected_arches}\n" + f"Stderr: {err}\nOutput: {output}", + ) temp_dir = tempfile.mkdtemp() - old_envvar = os.environ.get('TORCH_CUDA_ARCH_LIST', None) + old_envvar = os.environ.get("TORCH_CUDA_ARCH_LIST", None) try: - os.environ['TORCH_CUDA_ARCH_LIST'] = flags + os.environ["TORCH_CUDA_ARCH_LIST"] = flags params = { "name": "cudaext_archflags", @@ -209,9 +224,9 @@ class TestCppExtensionJIT(common.TestCase): shutil.rmtree(temp_dir) if old_envvar is None: - os.environ.pop('TORCH_CUDA_ARCH_LIST') + os.environ.pop("TORCH_CUDA_ARCH_LIST") else: - os.environ['TORCH_CUDA_ARCH_LIST'] = old_envvar + os.environ["TORCH_CUDA_ARCH_LIST"] = old_envvar @unittest.skipIf(not TEST_CUDA, "CUDA not found") @unittest.skipIf(TEST_ROCM, "disabled on rocm") @@ -227,15 +242,18 @@ class TestCppExtensionJIT(common.TestCase): # expected values is length-2 tuple: (list of ELF, list of PTX) # note: there should not be more than one PTX value archflags = { - '': ([f'{capability[0]}{capability[1]}' for capability in capabilities], None), - "Maxwell+Tegra;6.1": (['53', '61'], None), - "Volta": (['70'], ['70']), + "": ( + [f"{capability[0]}{capability[1]}" for capability in capabilities], + None, + ), + "Maxwell+Tegra;6.1": (["53", "61"], None), + "Volta": (["70"], ["70"]), } - archflags["7.5+PTX"] = (['75'], ['75']) - archflags["5.0;6.0+PTX;7.0;7.5"] = (['50', '60', '70', '75'], ['60']) - if int(torch.version.cuda.split('.')[0]) < 12: + archflags["7.5+PTX"] = (["75"], ["75"]) + archflags["5.0;6.0+PTX;7.0;7.5"] = (["50", "60", "70", "75"], ["60"]) + if int(torch.version.cuda.split(".")[0]) < 12: # CUDA 12 drops compute capability < 5.0 - 
archflags["Pascal 3.5"] = (['35', '60', '61'], None) + archflags["Pascal 3.5"] = (["35", "60", "61"], None) for flags, expected in archflags.items(): try: @@ -594,8 +612,13 @@ class TestCppExtensionJIT(common.TestCase): self.assertEqual(sequential[2].parameters()[0].dtype, old_dtype) # Make sure we can access these methods recursively. - self.assertEqual(len(list(sequential.parameters())), len(net.parameters()) * 2 + 1) - self.assertEqual(len(list(sequential.named_parameters())), len(net.named_parameters()) * 2 + 1) + self.assertEqual( + len(list(sequential.parameters())), len(net.parameters()) * 2 + 1 + ) + self.assertEqual( + len(list(sequential.named_parameters())), + len(net.named_parameters()) * 2 + 1, + ) self.assertEqual(len(list(sequential.buffers())), len(net.buffers()) * 2) self.assertEqual(len(list(sequential.modules())), 8) @@ -751,8 +774,9 @@ class TestCppExtensionJIT(common.TestCase): with self.assertRaises(RuntimeError) as e: torch.utils.cpp_extension.load_inline( name="test_compilation_error_formatting", - cpp_sources="int main() { return 0 }") - pattern = r'.*(\\n|\\r).*' + cpp_sources="int main() { return 0 }", + ) + pattern = r".*(\\n|\\r).*" self.assertNotRegex(str(e), pattern) def test_warning(self): @@ -760,7 +784,7 @@ class TestCppExtensionJIT(common.TestCase): # symbol. But because of visibility and the fact that it lives in a # different compilation unit than pybind, this trips up ubsan even though # it is fine. "ubsan.supp" thus needs to contain "vptr:warn_mod.so". - source = ''' + source = """ // error_type: // 0: no error // 1: torch::TypeError @@ -788,17 +812,19 @@ class TestCppExtensionJIT(common.TestCase): } return x.cos(); } - ''' + """ # Ensure double type for hard-coded c name below t = torch.rand(2).double() cpp_tensor_name = r"CPUDoubleType" # Without error handling, the warnings cannot be catched - warn_mod = torch.utils.cpp_extension.load_inline(name='warn_mod', - cpp_sources=[source], - functions=['foo'], - with_pytorch_error_handling=False) + warn_mod = torch.utils.cpp_extension.load_inline( + name="warn_mod", + cpp_sources=[source], + functions=["foo"], + with_pytorch_error_handling=False, + ) with warnings.catch_warnings(record=True) as w: warn_mod.foo(t, 0) @@ -808,7 +834,9 @@ class TestCppExtensionJIT(common.TestCase): warn_mod.foo(t, 1) self.assertEqual(len(w), 0) - with self.assertRaisesRegex(SystemError, "bad argument to internal function"): + with self.assertRaisesRegex( + SystemError, "bad argument to internal function" + ): warn_mod.foo(t, 2) self.assertEqual(len(w), 0) @@ -816,12 +844,12 @@ class TestCppExtensionJIT(common.TestCase): warn_mod.foo(t, 3) self.assertEqual(len(w), 0) - - warn_mod = torch.utils.cpp_extension.load_inline(name='warn_mod', - cpp_sources=[source], - functions=['foo'], - with_pytorch_error_handling=True) - + warn_mod = torch.utils.cpp_extension.load_inline( + name="warn_mod", + cpp_sources=[source], + functions=["foo"], + with_pytorch_error_handling=True, + ) with warnings.catch_warnings(record=True) as w: # Catched with no error should be detected @@ -834,7 +862,9 @@ class TestCppExtensionJIT(common.TestCase): self.assertEqual(len(w), 2) # Catched with python error should also be detected - with self.assertRaisesRegex(SystemError, "bad argument to internal function"): + with self.assertRaisesRegex( + SystemError, "bad argument to internal function" + ): warn_mod.foo(t, 2) self.assertEqual(len(w), 3) @@ -859,7 +889,7 @@ class TestCppExtensionJIT(common.TestCase): self.assertEqual(len(w), 0) def 
test_autograd_from_cpp(self): - source = ''' + source = """ void run_back(at::Tensor x) { x.backward({}); } @@ -868,7 +898,7 @@ class TestCppExtensionJIT(common.TestCase): pybind11::gil_scoped_release no_gil; x.backward({}); } - ''' + """ class MyFn(torch.autograd.Function): @staticmethod @@ -879,14 +909,18 @@ class TestCppExtensionJIT(common.TestCase): def backward(ctx, gx): return gx - test_backward_deadlock = torch.utils.cpp_extension.load_inline(name='test_backward_deadlock', - cpp_sources=[source], - functions=['run_back', 'run_back_no_gil'],) + test_backward_deadlock = torch.utils.cpp_extension.load_inline( + name="test_backward_deadlock", + cpp_sources=[source], + functions=["run_back", "run_back_no_gil"], + ) # This used to deadlock inp = torch.rand(20, requires_grad=True) loss = MyFn.apply(inp).sum() - with self.assertRaisesRegex(RuntimeError, "The autograd engine was called while holding the GIL."): + with self.assertRaisesRegex( + RuntimeError, "The autograd engine was called while holding the GIL." + ): test_backward_deadlock.run_back(loss) inp = torch.rand(20, requires_grad=True) @@ -936,7 +970,6 @@ class TestCppExtensionJIT(common.TestCase): with self.assertRaisesRegex(RuntimeError, msg): torch.func.grad(identity_m.identity)(t) - def test_gen_extension_h_pch(self): if not IS_LINUX: return @@ -973,5 +1006,6 @@ class TestCppExtensionJIT(common.TestCase): self.assertEqual(pch_exist, True) self.assertEqual(signature_exist, True) + if __name__ == "__main__": common.run_tests() diff --git a/test/test_cpp_extensions_open_device_registration.py b/test/test_cpp_extensions_open_device_registration.py index a040aafda7..1950644f5e 100644 --- a/test/test_cpp_extensions_open_device_registration.py +++ b/test/test_cpp_extensions_open_device_registration.py @@ -3,14 +3,15 @@ import os import shutil import sys -from typing import Union import tempfile import unittest +from typing import Union -import torch.testing._internal.common_utils as common -from torch.testing._internal.common_utils import IS_ARM64, TEST_CUDA import torch + +import torch.testing._internal.common_utils as common import torch.utils.cpp_extension +from torch.testing._internal.common_utils import IS_ARM64, TEST_CUDA from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME @@ -28,18 +29,19 @@ def remove_build_path(): class DummyModule: - @staticmethod def device_count() -> int: return 1 @staticmethod - def get_rng_state(device: Union[int, str, torch.device] = 'foo') -> torch.Tensor: + def get_rng_state(device: Union[int, str, torch.device] = "foo") -> torch.Tensor: # create a tensor using our custom device object. return torch.empty(4, 4, device="foo") @staticmethod - def set_rng_state(new_state: torch.Tensor, device: Union[int, str, torch.device] = 'foo') -> None: + def set_rng_state( + new_state: torch.Tensor, device: Union[int, str, torch.device] = "foo" + ) -> None: pass @staticmethod @@ -50,11 +52,12 @@ class DummyModule: def current_device(): return 0 + @unittest.skipIf(IS_ARM64, "Does not work on arm") @torch.testing._internal.common_utils.markDynamoStrictTest class TestCppExtensionOpenRgistration(common.TestCase): - """Tests Open Device Registration with C++ extensions. 
- """ + """Tests Open Device Registration with C++ extensions.""" + module = None def setUp(self): @@ -89,7 +92,7 @@ class TestCppExtensionOpenRgistration(common.TestCase): def test_open_device_registration(self): def test_base_device_registration(): - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") self.assertFalse(self.module.custom_add_called()) # create a tensor using our custom device object device = self.module.custom_device() @@ -103,7 +106,7 @@ class TestCppExtensionOpenRgistration(common.TestCase): z = x + y # check that it was called self.assertTrue(self.module.custom_add_called()) - z_cpu = z.to(device='cpu') + z_cpu = z.to(device="cpu") # Check that our cross-device copy correctly copied the data to cpu self.assertTrue(z_cpu.is_cpu) self.assertFalse(z.is_cpu) @@ -115,40 +118,45 @@ class TestCppExtensionOpenRgistration(common.TestCase): def test_before_common_registration(): # check that register module name should be the same as custom backend with self.assertRaisesRegex(RuntimeError, "Expected one of cpu"): - torch._register_device_module('xxx', DummyModule) + torch._register_device_module("xxx", DummyModule) # check generator registered before using - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") with self.assertRaisesRegex(RuntimeError, "torch has no module of"): with torch.random.fork_rng(device_type="foo"): pass # check attributes before registered - self.assertFalse(hasattr(torch.Tensor, 'is_foo')) - self.assertFalse(hasattr(torch.Tensor, 'foo')) - self.assertFalse(hasattr(torch.TypedStorage, 'is_foo')) - self.assertFalse(hasattr(torch.TypedStorage, 'foo')) - self.assertFalse(hasattr(torch.UntypedStorage, 'is_foo')) - self.assertFalse(hasattr(torch.UntypedStorage, 'foo')) - self.assertFalse(hasattr(torch.nn.Module, 'foo')) + self.assertFalse(hasattr(torch.Tensor, "is_foo")) + self.assertFalse(hasattr(torch.Tensor, "foo")) + self.assertFalse(hasattr(torch.TypedStorage, "is_foo")) + self.assertFalse(hasattr(torch.TypedStorage, "foo")) + self.assertFalse(hasattr(torch.UntypedStorage, "is_foo")) + self.assertFalse(hasattr(torch.UntypedStorage, "foo")) + self.assertFalse(hasattr(torch.nn.Module, "foo")) def test_after_common_registration(): # check attributes after registered - self.assertTrue(hasattr(torch.Tensor, 'is_foo')) - self.assertTrue(hasattr(torch.Tensor, 'foo')) - self.assertTrue(hasattr(torch.TypedStorage, 'is_foo')) - self.assertTrue(hasattr(torch.TypedStorage, 'foo')) - self.assertTrue(hasattr(torch.UntypedStorage, 'is_foo')) - self.assertTrue(hasattr(torch.UntypedStorage, 'foo')) - self.assertTrue(hasattr(torch.nn.Module, 'foo')) + self.assertTrue(hasattr(torch.Tensor, "is_foo")) + self.assertTrue(hasattr(torch.Tensor, "foo")) + self.assertTrue(hasattr(torch.TypedStorage, "is_foo")) + self.assertTrue(hasattr(torch.TypedStorage, "foo")) + self.assertTrue(hasattr(torch.UntypedStorage, "is_foo")) + self.assertTrue(hasattr(torch.UntypedStorage, "foo")) + self.assertTrue(hasattr(torch.nn.Module, "foo")) def test_common_registration(): # first rename custom backend - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") # backend name can only rename once - with self.assertRaisesRegex(RuntimeError, "torch.register_privateuse1_backend()"): - torch.utils.rename_privateuse1_backend('xxx') + with self.assertRaisesRegex( + RuntimeError, "torch.register_privateuse1_backend()" + ): + torch.utils.rename_privateuse1_backend("xxx") # register foo 
module, torch.foo - torch._register_device_module('foo', DummyModule) - self.assertTrue(torch.utils.backend_registration._get_custom_mod_func("device_count")() == 1) + torch._register_device_module("foo", DummyModule) + self.assertTrue( + torch.utils.backend_registration._get_custom_mod_func("device_count")() + == 1 + ) with self.assertRaisesRegex(RuntimeError, "Try to call torch.foo"): torch.utils.backend_registration._get_custom_mod_func("func_name_") # default set for_tensor and for_module are True, so only set for_storage is True @@ -162,23 +170,29 @@ class TestCppExtensionOpenRgistration(common.TestCase): # None of our CPU operations should call the custom add function. self.assertFalse(self.module.custom_add_called()) # check generator registered before using - with self.assertRaisesRegex(RuntimeError, - "Please register a generator to the PrivateUse1 dispatch key"): + with self.assertRaisesRegex( + RuntimeError, + "Please register a generator to the PrivateUse1 dispatch key", + ): gen_ = torch.Generator(device=device) self.module.register_generator_first() gen = torch.Generator(device=device) self.assertTrue(gen.device == device) # generator can be registered only once - with self.assertRaisesRegex(RuntimeError, - "Only can register a generator to the PrivateUse1 dispatch key once"): + with self.assertRaisesRegex( + RuntimeError, + "Only can register a generator to the PrivateUse1 dispatch key once", + ): self.module.register_generator_second() self.module.register_hook() default_gen = self.module.default_generator(0) - self.assertTrue(default_gen.device.type == torch._C._get_privateuse1_backend_name()) + self.assertTrue( + default_gen.device.type == torch._C._get_privateuse1_backend_name() + ) def test_open_device_dispatchstub(): # test kernels could be reused by privateuse1 backend through dispatchstub - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") input_data = torch.randn(2, 2, 3, dtype=torch.float32, device="cpu") foo_input_data = input_data.to("foo") output_data = torch.abs(input_data) @@ -202,10 +216,14 @@ class TestCppExtensionOpenRgistration(common.TestCase): self.assertEqual(output_data, foo_output_data.cpu()) def test_open_device_quantized(): - torch.utils.rename_privateuse1_backend('foo') - input_data = torch.randn(3, 4, 5, dtype=torch.float32, device="cpu").to("foo") - quantized_tensor = torch.quantize_per_tensor(input_data, 0.1, 10, torch.qint8) - self.assertEqual(quantized_tensor.device, torch.device('foo:0')) + torch.utils.rename_privateuse1_backend("foo") + input_data = torch.randn(3, 4, 5, dtype=torch.float32, device="cpu").to( + "foo" + ) + quantized_tensor = torch.quantize_per_tensor( + input_data, 0.1, 10, torch.qint8 + ) + self.assertEqual(quantized_tensor.device, torch.device("foo:0")) self.assertEqual(quantized_tensor.dtype, torch.qint8) def test_open_device_random(): @@ -216,15 +234,15 @@ class TestCppExtensionOpenRgistration(common.TestCase): device = self.module.custom_device() # check whether print tensor.type() meets the expectation dtypes = { - torch.bool: 'torch.foo.BoolTensor', - torch.double: 'torch.foo.DoubleTensor', - torch.float32: 'torch.foo.FloatTensor', - torch.half: 'torch.foo.HalfTensor', - torch.int32: 'torch.foo.IntTensor', - torch.int64: 'torch.foo.LongTensor', - torch.int8: 'torch.foo.CharTensor', - torch.short: 'torch.foo.ShortTensor', - torch.uint8: 'torch.foo.ByteTensor', + torch.bool: "torch.foo.BoolTensor", + torch.double: "torch.foo.DoubleTensor", + torch.float32: 
"torch.foo.FloatTensor", + torch.half: "torch.foo.HalfTensor", + torch.int32: "torch.foo.IntTensor", + torch.int64: "torch.foo.LongTensor", + torch.int8: "torch.foo.CharTensor", + torch.short: "torch.foo.ShortTensor", + torch.uint8: "torch.foo.ByteTensor", } for tt, dt in dtypes.items(): test_tensor = torch.empty(4, 4, dtype=tt, device=device) @@ -284,9 +302,11 @@ class TestCppExtensionOpenRgistration(common.TestCase): self.assertTrue(self.module.custom_storageImpl_called()) def test_open_device_storage_pin_memory(): - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") with self.assertRaisesRegex(RuntimeError, "The custom device module of"): - torch.utils.generate_methods_for_privateuse1_backend(for_tensor=False, for_module=False, for_storage=True) + torch.utils.generate_methods_for_privateuse1_backend( + for_tensor=False, for_module=False, for_storage=True + ) # Check if the pin_memory is functioning properly on custom device cpu_tensor = torch.empty(3) self.assertFalse(cpu_tensor.is_foo) @@ -333,32 +353,42 @@ class TestCppExtensionOpenRgistration(common.TestCase): self.assertFalse(cpu_untyped_storage_pinned.is_pinned()) self.assertTrue(cpu_untyped_storage_pinned.is_pinned("foo")) self.assertTrue(cpu_untyped_storage_pinned.is_pinned(foo_device)) - with self.assertRaisesRegex(TypeError, "positional arguments but 3 were given"): + with self.assertRaisesRegex( + TypeError, "positional arguments but 3 were given" + ): cpu_untyped_storage_pinned.is_pinned("foo1", "foo2") # Test storage pin_memory on error device self.assertFalse(cpu_storage_pinned.is_pinned("hpu")) - with self.assertRaisesRegex(NotImplementedError, "with arguments from the 'HPU' backend"): + with self.assertRaisesRegex( + NotImplementedError, "with arguments from the 'HPU' backend" + ): cpu_storage.pin_memory("hpu") self.assertFalse(cpu_untyped_storage_pinned.is_pinned("hpu")) - with self.assertRaisesRegex(NotImplementedError, "with arguments from the 'HPU' backend"): + with self.assertRaisesRegex( + NotImplementedError, "with arguments from the 'HPU' backend" + ): cpu_untyped_storage.pin_memory("hpu") invalid_device = torch.device("hpu") self.assertFalse(cpu_untyped_storage_pinned.is_pinned(invalid_device)) - with self.assertRaisesRegex(NotImplementedError, "with arguments from the 'HPU' backend"): + with self.assertRaisesRegex( + NotImplementedError, "with arguments from the 'HPU' backend" + ): cpu_untyped_storage.pin_memory(invalid_device) def test_open_device_serialization(): self.module.set_custom_device_index(-1) - storage = torch.UntypedStorage(4, device=torch.device('foo')) - self.assertEqual(torch.serialization.location_tag(storage), 'foo') + storage = torch.UntypedStorage(4, device=torch.device("foo")) + self.assertEqual(torch.serialization.location_tag(storage), "foo") self.module.set_custom_device_index(0) - storage = torch.UntypedStorage(4, device=torch.device('foo')) - self.assertEqual(torch.serialization.location_tag(storage), 'foo:0') + storage = torch.UntypedStorage(4, device=torch.device("foo")) + self.assertEqual(torch.serialization.location_tag(storage), "foo:0") cpu_storage = torch.empty(4, 4).storage() - foo_storage = torch.serialization.default_restore_location(cpu_storage, 'foo:0') + foo_storage = torch.serialization.default_restore_location( + cpu_storage, "foo:0" + ) self.assertTrue(foo_storage.is_foo) # test tensor MetaData serialization x = torch.empty(4, 4).long() @@ -369,7 +399,7 @@ class TestCppExtensionOpenRgistration(common.TestCase): 
self.module.custom_serialization_registry() with tempfile.TemporaryDirectory() as tmpdir: - path = os.path.join(tmpdir, 'data.pt') + path = os.path.join(tmpdir, "data.pt") torch.save(y, path) z1 = torch.load(path) # loads correctly onto the foo backend device @@ -377,14 +407,14 @@ class TestCppExtensionOpenRgistration(common.TestCase): # loads BackendMeta data correctly self.assertTrue(self.module.check_backend_meta(z1)) # cross-backend - z2 = torch.load(path, map_location='cpu') + z2 = torch.load(path, map_location="cpu") # loads correctly onto the cpu backend device self.assertFalse(z2.is_foo) # loads BackendMeta data correctly self.assertFalse(self.module.check_backend_meta(z2)) def test_open_device_storage_resize(): - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") cpu_tensor = torch.randn([8]) foo_tensor = cpu_tensor.foo() foo_storage = foo_tensor.storage() @@ -392,11 +422,11 @@ class TestCppExtensionOpenRgistration(common.TestCase): # Only register tensor resize_ function. foo_tensor.resize_(8) self.assertTrue(foo_storage.size() == 8) - with self.assertRaisesRegex(TypeError, 'Overflow'): + with self.assertRaisesRegex(TypeError, "Overflow"): foo_tensor.resize_(8**29) def test_open_device_storage_type(): - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") # test cpu float storage cpu_tensor = torch.randn([8]).float() cpu_storage = cpu_tensor.storage() @@ -429,14 +459,14 @@ class TestCppExtensionOpenRgistration(common.TestCase): torch.foo.FloatStorage = None def test_open_device_faketensor(): - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") with torch._subclasses.fake_tensor.FakeTensorMode.push(): a = torch.empty(1, device="foo") b = torch.empty(1, device="foo:0") result = a + b def test_open_device_named_tensor(): - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") a = torch.empty([2, 3, 4, 5], device="foo", names=["N", "C", "H", "W"]) # Not an open registration test - this file is just very convenient @@ -462,7 +492,9 @@ class TestCppExtensionOpenRgistration(common.TestCase): out_ref.sum().backward() x_test = x_ref.clone().detach().requires_grad_(True) - f_compiled = torch.compile(torch.ops._test_funcs.custom_autograd_fn_aliasing) + f_compiled = torch.compile( + torch.ops._test_funcs.custom_autograd_fn_aliasing + ) out_test = f_compiled(x_test) out_test.sum().backward() @@ -470,16 +502,18 @@ class TestCppExtensionOpenRgistration(common.TestCase): self.assertEqual(x_ref.grad, x_test.grad) def test_open_device_scalar_type_fallback(): - torch.utils.rename_privateuse1_backend('foo') - z_cpu = torch.Tensor([[0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]).to(torch.int64) - z = torch.triu_indices(3, 3, device='foo') + torch.utils.rename_privateuse1_backend("foo") + z_cpu = torch.Tensor([[0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]).to( + torch.int64 + ) + z = torch.triu_indices(3, 3, device="foo") self.assertEqual(z_cpu, z) def test_open_device_tensor_type_fallback(): - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") # create tensors located in custom device - x = torch.Tensor([[1, 2, 3], [2, 3, 4]]).to('foo') - y = torch.Tensor([1, 0, 2]).to('foo') + x = torch.Tensor([[1, 2, 3], [2, 3, 4]]).to("foo") + y = torch.Tensor([1, 0, 2]).to("foo") # create result tensor located in cpu z_cpu = torch.Tensor([[0, 2, 1], [1, 3, 2]]) # Check that our device is correct. 
@@ -491,14 +525,14 @@ class TestCppExtensionOpenRgistration(common.TestCase): self.assertEqual(z_cpu, z) # call index op, which will fallback to cpu z_cpu = torch.Tensor([3, 1]) - y = torch.Tensor([1, 0]).long().to('foo') + y = torch.Tensor([1, 0]).long().to("foo") z = x[y, y] self.assertEqual(z_cpu, z) def test_open_device_tensorlist_type_fallback(): - torch.utils.rename_privateuse1_backend('foo') + torch.utils.rename_privateuse1_backend("foo") # create tensors located in custom device - v_foo = torch.Tensor([1, 2, 3]).to('foo') + v_foo = torch.Tensor([1, 2, 3]).to("foo") # create result tensor located in cpu z_cpu = torch.Tensor([2, 4, 6]) # create tensorlist for foreach_add op
2.41.0
8a98ddd604cdd0baf2e7d845167c76d47011ec3
Fri, 19 Apr 2024 22:10:32 +0000
[PATCH 0406/1000] Prep PR for cutlass 3.5 update (#124412)
# Summary These changes are needed for the upgrade to cutlass 3.5 (#123458). Pull Request resolved: https://github.com/pytorch/pytorch/pull/124412 Approved by: https://github.com/Skylion007, https://github.com/nWEIdia, https://github.com/malfet
diff --git a/aten/src/ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h b/aten/src/ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h index 55f3f9a1ce..564e3f2f35 100644 --- a/aten/src/ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h +++ b/aten/src/ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h @@ -1429,7 +1429,7 @@ struct AttentionBackwardKernel { uint8_t lane_id) { cutlass::Array<cutlass::uint1b_t, MatmulDOIVJ::Mma::FragmentC::kElements> dropout_keep_mask_doivj; - dropout_keep_mask_doivj.fill(1); + dropout_keep_mask_doivj.fill(cutlass::uint1b_t{1}); const float dropout_scale = kApplyDropout ? 1.0 / (1.0 - p.dropout_prob) : 1.0f; @@ -1752,7 +1752,7 @@ struct AttentionBackwardKernel { [&](int accum_m) {}, [&](int accum_m /*q*/, int accum_n /*k*/, int idx) { if (zij.at({accum_n, accum_m}) == scalar_t(0)) { - dropout_keep_mask_doivj[idx] = cutlass::uint1b_t(0); + dropout_keep_mask_doivj[idx] = cutlass::uint1b_t{0}; } }, [&](int accum_m) {});
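The diff above switches `cutlass::uint1b_t` from parenthesized construction to brace initialization and constructs the element type explicitly instead of passing a bare `int` to `fill`. The sketch below is a minimal standalone C++ illustration of why that pattern survives a stricter sub-byte integer type; `bit_t` and `FixedArray` are hypothetical stand-ins, not cutlass types, and the idea that cutlass 3.5 makes the converting constructor explicit is an assumption of this sketch, not something stated in the commit.

```cpp
// Hypothetical stand-ins for cutlass::uint1b_t and cutlass::Array, used only to
// illustrate the construction-style change in the diff above.
#include <array>
#include <cstdint>

struct bit_t {
  uint8_t storage = 0;
  bit_t() = default;
  explicit bit_t(int v) : storage(static_cast<uint8_t>(v & 1)) {}  // assumption: explicit in the newer library
};

template <typename T, std::size_t N>
struct FixedArray {
  std::array<T, N> data{};
  void fill(T value) { data.fill(value); }  // expects T, not a plain int
};

int main() {
  FixedArray<bit_t, 8> dropout_keep_mask;
  // dropout_keep_mask.fill(1);      // would not compile: no implicit int -> bit_t conversion
  dropout_keep_mask.fill(bit_t{1});  // explicit brace construction, mirroring the patched call sites
  return 0;
}
```

Either way, spelling out `uint1b_t{...}` at the call sites keeps the code valid whether or not the library's constructor stays implicit, which is presumably what makes this prep change safe to land ahead of the cutlass bump.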
2.41.0
9850d770d1b4d24d4561b493807620a92de29cb
Thu, 18 Apr 2024 14:30:51 -0700
[PATCH 0407/1000] update triton pin (#124429)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124429 Approved by: https://github.com/shunting314, https://github.com/malfet
diff --git a/.ci/docker/ci_commit_pins/triton.txt b/.ci/docker/ci_commit_pins/triton.txt index 59fa5b6dd3..26516efc0b 100644 --- a/.ci/docker/ci_commit_pins/triton.txt +++ b/.ci/docker/ci_commit_pins/triton.txt @@ -1 +1 @@ -989adb9a29496c22a36ef82ca69cad5dad536b9c +45fff310c891f5a92d55445adf8cc9d29df5841e
2.41.0
8f7cfbeee17a0b3f880ce40a5685d5d7943405b
Fri, 19 Apr 2024 22:34:25 +0000
[PATCH 0408/1000] Add __torch_function__ support for generated tensor methods/property of PrivateUse1 (#121723)
Support the following case: ```python import torch ... class CustomFooTensor(torch.Tensor): @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): ... a = CustomFooTensor([3]) print(a.is_foo) ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/121723 Approved by: https://github.com/albanD
diff --git a/torch/overrides.py b/torch/overrides.py index 7f45044265..4ce2548800 100644 --- a/torch/overrides.py +++ b/torch/overrides.py @@ -1433,6 +1433,11 @@ def get_testing_overrides() -> Dict[Callable, Callable]: torch.linalg.lstsq: lambda self, b, cond=None, driver=None: -1, } + privateuse1_backend_name = torch.utils.backend_registration._privateuse1_backend_name + if hasattr(Tensor, privateuse1_backend_name): + ret[getattr(Tensor, privateuse1_backend_name)] = lambda self, device=None, non_blocking=False, **kwargs: -1 + ret[getattr(Tensor, f'is_{privateuse1_backend_name}').__get__] = lambda self: -1 # noqa: B009 + ret2 = {} ignored = get_ignored_functions() diff --git a/torch/utils/backend_registration.py b/torch/utils/backend_registration.py index aee7964c42..d2d2b1cb89 100644 --- a/torch/utils/backend_registration.py +++ b/torch/utils/backend_registration.py @@ -1,4 +1,8 @@ import torch +from torch.overrides import ( + handle_torch_function, + has_torch_function_unary, +) from torch._C import _rename_privateuse1_backend, _get_privateuse1_backend_name from typing import List, Optional, Union @@ -126,9 +130,13 @@ def _normalization_device(custom_backend_name: str, device: Optional[Union[int, def _generate_tensor_methods_for_privateuse1_backend(custom_backend_name: str) -> None: @property # type: ignore[misc] def wrap_tensor_backend(self: torch.Tensor) -> bool: + if has_torch_function_unary(self): + # TODO mypy doesn't support @property, see: https://github.com/python/mypy/issues/6185 + return handle_torch_function(wrap_tensor_backend.__get__, (self,), self) # type: ignore[attr-defined] return self.device.type == custom_backend_name _check_register_once(torch.Tensor, f'is_{custom_backend_name}') + wrap_tensor_backend.fget.__name__ = f'is_{custom_backend_name}' # type: ignore[attr-defined] setattr(torch.Tensor, f'is_{custom_backend_name}', wrap_tensor_backend) def wrap_tensor_to(self: torch.Tensor, device: Optional[Union[int, torch.device]] = None, non_blocking=False, @@ -147,10 +155,13 @@ def _generate_tensor_methods_for_privateuse1_backend(custom_backend_name: str) - the argument has no effect. **kwargs (dict): For compatibility, may contain the key ``memory_format`` argument. """ + if has_torch_function_unary(self): + return handle_torch_function(wrap_tensor_to, (self,), self, device=device, non_blocking=False, **kwargs) device_idx = _normalization_device(custom_backend_name, device) return self.to(device=torch.device(f'{custom_backend_name}:{device_idx}'), non_blocking=non_blocking, **kwargs) _check_register_once(torch.Tensor, custom_backend_name) + wrap_tensor_to.__name__ = custom_backend_name setattr(torch.Tensor, custom_backend_name, wrap_tensor_to)
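To complement the truncated snippet in the commit message, here is a fuller sketch of the behavior the diff enables. It is a hedged example rather than part of the patch: it assumes a PyTorch build that already contains this change, and it needs no real `foo` device because the subclass only demonstrates that the generated `is_foo` attribute now routes through `__torch_function__` instead of bypassing it.

```python
import torch

# Generate the privateuse1 attributes; no actual "foo" backend module is loaded,
# so the tensor below simply lives on CPU.
torch.utils.rename_privateuse1_backend("foo")
torch.utils.generate_methods_for_privateuse1_backend()


class CustomFooTensor(torch.Tensor):
    # Record every function that reaches __torch_function__ so the dispatch of
    # the generated property is observable.
    seen = []

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        cls.seen.append(func)
        return super().__torch_function__(func, types, args, kwargs or {})


a = CustomFooTensor([3.0])
print(a.is_foo)                        # dispatches through __torch_function__; False for a CPU tensor
print(len(CustomFooTensor.seen) > 0)   # the generated getter shows up among the recorded calls
```

Before this patch the generated `is_foo` getter and `foo()` method called straight into the tensor, so a `__torch_function__` subclass never saw them; the `has_torch_function_unary` guards added to `torch/utils/backend_registration.py` in the diff above are what make the interception possible.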
2.41.0
2da03edeb5f942869758bb529831c04a69b0606
Fri, 19 Apr 2024 22:44:26 +0000
[PATCH 0409/1000] Revert "Add test_cpp_extensions tests for stream_and_event and mita_backend (#123614)"
This reverts commit b6f0159db08c1ad55fe57a5e92d8933e21ea543e. Reverted https://github.com/pytorch/pytorch/pull/123614 on behalf of https://github.com/jeffdaily because it broke ROCm; see test_overrides.py ([comment](https://github.com/pytorch/pytorch/pull/123611#issuecomment-2067363780))
diff --git a/test/cpp_extensions/mtia_extension.cpp b/test/cpp_extensions/mtia_extension.cpp deleted file mode 100644 index 3b02d3968e..0000000000 --- a/test/cpp_extensions/mtia_extension.cpp +++ /dev/null @@ -1,219 +0,0 @@ -#include <ATen/detail/MTIAHooksInterface.h> -#include <c10/core/Device.h> -#include <c10/core/Stream.h> -#include <c10/core/impl/DeviceGuardImplInterface.h> -#include <c10/util/Logging.h> -#include <torch/csrc/utils/device_lazy_init.h> -#include <thread> -namespace torch::mtia { - -constexpr c10::DeviceType kMTIADeviceType = c10::DeviceType::MTIA; -constexpr c10::DeviceIndex kMTIADeviceCount = 2; -static thread_local c10::DeviceIndex current_device = 0; -static thread_local std::array<c10::Stream, kMTIADeviceCount> current_streams = - {c10::Stream::unpack3(0, 0, c10::DeviceType::MTIA), - c10::Stream::unpack3(0, 1, c10::DeviceType::MTIA)}; -static int64_t stream_id_gen = 1; -static int64_t event_id_gen = 1; -static std::array<c10::Stream, kMTIADeviceCount> default_streams = { - c10::Stream::unpack3(0, 0, c10::DeviceType::MTIA), - c10::Stream::unpack3(0, 1, c10::DeviceType::MTIA)}; -struct MTIAGuardImpl final : public c10::impl::DeviceGuardImplInterface { - MTIAGuardImpl() = default; - explicit MTIAGuardImpl(c10::DeviceType t) { - TORCH_INTERNAL_ASSERT(t == kMTIADeviceType); - } - c10::DeviceType type() const override { - return kMTIADeviceType; - } - c10::Device exchangeDevice(c10::Device d) const override { - c10::Device old_device = getDevice(); - if (old_device.index() != d.index()) { - setDevice(d); - } - return old_device; - } - c10::Device getDevice() const override { - return c10::Device(kMTIADeviceType, current_device); - } - - void setDevice(c10::Device d) const override { - c10::Device current_device = getDevice(); - if (current_device.index() != d.index()) { - current_device = d; - } - } - void uncheckedSetDevice(c10::Device d) const noexcept override { - (void)d; - } - c10::Stream getStream(c10::Device d) const noexcept override { - return current_streams[d.index()]; - } - c10::Stream getNewStream(c10::Device d, int priority = 0) const override { - (void)priority; - return c10::Stream::unpack3(stream_id_gen++, d.index(), d.type()); - } - c10::Stream getDefaultStream(c10::Device d) const override { - return default_streams[d.index()]; - } - c10::Stream getStreamFromGlobalPool( - c10::Device d, - bool isHighPriority = false) const override { - return c10::Stream::unpack3(stream_id_gen++, d.index(), d.type()); - } - // NB: These do NOT set the current device - c10::Stream exchangeStream(c10::Stream s) const noexcept override { - c10::Stream old_stream = getStream(s.device()); - return old_stream; - } - c10::DeviceIndex deviceCount() const noexcept override { - return kMTIADeviceCount; - } - - void destroyEvent(void* event, const c10::DeviceIndex device_index) - const noexcept override { - (void)device_index; - } - - void record( - void** event, - const c10::Stream& stream, - const c10::DeviceIndex device_index, - const c10::EventFlag flag) const override { - TORCH_CHECK( - device_index == -1 || device_index == stream.device_index(), - "Event device index ", - device_index, - " does not match recording stream's device index ", - stream.device_index(), - "."); - - const auto orig_device = getDevice(); - - setDevice(stream.device()); - - if (*event == nullptr) { - *event = reinterpret_cast<void*>(event_id_gen++); - } - setDevice(orig_device); - } - - void block(void* event, const c10::Stream& stream) const override { - (void)event; - (void)stream; - } - - // May 
be called from any device - bool queryEvent(void* event) const override { - (void)event; - return true; - } - - // Stream-related functions - bool queryStream(const c10::Stream& stream) const override { - (void)stream; - return true; - } - - void synchronizeStream(const c10::Stream& stream) const override { - (void)stream; - } - - void recordDataPtrOnStream( - const c10::DataPtr& data_ptr, - const c10::Stream& stream) const override { - (void)data_ptr; - (void)stream; - } - - double elapsedTime(void* event1, void* event2) const override { - uint64_t elapsed_time = 1e6; - return (double)(elapsed_time / 1e6); - } - - void synchronizeEvent(void* event) const override { - (void)event; - } -}; - -struct MTIAHooks : public at::MTIAHooksInterface { - explicit MTIAHooks(at::MTIAHooksArgs) {} - void initMTIA() const override {} - - bool hasMTIA() const override { - return true; - } - - c10::DeviceIndex deviceCount() const override { - torch::utils::device_lazy_init(at::kMTIA); - return c10::DeviceIndex(2); - } - - void deviceSynchronize(c10::DeviceIndex device_index) const override { - torch::utils::device_lazy_init(at::kMTIA); - (void)device_index; - } - - std::string showConfig() const override { - return "None config"; - } - - c10::DeviceIndex exchangeDevice(c10::DeviceIndex device) const override { - torch::utils::device_lazy_init(at::kMTIA); - auto orig_device = current_device; - if (current_device != device) { - current_device = device; - } - return orig_device; - } - - c10::DeviceIndex maybeExchangeDevice(c10::DeviceIndex device) const override { - torch::utils::device_lazy_init(at::kMTIA); - - auto orig_device = current_device; - if (current_device != device) { - current_device = device; - } - return orig_device; - } - - c10::Stream getDefaultStream(c10::DeviceIndex device) const override { - torch::utils::device_lazy_init(at::kMTIA); - - return default_streams[device]; - } - - c10::Stream getCurrentStream(c10::DeviceIndex device) const override { - torch::utils::device_lazy_init(at::kMTIA); - - return current_streams[device]; - } - - void setCurrentStream(const c10::Stream& stream) const override { - torch::utils::device_lazy_init(at::kMTIA); - - current_streams[stream.device_index()] = stream; - } - - c10::DeviceIndex getCurrentDevice() const override { - torch::utils::device_lazy_init(at::kMTIA); - - return current_device; - } - - void setCurrentDevice(c10::DeviceIndex device) const override { - torch::utils::device_lazy_init(at::kMTIA); - - if (current_device != device) { - current_device = device; - } - } -}; - -using at::MTIAHooksRegistry; -using at::RegistererMTIAHooksRegistry; - -REGISTER_MTIA_HOOKS(MTIAHooks); -C10_REGISTER_GUARD_IMPL(MTIA, MTIAGuardImpl); - -} // namespace torch::mtia diff --git a/test/run_test.py b/test/run_test.py index d7bc40f521..c029a96566 100755 --- a/test/run_test.py +++ b/test/run_test.py @@ -191,8 +191,6 @@ XPU_TEST = [ RUN_PARALLEL_BLOCKLIST = [ "test_cpp_extensions_jit", "test_cpp_extensions_open_device_registration", - "test_cpp_extensions_stream_and_event", - "test_cpp_extensions_mtia_backend", "test_jit_disabled", "test_mobile_optimizer", "test_multiprocessing", diff --git a/test/test_cpp_extensions_mtia_backend.py b/test/test_cpp_extensions_mtia_backend.py deleted file mode 100644 index e2ebbf702d..0000000000 --- a/test/test_cpp_extensions_mtia_backend.py +++ /dev/null @@ -1,154 +0,0 @@ -# Owner(s): ["module: mtia"] - -import os -import shutil -import sys -import tempfile -import unittest - -import torch -import torch.testing._internal.common_utils 
as common -import torch.utils.cpp_extension -from torch.testing._internal.common_utils import ( - IS_ARM64, - IS_LINUX, - skipIfTorchDynamo, - TEST_CUDA, - TEST_PRIVATEUSE1, -) -from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME - - -TEST_CUDA = TEST_CUDA and CUDA_HOME is not None -TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None - - -def remove_build_path(): - if sys.platform == "win32": - # Not wiping extensions build folder because Windows - return - default_build_root = torch.utils.cpp_extension.get_default_build_root() - if os.path.exists(default_build_root): - shutil.rmtree(default_build_root, ignore_errors=True) - - -@unittest.skipIf( - IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1, - "Only on linux platform and mutual exclusive to other backends", -) -@torch.testing._internal.common_utils.markDynamoStrictTest -class TestCppExtensionMTIABackend(common.TestCase): - """Tests MTIA backend with C++ extensions.""" - - module = None - - def setUp(self): - super().setUp() - # cpp extensions use relative paths. Those paths are relative to - # this file, so we'll change the working directory temporarily - self.old_working_dir = os.getcwd() - os.chdir(os.path.dirname(os.path.abspath(__file__))) - - def tearDown(self): - super().tearDown() - # return the working directory (see setUp) - os.chdir(self.old_working_dir) - - @classmethod - def tearDownClass(cls): - remove_build_path() - - @classmethod - def setUpClass(cls): - remove_build_path() - build_dir = tempfile.mkdtemp() - # Load the fake device guard impl. - cls.module = torch.utils.cpp_extension.load( - name="mtia_extension", - sources=["cpp_extensions/mtia_extension.cpp"], - build_directory=build_dir, - extra_include_paths=[ - "cpp_extensions", - "path / with spaces in it", - "path with quote'", - ], - is_python_module=False, - verbose=True, - ) - - @skipIfTorchDynamo("Not a TorchDynamo suitable test") - def test_get_device_module(self): - device = torch.device("mtia:0") - default_stream = torch.get_device_module(device).current_stream() - self.assertEqual( - default_stream.device_type, int(torch._C._autograd.DeviceType.MTIA) - ) - print(torch._C.Stream.__mro__) - print(torch.cuda.Stream.__mro__) - - @skipIfTorchDynamo("Not a TorchDynamo suitable test") - def test_stream_basic(self): - default_stream = torch.mtia.current_stream() - user_stream = torch.mtia.Stream() - self.assertEqual(torch.mtia.current_stream(), default_stream) - self.assertNotEqual(default_stream, user_stream) - # Check mtia_extension.cpp, default stream id starts from 0. 
- self.assertEqual(default_stream.stream_id, 0) - self.assertNotEqual(user_stream.stream_id, 0) - with torch.mtia.stream(user_stream): - self.assertEqual(torch.mtia.current_stream(), user_stream) - self.assertTrue(user_stream.query()) - default_stream.synchronize() - self.assertTrue(default_stream.query()) - - @skipIfTorchDynamo("Not a TorchDynamo suitable test") - def test_stream_context(self): - mtia_stream_0 = torch.mtia.Stream(device="mtia:0") - mtia_stream_1 = torch.mtia.Stream(device="mtia:0") - print(mtia_stream_0) - print(mtia_stream_1) - with torch.mtia.stream(mtia_stream_0): - current_stream = torch.mtia.current_stream() - msg = f"current_stream {current_stream} should be {mtia_stream_0}" - self.assertTrue(current_stream == mtia_stream_0, msg=msg) - - with torch.mtia.stream(mtia_stream_1): - current_stream = torch.mtia.current_stream() - msg = f"current_stream {current_stream} should be {mtia_stream_1}" - self.assertTrue(current_stream == mtia_stream_1, msg=msg) - - @skipIfTorchDynamo("Not a TorchDynamo suitable test") - def test_stream_context_different_device(self): - device_0 = torch.device("mtia:0") - device_1 = torch.device("mtia:1") - mtia_stream_0 = torch.mtia.Stream(device=device_0) - mtia_stream_1 = torch.mtia.Stream(device=device_1) - print(mtia_stream_0) - print(mtia_stream_1) - orig_current_device = torch.mtia.current_device() - with torch.mtia.stream(mtia_stream_0): - current_stream = torch.mtia.current_stream() - self.assertTrue(torch.mtia.current_device() == device_0.index) - msg = f"current_stream {current_stream} should be {mtia_stream_0}" - self.assertTrue(current_stream == mtia_stream_0, msg=msg) - self.assertTrue(torch.mtia.current_device() == orig_current_device) - with torch.mtia.stream(mtia_stream_1): - current_stream = torch.mtia.current_stream() - self.assertTrue(torch.mtia.current_device() == device_1.index) - msg = f"current_stream {current_stream} should be {mtia_stream_1}" - self.assertTrue(current_stream == mtia_stream_1, msg=msg) - self.assertTrue(torch.mtia.current_device() == orig_current_device) - - @skipIfTorchDynamo("Not a TorchDynamo suitable test") - def test_device_context(self): - device_0 = torch.device("mtia:0") - device_1 = torch.device("mtia:1") - with torch.mtia.device(device_0): - self.assertTrue(torch.mtia.current_device() == device_0.index) - - with torch.mtia.device(device_1): - self.assertTrue(torch.mtia.current_device() == device_1.index) - - -if __name__ == "__main__": - common.run_tests() diff --git a/test/test_cpp_extensions_stream_and_event.py b/test/test_cpp_extensions_stream_and_event.py deleted file mode 100644 index 0be81dd492..0000000000 --- a/test/test_cpp_extensions_stream_and_event.py +++ /dev/null @@ -1,108 +0,0 @@ -# Owner(s): ["module: mtia"] - -import os -import shutil -import sys -import tempfile -import unittest - -import torch -import torch.testing._internal.common_utils as common -import torch.utils.cpp_extension -from torch.testing._internal.common_utils import ( - IS_ARM64, - IS_LINUX, - skipIfTorchDynamo, - TEST_CUDA, - TEST_PRIVATEUSE1, -) -from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME - - -TEST_CUDA = TEST_CUDA and CUDA_HOME is not None -TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None - - -def remove_build_path(): - if sys.platform == "win32": - # Not wiping extensions build folder because Windows - return - default_build_root = torch.utils.cpp_extension.get_default_build_root() - if os.path.exists(default_build_root): - shutil.rmtree(default_build_root, 
ignore_errors=True) - - -# Since we use a fake MTIA device backend to test generic Stream/Event, device backends are mutual exclusive to each other. -# The test will be skipped if any of the following conditions are met: -@unittest.skipIf( - IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1, - "Only on linux platform and mutual exclusive to other backends", -) -@torch.testing._internal.common_utils.markDynamoStrictTest -class TestCppExtensionStreamAndEvent(common.TestCase): - """Tests Stream and Event with C++ extensions.""" - - module = None - - def setUp(self): - super().setUp() - # cpp extensions use relative paths. Those paths are relative to - # this file, so we'll change the working directory temporarily - self.old_working_dir = os.getcwd() - os.chdir(os.path.dirname(os.path.abspath(__file__))) - - def tearDown(self): - super().tearDown() - # return the working directory (see setUp) - os.chdir(self.old_working_dir) - - @classmethod - def tearDownClass(cls): - remove_build_path() - - @classmethod - def setUpClass(cls): - remove_build_path() - build_dir = tempfile.mkdtemp() - # Load the fake device guard impl. - src = f"{os.path.abspath(os.path.dirname(__file__))}/cpp_extensions/mtia_extension.cpp" - cls.module = torch.utils.cpp_extension.load( - name="mtia_extension", - sources=[src], - build_directory=build_dir, - extra_include_paths=[ - "cpp_extensions", - "path / with spaces in it", - "path with quote'", - ], - is_python_module=False, - verbose=True, - ) - - @skipIfTorchDynamo("Not a TorchDynamo suitable test") - def test_stream_event(self): - s = torch.Stream() - self.assertTrue(s.device_type, int(torch._C._autograd.DeviceType.MTIA)) - e = torch.Event() - self.assertTrue(e.device.type, "mtia") - # Should be nullptr by default - self.assertTrue(e.event_id == 0) - s.record_event(event=e) - print(f"recorded event 1: {e}") - self.assertTrue(e.event_id != 0) - e2 = s.record_event() - print(f"recorded event 2: {e2}") - self.assertTrue(e2.event_id != 0) - self.assertTrue(e2.event_id != e.event_id) - e.synchronize() - e2.synchronize() - time_elapsed = e.elapsed_time(e2) - print(f"time elapsed between e1 and e2: {time_elapsed}") - old_event_id = e.event_id - e.record(stream=s) - print(f"recorded event 1: {e}") - self.assertTrue(e.event_id == old_event_id) - - -if __name__ == "__main__": - common.run_tests() diff --git a/tools/testing/modulefinder_determinator.py b/tools/testing/modulefinder_determinator.py index ba58d75c57..ce55fdb424 100644 --- a/tools/testing/modulefinder_determinator.py +++ b/tools/testing/modulefinder_determinator.py @@ -21,8 +21,6 @@ TARGET_DET_LIST = [ "test_cpp_extensions_aot_no_ninja", "test_cpp_extensions_jit", "test_cpp_extensions_open_device_registration", - "test_cpp_extensions_stream_and_event", - "test_cpp_extensions_mtia_backend", "test_cuda", "test_cuda_primary_ctx", "test_dataloader",
2.41.0
29242a15c71ff0a900f115f0915aa5c5b1b9279
Fri, 19 Apr 2024 22:44:26 +0000
[PATCH 0410/1000] Revert "torch.mtia module for MTIA device backend (#123612)"
This reverts commit d7e1bf9ff908d2a9c20d5354426d34c539fcb7a1. Reverted https://github.com/pytorch/pytorch/pull/123612 on behalf of https://github.com/jeffdaily because it broke ROCm; see test_overrides.py ([comment](https://github.com/pytorch/pytorch/pull/123611#issuecomment-2067363780))
diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h index 99ed7c53fc..931cd86e77 100644 --- a/aten/src/ATen/Context.h +++ b/aten/src/ATen/Context.h @@ -68,8 +68,6 @@ class TORCH_API Context { return at::detail::getMPSHooks(); } else if (device_type == at::kPrivateUse1) { return at::detail::getPrivateUse1Hooks(); - } else if (device_type == at::kMTIA) { - return at::detail::getMTIAHooks(); } else { AT_ERROR( c10::DeviceTypeName(device_type), " device type not an accelerator."); @@ -154,9 +152,6 @@ class TORCH_API Context { void lazyInitXPU() { c10::call_once(thx_init, [&] { detail::getXPUHooks().initXPU(); }); } - void lazyInitMTIA() { - c10::call_once(th_mtia_init, [&] { detail::getMTIAHooks().initMTIA(); }); - } void lazyInitPrivateUse1() { c10::call_once(thp_init, [&] { if (isPrivateUse1HooksRegistered()) { @@ -347,7 +342,6 @@ class TORCH_API Context { c10::once_flag thc_init; c10::once_flag thh_init; c10::once_flag thx_init; - c10::once_flag th_mtia_init; c10::once_flag thp_init; bool enabled_cudnn = true; bool deterministic_cudnn = false; diff --git a/aten/src/ATen/DeviceAccelerator.cpp b/aten/src/ATen/DeviceAccelerator.cpp index ec3cd2a2f5..05327cc219 100644 --- a/aten/src/ATen/DeviceAccelerator.cpp +++ b/aten/src/ATen/DeviceAccelerator.cpp @@ -10,9 +10,6 @@ C10_API std::optional<DeviceType> getAccelerator(bool checked) { #define CHECK_NO_PU1 \ TORCH_CHECK(!is_privateuse1_backend_registered(), "Cannot have both CUDA and PrivateUse1"); -#define CHECK_NO_MTIA \ - TORCH_CHECK(!at::hasMTIA(), "Cannot have MTIA with other devices"); - if (is_privateuse1_backend_registered()) { // We explicitly allow PrivateUse1 and another device at the same time // as we use this for testing. @@ -20,12 +17,7 @@ C10_API std::optional<DeviceType> getAccelerator(bool checked) { return kPrivateUse1; } else if (at::hasCUDA()) { CHECK_NO_PU1 - CHECK_NO_MTIA return kCUDA; - } else if (at::hasMTIA()) { - CHECK_NO_CUDA - CHECK_NO_PU1 - return kMTIA; } else { TORCH_CHECK(!checked, "Cannot access accelerator device when none is available.") return std::nullopt; diff --git a/aten/src/ATen/detail/AcceleratorHooksInterface.h b/aten/src/ATen/detail/AcceleratorHooksInterface.h index 96e15e1f69..c099c9f59a 100644 --- a/aten/src/ATen/detail/AcceleratorHooksInterface.h +++ b/aten/src/ATen/detail/AcceleratorHooksInterface.h @@ -1,7 +1,7 @@ #pragma once #include <c10/core/Device.h> -#include <c10/core/Stream.h> + namespace at { // AcceleratorHooksInterface is a shared interface provided by all @@ -16,29 +16,6 @@ struct TORCH_API AcceleratorHooksInterface { // Whether the device at device_index is fully initialized or not. 
virtual bool hasPrimaryContext(DeviceIndex device_index) const = 0; - - virtual DeviceIndex deviceCount() const { - return 0; - } - - virtual void setCurrentDevice(DeviceIndex device) const { - TORCH_CHECK(false, "Backend doesn't support setCurrentDevice()"); - } - - virtual DeviceIndex getCurrentDevice() const { - TORCH_CHECK(false, "Backend doesn't support getCurrentDevice()"); - return -1; - } - - virtual DeviceIndex exchangeDevice(DeviceIndex device) const { - TORCH_CHECK(false, "Backend doesn't support exchangeDevice()"); - return -1; - } - - virtual DeviceIndex maybeExchangeDevice(DeviceIndex device) const { - TORCH_CHECK(false, "Backend doesn't support maybeExchangeDevice()"); - return -1; - } }; } // namespace at diff --git a/aten/src/ATen/detail/MTIAHooksInterface.cpp b/aten/src/ATen/detail/MTIAHooksInterface.cpp index 0963881713..6b69fdb03f 100644 --- a/aten/src/ATen/detail/MTIAHooksInterface.cpp +++ b/aten/src/ATen/detail/MTIAHooksInterface.cpp @@ -8,22 +8,19 @@ namespace at { namespace detail { -const MTIAHooksInterface& getMTIAHooks() { - static std::unique_ptr<MTIAHooksInterface> mtia_hooks = nullptr; + +const MTIAHooksInterface &getMTIAHooks() { + static MTIAHooksInterface* MTIA_hooks = nullptr; static c10::once_flag once; c10::call_once(once, [] { - mtia_hooks = MTIAHooksRegistry()->Create("MTIAHooks", MTIAHooksArgs{}); - if (!mtia_hooks) { - mtia_hooks = std::make_unique<MTIAHooksInterface>(); + MTIA_hooks = + MTIAHooksRegistry()->Create("MTIAHooks", MTIAHooksArgs{}).release(); + if (!MTIA_hooks) { + MTIA_hooks = new MTIAHooksInterface(); } }); - return *mtia_hooks; -} - -bool isMTIAHooksBuilt() { - return MTIAHooksRegistry()->Has("MTIAHooks"); + return *MTIA_hooks; } - } // namespace detail C10_DEFINE_REGISTRY(MTIAHooksRegistry, MTIAHooksInterface, MTIAHooksArgs) diff --git a/aten/src/ATen/detail/MTIAHooksInterface.h b/aten/src/ATen/detail/MTIAHooksInterface.h index 1da1bda4e6..c843ca52c2 100644 --- a/aten/src/ATen/detail/MTIAHooksInterface.h +++ b/aten/src/ATen/detail/MTIAHooksInterface.h @@ -1,9 +1,7 @@ #pragma once -#include <c10/core/Device.h> #include <c10/util/Exception.h> -#include <c10/core/Stream.h> #include <c10/util/Registry.h> #include <ATen/detail/AcceleratorHooksInterface.h> @@ -22,72 +20,33 @@ constexpr const char* MTIA_HELP = "to use some MTIA's functionality without MTIA extension included."; struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface { -// this fails the implementation if MTIAHooks functions are called, but -// MTIA backend is not present. -#define FAIL_MTIAHOOKS_FUNC(func) \ - TORCH_CHECK(false, "Cannot execute ", func, "() without MTIA backend."); - virtual ~MTIAHooksInterface() override = default; virtual void initMTIA() const { - // Avoid logging here, since MTIA needs init devices first then it will know - // how many devices are available. Make it as no-op if mtia extension is not - // dynamically loaded. 
- return; + TORCH_CHECK( + false, + "Cannot initialize MTIA without MTIA Extension for PyTorch.", + MTIA_HELP); } virtual bool hasMTIA() const { return false; } - virtual DeviceIndex deviceCount() const override { - return 0; - } - - virtual void deviceSynchronize(c10::DeviceIndex device_index) const { - FAIL_MTIAHOOKS_FUNC(__func__); - } - virtual std::string showConfig() const { - FAIL_MTIAHOOKS_FUNC(__func__); + TORCH_CHECK( + false, + "Cannot query detailed MTIA version without MTIA Extension for PyTorch.", + MTIA_HELP); } virtual bool hasPrimaryContext(DeviceIndex device_index) const override { - return false; - } - - virtual void setCurrentDevice(DeviceIndex device) const override { - FAIL_MTIAHOOKS_FUNC(__func__); - } - - virtual DeviceIndex getCurrentDevice() const override { - FAIL_MTIAHOOKS_FUNC(__func__); - return -1; + TORCH_CHECK( + false, + "Cannot check MTIA primary context without MTIA Extension for PyTorch.", + MTIA_HELP); } - virtual DeviceIndex exchangeDevice(DeviceIndex device) const override { - FAIL_MTIAHOOKS_FUNC(__func__); - return -1; - } - - virtual DeviceIndex maybeExchangeDevice(DeviceIndex device) const override { - FAIL_MTIAHOOKS_FUNC(__func__); - return -1; - } - - virtual c10::Stream getCurrentStream(DeviceIndex device) const { - FAIL_MTIAHOOKS_FUNC(__func__); - return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA); - } - - virtual c10::Stream getDefaultStream(DeviceIndex device) const { - FAIL_MTIAHOOKS_FUNC(__func__); - return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA); - } - - virtual void setCurrentStream(const c10::Stream& stream) const { - FAIL_MTIAHOOKS_FUNC(__func__); - } }; struct TORCH_API MTIAHooksArgs {}; @@ -98,6 +57,5 @@ C10_DECLARE_REGISTRY(MTIAHooksRegistry, MTIAHooksInterface, MTIAHooksArgs); namespace detail { TORCH_API const MTIAHooksInterface& getMTIAHooks(); -TORCH_API bool isMTIAHooksBuilt(); } // namespace detail } // namespace at diff --git a/build_variables.bzl b/build_variables.bzl index 36e54ffda4..c7bddeaa3b 100644 --- a/build_variables.bzl +++ b/build_variables.bzl @@ -822,7 +822,6 @@ libtorch_python_core_sources = [ "torch/csrc/dynamo/init.cpp", "torch/csrc/functorch/init.cpp", "torch/csrc/mps/Module.cpp", - "torch/csrc/mtia/Module.cpp", "torch/csrc/inductor/aoti_runner/pybind.cpp", "torch/csrc/jit/backends/backend_init.cpp", "torch/csrc/jit/python/init.cpp", diff --git a/docs/source/index.rst b/docs/source/index.rst index a7afe60bc2..9e7cc6a9a6 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -69,7 +69,6 @@ Features described in this documentation are classified by release status: torch.cuda.memory <torch_cuda_memory> mps xpu - mtia meta torch.backends <backends> torch.export <export> diff --git a/docs/source/mtia.rst b/docs/source/mtia.rst deleted file mode 100644 index f2f5b5195d..0000000000 --- a/docs/source/mtia.rst +++ /dev/null @@ -1,34 +0,0 @@ -torch.mtia -=================================== - -The MTIA backend is implemented out of the tree, only interfaces are be defined here. - -.. automodule:: torch.mtia -.. currentmodule:: torch.mtia - -.. autosummary:: - :toctree: generated - :nosignatures: - - StreamContext - current_device - current_stream - default_stream - device_count - init - is_available - is_initialized - set_stream - stream - synchronize - device - DeferredMtiaCallError - -Streams and events ------------------- -.. 
autosummary:: - :toctree: generated - :nosignatures: - - Event - Stream diff --git a/docs/source/torch.rst b/docs/source/torch.rst index 32bcadc154..b65a7a5239 100644 --- a/docs/source/torch.rst +++ b/docs/source/torch.rst @@ -684,7 +684,6 @@ Utilities set_float32_matmul_precision get_float32_matmul_precision set_warn_always - get_device_module is_warn_always_enabled vmap _assert diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in index 583bd384ed..4d3f2b64ff 100644 --- a/torch/_C/__init__.pyi.in +++ b/torch/_C/__init__.pyi.in @@ -1700,24 +1700,6 @@ _TensorBase = TensorBase # Defined in torch/csrc/multiprocessing/init.cpp def _multiprocessing_init() -> None: ... -# Defined in torch/csrc/Module.cpp -def _accelerator_hooks_device_count() -> _int: ... -def _accelerator_hooks_set_current_device(device_index: _int) -> None: ... -def _accelerator_hooks_get_current_device() -> _int: ... -def _accelerator_hooks_exchange_device(device_index: _int) -> _int: ... -def _accelerator_hooks_maybe_exchange_device(device_index: _int) -> _int: ... -def _get_accelerator(check: _bool = False) -> _device: ... - -# Defined in torch/csrc/mtia/Module.cpp -def _mtia_init() -> None: ... -def _mtia_isBuilt() -> _bool: ... -def _mtia_isInBadFork() -> _bool: ... -def _mtia_deviceSynchronize() -> None: ... -def _mtia_getCurrentStream(device: _int) -> Stream: ... -def _mtia_setCurrentStream(stream: Stream) -> None: ... -def _mtia_getDefaultStream(device: _int) -> Stream: ... - - # Defined in torch/csrc/mps/Module.cpp def _mps_deviceSynchronize() -> None: ... def _mps_get_default_generator() -> Generator: ... diff --git a/torch/_C/_autograd.pyi b/torch/_C/_autograd.pyi index 7e503a8e90..92b21f96df 100644 --- a/torch/_C/_autograd.pyi +++ b/torch/_C/_autograd.pyi @@ -23,7 +23,6 @@ class DeviceType(Enum): FPGA = ... ORT = ... XLA = ... - MTIA = ... MPS = ... HPU = ... Meta = ... diff --git a/torch/__init__.py b/torch/__init__.py index 846038e351..9a7249f220 100644 --- a/torch/__init__.py +++ b/torch/__init__.py @@ -58,7 +58,6 @@ __all__ = [ 'SymBool', 'sym_not', 'unravel_index', 'sym_int', 'sym_float', 'sym_max', 'sym_min', 'sym_ite', 'compile', 'vmap', 'export', 'autocast', 'cond', 'GradScaler', - 'get_device_module', ] ################################################################################ @@ -1580,7 +1579,6 @@ from torch import cuda as cuda from torch import cpu as cpu from torch import mps as mps from torch import xpu as xpu -from torch import mtia as mtia from torch import autograd as autograd from torch.autograd import ( no_grad as no_grad, @@ -2018,27 +2016,6 @@ else: raise AttributeError(f"module '{__name__}' has no attribute '{name}'") -def get_device_module(device: Optional[Union[torch.device, str]] = None): - """ - Returns the module associated with a given device(e.g., torch.device('cuda'), "mtia:0", "xpu", ...). - If no device is given, return the module for the current accelerator or CPU if none is present. - """ - if isinstance(device, torch.device): - device_module_name = device.type - elif isinstance(device, str): - device_module_name = torch.device(device).type - elif device is None: - # Using default accelerator type. If no accelerator is available, it automatically returns CPU device. 
- device_module_name = torch._C._get_accelerator().type - else: - raise RuntimeError(f"Invalid value of device '{device}', expect torch.device, str, or None") - device_module = getattr(torch, device_module_name, None) - if device_module is None: - raise RuntimeError( - f"Device '{device_module_name}' does not have a corresponding module registered as 'torch.{device_module_name}'." - ) - return device_module - def _constrain_as_value(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None): """ diff --git a/torch/_utils.py b/torch/_utils.py index 43c6284d24..7f9a1af43f 100644 --- a/torch/_utils.py +++ b/torch/_utils.py @@ -713,8 +713,6 @@ def _get_available_device_type(): return "cuda" if hasattr(torch, "xpu") and torch.xpu.is_available(): # type: ignore[attr-defined] return "xpu" - if hasattr(torch, "mtia") and torch.mtia.is_available(): - return "mtia" custom_backend_name = torch._C._get_privateuse1_backend_name() custom_device_mod = getattr(torch, custom_backend_name, None) if custom_device_mod and custom_device_mod.is_available(): @@ -729,8 +727,6 @@ def _get_device_attr(get_member): return get_member(torch.cuda) if device_type and device_type.lower() == "xpu": return get_member(torch.xpu) # type: ignore[attr-defined] - if device_type and device_type.lower() == "mtia": - return get_member(torch.mtia) if device_type == torch._C._get_privateuse1_backend_name(): return get_member(getattr(torch, device_type)) # add more available device types here diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp index dd7b74c909..8aff73047f 100644 --- a/torch/csrc/Module.cpp +++ b/torch/csrc/Module.cpp @@ -1,4 +1,3 @@ -#include <ATen/DeviceAccelerator.h> #include <c10/util/Optional.h> #include <fmt/core.h> #include <sys/types.h> @@ -16,12 +15,10 @@ #include <ATen/Parallel.h> #include <ATen/Utils.h> #include <ATen/core/Vitals.h> -#include <ATen/detail/AcceleratorHooksInterface.h> #include <ATen/dlpack.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/ForeachUtils.h> #include <ATen/native/Normalization.h> -#include <c10/core/Device.h> #include <c10/core/DispatchKeySet.h> #include <c10/util/AbortHandler.h> #include <c10/util/Backtrace.h> @@ -74,7 +71,6 @@ #include <torch/csrc/lazy/python/init.h> #include <torch/csrc/monitor/python_init.h> #include <torch/csrc/mps/Module.h> -#include <torch/csrc/mtia/Module.h> #include <torch/csrc/multiprocessing/init.h> #include <torch/csrc/onnx/init.h> #include <torch/csrc/profiler/python/init.h> @@ -1644,7 +1640,6 @@ PyObject* initModule() { #ifdef USE_XPU torch::xpu::initModule(module); #endif - torch::mtia::initModule(module); torch::cpu::initModule(module); torch::initVerboseBindings(module); ASSERT_TRUE(THPStorage_init(module)); @@ -1940,70 +1935,6 @@ Call this whenever a new thread is created in order to propagate values from return at::globalContext().linalgPreferredBackend(); }); - py_module.def("_accelerator_hooks_device_count", []() { - auto device_type = at::getAccelerator(); - if (device_type.has_value()) { - return at::globalContext() - .getAcceleratorHooksInterface(device_type.value()) - .deviceCount(); - } - return c10::DeviceIndex(-1); - }); - - py_module.def( - "_accelerator_hooks_set_current_device", - [](c10::DeviceIndex device_index) { - auto device_type = at::getAccelerator(); - if (device_type.has_value()) { - at::globalContext() - .getAcceleratorHooksInterface(device_type.value()) - .setCurrentDevice(device_index); - } - }); - - py_module.def("_accelerator_hooks_get_current_device", []() { - auto device_type = 
at::getAccelerator(); - if (device_type.has_value()) { - return at::globalContext() - .getAcceleratorHooksInterface(device_type.value()) - .getCurrentDevice(); - } - return c10::DeviceIndex(-1); - }); - - py_module.def( - "_accelerator_hooks_exchange_device", [](c10::DeviceIndex device_index) { - auto device_type = at::getAccelerator(); - if (device_type.has_value()) { - return at::globalContext() - .getAcceleratorHooksInterface(device_type.value()) - .exchangeDevice(device_index); - } - return c10::DeviceIndex(-1); - }); - - py_module.def( - "_accelerator_hooks_maybe_exchange_device", - [](c10::DeviceIndex device_index) { - auto device_type = at::getAccelerator(); - if (device_type.has_value()) { - return at::globalContext() - .getAcceleratorHooksInterface(device_type.value()) - .maybeExchangeDevice(device_index); - } - return c10::DeviceIndex(-1); - }); - - py_module.def( - "_get_accelerator", - [](c10::optional<bool> check = c10::nullopt) { - return c10::Device( - at::getAccelerator(check.value_or(false)) - .value_or(c10::DeviceType::CPU), - -1); - }, - py::arg("check") = nullptr); - py_module.def( "_construct_storage_from_data_pointer", [](int64_t data_ptr, c10::Device device, size_t size_bytes) { diff --git a/torch/csrc/mtia/Module.cpp b/torch/csrc/mtia/Module.cpp deleted file mode 100644 index 84cc11f718..0000000000 --- a/torch/csrc/mtia/Module.cpp +++ /dev/null @@ -1,81 +0,0 @@ -#include <ATen/ATen.h> -#include <c10/util/CallOnce.h> -#include <torch/csrc/Generator.h> -#include <torch/csrc/Stream.h> -#include <torch/csrc/python_headers.h> -#include <torch/csrc/utils/device_lazy_init.h> -#include <torch/csrc/utils/pybind.h> - -#include <c10/core/DeviceType.h> -#include <c10/core/Stream.h> -#ifndef WIN32 -#include <pthread.h> -#endif - -namespace torch { -namespace mtia { - -static bool in_bad_fork = false; // True for children forked after mtia init - -#ifndef WIN32 -// Called in the forked child if mtia has already been initialized -static void forked_child() { - in_bad_fork = true; - torch::utils::set_requires_device_init(at::kMTIA, true); -} -#endif - -// Should be called before the first mtia call. -// Note: This is distinct from initExtension because a stub mtia implementation -// has some working functions (e.g. device_count) but cannot fully initialize. -static void poison_fork() { -#ifndef WIN32 - static c10::once_flag flag; - c10::call_once(flag, [] { pthread_atfork(nullptr, nullptr, forked_child); }); -#endif -} - -void initModule(PyObject* module) { - auto m = py::handle(module).cast<py::module>(); - - m.def("_mtia_init", []() { - TORCH_INTERNAL_ASSERT(!in_bad_fork); // Handled at python level - poison_fork(); - at::globalContext().lazyInitMTIA(); - }); - - m.def("_mtia_isBuilt", []() { - // Check if the MTIAHooks class has been registered with the registry. 
- return at::detail::isMTIAHooksBuilt(); - }); - - m.def("_mtia_isInBadFork", []() { return in_bad_fork; }); - - m.def("_mtia_getCurrentStream", [](c10::DeviceIndex device_index) { - torch::utils::device_lazy_init(at::kMTIA); - return at::detail::getMTIAHooks().getCurrentStream(device_index); - }); - - m.def("_mtia_deviceSynchronize", [](c10::DeviceIndex device_index) { - torch::utils::device_lazy_init(at::kMTIA); - at::detail::getMTIAHooks().deviceSynchronize( - at::detail::getMTIAHooks().getCurrentDevice()); - }); - - m.def("_mtia_getDefaultStream", [](c10::DeviceIndex device_index) { - torch::utils::device_lazy_init(at::kMTIA); - return at::detail::getMTIAHooks().getDefaultStream(device_index); - }); - - m.def("_mtia_setCurrentStream", [](const c10::Stream& stream) { - torch::utils::device_lazy_init(at::kMTIA); - auto device = at::detail::getMTIAHooks().getCurrentDevice(); - if (device != stream.device_index()) { - at::detail::getMTIAHooks().setCurrentDevice(stream.device_index()); - } - at::detail::getMTIAHooks().setCurrentStream(stream); - }); -} - -} // namespace mtia -} // namespace torch diff --git a/torch/csrc/mtia/Module.h b/torch/csrc/mtia/Module.h deleted file mode 100644 index 96a98ed448..0000000000 --- a/torch/csrc/mtia/Module.h +++ /dev/null @@ -1,12 +0,0 @@ -#pragma once - -#include <torch/csrc/python_headers.h> - -namespace torch { -namespace mtia { - -// PyMethodDef* python_functions(); -void initModule(PyObject* module); - -} // namespace mtia -} // namespace torch diff --git a/torch/csrc/utils/pybind.h b/torch/csrc/utils/pybind.h index 1a4e7bb26f..36cb83659a 100644 --- a/torch/csrc/utils/pybind.h +++ b/torch/csrc/utils/pybind.h @@ -194,12 +194,6 @@ struct type_caster<c10::Stream> { // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) PYBIND11_TYPE_CASTER(c10::Stream, _("torch.Stream")); - // PYBIND11_TYPE_CASTER defines a member field called value. Since c10::Stream - // cannot be default-initialized, we provide this constructor to explicitly - // initialize that field. The value doesn't matter as it will be overwritten - // after a successful call to load. - type_caster() : value(c10::Stream::DEFAULT, c10::Device(c10::kCPU, 0)) {} - bool load(handle src, bool) { PyObject* obj = src.ptr(); if (THPStream_Check(obj)) { diff --git a/torch/mtia/__init__.py b/torch/mtia/__init__.py deleted file mode 100644 index 4007f0e584..0000000000 --- a/torch/mtia/__init__.py +++ /dev/null @@ -1,262 +0,0 @@ -r""" -This package enables an interface for accessing MTIA backend in python -""" - -import threading -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -import torch - -from torch.types import Device - -from .. 
import device as _device, Tensor -from .._utils import _dummy_type, _LazySeedTracker, classproperty -from ._utils import _get_device_index - -_device_t = Union[_device, str, int, None] - -# torch.mtia.Event/Stream is alias of torch.Event/Stream -Event = torch.Event -Stream = torch.Stream - -_initialized = False -_queued_calls: List[ - Tuple[Callable[[], None], List[str]] -] = [] # don't invoke these until initialization occurs -_tls = threading.local() -_initialization_lock = threading.Lock() -_lazy_seed_tracker = _LazySeedTracker() - - -def init(): - _lazy_init() - - -def is_initialized(): - r"""Return whether PyTorch's MTIA state has been initialized.""" - return _initialized and not _is_in_bad_fork() - - -def _is_in_bad_fork() -> bool: - return torch._C._mtia_isInBadFork() - - -def _lazy_init() -> None: - global _initialized, _queued_calls - if is_initialized() or hasattr(_tls, "is_initializing"): - return - with _initialization_lock: - # We be double-checked locking, boys! This is OK because - # the above test was GIL protected anyway. The inner test - # is for when a thread blocked on some other thread which was - # doing the initialization; when they get the lock, they will - # find there is nothing left to do. - if is_initialized(): - return - # It is important to prevent other threads from entering _lazy_init - # immediately, while we are still guaranteed to have the GIL, because some - # of the C calls we make below will release the GIL - if _is_in_bad_fork(): - raise RuntimeError( - "Cannot re-initialize MTIA in forked subprocess. To use MTIA with " - "multiprocessing, you must use the 'spawn' start method" - ) - if not _is_compiled(): - raise AssertionError("Torch not compiled with MTIA enabled") - - torch._C._mtia_init() - # Some of the queued calls may reentrantly call _lazy_init(); - # we need to just return without initializing in that case. - # However, we must not let any *other* threads in! - _tls.is_initializing = True - - for calls in _lazy_seed_tracker.get_calls(): - if calls: - _queued_calls.append(calls) - - try: - for queued_call, orig_traceback in _queued_calls: - try: - queued_call() - except Exception as e: - msg = ( - f"MTIA call failed lazily at initialization with error: {str(e)}\n\n" - f"MTIA call was originally invoked at:\n\n{''.join(orig_traceback)}" - ) - raise DeferredMtiaCallError(msg) from e - finally: - delattr(_tls, "is_initializing") - _initialized = True - - -class DeferredMtiaCallError(Exception): - pass - - -def _is_compiled() -> bool: - r"""Return true if compiled with MTIA support.""" - return torch._C._mtia_isBuilt() - - -def is_available() -> bool: - r"""Return true if MTIA device is available""" - if not _is_compiled(): - return False - # MTIA has to init devices first to know if there is any devices available. - return device_count() > 0 - - -def synchronize() -> None: - r"""Waits for all jobs in all streams on a MTIA device to complete.""" - return torch._C._mtia_deviceSynchronize() - - -def device_count() -> int: - r"""Return the number of MTIA devices available.""" - return torch._C._accelerator_hooks_device_count() - - -def current_device() -> int: - r"""Return the index of a currently selected device.""" - return torch._C._accelerator_hooks_get_current_device() - - -def current_stream(device: Optional[_device_t] = None) -> Stream: - r"""Return the currently selected :class:`Stream` for a given device. - - Args: - device (torch.device or int, optional): selected device. 
Returns - the currently selected :class:`Stream` for the current device, given - by :func:`~torch.mtia.current_device`, if :attr:`device` is ``None`` - (default). - """ - return torch._C._mtia_getCurrentStream(_get_device_index(device, optional=True)) - - -def default_stream(device: Optional[_device_t] = None) -> Stream: - r"""Return the default :class:`Stream` for a given device. - - Args: - device (torch.device or int, optional): selected device. Returns - the default :class:`Stream` for the current device, given by - :func:`~torch.mtia.current_device`, if :attr:`device` is ``None`` - (default). - """ - return torch._C._mtia_getDefaultStream(_get_device_index(device, optional=True)) - - -def set_stream(stream: Stream): - r"""Set the current stream.This is a wrapper API to set the stream. - Usage of this function is discouraged in favor of the ``stream`` - context manager. - - Args: - stream (Stream): selected stream. This function is a no-op - if this argument is ``None``. - """ - if stream is None: - return - torch._C._mtia_setCurrentStream(stream) - - -class device: - r"""Context-manager that changes the selected device. - - Args: - device (torch.device or int): device index to select. It's a no-op if - this argument is a negative integer or ``None``. - """ - - def __init__(self, device: Any): - self.idx = _get_device_index(device, optional=True) - self.prev_idx = -1 - - def __enter__(self): - self.prev_idx = torch._C._accelerator_hooks_maybe_exchange_device(self.idx) - - def __exit__(self, type: Any, value: Any, traceback: Any): - self.idx = torch._C._accelerator_hooks_maybe_exchange_device(self.prev_idx) - return False - - -class StreamContext: - r"""Context-manager that selects a given stream. - - All MTIA kernels queued within its context will be enqueued on a selected - stream. - - Args: - Stream (Stream): selected stream. This manager is a no-op if it's - ``None``. - .. note:: Streams are per-device. 
- """ - - cur_stream: Optional["torch.mtia.Stream"] - - def __init__(self, stream: Optional["torch.mtia.Stream"]): - self.stream = stream - self.idx = _get_device_index(None, True) - if not torch.jit.is_scripting(): - if self.idx is None: - self.idx = -1 - - self.src_prev_stream = ( - None if not torch.jit.is_scripting() else torch.mtia.default_stream(None) - ) - self.dst_prev_stream = ( - None if not torch.jit.is_scripting() else torch.mtia.default_stream(None) - ) - - def __enter__(self): - # Local cur_stream variable for type refinement - cur_stream = self.stream - # Return if stream is None or MTIA device not available - if cur_stream is None or self.idx == -1: - return - self.src_prev_stream = torch.mtia.current_stream(None) - - # If the stream is not on the current device, then - # set the current stream on the device - if self.src_prev_stream.device != cur_stream.device: - with device(cur_stream.device): - self.dst_prev_stream = torch.mtia.current_stream(cur_stream.device) - torch.mtia.set_stream(cur_stream) - - def __exit__(self, type: Any, value: Any, traceback: Any): - # Local cur_stream variable for type refinement - cur_stream = self.stream - # If stream is None or no MTIA device available, return - if cur_stream is None or self.idx == -1: - return - - # Reset the stream on the original device - # and destination device - if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr] - torch.mtia.set_stream(self.dst_prev_stream) # type: ignore[arg-type] - torch.mtia.set_stream(self.src_prev_stream) # type: ignore[arg-type] - - -def stream(stream: Optional["torch.mtia.Stream"]) -> StreamContext: - r"""Wrap around the Context-manager StreamContext that selects a given stream. - - Arguments: - stream (Stream): selected stream. This manager is a no-op if it's - ``None``. - ..Note:: In eager mode stream is of type Stream class while in JIT it doesn't support torch.mtia.stream - """ - return StreamContext(stream) - - -__all__ = [ - "init", - "is_available", - "is_initialized", - "synchronize", - "device_count", - "current_device", - "current_stream", - "default_stream", - "set_stream", - "stream", - "device", -] diff --git a/torch/mtia/_utils.py b/torch/mtia/_utils.py deleted file mode 100644 index 090e26f321..0000000000 --- a/torch/mtia/_utils.py +++ /dev/null @@ -1,38 +0,0 @@ -from typing import Any - -import torch - -# The _get_device_index has been moved to torch.utils._get_device_index -from torch._utils import _get_device_index as _torch_get_device_index - - -def _get_device_index( - device: Any, optional: bool = False, allow_cpu: bool = False -) -> int: - r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``. - - If :attr:`device` is a torch.device object, returns the device index if it - is a MTIA device. Note that for a MTIA device without a specified index, - i.e., ``torch.device('mtia')``, this will return the current default MTIA - device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``, - CPU devices will be accepted and ``-1`` will be returned in this case. - - If :attr:`device` is a Python integer, it is returned as is. - - If :attr:`device` is ``None``, this will return the current default MTIA - device if :attr:`optional` is ``True``. 
- """ - if isinstance(device, int): - return device - if isinstance(device, str): - device = torch.device(device) - if isinstance(device, torch.device): - if allow_cpu: - if device.type not in ["mtia", "cpu"]: - raise ValueError(f"Expected a mtia or cpu device, but got: {device}") - elif device.type != "mtia": - raise ValueError(f"Expected a mtia device, but got: {device}") - if not torch.jit.is_scripting(): - if isinstance(device, torch.mtia.device): - return device.idx - return _torch_get_device_index(device, optional, allow_cpu) diff --git a/torch/overrides.py b/torch/overrides.py index 4ce2548800..6a5d3e891d 100644 --- a/torch/overrides.py +++ b/torch/overrides.py @@ -281,7 +281,6 @@ def get_ignored_functions() -> Set[Callable]: torch.use_deterministic_algorithms, torch.is_deterministic_algorithms_warn_only_enabled, torch.set_deterministic_debug_mode, - torch.get_device_module, torch.get_deterministic_debug_mode, torch.set_float32_matmul_precision, torch.get_float32_matmul_precision,
2.41.0
cbf888a130bc87436f32095dfc2406d2e123993
Fri, 19 Apr 2024 22:50:13 +0000
[PATCH 0412/1000] rename sl to strobelight (#124455)
Summary:
TORCH_COMPILE_SL_PROFILE -> TORCH_COMPILE_STROBELIGHT
SL_MAX_STACK_LENGTH -> COMPILE_STROBELIGHT_MAX_STACK_LENGTH
SL_MAX_PROFILE_TIME -> COMPILE_STROBELIGHT_MAX_PROFILE_TIME
profile_with_sl() -> strobelight()
compiletime_sl_profile_meta() -> compiletime_strobelight_meta()

Test Plan:
1. run and verify
```
TORCH_COMPILE_STROBELIGHT=TRUE buck2 run @//mode/inplace @//mode/opt //caffe2/fb/strobelight:compiletime_profiler_example
```
2. run and verify
```
buck2 run @//mode/inplace @//mode/opt //caffe2/fb/strobelight:function_profiler_example --local-only
```
3. run and verify truncated stack for
```
TORCH_COMPILE_STROBELIGHT=TRUE COMPILE_STROBELIGHT_MAX_STACK_LENGTH=1 buck2 run @//mode/inplace @//mode/opt //caffe2/fb/strobelight:compiletime_profiler_example
```
4. add infinite loop in _verify and verify samples for
```
COMPILE_STROBELIGHT_MAX_PROFILE_TIME=30 TORCH_COMPILE_STROBELIGHT=TRUE buck2 run @//mode/inplace @//mode/opt //caffe2/fb/strobelight:compiletime_profiler_example
```

Reviewed By: oulgen
Differential Revision: D56327139
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124455
Approved by: https://github.com/oulgen
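For context, the open-source stub of the renamed decorator is a pass-through wrapper (see the torch/_utils_internal.py hunk in the diff below), so outside Meta infrastructure decorating a function with it is a no-op. A minimal sketch, assuming a PyTorch build that includes this rename; the decorated function and the phase_name value are made up for illustration:

```
from torch._utils_internal import compiletime_strobelight_meta

# Outside Meta infrastructure this decorator just wraps the function and
# calls it unchanged, so it is safe to apply unconditionally.
@compiletime_strobelight_meta(phase_name="example_phase")
def compile_step(x):
    return x + 1

print(compile_step(41))  # 42
```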
diff --git a/torch/_dynamo/convert_frame.py b/torch/_dynamo/convert_frame.py index 725cfbaf50..f0770df439 100644 --- a/torch/_dynamo/convert_frame.py +++ b/torch/_dynamo/convert_frame.py @@ -28,7 +28,7 @@ import torch import torch._logging from torch._guards import compile_context, CompileContext, CompileId, tracing from torch._logging import structured -from torch._utils_internal import compiletime_sl_profile_meta, signpost_event +from torch._utils_internal import compiletime_strobelight_meta, signpost_event from torch.fx.experimental.symbolic_shapes import ( ConstraintViolationError, GuardOnDataDependentSymNode, @@ -452,7 +452,7 @@ def register_bytecode_hook(hook: BytecodeHook) -> RemovableHandle: return handle -@compiletime_sl_profile_meta(phase_name="_compile") +@compiletime_strobelight_meta(phase_name="_compile") @_use_lazy_graph_module(config.use_lazy_graph_module) @maybe_cprofile def _compile( diff --git a/torch/_inductor/compile_fx.py b/torch/_inductor/compile_fx.py index b6aca56ad0..5eeb3f688e 100644 --- a/torch/_inductor/compile_fx.py +++ b/torch/_inductor/compile_fx.py @@ -36,7 +36,7 @@ from torch._inductor.utils import BoxedBool, count_tangents from torch._logging import trace_structured from torch._ops import OpOverload from torch._subclasses.fake_tensor import FakeTensor -from torch._utils_internal import compiletime_sl_profile_meta +from torch._utils_internal import compiletime_strobelight_meta from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols from torch.fx.passes.fake_tensor_prop import FakeTensorProp @@ -1349,7 +1349,7 @@ def compile_fx( graph, joint_inputs, **kwargs, compiler="inductor" ) - @compiletime_sl_profile_meta(phase_name="bw_compiler") + @compiletime_strobelight_meta(phase_name="bw_compiler") @dynamo_utils.dynamo_timed @dynamo_utils.maybe_cprofile def bw_compiler(model: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): diff --git a/torch/_utils_internal.py b/torch/_utils_internal.py index 670f14457e..f89fa99b3b 100644 --- a/torch/_utils_internal.py +++ b/torch/_utils_internal.py @@ -63,15 +63,15 @@ def throw_abstract_impl_not_imported_error(opname, module, context): # Meta only, act as nop otherwise. -def compiletime_sl_profile_meta(phase_name): - def compiletime_sl_profile_inner(function): +def compiletime_strobelight_meta(phase_name): + def compiletime_strobelight_meta_inner(function): @functools.wraps(function) def wrapper_function(*args, **kwargs): return function(*args, **kwargs) return wrapper_function - return compiletime_sl_profile_inner + return compiletime_strobelight_meta_inner # Meta only, see
2.41.0
9fce110af428a600e597291a0ab80e43dc39e93
Fri, 19 Apr 2024 12:12:23 -0700
[PATCH 0415/1000] [FSDP2][ez] Removed error check for swap tensors flag (#124513)
Since `DTensor` uses `swap_tensors` path automatically now, we can remove this check for the global flag. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124513 Approved by: https://github.com/weifengpy ghstack dependencies: #124319, #120256
diff --git a/torch/distributed/_composable/fsdp/fully_shard.py b/torch/distributed/_composable/fsdp/fully_shard.py index 82820a56a2..11aaf758a6 100644 --- a/torch/distributed/_composable/fsdp/fully_shard.py +++ b/torch/distributed/_composable/fsdp/fully_shard.py @@ -289,16 +289,10 @@ class FSDP: module_info = fsdp_param._module_info new_param = getattr(module_info.module, module_info.param_name) if new_param is not fsdp_param.sharded_param: - if torch.__future__.get_swap_module_params_on_conversion(): - raise AssertionError( - "Expects swap_tensors to preserve object but got " - f"{new_param} instead of {fsdp_param.sharded_param}" - ) - else: - raise AssertionError( - "Please set torch.__future__.set_swap_module_params_on_conversion(True) " - "to use _apply methods with FSDP" - ) + raise AssertionError( + "Expects swap_tensors to preserve object but got " + f"{new_param} instead of {fsdp_param.sharded_param}" + ) local_tensor = new_param._local_tensor padded_sharded_size = fsdp_param.padded_sharded_param_size if local_tensor.size() != padded_sharded_size:
2.41.0
d8b903d95394e4a1023a2c4b8e50aadff7cdec2
Fri, 19 Apr 2024 13:41:49 -0700
[PATCH 0416/1000] [PyTorch] Remove ArrayRefTensor::numel_ (#124516)
ArrayRefTensor::numel_ is redundant with the size of the contained MiniArrayRef. Reclaiming the space entirely would break ABI compatibility, but at least we have 4-8 bytes for future expansion. Differential Revision: [D56366829](https://our.internmc.facebook.com/intern/diff/D56366829/) **NOTE FOR REVIEWERS**: This PR has internal Meta-specific changes or comments, please review them on [Phabricator](https://our.internmc.facebook.com/intern/diff/D56366829/)! Pull Request resolved: https://github.com/pytorch/pytorch/pull/124516 Approved by: https://github.com/chenyang78, https://github.com/desertfire
diff --git a/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h b/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h index a864dbf45b..436ed3f01f 100644 --- a/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h +++ b/torch/csrc/inductor/aoti_runtime/arrayref_tensor.h @@ -154,6 +154,10 @@ class MiniArrayRef final { using MiniIntArrayRef = MiniArrayRef<int64_t>; +static_assert( + sizeof(MiniIntArrayRef) == sizeof(void*) + sizeof(size_t), + "changing the size of MiniArrayRef breaks ABI compatibility!"); + inline bool is_contiguous_strides_for_shape( int64_t ndim, const int64_t* strides_ptr, @@ -189,8 +193,7 @@ class ArrayRefTensor { sizes_(sizes), strides_(strides), device_type_(device_type), - device_idx_(device_idx), - numel_(arr.size()) { + device_idx_(device_idx) { assert(sizes.size() == strides.size()); assert(is_contiguous_strides_for_shape( sizes.size(), strides.data(), sizes.data())); @@ -242,7 +245,7 @@ class ArrayRefTensor { } auto numel() const { - return numel_; + return arrayRef_.size(); } void set_arrayref(MiniArrayRef<T> new_arrayref) { @@ -257,9 +260,17 @@ class ArrayRefTensor { MiniArrayRef<const int64_t> strides_; int32_t device_type_ = 0; int32_t device_idx_ = 0; - int32_t numel_ = 0; + // We continue to zero-initialize this field in case we repurpose + // the space later; having predictable contents can only help. + int32_t unusedDoNotRemoveForABICompatibility_ = 0; }; +static_assert( + sizeof(ArrayRefTensor<int>) == + 3 * sizeof(MiniIntArrayRef) + 3 * sizeof(int32_t) + + (alignof(ArrayRefTensor<int>) > 4 ? sizeof(int32_t) : 0), + "changing the size of ArrayRefTensor breaks ABI compatibility!"); + inline AtenTensorHandle reinterpret_tensor_wrapper( AtenTensorHandle self, int64_t ndim,
2.41.0
7f64197f34a56acafdafa5975e26d07ec86d043
Sat, 20 Apr 2024 04:45:12 +0000
[PATCH 0418/1000] Reduce warning msg in torch.profiler (#124469)
Summary: This is actually quite noisy and my logs are full of this soft assertion msg. Maybe make it log once? Test Plan: On the AMD GPU side, I got a lot of these warnings: ``` W0415 01:40:45.109864 917160 collection.cpp:602] Warning: Memcpy ? (? -> ?) (function operator()) ``` So just suppress the excessive logs. Reviewed By: aaronenyeshi, yoyoyocmu Differential Revision: D55602788 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124469 Approved by: https://github.com/aaronenyeshi
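The actual change below is to the C++ TORCH_WARN_ONCE macro; as a rough Python analogue of the "warn once" idea (the helper name and message are made up):

```
import warnings

_already_warned = set()

def warn_once(msg: str) -> None:
    # Emit each distinct message at most once instead of flooding the logs.
    if msg not in _already_warned:
        _already_warned.add(msg)
        warnings.warn(msg)

for _ in range(1000):
    warn_once("Memcpy kind could not be determined")  # printed a single time
```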
diff --git a/torch/csrc/profiler/util.h b/torch/csrc/profiler/util.h index df141dcb1e..e27d408441 100644 --- a/torch/csrc/profiler/util.h +++ b/torch/csrc/profiler/util.h @@ -27,7 +27,7 @@ if (torch::profiler::impl::softAssertRaises()) { \ TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__); \ } else { \ - TORCH_WARN(__VA_ARGS__); \ + TORCH_WARN_ONCE(__VA_ARGS__); \ } \ return false; \ } \
2.41.0
5a4ba225749e2f18acc07d94005e6869dee906c
Thu, 18 Apr 2024 11:21:50 -0700
[PATCH 0419/1000] [inductor] consider pointwise nodes when deciding reduction hint (#124131)
In certain **rare** scenarios, inductor can generate a reduction kernel with really bad perf. E.g., if
- the reduction kernel contains a reduction node followed by a pointwise node,
- the pointwise node uses a transposed layout,
- the reduction node is an inner reduction,
- and rnumel <= 1024,

then inductor will generate a persistent reduction kernel, and it causes really bad perf when doing tl.store for the pointwise node, since we use a very skinny tile `(XBLOCK=1, RBLOCK=next_power_of_2(rnumel))`.

I've tried a few versions of the fix.
- The first version: if any pointwise node in a reduction kernel uses a non-contiguous dependency, we use ReductionHint.DEFAULT. This caused an 8s compilation time increase for huggingface with no perf wins, because ReductionHint.DEFAULT does more autotuning.
- I then changed the code to be more specific: we change the hint from INNER to DEFAULT only if we are sure that the pointwise node uses a >1 stride for the lowest dimension. Kernels meeting this condition should mostly have really bad perf anyway.

The situation mentioned above is rare, but it was reported by internal users. I'll also run one more perf test. Testing script: https://gist.github.com/shunting314/9d3389891fa43633b49b8b7564ad6d8b . Something equivalent is also added as a unit test. For this specific test from user reports, we improve the mentioned reduction kernel's perf by **4.14x** (451us -> 109us).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124131
Approved by: https://github.com/jansel
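A minimal sketch of the kind of pattern described above, assuming a CUDA build with Triton: an inner reduction (rnumel = 1024) feeding a pointwise op whose output is forced into a transposed (column-major) layout, so the store has a >1 stride in the last dimension. The shapes loosely mirror the unit test added in the diff below; this is illustrative, not the exact benchmark.

```
import torch

@torch.compile(dynamic=False)
def f(x):
    # inner reduction with rnumel <= 1024 ...
    amax = x.amax(dim=1, keepdim=True)
    # ... followed by a pointwise op whose result is materialized in a
    # transposed (column-major) layout, so the final strides are (1, 26624)
    # and the last dimension has a >1 stride
    return (x / amax).t().contiguous().t()

x = torch.randn(26624, 1024, device="cuda", dtype=torch.bfloat16)
f(x)
```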
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py index ba1da01427..3000be0f88 100644 --- a/test/inductor/test_torchinductor.py +++ b/test/inductor/test_torchinductor.py @@ -82,6 +82,8 @@ from torch.utils._pytree import tree_flatten, tree_unflatten from torch.utils._triton import has_triton from torch.utils.weak import WeakTensorKeyDictionary +DO_PERF_TEST = os.environ.get("DO_PERF_TEST") == "1" + if IS_WINDOWS and IS_CI: sys.stderr.write( "Windows CI does not have necessary dependencies for test_torchinductor yet\n" @@ -10332,6 +10334,69 @@ if HAS_GPU and RUN_GPU and not TEST_WITH_ASAN: seq_nr_set.add(int(res.group(1))) self.assertTrue(bwd_seq_nr_set.issubset(fwd_seq_nr_set)) + @config.patch( + { + "coordinate_descent_tuning": True, + "triton.unique_kernel_names": True, + "benchmark_kernel": True, + } + ) + @skipIfRocm + @unittest.skipIf( + torch.cuda.get_device_capability() < (9, 0), + "Triton does not support fp8 on A100", + ) + def test_red_followed_by_transposed_pointwise(self): + bs = 26624 + dim = 1024 + + @torch.compile(dynamic=False) + def f(in1, in2, a, b): + out = torch.nn.functional.silu(in1) * in2 + out_row = (out / out.amax(dim=1, keepdim=True)).to(torch.float8_e4m3fn) + out_col = (out / out.amax(dim=0, keepdim=True)).to(torch.float8_e4m3fn) + + # setup strides for _scaled_mm + out_row = out_row.contiguous() + out_col = out_col.t().contiguous().t() + + return ( + torch._scaled_mm(out_row, a, out_dtype=torch.bfloat16)[0], + torch._scaled_mm(b, out_col, out_dtype=torch.bfloat16)[0], + ) + + in1 = torch.randn((bs, dim), dtype=torch.bfloat16, device=GPU_TYPE) + in2 = torch.randn((bs, dim), dtype=torch.bfloat16, device=GPU_TYPE) + a = ( + torch.randn((dim, dim), dtype=torch.bfloat16, device=GPU_TYPE) + .t() + .to(torch.float8_e4m3fn) + ) + b = torch.randn((dim, bs), dtype=torch.bfloat16, device=GPU_TYPE).to( + torch.float8_e4m3fn + ) + + # warmup + _, (wrapper,) = run_and_get_code(f, in1, in2, a, b) + + # Previously indcutor decide reduction hint for a reduction kernel without considering + # the pointwise nodes. That will cause the third reduction kernel in this wrapper to be a + # persistent inner reduction and cause bad perf. + # + # We fix that by making the third reduction a non-persistent reduction + # and improve the perf by 4.14x (451us -> 109us) + self.assertEqual(3, wrapper.count("def triton_red_")) + self.assertEqual(0, wrapper.count("def triton_per_")) + + if DO_PERF_TEST: + with torch.profiler.profile( + activities=[torch.profiler.ProfilerActivity.CUDA] + ) as p: + for _ in range(1000): + f(in1, in2, a, b) + + print(p.key_averages().table(max_name_column_width=200)) + class RNNTest(TestCase): class Model(torch.nn.Module): def __init__(self): diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index 045ceb5030..cf51b70c16 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -3398,6 +3398,29 @@ class TritonScheduling(BaseScheduling): return "tl.int32" return "tl.int64" + def has_non_contiguous_pw_in_reduction_kernel(self, node_schedule, numel, rnumel): + pointwise_nodes = list( + filter( + lambda n: n not in (EnableReduction, DisableReduction) + and not n.is_reduction() + and n.group[1][0] == numel * rnumel, + node_schedule, + ) + ) + for node in pointwise_nodes: + # An index can be an integer when loading a random seed. 
+ if not all( + not isinstance(dep, MemoryDep) + or dep.is_contiguous() + or isinstance(dep.index, (sympy.Integer, int)) + or dep.stride1_for_last_dim() + for dep in itertools.chain( + node.read_writes.reads, node.read_writes.writes + ) + ): + return True + return False + def get_kernel_args(self, node_schedule, numel, reduction_numel): reductions = list( filter( @@ -3412,6 +3435,14 @@ class TritonScheduling(BaseScheduling): reduction_hint_val = hints[0] else: reduction_hint_val = ReductionHint.DEFAULT + + if ( + reduction_hint_val == ReductionHint.INNER + and self.has_non_contiguous_pw_in_reduction_kernel( + node_schedule, numel, reduction_numel + ) + ): + reduction_hint_val = ReductionHint.DEFAULT else: reduction_hint_val = ReductionHint.DEFAULT @@ -3456,9 +3487,11 @@ class TritonScheduling(BaseScheduling): from torch._inductor.codegen.triton_split_scan import TritonSplitScanKernel tiled_groups = self.select_tiling(node_schedule, numel, reduction_numel) - reduction_hint_val, mutations, index_dtype = self.get_kernel_args( - node_schedule, numel, reduction_numel - ) + ( + reduction_hint_val, + mutations, + index_dtype, + ) = self.get_kernel_args(node_schedule, numel, reduction_numel) is_split_scan = any( isinstance(node, BaseSchedulerNode) and node.is_split_scan() diff --git a/torch/_inductor/dependencies.py b/torch/_inductor/dependencies.py index 0ca62ad764..2d89636346 100644 --- a/torch/_inductor/dependencies.py +++ b/torch/_inductor/dependencies.py @@ -71,6 +71,35 @@ class MemoryDep(typing.NamedTuple): def is_contiguous(self) -> bool: return isinstance(self.index, sympy.Symbol) and self.index in self.var_names + def stride1_for_last_dim(self, result_for_complex_expression=True) -> bool: + """ + Whether the stride for the last dimension is 1. + """ + # python test/inductor/test_torchinductor_opinfo.py -k test_comprehensive_masked_scatter_cuda_float16 + # will exercise thru this corner case. + if len(self.var_names) == 0: + return True + + terms = self.index.args if isinstance(self.index, sympy.Add) else [self.index] + + last_sym = self.var_names[-1] + for term in terms: + if term is last_sym: + return True + + # Having a >1 stride for the last dimension is bad for perf + # return False. + if ( + isinstance(term, sympy.Mul) + and len(term.args) == 2 + and term.args[1] is last_sym + and isinstance(term.args[0], (int, sympy.Integer)) + and term.args[0] > 1 + ): + return False + + return result_for_complex_expression + def is_scalar(self) -> bool: if isinstance(self.index, sympy.Symbol): return self.index not in self.var_names and not self.is_indirect() diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index 53319bd2dd..10d452d5fd 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -1558,3 +1558,33 @@ def use_scatter_fallback( or (reduction_type == reduce_ty and self_dtype in {torch.bool, torch.int64}) or torch.are_deterministic_algorithms_enabled() ) + + +def dump_node_schedule(node_schedule): + """ + An API that can be used in pdb to dump a node_schedule. + Right mainly dump the read/write dependencies but can add more as needed. 
+ """ + from torch._inductor.codegen.triton import DisableReduction, EnableReduction + from torch._inductor.scheduler import SchedulerNode + + print(f"Node schedule with {len(node_schedule)} nodes") + for idx, node in enumerate(node_schedule): + print(f" {idx:3}:") + if node is EnableReduction: + print("enable reduction") + elif node is DisableReduction: + print("disable reduction") + elif isinstance(node, SchedulerNode): + is_red = node.is_reduction() + print(f"{'red' if is_red else 'pw'} scheduler node") + if is_red: + print(f"original reduction hint {node.node.data.reduction_hint}") # type: ignore[attr-defined] + print("ReadDep:") + for dep in node.read_writes.reads: + print(dep) + print("WriteDep:") + for dep in node.read_writes.writes: + print(dep) + else: + raise RuntimeError(f"Unrecognized node type: {type(node)}")
2.41.0
1cbaf1764239113ff8b120d87152f3fe67f824d
Sat, 20 Apr 2024 05:45:53 +0000
[PATCH 0421/1000] Adds LSE output for templated-attention-hop if inputs require grad (#124308)
Adds LSE output for templated-attention-hop if inputs require grad

Prep PR for adding autograd support to templated-attention-hop. The kernel needs to output the LSE during the forward which will be used during backwards.

### Output code

https://gist.github.com/drisspg/2aea3ce5db75811e7e143eeecb774d8a

## Before

| Type | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod | dtype |
|---------|-----------|--------------|-------------|-------------|-------------|------------|---------------|----------------|
| Average | 1.159 | | | | | | | |
| Max | 1.342 | 16 | 16 | 512 | 512 | 64 | noop | torch.bfloat16 |
| Min | 1.016 | 1 | 16 | 512 | 512 | 64 | relative_bias | torch.bfloat16 |

## After

| Type | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod | dtype |
|---------|-----------|--------------|-------------|-------------|-------------|------------|-------------|----------------|
| Average | 1.155 | | | | | | | |
| Max | 1.339 | 16 | 16 | 512 | 512 | 64 | noop | torch.bfloat16 |
| Min | 1.009 | 1 | 16 | 512 | 512 | 64 | head_bias | torch.bfloat16 |

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124308
Approved by: https://github.com/Chillee
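The new correctness test (in the diff below) documents that the kernel stores the logsumexp in base 2 for efficiency, while the eager math path computes it in base e. A small standalone check of the change-of-base identity it relies on, log2(sum_i 2^(log2(e) * s_i)) = log_e(sum_i e^(s_i)) * log2(e); the shapes here are arbitrary:

```
import torch

scores = torch.randn(4, 8, 128, 128, dtype=torch.float64)
log2_e = torch.log2(torch.tensor(torch.e, dtype=torch.float64))

lse_e = scores.logsumexp(dim=-1)                               # base-e LSE (reference path)
lse_2 = torch.log2(torch.sum(2 ** (log2_e * scores), dim=-1))  # base-2 LSE (kernel-style)

torch.testing.assert_close(lse_2, lse_e * log2_e)
```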
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py index 5c2606d81c..b906689af9 100644 --- a/test/inductor/test_templated_attention.py +++ b/test/inductor/test_templated_attention.py @@ -8,8 +8,13 @@ from unittest import expectedFailure, skipUnless from unittest.mock import patch import torch +from torch._higher_order_ops.templated_attention import ( + templated_attention as templated_attention_hop, +) from torch._inductor.test_case import TestCase as InductorTestCase +from torch._inductor.utils import run_and_get_code from torch.nn.attention._templated_attention import _compose, _templated_attention +from torch.testing import FileCheck from torch.testing._internal import common_utils from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_BF16 from torch.utils._triton import has_triton @@ -44,6 +49,10 @@ def _identity_mod(score, b, h, m, n): return score +def _causal_mod(score, b, h, token_q, token_kv): + return torch.where(token_q >= token_kv, score, float("-inf")) + + class TestTemplatedSDPA(InductorTestCase): def run_test(self, score_mod: Callable, dtype: torch.dtype = torch.float16): sdpa_partial = create_attention(score_mod) @@ -66,7 +75,7 @@ class TestTemplatedSDPA(InductorTestCase): else: fudge_factor = 1.1 if compiled_error > ref_error * fudge_factor: - msg = f"Compiled error {compiled_error} is greater than ref error {ref_error} by more than 10%." + msg = f"Compiled error {compiled_error} is greater than ref error {ref_error} by more than {fudge_factor}X." self.assertTrue(False, msg) @supported_platform @@ -192,6 +201,96 @@ class TestTemplatedSDPA(InductorTestCase): self.run_test(score_mod) + @supported_platform + @common_utils.parametrize("dtype", test_dtypes) + @common_utils.parametrize("score_mod", [_identity_mod, _causal_mod]) + def test_logsumexp_correctness(self, dtype, score_mod): + @torch.compile + def sdpa_hop(q, k, v, score_mod): + return templated_attention_hop(q, k, v, score_mod) + + make_tensor = functools.partial( + torch.randn, + (4, 8, 2048, 64), + dtype=dtype, + device="cuda", + requires_grad=True, + ) + q, k, v = make_tensor(), make_tensor(), make_tensor() + + ref_out, ref_lse = templated_attention_hop( + q.to(torch.float64), k.to(torch.float64), v.to(torch.float64), score_mod + ) + compiled_out, compiled_lse = sdpa_hop(q, k, v, score_mod) + + # Comparing LSE for the ref and the compiled version + # The compiled uses a change of base trick to more efficiently compute the LSE + # this means that the base for the LSE computed by ref is e while for the compiled + # version it is 2. 
To compare we use the change of base formula + # log_2(x_compiled) = log_e(x_ref) * log_2(e) where + # x_ref = ∑_i e^(scores[i]) + # x_compiled = ∑_i 2^(log2(e) * scores[i]) + + self.assertTrue(ref_lse.dtype == torch.float32) + self.assertTrue(compiled_lse.dtype == torch.float32) + ref_lse = ref_lse * torch.log2(torch.tensor(torch.e)) + + tolerance = Tolerances(atol=2e-2, rtol=2e-2) + torch.testing.assert_close( + ref_out.to(dtype=torch.float32), + compiled_out.to(dtype=torch.float32), + atol=tolerance.atol, + rtol=tolerance.rtol, + ) + torch.testing.assert_close( + ref_lse.to(dtype=torch.float32), + compiled_lse.to(dtype=torch.float32), + atol=tolerance.atol, + rtol=tolerance.rtol, + ) + + @supported_platform + def test_logsumexp_only_return(self): + make_tensor = functools.partial( + torch.randn, + (4, 8, 2048, 64), + dtype=torch.float32, + device="cuda", + requires_grad=True, + ) + q, k, v = make_tensor(), make_tensor(), make_tensor() + + @torch.compile + def func(q, k, v, score_mod): + _, lse = templated_attention_hop(q, k, v, score_mod) + lse_2 = lse * 2 + return lse_2 + + _, code = run_and_get_code(func, q, k, v, _identity_mod) + # Ensure that two kernels are generated + FileCheck().check_count(".run(", 2, True).run(code[0]) + + @supported_platform + def test_logsumexp_is_not_fused(self): + make_tensor = functools.partial( + torch.randn, + (4, 8, 2048, 64), + dtype=torch.float32, + device="cuda", + requires_grad=True, + ) + q, k, v = make_tensor(), make_tensor(), make_tensor() + + @torch.compile + def func(q, k, v, score_mod): + out, lse = templated_attention_hop(q, k, v, score_mod) + lse_2 = lse * 2 + return out, lse_2 + + _, code = run_and_get_code(func, q, k, v, _identity_mod) + # Ensure that two kernels are generated + FileCheck().check_count(".run(", 2, True).run(code[0]) + common_utils.instantiate_parametrized_tests(TestTemplatedSDPA) diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py index e112e7bc7d..b750501f1f 100644 --- a/torch/_dynamo/variables/higher_order_ops.py +++ b/torch/_dynamo/variables/higher_order_ops.py @@ -1366,7 +1366,12 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable): def create_scalar(): return query.call_method( - tx, "new_empty", (SourcelessBuilder.create(tx, []),), {} + tx, + "new_empty", + (SourcelessBuilder.create(tx, []),), + { + "dtype": SourcelessBuilder.create(tx, torch.int32), + }, ) bhmn = [create_scalar() for _ in range(4)] @@ -1400,13 +1405,8 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable): lifted_args = tuple(arg for arg in body_lifted_freevars.keys()) proxy_args = (body_node,) + lifted_args - example_value = pytree.tree_map_only( - torch.fx.Proxy, - lambda a: a.node.meta["example_value"], - body_output.as_proxy(), - ) - return proxy_args, {}, example_value + return proxy_args, {} def call_function( self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]" @@ -1417,13 +1417,22 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable): args, kwargs ) - p_args, p_kwargs, example_value = self.create_wrapped_node(tx, query, score_mod) + p_args, p_kwargs = self.create_wrapped_node(tx, query, score_mod) proxied_args = [query, key, value, *other_buffers] # Store the invocation as a call # Norm_kwargs contains the score_function and we dont want to proxy this because # Proxying user defined functions is not supported. inp_args, _ = proxy_args_kwargs(proxied_args, {}) + + # Why is this here? 
Unlike other HOPs, the subgrpah's output for this hop is unrelated + # to what the overall HOP returns, we create the correct output proxy by calling the + # hop (self.value) with the example values. + with torch._guards.TracingContext.try_get().fake_mode: + example_args = pytree.tree_map_only( + torch.fx.Proxy, lambda a: a.node.meta["example_value"], inp_args + ) + example_value = self.value(*example_args, score_mod) return wrap_fx_proxy( tx=tx, proxy=tx.output.create_proxy( diff --git a/torch/_higher_order_ops/templated_attention.py b/torch/_higher_order_ops/templated_attention.py index d34767093e..09e10754fe 100644 --- a/torch/_higher_order_ops/templated_attention.py +++ b/torch/_higher_order_ops/templated_attention.py @@ -44,7 +44,7 @@ def math_attention( value: torch.Tensor, score_mod: Callable, *other_buffers: torch.Tensor, -): +) -> Tuple[torch.Tensor, torch.Tensor]: """Eager implementation This implementation uses vmap to vectorize the score_mod function over the batch, head, m, and n dimensions. @@ -73,10 +73,15 @@ def math_attention( score_mod = torch.vmap(score_mod, in_dims=(0, None, 0, None, None) + in_dim_buffers) score_mod = torch.vmap(score_mod, in_dims=(0, 0, None, None, None) + in_dim_buffers) - scores = score_mod(scores, b, h, m, n, *other_buffers) + scores = score_mod(scores, b, h, m, n, *other_buffers).to(torch.float32) + + # TODO Unconditionally return logsumexp for backwards + # if any(t.requires_grad for t in (query, key, value)): + logsumexp = scores.logsumexp(dim=-1) scores = scores.softmax(dim=-1) - return scores @ value + + return scores.to(query.dtype) @ value, logsumexp @templated_attention.py_impl(DispatchKey.CompositeExplicitAutograd) @@ -86,8 +91,10 @@ def sdpa_dense( value: torch.Tensor, score_mod: Callable, *other_buffers: torch.Tensor, -): - return math_attention(query, key, value, score_mod, *other_buffers).contiguous() +) -> Tuple[torch.Tensor, torch.Tensor]: + out, lse = math_attention(query, key, value, score_mod, *other_buffers) + out = out.contiguous() + return out, lse # TODO We need to implement an autograd function for this, there is some complexity to do this generically @@ -103,7 +110,7 @@ def trace_templated_attention( value: torch.Tensor, score_mod: Callable, *other_buffers: torch.Tensor, -): +) -> Tuple[torch.Tensor, torch.Tensor]: """Traces the templated_attention operator with the given score_mod function and other_buffers. Trace SDPA will call make_fx with "fake" example vals and then trace the score_mod function @@ -135,7 +142,7 @@ def templated_attention_proxy_torch_dispatch_mode( value: torch.Tensor, score_mod: Callable, *other_buffers: torch.Tensor, -): +) -> Tuple[torch.Tensor, torch.Tensor]: assert mode is not None, "Mode should always be enabled for python fallback key" if mode.enable_tracing: return trace_templated_attention( @@ -153,7 +160,7 @@ def templated_attention_functionalize( value: torch.Tensor, score_mod: Callable, *other_buffers: torch.Tensor, -): +) -> Tuple[torch.Tensor, torch.Tensor]: """Defines the functionalization rules for the templated_attention operator. 
Write now we are unwrapping each tensor and then redispatching to the next, however we want to @@ -193,7 +200,7 @@ def templated_attention_functionalize( functional_score_mod, *other_buffers_unwrapped, ) - return ctx.wrap_tensors(out) + return ctx.wrap_tensors(out) # type: ignore[return-value] @templated_attention.py_impl(FakeTensorMode) @@ -204,6 +211,10 @@ def templated_attention_fake_tensor_mode( value: torch.Tensor, score_mod: Callable, *other_buffers: Tuple[torch.Tensor, ...], -) -> torch.Tensor: +) -> Tuple[torch.Tensor, torch.Tensor]: with mode: - return torch.empty_like(query, memory_format=torch.contiguous_format) + batch_size, num_heads, seq_len_q, _ = query.shape + logsumexp = query.new_empty( + batch_size, num_heads, seq_len_q, dtype=torch.float32 + ) + return torch.empty_like(query, memory_format=torch.contiguous_format), logsumexp diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py index 4cfb582e22..973506ec16 100644 --- a/torch/_inductor/ir.py +++ b/torch/_inductor/ir.py @@ -3634,9 +3634,34 @@ class TemplateBuffer(Buffer): class TritonTemplateBuffer(TemplateBuffer): - def __init__(self, layout, inputs, make_kernel_render, debug_extra=None): + def __init__( + self, + layout, + inputs, + make_kernel_render, + debug_extra=None, + mutated_inputs: Optional[Iterable[IRNode]] = None, + ): + """ + NOTE:[TritonTemplates with multiple outputs] + We want the ability for TritonTemplates to output multiple tensors. Triton + kernels have no notion of outputs and this is done by creating tensors that + are then mutated by the kernel. Currenlty our STORE_OUTPUT codegen doesn't + support creating multinode outputs for triton templates. + We work around this by creating an extra input buffer during the lowering + and we mark them as mutated inputs. + """ super().__init__(layout, inputs, make_kernel_render) self.debug_extra = debug_extra + self.mutated_inputs = mutated_inputs + if mutated_inputs is not None: + # Ensure that the mutated inputs are only allowed for certain nodes + allowed_set = {"templated_attention"} + current_node = str(V.graph.current_node) + assert ( + current_node in allowed_set + ), f"Mutated inputs are only allowed for {allowed_set} but got {current_node}" + mark_node_as_mutating(self, *mutated_inputs) def __str__(self): out = f"TritonTemplateBuffer(layout={self.layout}, {self.debug_extra})" @@ -4668,13 +4693,15 @@ class UserDefinedTritonKernel(ExternKernel): return [i.get_name() for i in self.inputs] -def mark_node_as_mutating(cur_buffer, *mutated_ops): +def mark_node_as_mutating(cur_buffer, *mutated_ops: IRNode): """ Allows ops in mutated_ops to be marked as being mutated as well as indicates to the scheduler that these ops depend on cur_buffer. 
""" for op in mutated_ops: - assert isinstance(op, IRNode), op + assert isinstance( + op, IRNode + ), f"{op} op is type {type(op)} and is not an IRNode" V.graph.mark_buffer_mutated(op.get_name()) assert hasattr(op, "layout") MutationOutput(op.layout, op, cur_buffer) diff --git a/torch/_inductor/kernel/templated_attention.py b/torch/_inductor/kernel/templated_attention.py index 6153c63a0f..7942a367e2 100644 --- a/torch/_inductor/kernel/templated_attention.py +++ b/torch/_inductor/kernel/templated_attention.py @@ -3,7 +3,7 @@ import logging from typing import Any, List import torch -from ..lowering import lowerings, register_lowering +from ..lowering import empty_strided, lowerings, register_lowering from ..select_algorithm import autotune_select_algorithm, TritonTemplate log = logging.getLogger(__name__) @@ -25,7 +25,7 @@ sdpa_template = TritonTemplate( name="sdpa", grid=sdpa_grid, source=r""" -{{def_kernel("Q", "K", "V")}} +{{def_kernel("Q", "K", "V", "LSE")}} # Sub notation for this kernel: # Q: Query, K: Key, V: Value # M: Number of queries, N: Number of keys/values, D: Model dimension @@ -37,6 +37,7 @@ sdpa_template = TritonTemplate( # change of base out of the loop # ROWS_GUARANTEED_SAFE: Is it guaranteed that at least one value in each row # is not masked out? If so, we can skip an extra safety check + # OUTPUT_LOGSUMEXP: We only need to store the logsumexp if we require grad # Define Q Strides stride_qz = {{stride("Q", 0)}} @@ -129,11 +130,11 @@ sdpa_template = TritonTemplate( # -- compute scaling constant --- row_max = tl.max(qk, 1) m_i_new = tl.maximum(m_i, row_max) - masked_out_rows = (m_i_new == float("-inf")) alpha = tl.math.exp2(m_i - m_i_new) p = tl.math.exp2(qk - m_i_new[:, None]) if not ROWS_GUARANTEED_SAFE: + masked_out_rows = (m_i_new == float("-inf")) alpha = tl.where(masked_out_rows, 0, alpha) p = tl.where(masked_out_rows[:, None], 0, p) @@ -149,19 +150,22 @@ sdpa_template = TritonTemplate( K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) - # write back l and m + # Store output and logsumexp acc = acc / l_i[:, None] - # TODO For backward support we need to add the Logsumexp - # l_ptrs = L + off_hz * N_CTX + offs_m - # tl.store(l_ptrs, m_i + tl.math.log2(l_i)) - idx_z = tl.program_id(1) // H idx_h = tl.program_id(1) % H idx_m = offs_m[:, None] idx_d = tl.arange(0, BLOCK_DMODEL)[None, :] + # TODO generalize and add proper mask support mask = (idx_m != -1) & (idx_d != -1) {{store_output(("idx_z", "idx_h", "idx_m", "idx_d"), "acc")}} + + # TODO dont want to write this if we dont require grad + if OUTPUT_LOGSUMEXP: + l_ptrs = LSE + off_hz * N_CTX + offs_m + lse = m_i + tl.math.log2(l_i) + tl.store(l_ptrs, lse) """, ) @@ -204,10 +208,10 @@ def templated_attention(*args, **kwargs): create_placeholder(name, dtype) for name, dtype in [ ("score", query.get_dtype()), - ("b", torch.int64), - ("h", torch.int64), - ("m", torch.int64), - ("n", torch.int64), + ("b", torch.int32), + ("h", torch.int32), + ("m", torch.int32), + ("n", torch.int32), ] ] for node in subgraph.graph_module.graph.nodes: @@ -239,7 +243,7 @@ def templated_attention(*args, **kwargs): "The output node for the templated attention subgraph must be a StorageBox, but got: ", type(output_buffer), ) - # Create the ComputedBuffere directly that will be inlined into the modfication block + # Create the ComputedBuffer directly that will be inlined into the modification block subgraph_buffer = ComputedBuffer( name=None, layout=FlexibleLayout( @@ -256,6 +260,14 @@ def 
templated_attention(*args, **kwargs): query.get_size(), make_contiguous_strides_for(query.get_size()), ) + # see NOTE:[TritonTemplates with multiple outputs] + logsumexp_shape = query.get_size()[:-1] # [B, H, M] + logsumexp = empty_strided( + logsumexp_shape, + None, + dtype=torch.float32, # The logsumexp is always stored in fp32 regardless of the input dtype + device=output_buffer.get_device(), + ) choices: List[Any] = [] configs: List[Any] = [] if query.get_dtype() == torch.float32: @@ -270,9 +282,12 @@ def templated_attention(*args, **kwargs): for BLOCK_M, BLOCK_N, num_warps, num_stages in configs: sdpa_template.maybe_append_choice( choices=choices, - input_nodes=(query, key, value), + input_nodes=(query, key, value, logsumexp), layout=layout, subgraphs=subgraph_buffer, + mutated_inputs=[ + logsumexp, + ], num_stages=num_stages, num_warps=num_warps, BLOCK_M=BLOCK_M, @@ -281,8 +296,12 @@ def templated_attention(*args, **kwargs): # For now, we always assume the "sound" option SCORE_MOD_IS_LINEAR=False, ROWS_GUARANTEED_SAFE=False, + OUTPUT_LOGSUMEXP=True, ) - return autotune_select_algorithm( - "sdpa", choices, [query, key, value], layout + return ( + autotune_select_algorithm( + "sdpa", choices, [query, key, value, logsumexp], layout + ), + logsumexp, ) raise ValueError("TemplatedAttention was passed a subgraph with no output node!") diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index b5a7b9071d..59bbf05da7 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -517,8 +517,25 @@ class TritonTemplate(KernelTemplate): suffix_args=0, epilogue_fn=identity, subgraphs=None, + mutated_inputs=None, **kwargs, ): + """This function generates a TritonTemplateCaller + + Args: + input_nodes: List of input nodes + layout: Output layout + num_stages: Number of stages for triton launch + num_warps: Number of warps for triton launch + prefix_args: Number of input nodes to be passed as arguments + suffix_args: Number of input nodes to be passed as arguments + epilogue_fn: Optional epilogue function to be called on the output + subgraphs: Optional subgraphs to be passed as arguments, these will be inlined + into the triton template string + mutated_inputs: Optional list of input nodes that are mutated by the kernel, this is helpful + if you need to return multiple outputs. You can pass them as inputs and mark them as + being mutated by the kernel. 
+ """ assert self.template, "requires jinja2" defines = StringIO() for name, val in kwargs.items(): @@ -649,6 +666,7 @@ class TritonTemplate(KernelTemplate): "allow_tf32": str(kwargs.get("ALLOW_TF32", None)), "acc_type": str(kwargs.get("ACC_TYPE", None)), }, + mutated_inputs=mutated_inputs, ) @@ -719,6 +737,7 @@ class TritonTemplateCaller(ir.TritonTemplateCallerBase): log_info: Optional[ Dict[str, Union[PrimitiveInfoType, List[PrimitiveInfoType]]] ] = None, + mutated_inputs=None, ): super().__init__(name, input_nodes, layout) self.make_kernel_render = make_kernel_render @@ -735,6 +754,7 @@ class TritonTemplateCaller(ir.TritonTemplateCallerBase): "num_warps": self.bmreq.num_warps, } ) + self.mutated_inputs = mutated_inputs def benchmark(self, *args, out): assert self.bmreq is not None @@ -765,6 +785,7 @@ class TritonTemplateCaller(ir.TritonTemplateCallerBase): inputs=self.input_nodes, make_kernel_render=self.make_kernel_render, debug_extra=self.debug_extra, + mutated_inputs=self.mutated_inputs, ) ) diff --git a/torch/nn/attention/_templated_attention.py b/torch/nn/attention/_templated_attention.py index 5bf94b62be..0e614c8a4e 100644 --- a/torch/nn/attention/_templated_attention.py +++ b/torch/nn/attention/_templated_attention.py @@ -1,6 +1,6 @@ """This module implements the user facing API for templated attention in PyTorch.""" import functools -from typing import Callable +from typing import Callable, Tuple import torch from torch._higher_order_ops.templated_attention import ( @@ -31,7 +31,7 @@ def _templated_attention( key: torch.Tensor, value: torch.Tensor, score_mod: _score_mod_signature, -) -> torch.Tensor: +) -> Tuple[torch.Tensor, torch.Tensor]: r"""This function implements scaled dot product attention with an arbitrary attention score modification function. This function computes the scaled dot product attention between query, key, and value tensors with a user-defined @@ -86,4 +86,7 @@ def _templated_attention( raise ValueError( "NYI: The target sequence length (L) of the query tensor must match the source sequence length (S) of the key tensor." ) - return templated_attention_hop(query, key, value, score_mod) + out, _ = templated_attention_hop(query, key, value, score_mod) + + # Drop the logsumexp value since this is only needed for backwards + return out
2.41.0
a730698e2b5261b3515595a3c4a961299da10af
Fri, 19 Apr 2024 18:37:59 -0700
[PATCH 0422/1000] Enable dynamo-traced Adamax tests (#124540)
Enabling tests related to https://github.com/pytorch/pytorch/issues/121178 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124540 Approved by: https://github.com/yf225
diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py index f3663e98f0..18a08c99e2 100644 --- a/torch/testing/_internal/common_optimizers.py +++ b/torch/testing/_internal/common_optimizers.py @@ -1305,27 +1305,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_foreach_large_tensor", ), - DecorateInfo( - skipIfTorchDynamo( - "capturable path no longer called after hitting cache limit, see #121178" - ), - "TestOptimRenewed", - "test_save_load_equality_with_weights_only", - ), - DecorateInfo( - skipIfTorchDynamo( - "capturable path no longer called after hitting cache limit, see #121178" - ), - "TestOptimRenewed", - "test_load_nontensor_step", - ), - DecorateInfo( - skipIfTorchDynamo( - "capturable path no longer called after hitting cache limit, see #121178" - ), - "TestOptimRenewed", - "test_param_groups_lr", - ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support"
2.41.0
84f16016e3e8756d23f7312a2826c4303291664
Fri, 19 Apr 2024 18:38:00 -0700
[PATCH 0423/1000] Enable dynamo-traced deepcopy test for RMSprop (#124541)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124541 Approved by: https://github.com/yf225 ghstack dependencies: #124540
diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py index 18a08c99e2..eaa49d8170 100644 --- a/torch/testing/_internal/common_optimizers.py +++ b/torch/testing/_internal/common_optimizers.py @@ -1649,13 +1649,6 @@ optim_db: List[OptimizerInfo] = [ "test_mixed_device_dtype", active_if=TEST_WITH_TORCHDYNAMO, ), - DecorateInfo( - skipIfTorchDynamo( - "fails, https://github.com/pytorch/pytorch/issues/117165" - ), - "TestOptimRenewed", - "test_deepcopy_copies_all_public_attrs", - ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support"
2.41.0
d0b5b2655f39bca67ef80b926d5f237a8247272
Fri, 19 Apr 2024 18:38:00 -0700
[PATCH 0424/1000] Enable dynamo rosenbrock sparse tests (#124542)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124542 Approved by: https://github.com/yf225 ghstack dependencies: #124540, #124541
diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py index eaa49d8170..9533b22032 100644 --- a/torch/testing/_internal/common_optimizers.py +++ b/torch/testing/_internal/common_optimizers.py @@ -1138,13 +1138,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), - DecorateInfo( - skipIfTorchDynamo( - "Fails assertion of params close to params_c at all, see #123147" - ), - "TestOptimRenewed", - "test_rosenbrock_sparse", - ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" @@ -1772,13 +1765,6 @@ optim_db: List[OptimizerInfo] = [ "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), - DecorateInfo( - skipIfTorchDynamo( - "Fails assertion of params close to params_c at all, see #123147" - ), - "TestOptimRenewed", - "test_rosenbrock_sparse", - ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184"
2.41.0
7ccfad915fb291aecf0de4018d1c7558df8657f
Fri, 19 Apr 2024 17:37:45 +0000
[PATCH 0427/1000] Fix test_decomp test for ops with py_impl(CompositeImplicitAutograd) (#116832)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/116832 Approved by: https://github.com/lezcano
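The new run_without_python_dispatcher check in the diff below keys off whether the op under test (or any op it decomposed into) has a CompositeImplicitAutograd kernel. A quick standalone way to query that for a given overload; aten.linear is just an illustrative pick and the printed value may differ by build:

```
import torch
from torch._C import DispatchKey

op = torch.ops.aten.linear.default
print(op.has_kernel_for_dispatch_key(DispatchKey.CompositeImplicitAutograd))
```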
diff --git a/test/test_decomp.py b/test/test_decomp.py index 39d0c2eef2..1063e0d33e 100644 --- a/test/test_decomp.py +++ b/test/test_decomp.py @@ -144,7 +144,8 @@ def ref_vjp_no_create(f, *primals): def wrapped(cotangents): return _autograd_grad( - _as_tuple(result), primals, _as_tuple(cotangents), create_graph=False + _as_tuple(result), primals, _as_tuple(cotangents), create_graph=False, + retain_graph=True, ) return result, wrapped @@ -200,6 +201,12 @@ def op_assert_ref(test_case, op, test_dtype, i, orig, decomp, ref, args, kwargs) (torch.bfloat16, torch.ops.aten.nll_loss_forward.default): 1e-1, (torch.float16, torch.ops.aten.nll_loss2d_forward.default): 1e-2, (torch.bfloat16, torch.ops.aten.nll_loss2d_forward.default): 2e-1, + (torch.float16, torch.ops.aten.hardswish.default): 2e-7, + (torch.bfloat16, torch.ops.aten.hardswish.default): 2e-7, + (torch.float16, torch.ops.aten.multi_margin_loss.default): 3e-2, + (torch.bfloat16, torch.ops.aten.multi_margin_loss.default): 3e-2, + (torch.float16, torch.ops.aten.multilabel_margin_loss_forward.default): 3e-2, + (torch.bfloat16, torch.ops.aten.multilabel_margin_loss_forward.default): 3e-2, # see https://github.com/pytorch/pytorch/pull/96264 (torch.float16, torch.ops.aten.mv.default): 1e-5, } @@ -488,6 +495,11 @@ if not TEST_WITH_SLOW: skip('unsafe_split'), # slow: takes 49 sec on A100 }) +comprehensive_failures = { + xfail("nn.functional.interpolate", "bilinear", dtypes=(torch.uint8,)), # off by one error + xfail("nn.functional.interpolate", "bicubic", dtypes=(torch.uint8,)), # off by one error + xfail("nn.functional.upsample_bilinear", "", dtypes=(torch.uint8,)), # off by one error +} @unMarkDynamoStrictTest class TestDecomp(TestCase): @@ -524,6 +536,7 @@ class TestDecomp(TestCase): @unittest.skipIf(TEST_WITH_ASAN, "Skipped under ASAN") @onlyNativeDeviceTypes @skipIfCrossRef + @skipOps('TestDecomp', 'test_comprehensive', comprehensive_failures) @suppress_warnings @ops(op_db) def test_comprehensive(self, device, dtype, op): @@ -810,6 +823,12 @@ def forward(self, x_1, start_1): aten_name = op.decomp_aten_name or op.aten_name func = op.get_op() + + def run_without_python_dispatcher(mode): + return any(isinstance(op, torch._ops.OpOverload) and + op.has_kernel_for_dispatch_key(DispatchKey.CompositeImplicitAutograd) + for op in mode.decomposed.union([func])) + for sample_input in samples: if requires_grad: fn, primals = normalize_op_input_output(func, sample_input) @@ -824,6 +843,12 @@ def forward(self, x_1, start_1): with self.DecompCrossRefMode(self, self.precision, self.rel_tol, dtype, run_all)\ as mode, enable_python_dispatcher(): decomp_out, decomp_vjp_fn = ref_vjp_no_create(fn, *primals) + if run_without_python_dispatcher(mode): + # without this check, incorrect decomps at the python dispatcher level can still pass because + # they're checking aten decomps at the torch_dispatch level. + with self.DecompCrossRefMode(self, self.precision, self.rel_tol, dtype, run_all)\ + as mode: + decomp_out, decomp_vjp_fn = ref_vjp_no_create(fn, *primals) if aten_name in decomposition_names: self.check_decomposed(aten_name, mode) @@ -833,15 +858,31 @@ def forward(self, x_1, start_1): with self.DecompCrossRefMode(self, self.precision, self.rel_tol, dtype, run_all)\ as mode, enable_python_dispatcher(): decomp_vjp_fn(cotangents) + if run_without_python_dispatcher(mode): + # without this check, incorrect decomps at the python dispatcher level can still pass because + # they're checking aten decomps at the torch_dispatch level. 
+ with self.DecompCrossRefMode(self, self.precision, self.rel_tol, dtype, run_all)\ + as mode: + decomp_vjp_fn(cotangents) if not run_all: self.check_decomposed(op.aten_backward_name, mode) elif aten_name in decomposition_names or run_all: args = [sample_input.input] + list(sample_input.args) kwargs = sample_input.kwargs + # A failure here might be because the decomposition for the op is wrong or because a + # decomposition used by the particular op is wrong. with self.DecompCrossRefMode(self, self.precision, self.rel_tol, dtype, run_all)\ as mode, enable_python_dispatcher(): func(*args, **kwargs) + + if run_without_python_dispatcher(mode): + # without this check, incorrect decomps at the python dispatcher level can still pass because + # they're checking aten decomps at the torch_dispatch level. + with self.DecompCrossRefMode(self, self.precision, self.rel_tol, dtype, run_all)\ + as mode: + func(*args, **kwargs) + if not run_all: self.check_decomposed(aten_name, mode) else:
2.41.0
ebc4d8759e13c298f0a4fd9b19eecf03a6f356b
Fri, 19 Apr 2024 08:55:53 -0700
[PATCH 0428/1000] [dynamo][easy] forbid_in_graph check to use getattr_static (#124445)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124445 Approved by: https://github.com/yanboliang, https://github.com/jansel
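A quick illustration of why the forbidden-flag probe uses `inspect.getattr_static` rather than plain `getattr`: the static lookup reads the class and instance dictionaries without running `__getattr__` or descriptors. This is only a sketch of the general difference; the `Chatty` class is hypothetical and not part of the patch.
```
import inspect

class Chatty:
    # Hypothetical class whose __getattr__ runs user code (and here fails)
    # whenever a plain getattr() probes a missing attribute.
    def __getattr__(self, name):
        raise RuntimeError(f"side effect while looking up {name!r}")

obj = Chatty()

# getattr() with a default only swallows AttributeError, so the RuntimeError escapes:
try:
    getattr(obj, "_dynamo_forbidden", False)
except RuntimeError as e:
    print("getattr:", e)

# getattr_static() never invokes __getattr__ or descriptors and just returns the default:
print(inspect.getattr_static(obj, "_dynamo_forbidden", False))  # False
```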
diff --git a/test/dynamo_expected_failures/TestFakeSparsity.test_jit_trace b/test/dynamo_expected_failures/TestFakeSparsity.test_jit_trace deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_batch_norm_2d_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_batch_norm_2d_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_batch_norm_3d_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_batch_norm_3d_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_conv1d_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_conv1d_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_conv2d_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_conv2d_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_conv3d_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_conv3d_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_linear_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_linear_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_linear_lowp_cpu_float16 b/test/dynamo_expected_failures/TestMkldnnCPU.test_linear_lowp_cpu_float16 deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_prelu_bf16_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_prelu_bf16_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_prelu_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_prelu_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_reshape_blocked_format_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_reshape_blocked_format_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_resnet18_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_resnet18_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestMkldnnCPU.test_resnext50_32x4d_cpu b/test/dynamo_expected_failures/TestMkldnnCPU.test_resnext50_32x4d_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNN.test_ParameterDict_replication b/test/dynamo_expected_failures/TestNN.test_ParameterDict_replication deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNN.test_ParameterList_replication b/test/dynamo_expected_failures/TestNN.test_ParameterList_replication deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNNDeviceTypeCPU.test_triplet_margin_with_distance_loss_cpu b/test/dynamo_expected_failures/TestNNDeviceTypeCPU.test_triplet_margin_with_distance_loss_cpu deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestNNParametrization.test_deepcopy_after_parametrization_swap_False b/test/dynamo_expected_failures/TestNNParametrization.test_deepcopy_after_parametrization_swap_False deleted file mode 100644 index e69de29bb2..0000000000 diff --git 
a/test/dynamo_expected_failures/TestScript.test_nn_GRU b/test/dynamo_expected_failures/TestScript.test_nn_GRU deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestScript.test_nn_LSTM_with_layers b/test/dynamo_expected_failures/TestScript.test_nn_LSTM_with_layers deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/dynamo_expected_failures/TestScript.test_pack_unpack_state b/test/dynamo_expected_failures/TestScript.test_pack_unpack_state deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py index daeb8626c1..72a880ab48 100644 --- a/torch/_dynamo/trace_rules.py +++ b/torch/_dynamo/trace_rules.py @@ -3064,7 +3064,7 @@ def is_callable_disallowed(obj) -> bool: def is_forbidden(obj) -> bool: _maybe_init_lazy_module(obj) - return getattr(obj, "_dynamo_forbidden", False) + return inspect.getattr_static(obj, "_dynamo_forbidden", False) def is_builtin_callable(obj) -> bool: diff --git a/torch/_dynamo/variables/nn_module.py b/torch/_dynamo/variables/nn_module.py index 32ef830577..ffd591793a 100644 --- a/torch/_dynamo/variables/nn_module.py +++ b/torch/_dynamo/variables/nn_module.py @@ -791,7 +791,10 @@ class UnspecializedNNModuleVariable(UserDefinedObjectVariable): kwargs, ) - if id(method.__code__) in self._nn_module_method_ids(): + if ( + hasattr(method, "__code__") + and id(method.__code__) in self._nn_module_method_ids() + ): unimplemented(f"UnspecializedNNModuleVariable missing {name}") # "_parameters" in self.value.__dict__ checks that module is initialized
2.41.0
32eac345f5d7862246c397d0a32b5e0cbca77ff
Fri, 19 Apr 2024 18:02:04 -0700
[PATCH 0429/1000] [dynamo] Return gm.forward for eager backend (#124109)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124109 Approved by: https://github.com/yanboliang, https://github.com/jansel ghstack dependencies: #124445
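With this change the eager backend hands back a plain callable (the bound `forward` method) instead of the `GraphModule` itself. A minimal sketch of a user-defined backend doing the same thing; `record_and_run` and `f` are made-up names, while the `torch.compile(backend=...)` usage is the standard custom-backend API.
```
import torch

def record_and_run(gm: torch.fx.GraphModule, example_inputs):
    # A backend is any callable taking (GraphModule, example_inputs) and
    # returning a callable; returning gm.forward works as well as gm.
    print(gm.graph)
    return gm.forward

@torch.compile(backend=record_and_run)
def f(x):
    return torch.sin(x) + 1

f(torch.randn(4))
```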
diff --git a/torch/_dynamo/backends/debugging.py b/torch/_dynamo/backends/debugging.py
index a349a3dd63..03d3412946 100644
--- a/torch/_dynamo/backends/debugging.py
+++ b/torch/_dynamo/backends/debugging.py
@@ -21,7 +21,7 @@ This file contains TorchDynamo backends intended for debugging uses.

 @register_backend
 def eager(gm, fake_tensor_inputs):
-    return gm
+    return gm.forward


 @register_backend
diff --git a/torch/_dynamo/testing.py b/torch/_dynamo/testing.py
index 3b1b725fca..c115e1cc09 100644
--- a/torch/_dynamo/testing.py
+++ b/torch/_dynamo/testing.py
@@ -208,7 +208,7 @@ class EagerAndRecordGraphs:

     def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
         self.graphs.append(gm)
-        return gm
+        return gm.forward


 def strip_comment(code) -> str:
2.41.0
fa78ad08cc748c25e7e82cec02cec4c97c7d3af
Fri, 19 Apr 2024 15:38:37 -0400
[PATCH 0430/1000] Call writeline from writelines (#124515)
This makes it more convenient to add a breakpoint here. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124515 Approved by: https://github.com/albanD
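The point of the change is to give code emission a single choke point where a (conditional) breakpoint can observe every line. A generic sketch of the pattern, not the actual inductor `WrapperCodeGen` class:
```
class CodeBuffer:
    # Generic sketch: every emitted line funnels through writeline(), so a
    # breakpoint or logging hook placed there sees all output.
    def __init__(self):
        self.lines = []

    def writeline(self, line):
        # e.g. `if "alloc" in line: breakpoint()` could go here
        self.lines.append(line)

    def writelines(self, lines):
        for line in lines:
            self.writeline(line)
```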
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index 145721d50e..6f28ea7ea8 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -1345,7 +1345,7 @@ class WrapperCodeGen(CodeGen):

     def writelines(self, lines):
         for line in lines:
-            self.lines.append(line)
+            self.writeline(line)

     def enter_context(self, ctx):
         self.lines.append(LineContext(ctx))
2.41.0
6f88105ceaff0598c0c941c93e1744dee7865a2
Sat, 20 Apr 2024 08:32:47 +0000
[PATCH 0431/1000] Fix load_state_dict handling of an unexpected key whose prefix matches a valid key (#124385)
Fixes https://github.com/pytorch/pytorch/issues/123510 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124385 Approved by: https://github.com/mikaylagawarecki
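A small repro sketch of the fixed behaviour using only public APIs: a key such as `fc1.weight.bad_suffix` shares a prefix with the real parameter `fc1.weight` and should now be reported as unexpected instead of silently ignored. Assumes the fix is applied; the module and variable names are illustrative.
```
import torch

class MyModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(5, 10)

m = MyModule()
sd = m.state_dict()
# "fc1.weight" is a real parameter; this key merely shares its prefix
sd["fc1.weight.bad_suffix"] = torch.randn(5, 10)

# strict=False returns the NamedTuple of missing/unexpected keys
result = m.load_state_dict(sd, strict=False)
print(result.unexpected_keys)  # should contain 'fc1.weight.bad_suffix'

# strict=True (the default) raises with an "Unexpected key(s)" error
try:
    m.load_state_dict(sd)
except RuntimeError as e:
    print("raised:", type(e).__name__)
```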
diff --git a/test/nn/test_load_state_dict.py b/test/nn/test_load_state_dict.py index 4fc62645f8..cd9540382c 100644 --- a/test/nn/test_load_state_dict.py +++ b/test/nn/test_load_state_dict.py @@ -451,6 +451,35 @@ class TestLoadStateDict(NNTestCase): ): m.load_state_dict(state_dict) + @swap([True, False]) + def test_load_state_dict_with_unexpected_key(self): + class MyModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.fc1 = torch.nn.Linear(5, 10) + + m = MyModule() + + # Unexpected key & strict = True + with self.assertRaisesRegex(RuntimeError, "Unexpected key"): + state_dict = m.state_dict() + state_dict["fc1.bad_suffix"] = torch.randn(5, 10) + m.load_state_dict(state_dict) + + # Unexpected key & strict = False + state_dict = m.load_state_dict(state_dict, strict=False) + self.assertIn("fc1.bad_suffix", state_dict.unexpected_keys) + + # Unexpected key whose prefix matches a valid key & strict = True + with self.assertRaisesRegex(RuntimeError, "Unexpected key"): + state_dict = m.state_dict() + state_dict["fc1.weight.bad_suffix"] = torch.randn(5, 10) + m.load_state_dict(state_dict) + + # Unexpected key whose prefix matches a valid key & strict = False + state_dict = m.load_state_dict(state_dict, strict=False) + self.assertIn("fc1.weight.bad_suffix", state_dict.unexpected_keys) + def load_torch_function_handler(cls, func, types, args=(), kwargs=None): kwargs = {} if kwargs is None else kwargs diff --git a/torch/nn/modules/module.py b/torch/nn/modules/module.py index 28fe3af8f2..ecbc9943cf 100644 --- a/torch/nn/modules/module.py +++ b/torch/nn/modules/module.py @@ -1651,7 +1651,6 @@ class Module: # raise exception raised in try block raise - __call__ : Callable[..., Any] = _wrapped_call_impl def __getstate__(self): @@ -1970,7 +1969,6 @@ class Module: self._load_state_dict_post_hooks[handle.id] = hook return handle - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): r"""Copy parameters and buffers from :attr:`state_dict` into only this module, but not its descendants. @@ -2095,9 +2093,12 @@ class Module: if strict: for key in state_dict.keys(): if key.startswith(prefix) and key != extra_state_key: - input_name = key[len(prefix):] - input_name = input_name.split('.', 1)[0] # get the name of param/buffer/child - if input_name not in self._modules and input_name not in local_state: + input_name = key[len(prefix):].split(".", 1) + # Must be Module if it have attributes + if len(input_name) > 1: + if input_name[0] not in self._modules: + unexpected_keys.append(key) + elif input_name[0] not in local_state: unexpected_keys.append(key) def load_state_dict(self, state_dict: Mapping[str, Any],
2.41.0
8f3e0214b8b598c7ebf9b0172621d4b3f97b3ef
Sat, 20 Apr 2024 23:42:30 +0000
[PATCH 0432/1000] [NCCL][TEST] Synchronize proper devices (#124517)
There are multiple `torch.cuda.synchronize()` calls without arguments. These calls cause device 0 to be synchronized from multiple ranks while the rest of the devices are not, which is almost certainly not what was intended. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124517 Approved by: https://github.com/wconstab, https://github.com/eqy
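For reference, a hedged sketch of the pattern the patch moves the tests to: each rank synchronizes the device it actually owns rather than relying on the bare call, which targets the current device (commonly `cuda:0` when `torch.cuda.set_device` was never called). The function and variable names here are illustrative.
```
import torch

def sync_my_gpu(rank: int):
    # Each rank owns one GPU; synchronize that one explicitly instead of
    # whatever the "current" device happens to be.
    device = torch.device("cuda", rank)
    torch.cuda.synchronize(device=device)

# Alternatively, setting the current device first makes the bare call safe:
# torch.cuda.set_device(rank); torch.cuda.synchronize()
```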
diff --git a/test/distributed/test_c10d_nccl.py b/test/distributed/test_c10d_nccl.py index 8743b37157..0d6fcf3c83 100644 --- a/test/distributed/test_c10d_nccl.py +++ b/test/distributed/test_c10d_nccl.py @@ -462,7 +462,7 @@ class ProcessGroupNCCLTest(MultiProcessTestCase): # this triggers cudaFree torch.cuda.empty_cache() work.wait() - torch.cuda.synchronize(local_device) + torch.cuda.synchronize(device=local_device) @requires_nccl() @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "NCCL test requires 2+ GPUs") @@ -2947,7 +2947,7 @@ class DistributedDataParallelTest( loss.backward() optimizer.step() - torch.cuda.synchronize() + torch.cuda.synchronize(device=device_id) class WorkHookTest(MultiProcessTestCase): @@ -4550,7 +4550,7 @@ class NCCLTraceTest(NCCLTraceTestBase): dist.batch_isend_irecv(ops).pop().wait() - torch.cuda.synchronize() + torch.cuda.synchronize(device=self.local_device) if timing_enabled: # wait for watchdog thread to process the queue of works @@ -4643,7 +4643,7 @@ class NCCLTraceTest(NCCLTraceTestBase): tensor *= 2 dist.send(tensor, 0) - torch.cuda.synchronize() + torch.cuda.synchronize(device=self.local_device) if timing_enabled: # wait for watchdog thread to process the queue of works time.sleep(1) @@ -4702,7 +4702,7 @@ class NCCLTraceTest(NCCLTraceTestBase): dist.reduce_scatter_tensor(output_tensors[i], input_tensors[i]) self.assertEqual(output_tensors, input_tensors[self.rank] * self.world_size) - torch.cuda.synchronize() + torch.cuda.synchronize(device=self.rank) if timing_enabled: # wait for watchdog thread to process the queue of works @@ -4809,7 +4809,7 @@ class NCCLTraceTestDumpOnTimeout(NCCLTraceTestDumpOnTimeoutBase): pg.allreduce(a).wait() # rank 0 will crash before it passes the sync, but rank1 will exit quickly and cleanly - torch.cuda.synchronize() + torch.cuda.synchronize(device=device) instantiate_parametrized_tests(ProcessGroupNCCLTest) @@ -4861,7 +4861,7 @@ class NCCLTraceTestTimeoutDumpOnStuckRanks(NCCLTraceTestDumpOnTimeoutBase): pg.allreduce(a).wait() # rank 0 will get stuck, timeout and then signal a timeout to all ranks. - torch.cuda.synchronize() + torch.cuda.synchronize(device=device) if self.rank == 1: # Force rank 1 to idle so that it will eventually timeout as well after
2.41.0
6a3f2e06b8f9c6124d96953523a6cf19c5bce14
Sun, 21 Apr 2024 00:12:29 +0000
[PATCH 0433/1000] [MPS] Fixes GELU, LeakyReLU and Mish on non-contiguous tensors (#123049)
Fixes the GELU, LeakyReLU and Mish activation functions, forward and backward passes, on non-contiguous tensors (for instance, when a transpose was applied to the tensor before the MPS operator). The tests for the three activation functions are extended to cover full and half precision, contiguous and non-contiguous inputs, and several tensor shapes: scalars, 1D, empty, 2D, and >3D. Asserting the gradients against CPU via sum() was flaky for Mish and GELU in some cases, so the tests keep the previous setup of passing an explicit gradient to .backward(). This PR also fixes an issue with LeakyReLU on empty tensors. Fixes #98212 huggingface/transformers#22468 huggingface/transformers#19353 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123049 Approved by: https://github.com/kulinseth
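A minimal check one could run on an MPS-capable macOS build to exercise the non-contiguous path this patch targets; `t()` produces the non-contiguous view that previously gave wrong results. This is an illustrative sketch, not part of the patch's test suite.
```
import torch
import torch.nn.functional as F

assert torch.backends.mps.is_available(), "needs an MPS-capable macOS build"

cpu_x = torch.randn(5, 4)
mps_x = cpu_x.to("mps")

# transpose produces a non-contiguous view; this is the case the fix targets
cpu_t, mps_t = cpu_x.t(), mps_x.t()
assert not mps_t.is_contiguous()

for fn in (F.gelu, F.mish, lambda t: F.leaky_relu(t, 0.1)):
    out_cpu = fn(cpu_t)
    out_mps = fn(mps_t).cpu()
    print(torch.allclose(out_cpu, out_mps, atol=1e-5))
```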
diff --git a/aten/src/ATen/native/mps/operations/Activation.mm b/aten/src/ATen/native/mps/operations/Activation.mm index c5e6102232..e8491e565b 100644 --- a/aten/src/ATen/native/mps/operations/Activation.mm +++ b/aten/src/ATen/native/mps/operations/Activation.mm @@ -131,8 +131,17 @@ TORCH_IMPL_FUNC(leaky_relu_out_mps)(const Tensor& self, const Scalar& negative_s using CachedGraph = MPSUnaryCachedGraph; TORCH_CHECK(output.is_mps()); + if (self.numel() == 0) { + return; + } + MPSStream* stream = getCurrentMPSStream(); + bool executeGatherOp = + !(self.is_contiguous(MemoryFormat::Contiguous) || self.is_contiguous(MemoryFormat::ChannelsLast) || + self.is_contiguous(MemoryFormat::ChannelsLast3d)); + Tensor output_ = at::empty_like(self, executeGatherOp ? MemoryFormat::Contiguous : MemoryFormat::Preserve); + @autoreleasepool { string key = "leaky_relu" + getTensorsStringKey({self}) + ":" + to_string(negative_slope.to<double>()); auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) { @@ -152,13 +161,17 @@ TORCH_IMPL_FUNC(leaky_relu_out_mps)(const Tensor& self, const Scalar& negative_s newCachedGraph->outputTensor_ = outputTensor; }); - Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor_, self); - Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor_, output); + Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor_, self, nil, executeGatherOp); + Placeholder outputPlaceholder = + Placeholder(cachedGraph->outputTensor_, executeGatherOp ? output_ : output, nil, false); // Create dictionary of inputs and outputs auto feeds = dictionaryFromPlaceholders(selfPlaceholder); runMPSGraph(stream, cachedGraph->graph(), feeds, outputPlaceholder); } + if (executeGatherOp) { + output.copy_(output_); + } } TORCH_IMPL_FUNC(leaky_relu_backward_out_mps) @@ -171,8 +184,14 @@ TORCH_IMPL_FUNC(leaky_relu_backward_out_mps) using CachedGraph = MPSUnaryGradCachedGraph; TORCH_CHECK(output.is_mps()); + if (self.numel() == 0) { + return; + } + MPSStream* stream = getCurrentMPSStream(); + Tensor output_ = at::empty_like(self, self.suggest_memory_format()); + @autoreleasepool { string key = "leaky_relu_backward" + getTensorsStringKey({self, grad_output}) + ":" + to_string(negative_slope.to<double>()); @@ -202,12 +221,13 @@ TORCH_IMPL_FUNC(leaky_relu_backward_out_mps) Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor_, self); Placeholder gradOutputPlaceholder = Placeholder(cachedGraph->gradOutputTensor_, grad_output); - Placeholder outputPlaceholder = Placeholder(cachedGraph->gradInputTensor_, output); + Placeholder outputPlaceholder = Placeholder(cachedGraph->gradInputTensor_, output_); // Create dictionary of inputs and outputs auto feeds = dictionaryFromPlaceholders(gradOutputPlaceholder, selfPlaceholder); runMPSGraph(stream, cachedGraph->graph(), feeds, outputPlaceholder); } + output.copy_(output_); } TORCH_IMPL_FUNC(log_softmax_mps_out) @@ -656,6 +676,11 @@ TORCH_IMPL_FUNC(gelu_out_mps)(const Tensor& self, c10::string_view approximate, auto approximate_type = get_gelutype_enum(approximate); MPSStream* stream = getCurrentMPSStream(); + bool executeGatherOp = + !(self.is_contiguous(MemoryFormat::Contiguous) || self.is_contiguous(MemoryFormat::ChannelsLast) || + self.is_contiguous(MemoryFormat::ChannelsLast3d)); + Tensor output_ = at::empty_like(self, executeGatherOp ? 
MemoryFormat::Contiguous : MemoryFormat::Preserve); + @autoreleasepool { const auto key = "gelu_out_mps" + getTensorsStringKey({self}) + ":" + gelutype_to_string(approximate_type); auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) { @@ -672,12 +697,17 @@ TORCH_IMPL_FUNC(gelu_out_mps)(const Tensor& self, c10::string_view approximate, newCachedGraph->outputTensor_ = outputTensor; }); - Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor_, self); - Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor_, output); + Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor_, self, nil, executeGatherOp); + Placeholder outputPlaceholder = + Placeholder(cachedGraph->outputTensor_, executeGatherOp ? output_ : output, nil, false); auto feeds = dictionaryFromPlaceholders(selfPlaceholder); runMPSGraph(stream, cachedGraph->graph(), feeds, outputPlaceholder); } + + if (executeGatherOp) { + output.copy_(output_); + } } TORCH_IMPL_FUNC(gelu_backward_out_mps) @@ -686,8 +716,11 @@ TORCH_IMPL_FUNC(gelu_backward_out_mps) using CachedGraph = MPSUnaryGradCachedGraph; // Empty output - if (grad_input.numel() == 0) + if (self.numel() == 0) { return; + } + + Tensor grad_input_ = at::empty_like(self, self.suggest_memory_format()); auto approximate_type = get_gelutype_enum(approximate); MPSStream* stream = getCurrentMPSStream(); @@ -761,11 +794,12 @@ TORCH_IMPL_FUNC(gelu_backward_out_mps) Placeholder gradPlaceholder = Placeholder(cachedGraph->gradOutputTensor_, grad); Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor_, self); - Placeholder outputPlaceholder = Placeholder(cachedGraph->gradInputTensor_, grad_input); + Placeholder outputPlaceholder = Placeholder(cachedGraph->gradInputTensor_, grad_input_); auto feeds = dictionaryFromPlaceholders(gradPlaceholder, selfPlaceholder); runMPSGraph(stream, cachedGraph->graph(), feeds, outputPlaceholder); } + grad_input.copy_(grad_input_); } static void elu_variants_out_mps(const Tensor& self, @@ -1241,6 +1275,11 @@ TORCH_IMPL_FUNC(mish_out_mps) MPSStream* stream = getCurrentMPSStream(); + bool executeGatherOp = + !(self.is_contiguous(MemoryFormat::Contiguous) || self.is_contiguous(MemoryFormat::ChannelsLast) || + self.is_contiguous(MemoryFormat::ChannelsLast3d)); + Tensor result_ = at::empty_like(self, executeGatherOp ? MemoryFormat::Contiguous : MemoryFormat::Preserve); + @autoreleasepool { string key = "mish_out_mps:" + getTensorsStringKey({self}); @@ -1257,12 +1296,16 @@ TORCH_IMPL_FUNC(mish_out_mps) newCachedGraph->inputTensor_ = inputTensor; newCachedGraph->outputTensor_ = outputTensor; }); - Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor_, self); - Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor_, result); + Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor_, self, nil, executeGatherOp); + Placeholder outputPlaceholder = + Placeholder(cachedGraph->outputTensor_, executeGatherOp ? 
result_ : result, nil, false); auto feeds = dictionaryFromPlaceholders(selfPlaceholder); runMPSGraph(stream, cachedGraph->graph(), feeds, outputPlaceholder); } + if (executeGatherOp) { + result.copy_(result_); + } } Tensor mish_backward_mps(const Tensor& grad_output, const Tensor& self) { diff --git a/test/test_mps.py b/test/test_mps.py index 862bda96c7..eb5e45aa0f 100644 --- a/test/test_mps.py +++ b/test/test_mps.py @@ -1470,9 +1470,19 @@ class MPSLeakyReluTest(TestCaseMPS): 0.9]]), negative_slope=0.1)) - def _testLeakyRelu(self, np_features, negative_slope, device): - cpu_x = torch.from_numpy(np_features).requires_grad_() - mps_x = torch.from_numpy(np_features).to('mps').requires_grad_() + def _testLeakyRelu(self, shape, dtype, negative_slope, contiguous): + cpu_x = torch.randn(shape, device='cpu', dtype=dtype) + mps_x = cpu_x.detach().clone().to('mps') + + if not contiguous and not (0 in shape or len(shape) < 2): + # Tranposing will make the tensor non-contiguous + cpu_x = cpu_x.transpose(0, 1) + mps_x = mps_x.transpose(0, 1) + assert not mps_x.is_contiguous() + + cpu_x.requires_grad_() + mps_x.requires_grad_() + relu_op = torch.nn.LeakyReLU(negative_slope) cpu_leaky_relu = relu_op(cpu_x) @@ -1480,19 +1490,24 @@ class MPSLeakyReluTest(TestCaseMPS): torch.testing.assert_close(cpu_leaky_relu, mps_leaky_relu.to('cpu')) # test backward pass + cpu_grad = torch.ones_like(cpu_leaky_relu) mps_grad = cpu_grad.to('mps') - cpu_leaky_relu.backward(gradient=cpu_grad) + mps_leaky_relu.backward(gradient=mps_grad) - torch.testing.assert_close(cpu_x.grad, mps_x.grad.to('cpu')) + cpu_leaky_relu.backward(gradient=cpu_grad) - def testNumbersCPU(self): - for t in [np.float32]: - self._testLeakyRelu( - np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t), - negative_slope=0.2, - device="cpu") + assert cpu_x.grad is not None # Check that the grad is well-populated + self.assertEqual(cpu_x.grad, mps_x.grad) + def testNumbersCPU(self): + for t in [torch.float, torch.half]: + for shape in [[], (0,), (0, 3), (4,), (4, 3), (5, 4, 3)]: + for contiguous in [True, False]: + self._testLeakyRelu(shape, + dtype=t, + negative_slope=0.2, + contiguous=contiguous) class TestAvgPool(TestCaseMPS): def _sum_pool2d(self, x, kernel_size): @@ -6631,9 +6646,18 @@ class TestMPS(TestCaseMPS): helper((2, 16, 16), (4, 4), return_indices, dtype) def test_gelu_simple(self): - def helper(shape, dtype=torch.float): - cpu_x = torch.randn(shape, device='cpu', dtype=dtype, requires_grad=True) - x = cpu_x.detach().clone().to('mps').requires_grad_() + def helper(shape, dtype=torch.float, contiguous=True): + cpu_x = torch.randn(shape, device='cpu', dtype=dtype) + x = cpu_x.detach().clone().to('mps') + + if not contiguous and (0 not in shape and len(shape) >= 2): + # Tranposing will make the tensor non-contiguous + cpu_x = cpu_x.transpose(0, 1) + x = x.transpose(0, 1) + assert not x.is_contiguous() + + cpu_x.requires_grad_() + x.requires_grad_() gelu_result = torch.nn.GELU()(x) # GELU is not supported on CPU, so cast it to float @@ -6648,16 +6672,55 @@ class TestMPS(TestCaseMPS): atol = 1e-5 if dtype == torch.float else 1e-2 rtol = 1e-3 if dtype == torch.float else 1e-2 self.assertEqual(gelu_result, gelu_result_cpu.to(dtype), atol=atol, rtol=rtol) + + assert x.grad is not None # Check that the grad is well-populated self.assertEqual(x.grad, cpu_x.grad, atol=atol, rtol=rtol) # Test empty shape too for dtype in [torch.float, torch.half]: - for shape in [(0, 3), [], (2, 3), (2, 8, 4, 5)]: - helper(shape, dtype) + for shape in [[], (0,), (0, 
3), (4,), (4, 3), (5, 4, 3)]: + for contiguous in [True, False]: + helper(shape, dtype, contiguous) # Test that gelu would raise an assert for integral types for dtype in [torch.int8, torch.int16, torch.int32, torch.int64]: self.assertRaises(RuntimeError, lambda: torch.nn.GELU()(torch.randint(100, (2,), dtype=dtype, device="mps"))) + def test_mish_simple(self): + def helper(shape, dtype=torch.float, contiguous=True): + cpu_x = torch.randn(shape, device='cpu', dtype=dtype) + x = cpu_x.detach().clone().to('mps') + + if not contiguous and (0 not in shape and len(shape) >= 2): + # Tranposing will make the tensor non-contiguous + cpu_x = cpu_x.transpose(0, 1) + x = x.transpose(0, 1) + assert not x.is_contiguous() + + cpu_x.requires_grad_() + x.requires_grad_() + + mish_result = torch.nn.Mish()(x) + mish_result_cpu = torch.nn.Mish()(cpu_x) + + cpu_grad = torch.ones_like(mish_result_cpu) + grad = cpu_grad.to('mps') + + mish_result.backward(gradient=grad) + mish_result_cpu.backward(gradient=cpu_grad) + + atol = 1e-5 if dtype == torch.float else 1e-2 + rtol = 1e-3 if dtype == torch.float else 1e-2 + self.assertEqual(mish_result, mish_result_cpu.to(dtype), atol=atol, rtol=rtol) + + assert x.grad is not None # Check that the grad is well-populated + self.assertEqual(x.grad, cpu_x.grad, atol=atol, rtol=rtol) + + # Test empty shape too + for dtype in [torch.float, torch.half]: + for shape in [[], (0,), (0, 3), (4,), (4, 3), (5, 4, 3)]: + for contiguous in [True, False]: + helper(shape, dtype, contiguous) + def test_gelu(self): def _test_gelu(n, m, dtype, contiguous, atol=None, rtol=None): numpy_dtype = {
2.41.0
d90d4d613f29b61fc0e865c7d9867bd2e888e9b
Sun, 21 Apr 2024 04:36:22 +0000
[PATCH 0434/1000] [Dynamo] Fix NamedTuple hasattr bug (#124531)
Fixes #124402 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124531 Approved by: https://github.com/jansel
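The bug was that `call_hasattr` consulted only the namedtuple's `_fields`, so methods inherited from the class (such as `_asdict`) looked absent under `torch.compile`. A small sketch of the pattern from the linked issue; names are illustrative and the expected output assumes this fix is applied.
```
import collections
import torch

Point = collections.namedtuple("Point", ["x", "y"])

def looks_like_namedtuple(obj):
    # hasattr must consult the *class*: "_asdict" is a method on Point,
    # not one of its fields.
    return isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields")

@torch.compile(backend="eager", fullgraph=True)
def f(a, b):
    p = Point(a, b)
    return a + b if looks_like_namedtuple(p) else a - b

print(f(torch.ones(2), torch.ones(2)))  # should match eager: tensor([2., 2.])
```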
diff --git a/test/dynamo/test_functions.py b/test/dynamo/test_functions.py
index 48acf9b16f..bcd299a9e8 100644
--- a/test/dynamo/test_functions.py
+++ b/test/dynamo/test_functions.py
@@ -1284,6 +1284,22 @@ class FunctionTests(torch._dynamo.test_case.TestCase):
         mytuple = FunctionTests.MyNamedTuple(a, b)
         return mytuple.add(), mytuple.static_method(), mytuple.class_method()

+    @make_test
+    def test_namedtuple_hasattr(a, b):
+        mytuple = FunctionTests.MyNamedTuple(a, b)
+
+        def isinstance_namedtuple(obj) -> bool:
+            return (
+                isinstance(obj, tuple)
+                and hasattr(obj, "_asdict")
+                and hasattr(obj, "_fields")
+            )
+
+        if isinstance_namedtuple(mytuple):
+            return a + b
+        else:
+            return a - b
+
     @make_test
     def test_is_quantized(a, b):
         if not a.is_quantized:
diff --git a/torch/_dynamo/variables/lists.py b/torch/_dynamo/variables/lists.py
index 1f9b83a8a9..d51b4daff3 100644
--- a/torch/_dynamo/variables/lists.py
+++ b/torch/_dynamo/variables/lists.py
@@ -586,8 +586,7 @@ class NamedTupleVariable(TupleVariable):
         return self.items[fields.index(name)]

     def call_hasattr(self, tx, name: str) -> "VariableTracker":
-        fields = namedtuple_fields(self.tuple_cls)
-        return variables.ConstantVariable.create(name in fields)
+        return variables.ConstantVariable.create(hasattr(self.tuple_cls, name))


 class SliceVariable(BaseListVariable):
2.41.0
34905f61d614fb5b0551ec363d7945fdcd06268
Sun, 21 Apr 2024 04:20:06 -0700
[PATCH 0437/1000] Assert that TracingContext is available when set_example_value is called (#124284)
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124284 Approved by: https://github.com/Chillee ghstack dependencies: #124105, #124059, #124176, #124283
diff --git a/torch/_dynamo/eval_frame.py b/torch/_dynamo/eval_frame.py index 99a466523a..cf0fe7fcf2 100644 --- a/torch/_dynamo/eval_frame.py +++ b/torch/_dynamo/eval_frame.py @@ -58,7 +58,7 @@ from .code_context import code_context from .exc import CondOpArgsMismatchError, UserError, UserErrorType from .mutation_guard import install_generation_tagging_init from .types import CacheEntry, DynamoCallback -from .utils import common_constant_types, compile_times, set_example_value +from .utils import common_constant_types, compile_times log = logging.getLogger(__name__) @@ -766,7 +766,8 @@ class FlattenInputOutputSignature(torch.fx.interpreter.Transformer): if "tensor_dict" in self.current_node.meta: arg.node.meta["tensor_dict"] = self.current_node.meta["tensor_dict"] if "example_value" in self.current_node.meta: - set_example_value(arg.node, self.current_node.meta["example_value"]) + # NB: intentionally do not use set_example_value + arg.node.meta["example_value"] = self.current_node.meta["example_value"] return arg def output(self, target, args, kwargs): @@ -790,9 +791,10 @@ class FlattenInputOutputSignature(torch.fx.interpreter.Transformer): if "val" in self.current_node.meta: result_proxy.node.meta["val"] = self.current_node.meta["val"] if "example_value" in self.current_node.meta: - set_example_value( - result_proxy.node, self.current_node.meta["example_value"] - ) + # NB: intentionally do not use set_example_value + result_proxy.node.meta["example_value"] = self.current_node.meta[ + "example_value" + ] if self.current_node.op != "output": result_proxy.node._rename( getattr(self.current_node, "name", result_proxy.node.name) diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py index 84368806c6..2b9be139ec 100644 --- a/torch/_dynamo/utils.py +++ b/torch/_dynamo/utils.py @@ -95,6 +95,7 @@ import torch.fx.experimental.symbolic_shapes import torch.utils._pytree as pytree from torch import fx from torch._dispatch.python import enable_python_dispatcher +from torch._guards import TracingContext from torch._subclasses.meta_utils import is_sparse_compressed from torch._utils_internal import log_compilation_event @@ -1152,6 +1153,7 @@ def set_example_value(node, example_value): # this to accurately reflect what the state of the value was at the time # the program was traced). node.meta["example_value"] = example_value + assert TracingContext.try_get() is not None def _get_fake_tensor(vt):
2.41.0
9cc2937254aa5d014e96ca710a042fb958933db
Sun, 21 Apr 2024 14:12:30 +0000
[PATCH 0439/1000] [BE]: FURB142 - Remove set mutations. Use set update (#124551)
Uses set mutation methods instead of manually reimplementing (update, set_difference etc). Pull Request resolved: https://github.com/pytorch/pytorch/pull/124551 Approved by: https://github.com/ezyang
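The refactor is purely mechanical: bulk `set.update` (or a set comprehension) replaces add-one-element-per-iteration loops with identical results. A tiny illustration with made-up data:
```
words = ["alpha", "beta", "gamma"]

# Before: manual loop
first_letters = set()
for w in words:
    first_letters.add(w[0])

# After: a single bulk update, or a set comprehension
first_letters2 = set()
first_letters2.update(w[0] for w in words)
first_letters3 = {w[0] for w in words}

assert first_letters == first_letters2 == first_letters3
```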
diff --git a/benchmarks/dynamo/timm_models.py b/benchmarks/dynamo/timm_models.py index a59a24132d..9e08b37fd5 100755 --- a/benchmarks/dynamo/timm_models.py +++ b/benchmarks/dynamo/timm_models.py @@ -167,11 +167,9 @@ def refresh_model_names(): del all_models_family[key] chosen_models = set() - for value in docs_models_family.values(): - chosen_models.add(value[0]) + chosen_models.update(value[0] for value in docs_models_family.values()) - for key, value in all_models_family.items(): - chosen_models.add(value[0]) + chosen_models.update(value[0] for key, value in all_models_family.items()) filename = "timm_models_list.txt" if os.path.exists("benchmarks"): diff --git a/benchmarks/operator_benchmark/benchmark_utils.py b/benchmarks/operator_benchmark/benchmark_utils.py index d68a0504b8..d7e45b7c16 100644 --- a/benchmarks/operator_benchmark/benchmark_utils.py +++ b/benchmarks/operator_benchmark/benchmark_utils.py @@ -345,8 +345,9 @@ def get_operator_range(chars_range): ops_start_chars_set.add(item.lower()) continue start, end = item.split("-") - for c in range(ord(start), ord(end) + 1): - ops_start_chars_set.add(chr(c).lower()) + ops_start_chars_set.update( + chr(c).lower() for c in range(ord(start), ord(end) + 1) + ) return ops_start_chars_set diff --git a/test/distributed/_composable/fully_shard/test_fully_shard_init.py b/test/distributed/_composable/fully_shard/test_fully_shard_init.py index d5297f4cc1..c6da3ab295 100644 --- a/test/distributed/_composable/fully_shard/test_fully_shard_init.py +++ b/test/distributed/_composable/fully_shard/test_fully_shard_init.py @@ -144,10 +144,12 @@ class TestInitialization(FSDPTest): # Check that the composable module does not add any wrapper class local_module_classes = set() composable_module_classes = set() - for submodule in local_model.modules(): - local_module_classes.add(type(submodule)) - for submodule in composable_module.modules(): - composable_module_classes.add(type(submodule)) + local_module_classes.update( + type(submodule) for submodule in local_model.modules() + ) + composable_module_classes.update( + type(submodule) for submodule in composable_module.modules() + ) self.assertEqual(local_module_classes, composable_module_classes) # Check that the composable module has the same FSDP states with the @@ -310,14 +312,14 @@ class TestInitialization(FSDPTest): ] for data_structure_name in data_structure_names: all_structures = set() - for module in ( - composable_module.u1, - composable_module.u2, - composable_module, - ): - all_structures.add( - id(getattr(fully_shard.state(module), data_structure_name)) + all_structures.update( + id(getattr(fully_shard.state(module), data_structure_name)) + for module in ( + composable_module.u1, + composable_module.u2, + composable_module, ) + ) self.assertEqual(len(all_structures), 1) diff --git a/test/distributed/fsdp/test_wrap.py b/test/distributed/fsdp/test_wrap.py index 1bb7b2c884..d39ba37307 100644 --- a/test/distributed/fsdp/test_wrap.py +++ b/test/distributed/fsdp/test_wrap.py @@ -945,8 +945,7 @@ class TestWrapUtils(TestCase): ignored_params = set() for module_name, module in model.named_modules(): if "lora_A" in module_name: - for param in module.parameters(): - ignored_params.add(param) + ignored_params.update(module.parameters()) _validate_frozen_params(model, modules_to_wrap, ignored_params, use_orig_params) diff --git a/test/dynamo/test_higher_order_ops.py b/test/dynamo/test_higher_order_ops.py index 4b46c568af..9f1819570a 100644 --- a/test/dynamo/test_higher_order_ops.py +++ 
b/test/dynamo/test_higher_order_ops.py @@ -1375,8 +1375,7 @@ def forward(self, getitem, const): cond_gm = backend.graphs[0] name_set = set() - for name, _ in cond_gm.named_modules(): - name_set.add(name) + name_set.update(name for name, _ in cond_gm.named_modules()) self.assertEqual( name_set, { @@ -1735,8 +1734,7 @@ def forward(self): self.assertEqual(result, x + y + x) wrap_gm = backend.graphs[0] names = set() - for mod_name, _ in wrap_gm.named_modules(): - names.add(mod_name) + names.update(mod_name for mod_name, _ in wrap_gm.named_modules()) self.assertEqual( names, { diff --git a/test/functorch/discover_coverage.py b/test/functorch/discover_coverage.py index f6e0858148..dd7e8b6c9a 100644 --- a/test/functorch/discover_coverage.py +++ b/test/functorch/discover_coverage.py @@ -365,8 +365,7 @@ def get_all_tested_ops(): result = set({}) for op in get_covered_ops(overridable_outplace_we_care_about).values(): opinfos = op_to_opinfo[op] - for opinfo in opinfos: - result.add(opinfo.name) + result.update(opinfo.name for opinfo in opinfos) return result diff --git a/test/package/test_digraph.py b/test/package/test_digraph.py index 90dc11f3a1..9868466b64 100644 --- a/test/package/test_digraph.py +++ b/test/package/test_digraph.py @@ -79,8 +79,7 @@ class TestDiGraph(PackageTestCase): g.add_node(3) nodes = set() - for n in g: - nodes.add(n) + nodes.update(g) self.assertEqual(nodes, {1, 2, 3}) diff --git a/test/test_dataloader.py b/test/test_dataloader.py index a921f59dad..cd8600a573 100644 --- a/test/test_dataloader.py +++ b/test/test_dataloader.py @@ -1617,8 +1617,7 @@ except RuntimeError as e: dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers) dataloader = self._get_data_loader(dataset, batch_size=batch_size, num_workers=num_workers) seeds = set() - for batch in dataloader: - seeds.add(batch[0]) + seeds.update(batch[0] for batch in dataloader) self.assertEqual(len(seeds), num_workers) def test_worker_seed_reproducibility(self): diff --git a/test/test_torch.py b/test/test_torch.py index 0dbcc0fa79..735a4f447a 100644 --- a/test/test_torch.py +++ b/test/test_torch.py @@ -9523,8 +9523,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], device_set = {'cpu', 'cpu:0', 'cuda', 'cuda:0', 'cuda:1', 'cuda:10', 'cuda:100'} device_hash_set = set() - for device in device_set: - device_hash_set.add(hash(torch.device(device))) + device_hash_set.update(hash(torch.device(device)) for device in device_set) self.assertEqual(len(device_set), len(device_hash_set)) def get_expected_device_repr(device): diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py index 72a880ab48..ba25524915 100644 --- a/torch/_dynamo/trace_rules.py +++ b/torch/_dynamo/trace_rules.py @@ -3233,17 +3233,19 @@ if torch.distributed.is_available(): @functools.lru_cache(None) def get_legacy_mod_inlinelist(): - inlinelist = set() - for m in LEGACY_MOD_INLINELIST: - inlinelist.add(_module_dir(torch) + m[len("torch.") :].replace(".", "/")) + inlinelist = { + _module_dir(torch) + m[len("torch.") :].replace(".", "/") + for m in LEGACY_MOD_INLINELIST + } return inlinelist @functools.lru_cache(None) def get_mod_inlinelist(): - inlinelist = set() - for m in MOD_INLINELIST: - inlinelist.add(_module_dir(torch) + m[len("torch.") :].replace(".", "/")) + inlinelist = { + _module_dir(torch) + m[len("torch.") :].replace(".", "/") + for m in MOD_INLINELIST + } return inlinelist diff --git a/torch/_functorch/partitioners.py b/torch/_functorch/partitioners.py index fd7fba3e8f..873441a971 100644 --- 
a/torch/_functorch/partitioners.py +++ b/torch/_functorch/partitioners.py @@ -744,8 +744,7 @@ def min_cut_rematerialization_partition( if node.op == "placeholder" and "tangents" in node.target: required_bw_nodes.add(node) if node in required_bw_nodes: - for user in node.users: - required_bw_nodes.add(user) + required_bw_nodes.update(node.users) primal_inputs = list(filter(_is_primal, joint_module.graph.nodes)) fwd_seed_offset_inputs = list( diff --git a/torch/_inductor/codegen/cpp.py b/torch/_inductor/codegen/cpp.py index e8ca0dd18b..e4b30f0ba1 100644 --- a/torch/_inductor/codegen/cpp.py +++ b/torch/_inductor/codegen/cpp.py @@ -3623,8 +3623,7 @@ class CppScheduling(BaseScheduling): if var_ranges is None: var_ranges = v assert var_ranges == v, (var_ranges, v, node.snodes) - for expr in exprs: - indexing_exprs.add(expr) + indexing_exprs.update(exprs) return var_ranges, list(indexing_exprs) else: assert isinstance(node, SchedulerNode) diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py index e42176a769..97e1683120 100644 --- a/torch/_inductor/graph.py +++ b/torch/_inductor/graph.py @@ -635,8 +635,7 @@ class GraphLowering(torch.fx.Interpreter): # - sebotnet33ts_256 for n in self.module.graph.nodes: if n in output_set: - for child in n.users: - output_set.add(child) + output_set.update(n.users) return output_set diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py index 57d48f1f4c..7fd89ab3bf 100644 --- a/torch/_inductor/lowering.py +++ b/torch/_inductor/lowering.py @@ -89,8 +89,9 @@ def add_needs_realized_inputs(fn): return [add_needs_realized_inputs(x) for x in fn] needs_realized_inputs.add(fn) if isinstance(fn, torch._ops.OpOverloadPacket): - for overload in fn.overloads(): - needs_realized_inputs.add(getattr(fn, overload)) + needs_realized_inputs.update( + getattr(fn, overload) for overload in fn.overloads() + ) def add_layout_constraint(fn, constraint): diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py index 30aa54843d..32f734ba8b 100644 --- a/torch/_inductor/scheduler.py +++ b/torch/_inductor/scheduler.py @@ -2292,9 +2292,7 @@ class Scheduler: Populate node.last_usage recursively (also for the nodes within a FusedSchedulerNode) """ - future_used_buffers = set() - for node_name in V.graph.get_output_names(): - future_used_buffers.add(node_name) + future_used_buffers = set(V.graph.get_output_names()) for node in reversed(self.nodes): node.set_last_usage(future_used_buffers, self.mutation_real_name) diff --git a/torch/_library/custom_ops.py b/torch/_library/custom_ops.py index f21d22651e..bd36d6b652 100644 --- a/torch/_library/custom_ops.py +++ b/torch/_library/custom_ops.py @@ -223,9 +223,10 @@ class CustomOpDef: def backend_impl(*args, **kwargs): # Checks the assumption that outputs cannot alias # inputs or other outputs. 
- storages = set() - for tensor in iter_tensors(args, kwargs): - storages.add(id(tensor.untyped_storage())) + storages = { + id(tensor.untyped_storage()) + for tensor in iter_tensors(args, kwargs) + } result = self._backend_fns[device_type](*args, **kwargs) diff --git a/torch/ao/ns/fx/n_shadows_utils.py b/torch/ao/ns/fx/n_shadows_utils.py index b7eddf93e2..1fd6f069ac 100644 --- a/torch/ao/ns/fx/n_shadows_utils.py +++ b/torch/ao/ns/fx/n_shadows_utils.py @@ -742,8 +742,7 @@ def create_add_loggers_graph( insert_submodule_copy = False if maybe_subgraph is not None: first_node, last_node = maybe_subgraph[0], maybe_subgraph[-1] - for node_to_skip in maybe_subgraph: - nodes_to_skip.add(node_to_skip) + nodes_to_skip.update(maybe_subgraph) qconfig = node_name_to_qconfig[first_node.name] if qconfig is not None: insert_submodule_copy = True @@ -873,8 +872,7 @@ def create_add_loggers_graph( maybe_subgraph = _get_subgraph_containing_node(n, subgraphs_dedup) if maybe_subgraph is not None: first_node, last_node = maybe_subgraph[0], maybe_subgraph[-1] - for node_to_skip in maybe_subgraph: - nodes_to_skip.add(node_to_skip) + nodes_to_skip.update(maybe_subgraph) else: first_node, last_node = n, n diff --git a/torch/ao/quantization/quantizer/embedding_quantizer.py b/torch/ao/quantization/quantizer/embedding_quantizer.py index 8ffd2002e5..8130694326 100644 --- a/torch/ao/quantization/quantizer/embedding_quantizer.py +++ b/torch/ao/quantization/quantizer/embedding_quantizer.py @@ -45,9 +45,9 @@ class EmbeddingQuantizer(Quantizer): @classmethod def get_supported_quantization_configs(cls) -> List[QuantizationConfig]: - op_configs: Set[QuantizationConfig] = set({}) - for spec, _ in cls.get_supported_operators(): - op_configs.add(spec) + op_configs: Set[QuantizationConfig] = { + spec for spec, _ in cls.get_supported_operators() + } return list(op_configs) @classmethod diff --git a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py index 8889cf2df0..269b0128c6 100644 --- a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py +++ b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py @@ -286,9 +286,9 @@ class X86InductorQuantizer(Quantizer): @classmethod def get_supported_quantization_configs(cls) -> List[QuantizationConfig]: - op_configs: Set[QuantizationConfig] = set({}) - for spec, _ in cls.supported_config_and_operators: - op_configs.add(spec) + op_configs: Set[QuantizationConfig] = { + spec for spec, _ in cls.supported_config_and_operators + } return list(op_configs) @classmethod diff --git a/torch/ao/quantization/quantizer/xnnpack_quantizer.py b/torch/ao/quantization/quantizer/xnnpack_quantizer.py index 1f7dad387f..b66cfdf37a 100644 --- a/torch/ao/quantization/quantizer/xnnpack_quantizer.py +++ b/torch/ao/quantization/quantizer/xnnpack_quantizer.py @@ -305,9 +305,9 @@ class XNNPACKQuantizer(Quantizer): @classmethod def get_supported_quantization_configs(cls) -> List[QuantizationConfig]: - op_configs: Set[QuantizationConfig] = set({}) - for spec, _ in cls.supported_config_and_operators: - op_configs.add(spec) + op_configs: Set[QuantizationConfig] = { + spec for spec, _ in cls.supported_config_and_operators + } return list(op_configs) @classmethod diff --git a/torch/distributed/_tensor/ops/basic_strategy.py b/torch/distributed/_tensor/ops/basic_strategy.py index 8005528123..6c2d87f470 100644 --- a/torch/distributed/_tensor/ops/basic_strategy.py +++ b/torch/distributed/_tensor/ops/basic_strategy.py @@ -1,7 +1,7 @@ import itertools from 
dataclasses import dataclass -from typing import List, Tuple +from typing import List, Set, Tuple from torch.distributed._tensor.op_schema import OpStrategy, PlacementStrategy from torch.distributed._tensor.placement_types import ( @@ -44,10 +44,9 @@ class EinsumDims: Parse the dims and extract the contracting, batch, and free dimensions for the left and right hand sides. """ - dim_char_set = set() + dim_char_set: Set[str] = set() for input_dim in input_dims: - for input_char in list(input_dim): - dim_char_set.add(input_char) + dim_char_set.update(input_dim) # get a determinisitc order of all dim chars all_dim_chars = sorted(dim_char_set) diff --git a/torch/distributed/checkpoint/state_dict.py b/torch/distributed/checkpoint/state_dict.py index 84659586ea..a8f8216057 100644 --- a/torch/distributed/checkpoint/state_dict.py +++ b/torch/distributed/checkpoint/state_dict.py @@ -218,7 +218,7 @@ def _verify_options( fqn_param_mapping[fqn] = param all_fqns.add(fqn) - submodule_prefixes = set() + submodule_prefixes: Set[str] = set() if submodules: submodules = set(submodules) for name, module in model.named_modules(): @@ -226,8 +226,7 @@ def _verify_options( continue fqns = _get_fqns(model, name) assert len(fqns) == 1, "Submodule FQN should only have 1 instance" - for fqn in fqns: - submodule_prefixes.add(f"{fqn}.") + submodule_prefixes.update(f"{fqn}." for fqn in fqns) fsdp_modules = FSDP.fsdp_modules(model) state_dict_config: StateDictConfig diff --git a/torch/fx/graph_module.py b/torch/fx/graph_module.py index 9569a0d01b..3e797638ab 100644 --- a/torch/fx/graph_module.py +++ b/torch/fx/graph_module.py @@ -112,9 +112,7 @@ def _format_import_statement(name: str, obj: Any, importer: Importer) -> str: def _format_import_block(globals: Dict[str, Any], importer: Importer): - import_strs: Set[str] = set() - for name, obj in globals.items(): - import_strs.add(_format_import_statement(name, obj, importer)) + import_strs: Set[str] = {_format_import_statement(name, obj, importer) for name, obj in globals.items()} # Sort the imports so we have a stable import block that allows us to # hash the graph module and get a consistent key for use in a cache. return "\n".join(sorted(import_strs)) diff --git a/torch/fx/subgraph_rewriter.py b/torch/fx/subgraph_rewriter.py index b4972720a0..d0bb4b55a4 100644 --- a/torch/fx/subgraph_rewriter.py +++ b/torch/fx/subgraph_rewriter.py @@ -294,8 +294,7 @@ def _replace_pattern( # Copy the replacement graph over user_nodes: Set[Node] = set() for n in match.returning_nodes: - for user in n.users: - user_nodes.add(user) + user_nodes.update(n.users) assert user_nodes, "The returning_nodes should have at least one user node" if len(user_nodes) == 1: diff --git a/torch/profiler/_memory_profiler.py b/torch/profiler/_memory_profiler.py index f091dd47d0..be3edc5065 100644 --- a/torch/profiler/_memory_profiler.py +++ b/torch/profiler/_memory_profiler.py @@ -930,8 +930,9 @@ class MemoryProfile: self._is_gradient(*i) or i in used_for_gradient for i in node.outputs.items() ): - for key, (_, version) in node.inputs.items(): - used_for_gradient.add((key, version)) + used_for_gradient.update( + (key, version) for key, (_, version) in node.inputs.items() + ) candidate_parameters.intersection_update(used_for_gradient) # and depends on a gradient. 
diff --git a/torch/utils/data/datapipes/_hook_iterator.py b/torch/utils/data/datapipes/_hook_iterator.py index 7463cc55d2..49e17438d6 100644 --- a/torch/utils/data/datapipes/_hook_iterator.py +++ b/torch/utils/data/datapipes/_hook_iterator.py @@ -34,9 +34,7 @@ def _strip_datapipe_from_name(name: str) -> str: def _generate_input_args_string(obj): """Generate a string for the input arguments of an object.""" signature = inspect.signature(obj.__class__) - input_param_names = set() - for param_name in signature.parameters.keys(): - input_param_names.add(param_name) + input_param_names = set(signature.parameters.keys()) result = [] for name, value in inspect.getmembers(obj): if name in input_param_names: diff --git a/torch/utils/tensorboard/_caffe2_graph.py b/torch/utils/tensorboard/_caffe2_graph.py index 5367460260..cd2d371204 100644 --- a/torch/utils/tensorboard/_caffe2_graph.py +++ b/torch/utils/tensorboard/_caffe2_graph.py @@ -578,10 +578,8 @@ def _compute_in_out(ops): out_blobs = set() for op in ops: - for input_blob in op.input: - in_blobs.add(input_blob) - for output_blob in op.output: - out_blobs.add(output_blob) + in_blobs.update(op.input) + out_blobs.update(op.output) input_blobs = list(in_blobs.difference(out_blobs)) output_blobs = list(out_blobs.difference(in_blobs)) @@ -700,8 +698,7 @@ def _operators_to_graph_def( else [_operator_to_node(shapes, op)] ) # .extend() expects an iterable current_graph.node.extend(nodes_from_op) - for input_blob in op.input: - blobs.add(input_blob) + blobs.update(op.input) for i, output_blob in enumerate(op.output): blobs.add(output_blob) producing_ops.setdefault(output_blob, []).append((op, i)) diff --git a/torchgen/gen.py b/torchgen/gen.py index 2549fd175c..dee23957e3 100644 --- a/torchgen/gen.py +++ b/torchgen/gen.py @@ -2125,7 +2125,7 @@ def gen_headers( ) def gen_aten_interned_strings() -> Dict[str, str]: - attrs = set() # All function argument names + attrs: Set[str] = set() # All function argument names names = set() # All ATen function names for func in native_functions: names.add(str(func.func.name.name)) @@ -2133,8 +2133,7 @@ def gen_headers( # symbol without the underscore names.add(func.func.name.name.base) - for arg in func.func.schema_order_arguments(): - attrs.add(arg.name) + attrs.update(arg.name for arg in func.func.schema_order_arguments()) # These are keywords in C++, so aren't valid symbol names # https://en.cppreference.com/w/cpp/language/operator_alternative
2.41.0
d90991790b4cdf66a076711844ca620669dcc04
Sun, 21 Apr 2024 15:20:21 +0000
[PATCH 0440/1000] [rfc] opentelemetry in pytorch (#122999)
1. Add current latest version (opentelemetry-cpp version v1.14.2) to PyTorch library. Steps: ``` $cd pytorch $git submodule add https://github.com/open-telemetry/opentelemetry-cpp.git third_party/opentelemetry-cpp $cd third_party/opentelemetry-cpp $git checkout v1.14.2 $git add third_party/opentelemetry-cpp .gitmodules $git commit ``` Expected change in checkout size: ``` (/home/cpio/local/a/pytorch-env) [cpio@devvm17556.vll0 ~/local/pytorch (gh/c-p-i-o/otel)]$ git count-objects -vH count: 654 size: 3.59 MiB in-pack: 1229701 packs: 17 size-pack: 1.17 GiB prune-packable: 76 garbage: 0 size-garbage: 0 bytes ``` 2. TODO - [x] Figure out how dynamic linking works. App builders will somehow need to `target_include` opentelemetry-cpp at runtime. - [ ] Examples on how to use opentelemetry + pytorch - [ ] Tests + documentation (e.g. using null opentelemetry implementation). Pull Request resolved: https://github.com/pytorch/pytorch/pull/122999 Approved by: https://github.com/ezyang
diff --git a/.bazelignore b/.bazelignore index 61b5e9458d..01fcdd0d8e 100644 --- a/.bazelignore +++ b/.bazelignore @@ -1,3 +1,4 @@ # We do not use this library in our Bazel build. It contains an # infinitely recursing symlink that makes Bazel very unhappy. third_party/ittapi/ +third_party/opentelemetry-cpp diff --git a/.gitmodules b/.gitmodules index 7e1b09e591..c9b84a3701 100644 --- a/.gitmodules +++ b/.gitmodules @@ -149,3 +149,6 @@ [submodule "third_party/mimalloc"] path = third_party/mimalloc url = https://github.com/microsoft/mimalloc.git +[submodule "third_party/opentelemetry-cpp"] + path = third_party/opentelemetry-cpp + url = https://github.com/open-telemetry/opentelemetry-cpp.git diff --git a/WORKSPACE b/WORKSPACE index 59bc0998dd..8eabea571a 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -71,6 +71,13 @@ http_archive( ], ) +http_archive( + name = "com_github_opentelemetry-cpp", + urls = [ + "https://github.com/open-telemetry/opentelemetry-cpp/archive/refs/tags/v1.14.2.tar.gz", + ], +) + new_local_repository( name = "gloo", build_file = "//third_party:gloo.BUILD", @@ -155,6 +162,12 @@ new_local_repository( path = "third_party/kineto", ) +new_local_repository( + name = "opentelemetry-cpp", + build_file = "//third_party::opentelemetry-cpp.BUILD", + path = "third_party/opentelemetry-cpp", +) + new_patched_local_repository( name = "tbb", build_file = "//third_party:tbb.BUILD", diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index 8fc7f51f6d..d8aea3999e 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -1156,6 +1156,16 @@ if(APPLE) target_link_options(pybind::pybind11 INTERFACE -undefined dynamic_lookup) endif() +# ---[ OpenTelemetry API headers +find_package(OpenTelemetryApi) +if(NOT OpenTelemetryApi_FOUND) + message(STATUS "Using third_party/opentelemetry-cpp.") + set(OpenTelemetryApi_INCLUDE_DIRS ${CMAKE_CURRENT_LIST_DIR}/../third_party/opentelemetry-cpp/api/include) +endif() +message(STATUS "opentelemetry api include dirs: " "${OpenTelemetryApi_INCLUDE_DIRS}") +add_library(opentelemetry::api INTERFACE IMPORTED) +target_include_directories(opentelemetry::api SYSTEM INTERFACE ${OpenTelemetryApi_INCLUDE_DIRS}) + # ---[ MPI if(USE_MPI) find_package(MPI) diff --git a/cmake/Modules/FindOpenTelemetryApi.cmake b/cmake/Modules/FindOpenTelemetryApi.cmake new file mode 100644 index 0000000000..01334ed704 --- /dev/null +++ b/cmake/Modules/FindOpenTelemetryApi.cmake @@ -0,0 +1,19 @@ +# Try to find the OpenTelemetry API headers +# OpenTelemetryApi_FOUND - system has OpenTelemetry API headers +# OpenTelemetryApi_INCLUDE_DIRS - the OpenTelemetry API headers dir + +find_path(OpenTelemetryApi_INCLUDE_DIR + NAMES include/opentelemetry/version.h + DOC "The directory where Open Telemetry API headers reside" +) + +set(OpenTelemetryApi_INCLUDE_DIRS ${OpenTelemetryApi_INCLUDE_DIR}) + +include(FindPackageHandleStandardArgs) + +find_package_handle_standard_args(OpenTelemetryApi + FOUND_VAR OpenTelemetryApi_FOUND + REQUIRED_VARS OpenTelemetryApi_INCLUDE_DIRS +) + +mark_as_advanced(OpenTelemetryApi_FOUND) diff --git a/third_party/LICENSES_BUNDLED.txt b/third_party/LICENSES_BUNDLED.txt index df5cfee1f4..23d990fe59 100644 --- a/third_party/LICENSES_BUNDLED.txt +++ b/third_party/LICENSES_BUNDLED.txt @@ -1,4 +1,4 @@ -The Pytorch repository and source distributions bundle several libraries that are +The Pytorch repository and source distributions bundle several libraries that are compatibly licensed. We list these here. 
Name: DCGM @@ -41,11 +41,28 @@ License: Apache-2.0 Files: third_party/benchmark, third_party/onnx/third_party/benchmark, third_party/onnx-tensorrt/third_party/onnx/third_party/benchmark, - third_party/protobuf/third_party/benchmark + third_party/protobuf/third_party/benchmark, + third_party/opentelemetry-cpp/third_party/benchmark For details, see: third_party/benchmark/LICENSE, third_party/onnx/third_party/benchmark/LICENSE, third_party/onnx-tensorrt/third_party/onnx/third_party/benchmark/LICENSE, - third_party/protobuf/third_party/benchmark/LICENSE + third_party/protobuf/third_party/benchmark/LICENSE, + third_party/opentelemetry-cpp/third_party/benchmark/LICENSE + +Name: boost-vcpkg-helpers +License: MIT +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/boost-vcpkg-helpers + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/boost-vcpkg-helpers/LICENSE.txt + +Name: cJSON +License: MIT +Files: third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/civetweb/examples/rest/cJSON + For details, see: third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/civetweb/examples/rest/cJSON/LICENSE + +Name: catch2 +License: BSL-1.0 +Files: third_party/opentelemetry-cpp/third_party/opentracing-cpp/3rd_party/include/opentracing/catch2 + For details, see: third_party/opentelemetry-cpp/third_party/opentracing-cpp/3rd_party/include/opentracing/catch2/LICENSE.txt Name: clog License: BSD-2-Clause @@ -104,6 +121,16 @@ Files: third_party/kineto/libkineto/third_party/dynolog/third_party/json/test/th For details, see: third_party/kineto/libkineto/third_party/dynolog/third_party/json/test/thirdparty/doctest/LICENSE.txt, third_party/nlohmann/tests/thirdparty/doctest/LICENSE.txt +Name: duktape-1.5.2 +License: MIT +Files: third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/civetweb/src/third_party/duktape-1.5.2 + For details, see: third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/civetweb/src/third_party/duktape-1.5.2/LICENSE.txt + +Name: duktape-1.8.0 +License: MIT +Files: third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/civetweb/src/third_party/duktape-1.8.0 + For details, see: third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/civetweb/src/third_party/duktape-1.8.0/LICENSE.txt + Name: dynolog License: MIT Files: third_party/kineto/libkineto/third_party/dynolog @@ -114,22 +141,37 @@ License: BSD-3-Clause Files: third_party/eigen For details, see: third_party/eigen/COPYING.BSD +Name: etw +License: MIT +Files: third_party/opentelemetry-cpp/exporters/etw/include/opentelemetry/exporters/etw + For details, see: third_party/opentelemetry-cpp/exporters/etw/include/opentelemetry/exporters/etw/LICENSE + +Name: expected +License: MIT +Files: third_party/opentelemetry-cpp/third_party/opentracing-cpp/3rd_party/include/opentracing/expected + For details, see: third_party/opentelemetry-cpp/third_party/opentracing-cpp/3rd_party/include/opentracing/expected/LICENSE + Name: fbgemm License: BSD-3-Clause Files: third_party/fbgemm For details, see: third_party/fbgemm/LICENSE +Name: ffnvcodec +License: MIT with exception +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/ffnvcodec + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/ffnvcodec/LICENSE.txt + Name: flatbuffers License: Apache-2.0 Files: third_party/flatbuffers - For details, see: third_party/flatbuffers/LICENSE.txt + For details, see: third_party/flatbuffers/LICENSE Name: fmt License: MIT with exception Files: third_party/fmt, 
third_party/kineto/libkineto/third_party/dynolog/third_party/fmt, third_party/kineto/libkineto/third_party/fmt - For details, see: third_party/fmt/LICENSE.rst, + For details, see: third_party/fmt/LICENSE, third_party/kineto/libkineto/third_party/dynolog/third_party/fmt/LICENSE.rst, third_party/kineto/libkineto/third_party/fmt/LICENSE.rst @@ -149,12 +191,19 @@ Files: third_party/fbgemm/third_party/googletest/googlemock/scripts/generator, third_party/googletest/googlemock/scripts/generator, third_party/kineto/libkineto/third_party/googletest/googlemock/scripts/generator, third_party/protobuf/third_party/googletest/googlemock/scripts/generator, - third_party/tensorpipe/third_party/googletest/googlemock/scripts/generator + third_party/tensorpipe/third_party/googletest/googlemock/scripts/generator, + third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/googletest/googlemock/scripts/generator For details, see: third_party/fbgemm/third_party/googletest/googlemock/scripts/generator/LICENSE, third_party/googletest/googlemock/scripts/generator/LICENSE, third_party/kineto/libkineto/third_party/googletest/googlemock/scripts/generator/LICENSE, third_party/protobuf/third_party/googletest/googlemock/scripts/generator/LICENSE, - third_party/tensorpipe/third_party/googletest/googlemock/scripts/generator/LICENSE + third_party/tensorpipe/third_party/googletest/googlemock/scripts/generator/LICENSE, + third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/googletest/googlemock/scripts/generator/LICENSE + +Name: gettimeofday +License: Apache-2.0 +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/gettimeofday + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/gettimeofday/LICENSE Name: gloo License: BSD-3-Clause @@ -183,7 +232,9 @@ Files: third_party/fbgemm/third_party/googletest, third_party/protobuf/third_party/googletest, third_party/protobuf/third_party/googletest/googletest, third_party/tensorpipe/third_party/googletest, - third_party/tensorpipe/third_party/googletest/googletest + third_party/tensorpipe/third_party/googletest/googletest, + third_party/opentelemetry-cpp/third_party/googletest, + third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/googletest For details, see: third_party/fbgemm/third_party/googletest/LICENSE, third_party/fbgemm/third_party/googletest/googletest/LICENSE, third_party/googletest/LICENSE, @@ -193,20 +244,25 @@ Files: third_party/fbgemm/third_party/googletest, third_party/protobuf/third_party/googletest/LICENSE, third_party/protobuf/third_party/googletest/googletest/LICENSE, third_party/tensorpipe/third_party/googletest/LICENSE, - third_party/tensorpipe/third_party/googletest/googletest/LICENSE + third_party/tensorpipe/third_party/googletest/googletest/LICENSE, + third_party/opentelemetry-cpp/third_party/googletest/LICENSE, + third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/googletest/LICENSE Name: gtest License: BSD-3-Clause -Files: third_party/ideep/mkl-dnn/tests/gtest, - third_party/ideep/mkl-dnn/third_party/oneDNN/tests/gtests/gtest - For details, see: third_party/ideep/mkl-dnn/tests/gtest/LICENSE, - third_party/ideep/mkl-dnn/third_party/oneDNN/tests/gtests/gtest/LICENSE +Files: third_party/ideep/mkl-dnn/tests/gtests/gtest + For details, see: third_party/ideep/mkl-dnn/tests/gtests/gtest/LICENSE Name: hipify_torch License: MIT Files: third_party/fbgemm/third_party/hipify_torch For details, see: third_party/fbgemm/third_party/hipify_torch/LICENSE.txt +Name: hungarian +License: Apache-2.0 +Files: 
third_party/opentelemetry-cpp/tools/vcpkg/ports/hungarian + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/hungarian/LICENSE.txt + Name: ideep License: MIT Files: third_party/ideep @@ -217,10 +273,10 @@ License: BSD-3-Clause Files: third_party/ios-cmake For details, see: third_party/ios-cmake/LICENSE -Name: json +Name: irrlicht License: MIT -Files: third_party/cudnn_frontend/include/contrib/nlohmann/json - For details, see: third_party/cudnn_frontend/include/contrib/nlohmann/json/LICENSE.txt +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/irrlicht + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/irrlicht/LICENSE.txt Name: kineto License: BSD-3-Clause @@ -232,11 +288,21 @@ License: Apache-2.0 Files: third_party/tensorpipe/third_party/libnop For details, see: third_party/tensorpipe/third_party/libnop/LICENSE +Name: libstemmer +License: BSD-3-Clause +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/libstemmer + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/libstemmer/LICENSE + Name: libuv License: MIT Files: third_party/tensorpipe/third_party/libuv For details, see: third_party/tensorpipe/third_party/libuv/LICENSE +Name: mimalloc +License: MIT +Files: third_party/mimalloc + For details, see: third_party/mimalloc/LICENSE + Name: miniz-2.1.0 License: MIT Files: third_party/miniz-2.1.0 @@ -247,6 +313,11 @@ License: Apache-2.0 Files: third_party/ideep/mkl-dnn For details, see: third_party/ideep/mkl-dnn/LICENSE +Name: ms-gsl +License: MIT +Files: third_party/opentelemetry-cpp/third_party/ms-gsl + For details, see: third_party/opentelemetry-cpp/third_party/ms-gsl/LICENSE + Name: nccl License: BSD-3-Clause Files: third_party/nccl/nccl @@ -257,11 +328,6 @@ License: BSD-Source-Code Files: third_party/neon2sse For details, see: third_party/neon2sse/LICENSE -Name: oneDNN -License: Apache-2.0 -Files: third_party/ideep/mkl-dnn/third_party/oneDNN - For details, see: third_party/ideep/mkl-dnn/third_party/oneDNN/LICENSE - Name: onnx License: Apache-2.0 Files: third_party/onnx @@ -277,11 +343,46 @@ License: MIT Files: third_party/onnx-tensorrt For details, see: third_party/onnx-tensorrt/LICENSE +Name: opentelemetry-cpp +License: Apache-2.0 +Files: third_party/opentelemetry-cpp + For details, see: third_party/opentelemetry-cpp/LICENSE + +Name: opentelemetry-proto +License: Apache-2.0 +Files: third_party/opentelemetry-cpp/third_party/opentelemetry-proto + For details, see: third_party/opentelemetry-cpp/third_party/opentelemetry-proto/LICENSE + +Name: opentracing-cpp +License: Apache-2.0 +Files: third_party/opentelemetry-cpp/third_party/opentracing-cpp + For details, see: third_party/opentelemetry-cpp/third_party/opentracing-cpp/LICENSE + +Name: pdcurses +License: Apache-2.0 +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/pdcurses + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/pdcurses/LICENSE + Name: pfs License: Apache-2.0 Files: third_party/kineto/libkineto/third_party/dynolog/third_party/pfs For details, see: third_party/kineto/libkineto/third_party/dynolog/third_party/pfs/LICENSE +Name: physac +License: MIT +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/physac + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/physac/LICENSE + +Name: pqp +License: Apache-2.0 +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/pqp + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/pqp/LICENSE + +Name: prometheus-cpp +License: MIT +Files: 
third_party/opentelemetry-cpp/third_party/prometheus-cpp + For details, see: third_party/opentelemetry-cpp/third_party/prometheus-cpp/LICENSE + Name: protobuf License: BSD-3-Clause Files: third_party/protobuf @@ -308,11 +409,21 @@ Files: third_party/onnx/third_party/pybind11, third_party/pybind11/LICENSE, third_party/tensorpipe/third_party/pybind11/LICENSE +Name: python +License: BSD-3-Clause +Files: third_party/cutlass/python + For details, see: third_party/cutlass/python/LICENSE.txt + Name: python-peachpy License: BSD-2-Clause Files: third_party/python-peachpy For details, see: third_party/python-peachpy/LICENSE.rst +Name: sigslot +License: Apache-2.0 +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/sigslot + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/sigslot/LICENSE + Name: sleef License: BSL-1.0 Files: third_party/sleef @@ -333,6 +444,11 @@ License: Apache-2.0 Files: third_party/tbb For details, see: third_party/tbb/LICENSE +Name: tensorflow-common +License: MIT +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/tensorflow-common + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/tensorflow-common/LICENSE.txt + Name: tensorpipe License: BSD-3-Clause Files: third_party/tensorpipe @@ -343,7 +459,22 @@ License: MIT with exception Files: third_party/kineto/libkineto/third_party/dynolog/third_party/cpr/test For details, see: third_party/kineto/libkineto/third_party/dynolog/third_party/cpr/test/LICENSE +Name: variant +License: BSD-3-Clause +Files: third_party/opentelemetry-cpp/third_party/opentracing-cpp/3rd_party/include/opentracing/variant + For details, see: third_party/opentelemetry-cpp/third_party/opentracing-cpp/3rd_party/include/opentracing/variant/LICENSE + +Name: vcpkg +License: MIT +Files: third_party/opentelemetry-cpp/tools/vcpkg + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/LICENSE.txt + +Name: vulkan +License: Apache-2.0 with exception +Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/vulkan + For details, see: third_party/opentelemetry-cpp/tools/vcpkg/ports/vulkan/LICENSE.txt + Name: zstd License: BSD-3-Clause Files: third_party/zstd - For details, see: third_party/zstd/LICENSE + For details, see: third_party/zstd/LICENSE \ No newline at end of file diff --git a/third_party/build_bundled.py b/third_party/build_bundled.py index d60a2c1354..4e983400eb 100644 --- a/third_party/build_bundled.py +++ b/third_party/build_bundled.py @@ -102,6 +102,21 @@ def identify_license(f, exception=''): elif 'BoostSoftwareLicense-Version1.0' in txt: # Hmm, do we need to check the text? 
return 'BSL-1.0' + elif 'gettimeofday' in txt: + # Used in opentelemetry-cpp/tools/vcpkg/ports/gettimeofday + return 'Apache-2.0' + elif 'libhungarian' in txt: + # Used in opentelemetry-cpp/tools/vcpkg/ports/hungarian + return 'Apache-2.0' + elif 'PDCurses' in txt: + # Used in opentelemetry-cpp/tools/vcpkg/ports/pdcurses + return 'Apache-2.0' + elif 'Copyright1999UniversityofNorthCarolina' in txt: + # Used in opentelemetry-cpp/tools/vcpkg/ports/pqp + return 'Apache-2.0' + elif 'sigslot' in txt: + # Used in opentelemetry-cpp/tools/vcpkg/ports/sigslot + return 'Apache-2.0' elif squeeze("Clarified Artistic License") in txt: return 'Clarified Artistic License' elif all([squeeze(m) in txt.lower() for m in bsd3_txt]): diff --git a/third_party/opentelemetry-cpp b/third_party/opentelemetry-cpp new file mode 160000 index 0000000000..a799f4aed9 --- /dev/null +++ b/third_party/opentelemetry-cpp @@ -0,0 +1 @@ +Subproject commit a799f4aed9c94b765dcdaabaeab7d5e7e2310878 diff --git a/third_party/opentelemetry-cpp.BUILD b/third_party/opentelemetry-cpp.BUILD new file mode 100644 index 0000000000..af58a82452 --- /dev/null +++ b/third_party/opentelemetry-cpp.BUILD @@ -0,0 +1,71 @@ +load("@rules_cc//cc:defs.bzl", "cc_library") +load("@bazel_skylib//rules:common_settings.bzl", "bool_flag", "string_flag") + +package(default_visibility = ["//visibility:public"]) + +bool_flag( + name = "with_abseil", + build_setting_default = False, +) + +CPP_STDLIBS = [ + "none", + "best", + "2014", + "2017", + "2020", + "2023", +] + +string_flag( + name = "with_cxx_stdlib", + build_setting_default = "best", + values = CPP_STDLIBS, +) + +cc_library( + name = "api", + hdrs = glob(["include/**/*.h"]), + defines = select({ + ":with_external_abseil": ["HAVE_ABSEIL"], + "//conditions:default": [], + }) + select({ + ":set_cxx_stdlib_none": [], + ### automatic selection + ":set_cxx_stdlib_best": ["OPENTELEMETRY_STL_VERSION=(__cplusplus/100)"], + # See https://learn.microsoft.com/en-us/cpp/build/reference/zc-cplusplus + ":set_cxx_stdlib_best_and_msvc": ["OPENTELEMETRY_STL_VERSION=(_MSVC_LANG/100)"], + ### manual selection + ":set_cxx_stdlib_2014": ["OPENTELEMETRY_STL_VERSION=2014"], + ":set_cxx_stdlib_2017": ["OPENTELEMETRY_STL_VERSION=2017"], + ":set_cxx_stdlib_2020": ["OPENTELEMETRY_STL_VERSION=2020"], + ":set_cxx_stdlib_2023": ["OPENTELEMETRY_STL_VERSION=2023"], + "//conditions:default": [], + }), + strip_include_prefix = "include", + tags = ["api"], + deps = select({ + ":with_external_abseil": [ + "@com_google_absl//absl/base", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:variant", + ], + "//conditions:default": [], + }), +) + +config_setting( + name = "with_external_abseil", + flag_values = {":with_abseil": "true"}, +) + +[config_setting( + name = "set_cxx_stdlib_%s" % v, + flag_values = {":with_cxx_stdlib": v}, +) for v in CPP_STDLIBS] + +config_setting( + name = "set_cxx_stdlib_best_and_msvc", + constraint_values = ["@bazel_tools//tools/cpp:msvc"], + flag_values = {":with_cxx_stdlib": "best"}, +) diff --git a/torch/CMakeLists.txt b/torch/CMakeLists.txt index 97a72eed55..8f879a8ecc 100644 --- a/torch/CMakeLists.txt +++ b/torch/CMakeLists.txt @@ -79,6 +79,7 @@ list(APPEND TORCH_PYTHON_INCLUDE_DIRECTORIES ${LIBSHM_SRCDIR}) set(TORCH_PYTHON_LINK_LIBRARIES python::python pybind::pybind11 + opentelemetry::api shm fmt::fmt-header-only ATEN_CPU_FILES_GEN_LIB)
2.41.0
5fafe9f481a026052b854b12d9adfe880c493d3
Sun, 21 Apr 2024 22:26:40 +0000
[PATCH 0441/1000] [BE]: TRY002 - Ban raising vanilla exceptions (#124570)
Adds a ruff lint rule to ban raising raw exceptions. Most of these should at the very least be runtime errors, value errors, type errors, or some other more specific error type. There are hundreds of instances of these bad exception types already in the codebase, so I have noqa'd most of them. Hopefully this error code will get committers to rethink what exception type they should raise when they submit a PR. I also encourage people to gradually fix the existing noqas that have been added so they can be removed over time and our exception typing can be improved. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124570 Approved by: https://github.com/ezyang
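The rule's intent can be illustrated with a small, self-contained sketch (not taken from this diff; the function names below are invented for illustration): TRY002 flags `raise Exception(...)` and pushes authors toward a more specific exception type, while existing call sites that cannot be migrated yet are silenced with `# noqa: TRY002`, which is exactly what the changes below do across the codebase.

```python
# Hypothetical example of the style ruff's TRY002 rule enforces.

def load_config(path: str) -> dict:
    # Flagged by TRY002: a bare `raise Exception(...)` tells callers nothing
    # they can catch selectively.
    #     raise Exception(f"config not found: {path}")
    # Preferred: raise the most specific built-in (or custom) exception.
    raise FileNotFoundError(f"config not found: {path}")


def legacy_helper() -> None:
    # Call sites that keep the old exception type for now silence the rule
    # explicitly, matching the noqa comments added in this PR.
    raise Exception("legacy behavior kept for now")  # noqa: TRY002
```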
diff --git a/.ci/pytorch/perf_test/compare_with_baseline.py b/.ci/pytorch/perf_test/compare_with_baseline.py index 49b77cbba2..caf9e993bd 100644 --- a/.ci/pytorch/perf_test/compare_with_baseline.py +++ b/.ci/pytorch/perf_test/compare_with_baseline.py @@ -59,16 +59,16 @@ print("sample mean: ", sample_mean) print("sample sigma: ", sample_sigma) if math.isnan(sample_mean): - raise Exception("""Error: sample mean is NaN""") + raise Exception("""Error: sample mean is NaN""") # noqa: TRY002 elif math.isnan(sample_sigma): - raise Exception("""Error: sample sigma is NaN""") + raise Exception("""Error: sample sigma is NaN""") # noqa: TRY002 z_value = (sample_mean - mean) / sigma print("z-value: ", z_value) if z_value >= 3: - raise Exception( + raise Exception( # noqa: TRY002 f"""\n z-value >= 3, there is high chance of perf regression.\n To reproduce this regression, run diff --git a/.github/scripts/delete_old_branches.py b/.github/scripts/delete_old_branches.py index d22bb16a50..21b86fefa1 100644 --- a/.github/scripts/delete_old_branches.py +++ b/.github/scripts/delete_old_branches.py @@ -18,7 +18,7 @@ ESTIMATED_TOKENS = [0] TOKEN = os.environ["GITHUB_TOKEN"] if not TOKEN: - raise Exception("GITHUB_TOKEN is not set") + raise Exception("GITHUB_TOKEN is not set") # noqa: TRY002 REPO_ROOT = Path(__file__).parent.parent.parent diff --git a/.github/scripts/generate_ci_workflows.py b/.github/scripts/generate_ci_workflows.py index f33d37f2e5..5b2b473d2a 100755 --- a/.github/scripts/generate_ci_workflows.py +++ b/.github/scripts/generate_ci_workflows.py @@ -378,7 +378,9 @@ def main() -> None: for template, workflows in template_and_workflows: # added Iterable check to appease the mypy gods if not isinstance(workflows, Iterable): - raise Exception(f"How is workflows not iterable? {workflows}") + raise Exception( # noqa: TRY002 + f"How is workflows not iterable? 
{workflows}" + ) # noqa: TRY002 for workflow in workflows: workflow.generate_workflow_file(workflow_template=template) diff --git a/.github/scripts/tryrebase.py b/.github/scripts/tryrebase.py index 641b354ef7..39a38aaf36 100755 --- a/.github/scripts/tryrebase.py +++ b/.github/scripts/tryrebase.py @@ -60,7 +60,7 @@ def rebase_onto( repo._run_git("rebase", onto_branch, branch) if repo.rev_parse(branch) == repo.rev_parse(onto_branch): - raise Exception(SAME_SHA_ERROR) + raise Exception(SAME_SHA_ERROR) # noqa: TRY002 if dry_run: push_result = repo._run_git("push", "--dry-run", "-f", remote_url, refspec) @@ -100,7 +100,7 @@ def rebase_ghstack_onto( repo._run_git("rebase", onto_branch, orig_ref) if repo.rev_parse(orig_ref) == repo.rev_parse(onto_branch): - raise Exception(SAME_SHA_ERROR) + raise Exception(SAME_SHA_ERROR) # noqa: TRY002 # steal the identity of the committer of the commit on the orig branch email = repo._run_git("log", orig_ref, "--pretty=format:%ae", "-1") @@ -126,7 +126,7 @@ def rebase_ghstack_onto( print(push_result) if ghstack_result.returncode != 0: print(ghstack_result.stderr.decode("utf-8")) - raise Exception(f"\n```{push_result}```") + raise Exception(f"\n```{push_result}```") # noqa: TRY002 # The contents of a successful push result should look like: # Summary of changes (ghstack 0.6.0) diff --git a/benchmarks/tensorexpr/benchmark.py b/benchmarks/tensorexpr/benchmark.py index 569a95fe81..d6fa148543 100644 --- a/benchmarks/tensorexpr/benchmark.py +++ b/benchmarks/tensorexpr/benchmark.py @@ -211,7 +211,7 @@ class Benchmark: msg += f", compute {result_dict['compute_workload']:.2f} Gops/s" print(msg) else: - raise Exception("Unknown output_type " + self.output_type) + raise Exception("Unknown output_type " + self.output_type) # noqa: TRY002 @contextlib.contextmanager diff --git a/ios/TestApp/run_on_aws_devicefarm.py b/ios/TestApp/run_on_aws_devicefarm.py index 5ac692ea49..46ad8b4bc7 100755 --- a/ios/TestApp/run_on_aws_devicefarm.py +++ b/ios/TestApp/run_on_aws_devicefarm.py @@ -79,14 +79,14 @@ def upload_file( print(f"Uploading {filename} to Device Farm as {upload_name}...") r = requests.put(upload_url, data=file_stream, headers={"content-type": mime}) if not r.ok: - raise Exception(f"Couldn't upload {filename}: {r.reason}") + raise Exception(f"Couldn't upload {filename}: {r.reason}") # noqa: TRY002 start_time = datetime.datetime.now() # Polling AWS till the uploaded file is ready while True: waiting_time = datetime.datetime.now() - start_time if waiting_time > datetime.timedelta(seconds=MAX_UPLOAD_WAIT_IN_SECOND): - raise Exception( + raise Exception( # noqa: TRY002 f"Uploading {filename} is taking longer than {MAX_UPLOAD_WAIT_IN_SECOND} seconds, terminating..." 
) @@ -96,7 +96,7 @@ def upload_file( print(f"{filename} is in state {status} after {waiting_time}") if status == "FAILED": - raise Exception(f"Couldn't upload {filename}: {r}") + raise Exception(f"Couldn't upload {filename}: {r}") # noqa: TRY002 if status == "SUCCEEDED": break diff --git a/pyproject.toml b/pyproject.toml index 8e9a44f388..5d749c3462 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -131,6 +131,7 @@ select = [ "RUF015", # access first ele in constant time "RUF016", # type error non-integer index "RUF017", + "TRY002", # ban vanilla raise (todo fix NOQAs) "TRY200", # TODO: migrate from deprecated alias "TRY302", "UP", diff --git a/scripts/release_notes/common.py b/scripts/release_notes/common.py index 9e1feaeee6..d05ec76572 100644 --- a/scripts/release_notes/common.py +++ b/scripts/release_notes/common.py @@ -217,7 +217,7 @@ def run_query(query): if request.status_code == 200: return request.json() else: - raise Exception( + raise Exception( # noqa: TRY002 f"Query failed to run by returning code of {request.status_code}. {request.json()}" ) @@ -262,7 +262,7 @@ def github_data(pr_number): if len(_ERRORS) < _MAX_ERROR_LEN: return [], "None", () else: - raise Exception( + raise Exception( # noqa: TRY002 f"Got {_MAX_ERROR_LEN} errors: {_ERRORS}, please check if" " there is something wrong" ) diff --git a/test/distributed/elastic/timer/local_timer_test.py b/test/distributed/elastic/timer/local_timer_test.py index 6111f326d2..591a5d7e7c 100644 --- a/test/distributed/elastic/timer/local_timer_test.py +++ b/test/distributed/elastic/timer/local_timer_test.py @@ -51,7 +51,7 @@ if not (IS_WINDOWS or IS_MACOS or TEST_WITH_DEV_DBG_ASAN): def test_exception_propagation(self): with self.assertRaises(Exception, msg="foobar"): with timer.expires(after=1): - raise Exception("foobar") + raise Exception("foobar") # noqa: TRY002 def test_no_client(self): # no timer client configured; exception expected diff --git a/test/dynamo/test_skip_non_tensor.py b/test/dynamo/test_skip_non_tensor.py index 136e5b93d1..43fe1ba1ec 100644 --- a/test/dynamo/test_skip_non_tensor.py +++ b/test/dynamo/test_skip_non_tensor.py @@ -147,10 +147,10 @@ class SkipNonTensorTests(torch._dynamo.test_case.TestCase): class Foo(list): def __iter__(self): - raise Exception + raise Exception # noqa: TRY002 def __len__(self): - raise Exception + raise Exception # noqa: TRY002 x = Foo() x.append(torch.randn(4)) diff --git a/test/fx/test_fx_param_shape_control_flow.py b/test/fx/test_fx_param_shape_control_flow.py index af17bb000d..373e3b5b83 100644 --- a/test/fx/test_fx_param_shape_control_flow.py +++ b/test/fx/test_fx_param_shape_control_flow.py @@ -20,7 +20,7 @@ class MyModuleBase(torch.nn.Module): return self.param def no_relu(self): - raise Exception("not implemented") + raise Exception("not implemented") # noqa: TRY002 class MyModuleParamShape(MyModuleBase): diff --git a/test/jit/test_exception.py b/test/jit/test_exception.py index 04c3294ec5..aa05153cef 100644 --- a/test/jit/test_exception.py +++ b/test/jit/test_exception.py @@ -86,7 +86,7 @@ class TestException(TestCase): @torch.jit.script def foo_decl_always_throws(): # type: () -> Tensor - raise Exception("Hi") + raise Exception("Hi") # noqa: TRY002 output_type = next(foo_decl_always_throws.graph.outputs()).type() self.assertTrue(str(output_type) == "Tensor") @@ -104,9 +104,9 @@ class TestException(TestCase): a = 1 else: if 1 == 1: - raise Exception("Hi") + raise Exception("Hi") # noqa: TRY002 else: - raise Exception("Hi") + raise Exception("Hi") # noqa: TRY002 return a 
self.assertEqual(foo(), 1) @@ -150,7 +150,7 @@ class TestException(TestCase): def test_python_op_exception(self): @torch.jit.ignore def python_op(x): - raise Exception("bad!") + raise Exception("bad!") # noqa: TRY002 @torch.jit.script def fn(x): diff --git a/test/jit/test_peephole.py b/test/jit/test_peephole.py index d78c4fb91e..496f2d63a6 100644 --- a/test/jit/test_peephole.py +++ b/test/jit/test_peephole.py @@ -274,7 +274,7 @@ class TestPeephole(JitTestCase): @torch.jit.script def foo(x: List[int], y: List[int]): if len(x) != 4 or len(y) != 5: - raise Exception("") + raise Exception("") # noqa: TRY002 return len(x) + len(y) @@ -288,7 +288,7 @@ class TestPeephole(JitTestCase): if len(x) == 4 and len(y) == 5: pass else: - raise Exception("hi") + raise Exception("hi") # noqa: TRY002 return len(x) + len(y) @@ -300,15 +300,15 @@ class TestPeephole(JitTestCase): @torch.jit.script def foo(x: List[int], y: List[int], z: List[int]): if len(x) != 4: - raise Exception("..") + raise Exception("..") # noqa: TRY002 else: if len(y) != 8: - raise Exception("...") + raise Exception("...") # noqa: TRY002 else: if len(z) == 3: pass else: - raise Exception("...") + raise Exception("...") # noqa: TRY002 return len(x) + len(y) * len(z) @@ -458,7 +458,7 @@ class TestPeephole(JitTestCase): @torch.jit.script def foo(x: int, y: int): if x != 4 or y != 5: - raise Exception("") + raise Exception("") # noqa: TRY002 return x + y @@ -477,7 +477,7 @@ class TestPeephole(JitTestCase): if x == 4 and y == 5: pass else: - raise Exception("hi") + raise Exception("hi") # noqa: TRY002 return x + y @@ -489,15 +489,15 @@ class TestPeephole(JitTestCase): @torch.jit.script def foo(x: int, y: int, z: int): if x != 4: - raise Exception("..") + raise Exception("..") # noqa: TRY002 else: if y != 8: - raise Exception("...") + raise Exception("...") # noqa: TRY002 else: if z == 3: pass else: - raise Exception("...") + raise Exception("...") # noqa: TRY002 return x + y * z diff --git a/test/jit/test_with.py b/test/jit/test_with.py index 4cbdfb2e99..fe4488836a 100644 --- a/test/jit/test_with.py +++ b/test/jit/test_with.py @@ -371,7 +371,7 @@ class TestWith(JitTestCase): @torch.jit.script def method_that_raises() -> torch.Tensor: - raise Exception("raised exception") + raise Exception("raised exception") # noqa: TRY002 @torch.jit.script def test_exception(x: torch.Tensor, c: Context) -> torch.Tensor: diff --git a/test/onnx/test_pytorch_onnx_onnxruntime.py b/test/onnx/test_pytorch_onnx_onnxruntime.py index 82132db735..c6f329942e 100644 --- a/test/onnx/test_pytorch_onnx_onnxruntime.py +++ b/test/onnx/test_pytorch_onnx_onnxruntime.py @@ -12682,7 +12682,7 @@ class TestONNXRuntime(onnx_test_common._TestONNXRuntime): class M(torch.nn.Module): def forward(self, t: Tensor) -> Tuple[Tensor, Tensor]: if float(t) < 0: - raise Exception("Negative input") + raise Exception("Negative input") # noqa: TRY002 else: return torch.zeros(5), torch.zeros(5) diff --git a/test/test_autograd.py b/test/test_autograd.py index b8f578bf26..c6fa124ce0 100644 --- a/test/test_autograd.py +++ b/test/test_autograd.py @@ -399,7 +399,7 @@ class TestAutograd(TestCase): @staticmethod @once_differentiable def backward(ctx, input): - raise Exception("Simulate error on backward pass") + raise Exception("Simulate error on backward pass") # noqa: TRY002 def test_custom_function_exception(self): t1 = torch.rand((3, 3), requires_grad=True) diff --git a/test/test_jit.py b/test/test_jit.py index 30dba34936..6f24f07a50 100644 --- a/test/test_jit.py +++ b/test/test_jit.py @@ -5866,7 
+5866,7 @@ a") def test_python_frontend_source_range(self): def fn(): - raise Exception("hello") + raise Exception("hello") # noqa: TRY002 ast = torch.jit.frontend.get_jit_def(fn, fn.__name__) FileCheck().check("SourceRange at:") \ .check("def fn():") \ @@ -5877,7 +5877,7 @@ a") def test_python_frontend_py3(self): def fn(): - raise Exception("hello") + raise Exception("hello") # noqa: TRY002 ast = torch.jit.frontend.get_jit_def(fn, fn.__name__) self.assertExpected(str(ast)) @@ -16241,7 +16241,7 @@ def normalize_check_ad(check_ad, name): elif len(check_ad) == 3: check_ad = list(check_ad) else: - raise Exception('Invalid check_ad, requires (bool, str|List[str], str|List[str])') + raise Exception('Invalid check_ad, requires (bool, str|List[str], str|List[str])') # noqa: TRY002 check_ad = [[t] if isinstance(t, str) else t for t in check_ad] diff --git a/test/test_native_functions.py b/test/test_native_functions.py index c95b4a221e..2760ca9171 100644 --- a/test/test_native_functions.py +++ b/test/test_native_functions.py @@ -96,7 +96,7 @@ class TestNativeFunctions(TestCase): return traced_none(values) if const == [5.1, 4.1]: return traced_list(values) - raise Exception("Invalid argument") + raise Exception("Invalid argument") # noqa: TRY002 self.do_test_optional_floatlist_with_module(fake_module) @@ -150,7 +150,7 @@ class TestNativeFunctions(TestCase): return traced_none(values) if const == [5, 4]: return traced_list(values) - raise Exception("Invalid argument") + raise Exception("Invalid argument") # noqa: TRY002 self.do_test_optional_intlist_with_module(fake_module) @@ -217,7 +217,7 @@ class TestNativeFunctions(TestCase): return traced_none(values) if const == 10: return traced_int(values) - raise Exception("Invalid argument") + raise Exception("Invalid argument") # noqa: TRY002 self.do_test_optional_filled_intlist_with_module(fake_module) diff --git a/test/test_nnapi.py b/test/test_nnapi.py index df4bc06f7f..f1d00796eb 100644 --- a/test/test_nnapi.py +++ b/test/test_nnapi.py @@ -287,7 +287,7 @@ class TestNNAPI(TestCase): return torch.nn.functional.relu(arg) if op == "sigmoid": return torch.sigmoid(arg) - raise Exception("Bad op") + raise Exception("Bad op") # noqa: TRY002 self.check(UnaryModule(), torch.tensor([-1.0, 1.0])) self.check( UnaryModule(), @@ -307,7 +307,7 @@ class TestNNAPI(TestCase): return lhs * rhs if op == "div": return lhs / rhs - raise Exception("Bad op") + raise Exception("Bad op") # noqa: TRY002 self.check( BinaryModule(), diff --git a/test/test_ops_jit.py b/test/test_ops_jit.py index 05ae05b94f..28f8c7672c 100644 --- a/test/test_ops_jit.py +++ b/test/test_ops_jit.py @@ -117,7 +117,7 @@ class TestJit(JitCommonTestCase): with inputs {sample}: """ ) - raise Exception(variant_error_info) from e + raise Exception(variant_error_info) from e # noqa: TRY002 assert tested, "JIT Test does not execute any logic" diff --git a/test/torch_np/numpy_tests/core/test_multiarray.py b/test/torch_np/numpy_tests/core/test_multiarray.py index bbb9104ff1..d1910c85b0 100644 --- a/test/torch_np/numpy_tests/core/test_multiarray.py +++ b/test/torch_np/numpy_tests/core/test_multiarray.py @@ -1181,7 +1181,7 @@ class TestCreation(TestCase): # Special case where a bad __getitem__ makes us fall back on __iter__: class C: def __getitem__(self, x): - raise Exception + raise Exception # noqa: TRY002 def __iter__(self): return iter(()) diff --git a/tools/code_analyzer/gen_oplist.py b/tools/code_analyzer/gen_oplist.py index c963f3651a..5f4a883736 100644 --- a/tools/code_analyzer/gen_oplist.py +++ 
b/tools/code_analyzer/gen_oplist.py @@ -34,7 +34,7 @@ def throw_if_any_op_includes_overloads(selective_builder: SelectiveBuilder) -> N if op.include_all_overloads: ops.append(op_name) if ops: - raise Exception( + raise Exception( # noqa: TRY002 ( "Operators that include all overloads are " + "not allowed since --allow-include-all-overloads " diff --git a/tools/code_coverage/package/tool/clang_coverage.py b/tools/code_coverage/package/tool/clang_coverage.py index 1d1ebff6ae..a6b1fa0c08 100644 --- a/tools/code_coverage/package/tool/clang_coverage.py +++ b/tools/code_coverage/package/tool/clang_coverage.py @@ -74,7 +74,9 @@ def export_target( platform_type: TestPlatform, ) -> None: if binary_file is None: - raise Exception(f"{merged_file} doesn't have corresponding binary!") + raise Exception( # noqa: TRY002 + f"{merged_file} doesn't have corresponding binary!" + ) # noqa: TRY002 print_log("start to export: ", merged_file) # run export cmd_shared_library = ( diff --git a/tools/code_coverage/package/util/utils.py b/tools/code_coverage/package/util/utils.py index e0b4befb57..ddeef94398 100644 --- a/tools/code_coverage/package/util/utils.py +++ b/tools/code_coverage/package/util/utils.py @@ -116,7 +116,7 @@ def get_test_name_from_whole_path(path: str) -> str: def check_compiler_type(cov_type: Optional[CompilerType]) -> None: if cov_type is not None and cov_type in [CompilerType.GCC, CompilerType.CLANG]: return - raise Exception( + raise Exception( # noqa: TRY002 f"Can't parse compiler type: {cov_type}.", " Please set environment variable COMPILER_TYPE as CLANG or GCC", ) @@ -125,7 +125,7 @@ def check_compiler_type(cov_type: Optional[CompilerType]) -> None: def check_platform_type(platform_type: TestPlatform) -> None: if platform_type in [TestPlatform.OSS, TestPlatform.FBCODE]: return - raise Exception( + raise Exception( # noqa: TRY002 f"Can't parse platform type: {platform_type}.", " Please set environment variable COMPILER_TYPE as OSS or FBCODE", ) @@ -134,7 +134,7 @@ def check_platform_type(platform_type: TestPlatform) -> None: def check_test_type(test_type: str, target: str) -> None: if test_type in [TestType.CPP.value, TestType.PY.value]: return - raise Exception( + raise Exception( # noqa: TRY002 f"Can't parse test type: {test_type}.", f" Please check the type of buck target: {target}", ) diff --git a/tools/pyi/gen_pyi.py b/tools/pyi/gen_pyi.py index bafccc5125..369f1504bf 100644 --- a/tools/pyi/gen_pyi.py +++ b/tools/pyi/gen_pyi.py @@ -252,7 +252,7 @@ def sig_for_ops(opname: str) -> List[str]: tname = "builtins." 
+ tname return [f"def {opname}(self) -> {tname}: ..."] else: - raise Exception("unknown op", opname) + raise Exception("unknown op", opname) # noqa: TRY002 def generate_type_hints(sig_group: PythonSignatureGroup) -> List[str]: diff --git a/tools/stats/upload_test_stat_aggregates.py b/tools/stats/upload_test_stat_aggregates.py index 5d22f69073..5eb9a12d98 100644 --- a/tools/stats/upload_test_stat_aggregates.py +++ b/tools/stats/upload_test_stat_aggregates.py @@ -22,9 +22,11 @@ def get_oncall_from_testfile(testfile: str) -> Union[List[str], None]: if line.startswith("# Owner(s): "): possible_lists = re.findall(r"\[.*\]", line) if len(possible_lists) > 1: - raise Exception("More than one list found") + raise Exception("More than one list found") # noqa: TRY002 elif len(possible_lists) == 0: - raise Exception("No oncalls found or file is badly formatted") + raise Exception( # noqa: TRY002 + "No oncalls found or file is badly formatted" + ) # noqa: TRY002 oncalls = ast.literal_eval(possible_lists[0]) return list(oncalls) except Exception as e: diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py index 35beb6fb06..aef3d18f35 100644 --- a/torch/_inductor/autotune_process.py +++ b/torch/_inductor/autotune_process.py @@ -495,7 +495,7 @@ class TestBenchmarkRequest(BenchmarkRequest): self, *input_tensors: torch.Tensor, output_tensor: Optional[torch.Tensor] = None ) -> float: if self.value is None: - raise Exception("Failed to run") + raise Exception("Failed to run") # noqa: TRY002 return self.value diff --git a/torch/_inductor/comm_analysis.py b/torch/_inductor/comm_analysis.py index de3f631b0e..207d533b22 100644 --- a/torch/_inductor/comm_analysis.py +++ b/torch/_inductor/comm_analysis.py @@ -46,7 +46,9 @@ def get_collective_type(node: ir.IRNode) -> NCCL_COLL: elif "reduce_scatter" in kernel_name: return NCCL_COLL.REDUCE_SCATTER else: - raise Exception(f"Unsupported collective kernel: {kernel_name}") + raise Exception( # noqa: TRY002 + f"Unsupported collective kernel: {kernel_name}" + ) # noqa: TRY002 if isinstance(node, (ir.AllReduce, ir.AllReduceCoalesced)): return NCCL_COLL.ALL_REDUCE @@ -55,7 +57,7 @@ def get_collective_type(node: ir.IRNode) -> NCCL_COLL: elif isinstance(node, (ir.ReduceScatterTensor, ir.ReduceScatterTensorCoalesced)): return NCCL_COLL.REDUCE_SCATTER else: - raise Exception(f"Unsupported collective type: {node}") + raise Exception(f"Unsupported collective type: {node}") # noqa: TRY002 def get_collective_input_size_bytes(node: ir.IRNode) -> int: diff --git a/torch/_inductor/comms.py b/torch/_inductor/comms.py index 02eb21a4e7..287820a53c 100644 --- a/torch/_inductor/comms.py +++ b/torch/_inductor/comms.py @@ -192,7 +192,7 @@ def reorder_compute_for_overlap( all_nodes.remove(node) progress = True if not progress: - raise Exception( + raise Exception( # noqa: TRY002 "Unable to find a free node (indeg == 0). This is an impossible state to reach. " "Please report a bug to PyTorch." ) @@ -312,7 +312,7 @@ def visualize_overlap(order): total_est_runtime += estimate_op_runtime(snode) cur_comm_node = snode.node elif is_wait(snode.node): - raise Exception( + raise Exception( # noqa: TRY002 "Wait is not expected when there is no collective running" ) else: # exposed compute op @@ -320,7 +320,7 @@ def visualize_overlap(order): overlap_log.debug(f"{node_summary(snode)}") # noqa: G004 else: # cur_comm_node is not None if is_collective(snode.node): - raise Exception( + raise Exception( # noqa: TRY002 "Found two collectives running at the same time. 
" "`visualize_overlap` needs to be updated to handle this case" ) diff --git a/torch/_inductor/optimize_indexing.py b/torch/_inductor/optimize_indexing.py index 680659dc4f..b3aa187032 100644 --- a/torch/_inductor/optimize_indexing.py +++ b/torch/_inductor/optimize_indexing.py @@ -27,7 +27,7 @@ def val_expressable_in_32_bits(val): iinfo = torch.iinfo(torch.int32) return val <= iinfo.max and val >= iinfo.min - raise Exception(f"Unexpected value {val}") + raise Exception(f"Unexpected value {val}") # noqa: TRY002 def range_expressable_in_32_bits(range): diff --git a/torch/_inductor/pattern_matcher.py b/torch/_inductor/pattern_matcher.py index 0f2f966185..1e0e9a5a87 100644 --- a/torch/_inductor/pattern_matcher.py +++ b/torch/_inductor/pattern_matcher.py @@ -1237,7 +1237,7 @@ def _serialize_pattern( return f"{file_template}{formatted_imports}" if not SERIALIZED_PATTERN_PATH.is_dir(): - raise Exception( + raise Exception( # noqa: TRY002 f"Could not find serialized patterns directory at {SERIALIZED_PATTERN_PATH}" ) diff --git a/torch/_jit_internal.py b/torch/_jit_internal.py index 64509816e0..75d0c61d1c 100644 --- a/torch/_jit_internal.py +++ b/torch/_jit_internal.py @@ -981,7 +981,7 @@ def _get_overloaded_methods(method, mod_class): mod_class_fileno = get_source_lines_and_file(mod_class)[1] mod_end_fileno = mod_class_fileno + len(get_source_lines_and_file(mod_class)[0]) if not (method_line_no >= mod_class_fileno and method_line_no <= mod_end_fileno): - raise Exception( + raise Exception( # noqa: TRY002 "Overloads are not useable when a module is redeclared within the same file: " + str(method) ) diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py index 7b0835b813..f29f4ed524 100644 --- a/torch/_subclasses/fake_tensor.py +++ b/torch/_subclasses/fake_tensor.py @@ -1539,14 +1539,14 @@ class FakeTensorMode(TorchDispatchMode): if not self.is_our_fake(x): if torch.Tag.inplace_view in func.tags: args, kwargs = pytree.tree_unflatten(flat_args, args_spec) - raise Exception( + raise Exception( # noqa: TRY002 f"Can't call metadata mutating ops on non-Fake Tensor inputs. Found in {render_call(func, args, kwargs)}" ) if not self.allow_non_fake_inputs: if isinstance(x, FakeTensor) and x.fake_mode is not self: raise AssertionError("Mixing fake modes NYI") args, kwargs = pytree.tree_unflatten(flat_args, args_spec) - raise Exception( + raise Exception( # noqa: TRY002 f"Please convert all Tensors to FakeTensors first or instantiate FakeTensorMode " f"with 'allow_non_fake_inputs'. 
Found in {render_call(func, args, kwargs)}" ) diff --git a/torch/ao/nn/quantized/reference/modules/utils.py b/torch/ao/nn/quantized/reference/modules/utils.py index 2c1f52cdf8..6dbaf049ae 100644 --- a/torch/ao/nn/quantized/reference/modules/utils.py +++ b/torch/ao/nn/quantized/reference/modules/utils.py @@ -176,7 +176,7 @@ def _quantize_weight_decomposed( weight_quant_max, weight_dtype_) # type: ignore[arg-type] return weight - raise Exception(f"Unsupported dtype and qscheme: {weight_dtype}, {weight_qscheme}") + raise Exception(f"Unsupported dtype and qscheme: {weight_dtype}, {weight_qscheme}") # noqa: TRY002 def _dequantize_weight_decomposed( weight: torch.Tensor, @@ -226,7 +226,7 @@ def _dequantize_weight_decomposed( weight_quant_max, weight_dtype_) # type: ignore[arg-type] return weight - raise Exception(f"Unsupported dtype and qscheme: {weight_dtype}, {weight_qscheme}") + raise Exception(f"Unsupported dtype and qscheme: {weight_dtype}, {weight_qscheme}") # noqa: TRY002 def _quantize_weight( weight: torch.Tensor, @@ -250,7 +250,7 @@ def _quantize_weight( weight, weight_scale, weight_zero_point, weight_axis_int, weight_dtype) # type: ignore[arg-type] return weight - raise Exception(f"Unsupported dtype and qscheme: {weight_dtype}, {weight_qscheme}") + raise Exception(f"Unsupported dtype and qscheme: {weight_dtype}, {weight_qscheme}") # noqa: TRY002 def _quantize_and_dequantize_weight_decomposed( weight: torch.Tensor, diff --git a/torch/ao/pruning/_experimental/pruner/base_structured_sparsifier.py b/torch/ao/pruning/_experimental/pruner/base_structured_sparsifier.py index c9fa549ddd..357421fb55 100644 --- a/torch/ao/pruning/_experimental/pruner/base_structured_sparsifier.py +++ b/torch/ao/pruning/_experimental/pruner/base_structured_sparsifier.py @@ -301,7 +301,7 @@ class BaseStructuredSparsifier(BaseSparsifier): for module in self.traced.modules(): if module_contains_param(module, FakeStructuredSparsity): - raise Exception( + raise Exception( # noqa: TRY002 f"Error: {module} still contains FakeStructuredSparsity parametrizations!" 
) diff --git a/torch/ao/pruning/_experimental/pruner/lstm_saliency_pruner.py b/torch/ao/pruning/_experimental/pruner/lstm_saliency_pruner.py index 4a0d74d6dc..9e569c14a6 100644 --- a/torch/ao/pruning/_experimental/pruner/lstm_saliency_pruner.py +++ b/torch/ao/pruning/_experimental/pruner/lstm_saliency_pruner.py @@ -31,7 +31,7 @@ class LSTMSaliencyPruner(BaseStructuredSparsifier): # select weights based on magnitude if weights.dim() <= 1: - raise Exception("Structured pruning can only be applied to a 2+dim weight tensor!") + raise Exception("Structured pruning can only be applied to a 2+dim weight tensor!") # noqa: TRY002 # take norm over all but first dim dims = tuple(range(1, weights.dim())) saliency = weights.norm(dim=dims, p=1) diff --git a/torch/ao/pruning/_experimental/pruner/saliency_pruner.py b/torch/ao/pruning/_experimental/pruner/saliency_pruner.py index f965fa647d..7f96f0865d 100644 --- a/torch/ao/pruning/_experimental/pruner/saliency_pruner.py +++ b/torch/ao/pruning/_experimental/pruner/saliency_pruner.py @@ -18,7 +18,7 @@ class SaliencyPruner(BaseStructuredSparsifier): # use negative weights so we can use topk (we prune out the smallest) if weights.dim() <= 1: - raise Exception("Structured pruning can only be applied to a 2+dim weight tensor!") + raise Exception("Structured pruning can only be applied to a 2+dim weight tensor!") # noqa: TRY002 saliency = -weights.norm(dim=tuple(range(1, weights.dim())), p=1) assert saliency.shape == mask.shape diff --git a/torch/ao/quantization/fuser_method_mappings.py b/torch/ao/quantization/fuser_method_mappings.py index a160346d8a..16c0c3a85b 100644 --- a/torch/ao/quantization/fuser_method_mappings.py +++ b/torch/ao/quantization/fuser_method_mappings.py @@ -151,7 +151,7 @@ def fuse_convtranspose_bn(is_qat, convt, bn): "ConvTranspose and BN both must be in the same mode (train or eval)." 
if is_qat: - raise Exception("Fusing ConvTranspose+BatchNorm not yet supported in QAT.") + raise Exception("Fusing ConvTranspose+BatchNorm not yet supported in QAT.") # noqa: TRY002 else: return nn.utils.fusion.fuse_conv_bn_eval(convt, bn, transpose=True) diff --git a/torch/ao/quantization/fx/_decomposed.py b/torch/ao/quantization/fx/_decomposed.py index 0326cd3186..c54a3046d5 100644 --- a/torch/ao/quantization/fx/_decomposed.py +++ b/torch/ao/quantization/fx/_decomposed.py @@ -616,7 +616,7 @@ def choose_qparams_per_token( n_bits = 8 quant_max = 2 ** (n_bits - 1) - 1 else: - raise Exception(f"unsupported dtype in choose_qparams_per_token: {dtype}") + raise Exception(f"unsupported dtype in choose_qparams_per_token: {dtype}") # noqa: TRY002 scales = scales.clamp(min=1e-5).div(quant_max) zero_points = torch.zeros_like(scales) diff --git a/torch/ao/quantization/fx/_model_report/model_report.py b/torch/ao/quantization/fx/_model_report/model_report.py index 9ea5ff406d..724e76ad57 100644 --- a/torch/ao/quantization/fx/_model_report/model_report.py +++ b/torch/ao/quantization/fx/_model_report/model_report.py @@ -269,11 +269,11 @@ class ModelReport: """ # if we haven't prepped model for callibration, then we shouldn't generate report yet if not self._prepared_flag: - raise Exception("Cannot generate report without preparing model for callibration") + raise Exception("Cannot generate report without preparing model for callibration") # noqa: TRY002 # if we already removed the observers, we cannot generate report if self._removed_observers: - raise Exception("Cannot generate report on model you already removed observers from") + raise Exception("Cannot generate report on model you already removed observers from") # noqa: TRY002 # keep track of all the reports of interest and their outputs reports_of_interest = {} @@ -416,7 +416,7 @@ class ModelReport: """ # check if user has generated reports at least once if len(self._generated_reports) == 0: - raise Exception("Unable to generate visualizers without first generating reports") + raise Exception("Unable to generate visualizers without first generating reports") # noqa: TRY002 # get the ordered dict mapping modules to their full set of collected features / stats module_fqns_to_features: OrderedDict = self._reformat_reports_for_visualizer() @@ -502,11 +502,11 @@ class ModelReport: """ # if we haven't prepped model for callibration, then we shouldn't generate mapping yet if not self._prepared_flag: - raise Exception("Cannot generate report without preparing model for callibration") + raise Exception("Cannot generate report without preparing model for callibration") # noqa: TRY002 # if we already removed the observers, we cannot mapping if self._removed_observers: - raise Exception("Cannot generate report on model you already removed observers from") + raise Exception("Cannot generate report on model you already removed observers from") # noqa: TRY002 # keep track of qconfig info for each module across detectors detector_qconfig_info_combined: Dict[str, DetectorQConfigInfo] = {} diff --git a/torch/ao/quantization/fx/_model_report/model_report_observer.py b/torch/ao/quantization/fx/_model_report/model_report_observer.py index 3ccf692dbe..eaa45264be 100644 --- a/torch/ao/quantization/fx/_model_report/model_report_observer.py +++ b/torch/ao/quantization/fx/_model_report/model_report_observer.py @@ -260,6 +260,6 @@ class ModelReportObserver(ObserverBase): @torch.jit.export def calculate_qparams(self): - raise Exception( + raise Exception( # noqa: TRY002 
"calculate_qparams should not be called for ModelReportObserver" ) diff --git a/torch/ao/quantization/fx/prepare.py b/torch/ao/quantization/fx/prepare.py index eb91a71ae0..6a4ae0bb85 100644 --- a/torch/ao/quantization/fx/prepare.py +++ b/torch/ao/quantization/fx/prepare.py @@ -1099,7 +1099,7 @@ def _maybe_insert_observers_before_graph_output( elif maybe_node is None: return None else: - raise Exception("Unhandled type for returned node:", maybe_node) + raise Exception("Unhandled type for returned node:", maybe_node) # noqa: TRY002 new_args = [] for old_arg in graph_output_node.args: diff --git a/torch/ao/quantization/fx/utils.py b/torch/ao/quantization/fx/utils.py index 38176c5907..21a1034739 100644 --- a/torch/ao/quantization/fx/utils.py +++ b/torch/ao/quantization/fx/utils.py @@ -137,7 +137,7 @@ def get_linear_prepack_op_for_dtype(dtype): elif dtype == torch.qint8: return torch.ops.quantized.linear_prepack else: - raise Exception("can't get linear prepack op for dtype:", dtype) + raise Exception("can't get linear prepack op for dtype:", dtype) # noqa: TRY002 def get_qconv_prepack_op(conv_op: Callable) -> Callable: prepack_ops = { diff --git a/torch/ao/quantization/observer.py b/torch/ao/quantization/observer.py index 9dbb167308..718dc7d50b 100644 --- a/torch/ao/quantization/observer.py +++ b/torch/ao/quantization/observer.py @@ -1453,7 +1453,7 @@ class PlaceholderObserver(ObserverBase): @torch.jit.export def calculate_qparams(self): - raise Exception( + raise Exception( # noqa: TRY002 "calculate_qparams should not be called for PlaceholderObserver" ) @@ -1479,7 +1479,7 @@ class RecordingObserver(ObserverBase): @torch.jit.export def calculate_qparams(self): - raise Exception("calculate_qparams should not be called for RecordingObserver") + raise Exception("calculate_qparams should not be called for RecordingObserver") # noqa: TRY002 @torch.jit.export def get_tensor_value(self): @@ -1510,7 +1510,7 @@ class NoopObserver(ObserverBase): @torch.jit.export def calculate_qparams(self): - raise Exception("calculate_qparams should not be called for NoopObserver") + raise Exception("calculate_qparams should not be called for NoopObserver") # noqa: TRY002 class ReuseInputObserver(ObserverBase): r""" This observer is used when we want to reuse the observer from the operator @@ -1533,7 +1533,7 @@ class ReuseInputObserver(ObserverBase): @torch.jit.export def calculate_qparams(self): - raise Exception("calculate_qparams should not be called for ReuseInputObserver") + raise Exception("calculate_qparams should not be called for ReuseInputObserver") # noqa: TRY002 def _is_observer_script_module(mod, obs_type_name): """Returns true if given mod is an instance of Observer script module.""" @@ -1604,10 +1604,10 @@ def load_observer_state_dict(mod, obs_dict): ) for k in missing_keys: if "observer" in k or "activation_post_process" in k: - raise Exception(f"Missing keys for observer {k} in state_dict") + raise Exception(f"Missing keys for observer {k} in state_dict") # noqa: TRY002 for k in unexpected_keys: if "observer" in k or "activation_post_process" in k: - raise Exception(f"Unexpected keys for observer {k} in state_dict") + raise Exception(f"Unexpected keys for observer {k} in state_dict") # noqa: TRY002 # Restrict activations to be in the range (0,127) diff --git a/torch/ao/quantization/pt2e/prepare.py b/torch/ao/quantization/pt2e/prepare.py index b5c2ae716d..169a982f62 100644 --- a/torch/ao/quantization/pt2e/prepare.py +++ b/torch/ao/quantization/pt2e/prepare.py @@ -216,7 +216,7 @@ def 
_get_edge_or_node_to_group_id(edge_or_node_to_qspec: Dict[EdgeOrNode, Quanti # sharing with other users of the producer node # (arg, user) if not isinstance(arg, Node) or not isinstance(n, Node): - raise Exception(f"Expected input_edge to have type Tuple[Node, Node], but got: {arg, n}") + raise Exception(f"Expected input_edge to have type Tuple[Node, Node], but got: {arg, n}") # noqa: TRY002 for user in arg.users: if user is n: continue diff --git a/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py b/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py index 2db136a156..9f1732e573 100644 --- a/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py +++ b/torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py @@ -971,7 +971,7 @@ def _annotate_cat( if cat_node.target != torch.ops.aten.cat.default: # TODO: change this to AnnotationException - raise Exception( + raise Exception( # noqa: TRY002 f"Expected cat node: torch.ops.aten.cat.default, but found {cat_node.target}" " please check if you are calling the correct capture API" ) diff --git a/torch/ao/quantization/utils.py b/torch/ao/quantization/utils.py index c5ab78c3cc..2a225d14b1 100644 --- a/torch/ao/quantization/utils.py +++ b/torch/ao/quantization/utils.py @@ -320,7 +320,7 @@ def get_quant_type(qconfig): elif activation.dtype == torch.float16: return QuantType.STATIC - raise Exception(f"Unrecognized dtype combination in get_quant_type: activation({activation.dtype})," + raise Exception(f"Unrecognized dtype combination in get_quant_type: activation({activation.dtype})," # noqa: TRY002 f"weight({weight.dtype})") def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool: diff --git a/torch/backends/_nnapi/prepare.py b/torch/backends/_nnapi/prepare.py index d5d69b9d64..c578c1be4c 100644 --- a/torch/backends/_nnapi/prepare.py +++ b/torch/backends/_nnapi/prepare.py @@ -75,7 +75,7 @@ class NnapiModule(torch.nn.Module): elif fmt == 1: fixed_args.append(args[idx].permute(0, 2, 3, 1).contiguous()) else: - raise Exception("Invalid mem_fmt") + raise Exception("Invalid mem_fmt") # noqa: TRY002 comp.run(fixed_args, outs) assert len(outs) == len(self.out_mem_fmts) for idx in range(len(self.out_templates)): @@ -87,7 +87,7 @@ class NnapiModule(torch.nn.Module): elif fmt == 1: outs[idx] = outs[idx].permute(0, 3, 1, 2) else: - raise Exception("Invalid mem_fmt") + raise Exception("Invalid mem_fmt") # noqa: TRY002 return outs diff --git a/torch/backends/_nnapi/serializer.py b/torch/backends/_nnapi/serializer.py index 960f409172..551fa821df 100644 --- a/torch/backends/_nnapi/serializer.py +++ b/torch/backends/_nnapi/serializer.py @@ -228,7 +228,7 @@ class Operand(NamedTuple): return True if self.dim_order is DimOrder.CHANNELS_LAST: return False - raise Exception("Unknown dim order") + raise Exception("Unknown dim order") # noqa: TRY002 def broadcast_shapes(shape1, shape2): @@ -241,10 +241,14 @@ def broadcast_shapes(shape1, shape2): # don't match between PT and NNAPI, even though semantics match. if len(s1) > len(s2): # s2 = [1] * (len(s1) - len(s2)) + s2 - raise Exception("Non-equal-rank broadcast is not supported yet.") + raise Exception( # noqa: TRY002 + "Non-equal-rank broadcast is not supported yet." + ) # noqa: TRY002 if len(s2) > len(s1): # s3 = [1] * (len(s2) - len(s1)) + s1 - raise Exception("Non-equal-rank broadcast is not supported yet.") + raise Exception( # noqa: TRY002 + "Non-equal-rank broadcast is not supported yet." 
+ ) # noqa: TRY002 ret = [] for d1, d2 in zip(s1, s2): if d1 == 1: @@ -254,7 +258,9 @@ def broadcast_shapes(shape1, shape2): elif d1 == d2: ret.append(d1) else: - raise Exception(f"Cannot broadcast shapes: {shape1} and {shape2}") + raise Exception( # noqa: TRY002 + f"Cannot broadcast shapes: {shape1} and {shape2}" + ) # noqa: TRY002 return tuple(ret) @@ -263,7 +269,7 @@ def get_conv_pool_shape(image_shape, args, out_ch, transpose): # TODO: Handle dilation if args.dilation_h != 1 or args.dilation_w != 1: - raise Exception("Dilation not supported yet.") + raise Exception("Dilation not supported yet.") # noqa: TRY002 if transpose: out_h = (in_h - 1) * args.stride_h + args.kernel_h - args.pad_t - args.pad_b @@ -296,7 +302,7 @@ def fix_shape(shape, dim_order): if dim_order is DimOrder.UNKNOWN_CONSTANT: # XXX think this through return shape - raise Exception(f"Bad dim_order: {dim_order!r}.") + raise Exception(f"Bad dim_order: {dim_order!r}.") # noqa: TRY002 def reverse_map_dim(dim_order, d): @@ -348,7 +354,7 @@ class _NnapiSerializer: def add_tensor_operand(self, jitval, oper): assert isinstance(oper, Operand) if jitval in self.jitval_operand_map: - raise Exception(f"Duplicate tensor: {jitval!r}") + raise Exception(f"Duplicate tensor: {jitval!r}") # noqa: TRY002 operand_id = self.get_next_operand_id() self.operands.append(oper) @@ -393,16 +399,18 @@ class _NnapiSerializer: scale = tensor.nnapi_scale zero_point = tensor.nnapi_zero_point else: - raise Exception( + raise Exception( # noqa: TRY002 f"`nnapi_type` needs to be one of {op_codes} for `int16`" ) else: - raise Exception( + raise Exception( # noqa: TRY002 "`int16` isn't supported. If you're trying to represent NNAPI" " qint16 with Pytorch int16, set `use_int16_for_qint16 = True`" ) else: - raise Exception(f"Can't handle input with dtype '{tensor.dtype}'") + raise Exception( # noqa: TRY002 + f"Can't handle input with dtype '{tensor.dtype}'" + ) # noqa: TRY002 return Operand( shape=tuple(tensor.shape), op_type=op_type, @@ -491,7 +499,9 @@ class _NnapiSerializer: if s == 0: # TODO: Improve this error message, possibly after converting # many callsites to support flexible size. - raise Exception("Flexible size is not supported for this operand.") + raise Exception( # noqa: TRY002 + "Flexible size is not supported for this operand." + ) # noqa: TRY002 if s < 0: # runtime flex LOG.warning("Operand %s has runtime flex shape", oper) @@ -526,10 +536,12 @@ class _NnapiSerializer: def get_constant_value(self, jitval, typekind=None): record = self.constants.get(jitval) if record is None: - raise Exception(f"Could not find constant value for '{jitval!r}'.") + raise Exception( # noqa: TRY002 + f"Could not find constant value for '{jitval!r}'." + ) # noqa: TRY002 ctype, _ = record if typekind is not None and ctype.kind() != typekind: - raise Exception( + raise Exception( # noqa: TRY002 f"Expected constant value of type {typekind}, but got {ctype.kind()} for value '{jitval!r}'" ) return record @@ -553,7 +565,9 @@ class _NnapiSerializer: # Runtime flexible shape shape_parts.append("0") else: - raise Exception("Unknown dim value, dimensions should be >= -1") + raise Exception( # noqa: TRY002 + "Unknown dim value, dimensions should be >= -1" + ) # noqa: TRY002 shape_parts.append(",") shape_parts.append(")") shape_code = "".join(shape_parts) @@ -574,12 +588,14 @@ class _NnapiSerializer: if self.use_int16_for_qint16: return f"torch.zeros({shape_code}, dtype=torch.int16)" else: - raise Exception( + raise Exception( # noqa: TRY002 "`int16` isn't supported. 
If you're trying to represent NNAPI" " qint16 with Pytorch int16, set `use_int16_for_qint16 = True`" ) - raise Exception(f"Unsupported output operand type: {oper.op_type}") + raise Exception( # noqa: TRY002 + f"Unsupported output operand type: {oper.op_type}" + ) # noqa: TRY002 def forward_operand_shape(self, out_op_id, out_dim, in_op_id, in_dim): self.compute_operand_shape(out_op_id, out_dim, flex_name(in_op_id, in_dim)) @@ -591,7 +607,9 @@ class _NnapiSerializer: def transpose_to_nhwc(self, in_id, oper): if oper.shape[2:] != (1, 1): - raise Exception("Automatic transpose only supported for H,W == 1,1") + raise Exception( # noqa: TRY002 + "Automatic transpose only supported for H,W == 1,1" + ) # noqa: TRY002 out_oper = oper._replace(dim_order=DimOrder.CHANNELS_LAST) @@ -618,7 +636,7 @@ class _NnapiSerializer: if orders == (DimOrder.CHANNELS_LAST, DimOrder.PRESUMED_CONTIGUOUS): return (in0_id, in0_oper) + self.transpose_to_nhwc(in1_id, in1_oper) - raise Exception( + raise Exception( # noqa: TRY002 f"Automatic transpose not supported for dim_orders: {in0_oper.dim_order!r}, {in1_oper.dim_order!r}" ) @@ -627,7 +645,9 @@ class _NnapiSerializer: if ctype.kind() == "ListType": assert ctype.getElementType().kind() == "IntType" return value - raise Exception(f"Can't handle size arg of type '{ctype!r}' for '{jitval!r}'") + raise Exception( # noqa: TRY002 + f"Can't handle size arg of type '{ctype!r}' for '{jitval!r}'" + ) # noqa: TRY002 def get_conv_pool_args_2d_from_pack(self, kernel_size, packed_config): pc = [i.item() for i in packed_config] @@ -714,7 +734,9 @@ class _NnapiSerializer: return_values = self.tensor_sequences[retn_input] retval_count = len(return_values) else: - raise Exception(f"Unsupported return type: {retn_input.type()}") + raise Exception( # noqa: TRY002 + f"Unsupported return type: {retn_input.type()}" + ) # noqa: TRY002 if return_shapes is not None: assert len(return_shapes) == len(return_values) @@ -888,7 +910,9 @@ class _NnapiSerializer: def add_node(self, node): adder = self.ADDER_MAP.get(node.kind()) if not adder: - raise Exception(f"Unsupported node kind ({node.kind()!r}) in node {node!r}") + raise Exception( # noqa: TRY002 + f"Unsupported node kind ({node.kind()!r}) in node {node!r}" + ) # noqa: TRY002 adder(self, node) def _identity(self, node): @@ -939,7 +963,7 @@ class _NnapiSerializer: if tensors is not None: self.add_tensor_sequence(output, tensors) if const_vals is None and tensors is None: - raise Exception( + raise Exception( # noqa: TRY002 f"Unable to handle ListConstruct node. Neither all constants nor all tensors. {node!r}" ) @@ -989,7 +1013,7 @@ class _NnapiSerializer: is_trivial_reshape = len(shape) == 2 and shape[1] == -1 if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_reshape: - raise Exception( + raise Exception( # noqa: TRY002 "Currently, reshape is only supported on NHWC tensors if the target size is [X, -1]." 
) @@ -1022,7 +1046,7 @@ class _NnapiSerializer: in_oper.shape[1] == 1 or (in_oper.shape[2] == 1 and in_oper.shape[3] == 1) ) if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_flatten: - raise Exception( + raise Exception( # noqa: TRY002 "Currently, flatten is not supported on NHWC tensors unless C=1 or H=W=1" ) @@ -1038,10 +1062,12 @@ class _NnapiSerializer: ) if any(dim == 0 for dim in in_oper.shape[start_dim : end_dim + 1]): - raise Exception("Flattening flexible dims is not supported yet") + raise Exception( # noqa: TRY002 + "Flattening flexible dims is not supported yet" + ) # noqa: TRY002 non_flattened_dims = in_oper.shape[:start_dim] + in_oper.shape[end_dim + 1 :] if non_flattened_dims.count(0) > 1: - raise Exception("Only 1 dim can be flexible") + raise Exception("Only 1 dim can be flexible") # noqa: TRY002 out_oper = in_oper._replace( shape=out_shape, dim_order=DimOrder.PRESUMED_CONTIGUOUS @@ -1087,7 +1113,7 @@ class _NnapiSerializer: return if in_oper.shape[dim_value] == 0: - raise Exception("Unable to slice with flexible shape") + raise Exception("Unable to slice with flexible shape") # noqa: TRY002 if stop_value < 0: stop_value += in_oper.shape[dim_value] @@ -1095,7 +1121,9 @@ class _NnapiSerializer: stop_value = in_oper.shape[dim_value] if start_value >= stop_value: - raise Exception("Slice start value should be less than stop value") + raise Exception( # noqa: TRY002 + "Slice start value should be less than stop value" + ) # noqa: TRY002 out_len = (stop_value - start_value) // step_value out_shape = tuple( @@ -1253,7 +1281,7 @@ class _NnapiSerializer: in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) if in_oper.dim_order != DimOrder.CHANNELS_LAST: - raise Exception( + raise Exception( # noqa: TRY002 "Most hardware backends prefer NHWC quantized tensors. " "Try setting `t.nnapi_nhwc = True` on your tensor inputs. " ) @@ -1261,7 +1289,7 @@ class _NnapiSerializer: _, zero_point = self.get_constant_value(node.inputsAt(2), "IntType") _, scalar_type = self.get_constant_value(node.inputsAt(3), "IntType") if scalar_type != TorchScalarTypes.QUINT8.value: - raise Exception( + raise Exception( # noqa: TRY002 "PyTorch NNAPI export only supports quantized tensors " "with the quint8 dtype." ) @@ -1346,7 +1374,9 @@ class _NnapiSerializer: node.inputsAt(0), in1_oper.dim_order ) else: - raise Exception(f"Can't do a NNAPI binary op: {opcode} on two constants") + raise Exception( # noqa: TRY002 + f"Can't do a NNAPI binary op: {opcode} on two constants" + ) # noqa: TRY002 assert in0_oper.op_type == in1_oper.op_type in0_id, in0_oper, in1_id, in1_oper = self.transpose_for_broadcast( @@ -1390,7 +1420,9 @@ class _NnapiSerializer: _, alpha = self.get_constant_value(node.inputsAt(2), "IntType") if alpha != 1: - raise Exception("NNAPI does not support add/sub with alpha.") + raise Exception( # noqa: TRY002 + "NNAPI does not support add/sub with alpha." + ) # noqa: TRY002 self._do_add_binary(node, opcode, fuse_code) @@ -1440,7 +1472,9 @@ class _NnapiSerializer: opcode = op_map.get((min_val, max_val)) if opcode is None: - raise Exception("NNAPI only supports hardtanh with args (-1, 1) or (0, 6).") + raise Exception( # noqa: TRY002 + "NNAPI only supports hardtanh with args (-1, 1) or (0, 6)." + ) # noqa: TRY002 inputs = [None] * 1 inputs[0] = in_id @@ -1464,7 +1498,7 @@ class _NnapiSerializer: if w_oper.shape[0] > 1: if in_oper.use_nchw(): # TODO: Support this by adding trailing 1 dims. 
- raise Exception( + raise Exception( # noqa: TRY002 "Per-channel PReLU only supports channels_last right now." ) @@ -1473,7 +1507,9 @@ class _NnapiSerializer: if size > 0: pass elif dim <= 1: - raise Exception("PReLU requires fixed size for dim 0 and dim 1.") + raise Exception( # noqa: TRY002 + "PReLU requires fixed size for dim 0 and dim 1." + ) # noqa: TRY002 else: self.forward_operand_shape(out_id, dim, in_id, dim) @@ -1499,7 +1535,7 @@ class _NnapiSerializer: self.get_size_arg(kernel), stride, padding, dilation ) if args.dilation_h != 1 or args.dilation_w != 1: - raise Exception("NNAPI does not support dilated pooling.") + raise Exception("NNAPI does not support dilated pooling.") # noqa: TRY002 image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size(image) assert len(image_oper.shape) == 4 @@ -1545,7 +1581,7 @@ class _NnapiSerializer: _, count_include_pad_value = self.get_constant_value(count_include_pad) _, divisor_override_value = self.get_constant_value(divisor_override) if not count_include_pad_value or divisor_override_value: - raise Exception( + raise Exception( # noqa: TRY002 "NNAPI doesn't support count_include_pad=False or divisor_override" ) @@ -1596,7 +1632,7 @@ class _NnapiSerializer: assert size_ctype.kind() == "ListType" assert size_ctype.getElementType().kind() == "IntType" if size_arg != [1, 1]: - raise Exception( + raise Exception( # noqa: TRY002 "NNAPI only supports adaptive_avg_pool2d with output size (1, 1)." ) @@ -1651,7 +1687,7 @@ class _NnapiSerializer: assert len(image_oper.shape) == 4 if size_ctype.kind() != "NoneType" and scale_ctype.kind() != "NoneType": - raise Exception("Size and scale cannot both be non-None.") + raise Exception("Size and scale cannot both be non-None.") # noqa: TRY002 elif size_ctype.kind() != "NoneType": assert size_ctype.kind() == "ListType" assert size_ctype.getElementType().kind() == "IntType" @@ -1683,7 +1719,7 @@ class _NnapiSerializer: arg_h = self.add_immediate_float_scalar(scale_arg[0]) arg_w = self.add_immediate_float_scalar(scale_arg[1]) else: - raise Exception("Size and scale cannot both be None.") + raise Exception("Size and scale cannot both be None.") # noqa: TRY002 out_shape = (image_oper.shape[0], image_oper.shape[1], out_h, out_w) use_nchw = image_oper.use_nchw() @@ -1692,7 +1728,7 @@ class _NnapiSerializer: ) if image_oper.shape[0] == 0 or image_oper.shape[1] == 0: - raise Exception("Flexible batch or channels not supported") + raise Exception("Flexible batch or channels not supported") # noqa: TRY002 # Handle variable input size for dim in (2, 3): # h, w indices @@ -1706,7 +1742,9 @@ class _NnapiSerializer: f"int({scale_arg[dim - 2]} * {flex_name(image_id, dim)})", ) else: - raise Exception("Size and scale cannot both be None.") + raise Exception( # noqa: TRY002 + "Size and scale cannot both be None." + ) # noqa: TRY002 inputs = [None] * 4 inputs[0] = image_id @@ -1728,7 +1766,7 @@ class _NnapiSerializer: scale_ctype, scale_value = self.get_constant_value(jitval) assert scale_ctype.kind() in ("IntType", "FloatType") if scale_value != 1: - raise Exception( + raise Exception( # noqa: TRY002 "NNAPI Fully-Connected does not support alpha and beta." ) @@ -1823,7 +1861,7 @@ class _NnapiSerializer: multiplier = input_oper.scale * weight_scale / out_scale assert multiplier > 0 if multiplier >= 1: - raise Exception( + raise Exception( # noqa: TRY002 "Quantized convolution multiplier is greater than 1. " "This is supported by NNAPI, but not by most hardware backends. 
" "Try training a model without quantization-aware training. " @@ -2005,7 +2043,7 @@ class _NnapiSerializer: multiplier = image_oper.scale * weight_scale / out_scale assert multiplier > 0 if multiplier >= 1: - raise Exception( + raise Exception( # noqa: TRY002 "Quantized convolution multiplier is greater than 1. " "This is supported by NNAPI, but not by most hardware backends. " "Try training a model without quantization-aware training. " @@ -2050,7 +2088,7 @@ class _NnapiSerializer: depthwise = True weight_permutation = (1, 2, 3, 0) else: - raise Exception("Group convolution not supported yet.") + raise Exception("Group convolution not supported yet.") # noqa: TRY002 # TODO: Transform at load time to share weights with CPU model. nnapi_weight_tensor = weight_tensor.permute(*weight_permutation).contiguous() @@ -2068,7 +2106,9 @@ class _NnapiSerializer: assert approx_equal(image_oper.scale * weight_oper.scale, bias_oper.scale) assert bias_oper.zero_point == 0 else: - raise Exception(f"Unsupported input type for conv2d: {image_oper.op_type}") + raise Exception( # noqa: TRY002 + f"Unsupported input type for conv2d: {image_oper.op_type}" + ) # noqa: TRY002 assert len(image_oper.shape) == 4 assert len(weight_oper.shape) == 4 @@ -2139,7 +2179,7 @@ class _NnapiSerializer: if batch == 0: self.forward_operand_shape(out_id, 0, image_id, 0) if in_ch == 0: - raise Exception("Input channels can't be flexible") + raise Exception("Input channels can't be flexible") # noqa: TRY002 # H & W if transpose: if in_h == 0: diff --git a/torch/backends/cudnn/rnn.py b/torch/backends/cudnn/rnn.py index 5ce166c8b2..aaf0bd02e8 100644 --- a/torch/backends/cudnn/rnn.py +++ b/torch/backends/cudnn/rnn.py @@ -18,7 +18,7 @@ def get_cudnn_mode(mode): elif mode == "GRU": return int(_cudnn.RNNMode.gru) else: - raise Exception(f"Unknown mode: {mode}") + raise Exception(f"Unknown mode: {mode}") # noqa: TRY002 # NB: We don't actually need this class anymore (in fact, we could serialize the diff --git a/torch/cuda/jiterator.py b/torch/cuda/jiterator.py index 25d2548241..1be5525559 100644 --- a/torch/cuda/jiterator.py +++ b/torch/cuda/jiterator.py @@ -38,7 +38,7 @@ class _CodeParser: ) # DOTALL for matching multiline if result is None: - raise Exception( + raise Exception( # noqa: TRY002 f"Couldn't parse code, please check correctness:\n {code_string}" ) diff --git a/torch/distributed/elastic/agent/server/api.py b/torch/distributed/elastic/agent/server/api.py index c142fbf67b..4ebfc59523 100644 --- a/torch/distributed/elastic/agent/server/api.py +++ b/torch/distributed/elastic/agent/server/api.py @@ -916,7 +916,7 @@ class SimpleElasticAgent(ElasticAgent): ) self._restart_workers(self._worker_group) else: - raise Exception(f"[{role}] Worker group in {state.name} state") + raise Exception(f"[{role}] Worker group in {state.name} state") # noqa: TRY002 def _exit_barrier(self): """ diff --git a/torch/distributed/fsdp/_optim_utils.py b/torch/distributed/fsdp/_optim_utils.py index 682a7f2b29..5c635c959b 100644 --- a/torch/distributed/fsdp/_optim_utils.py +++ b/torch/distributed/fsdp/_optim_utils.py @@ -1290,7 +1290,7 @@ def _is_named_optimizer(optim_state_dict: Dict[str, Any]) -> bool: try: key = next(iter(state.keys())) except Exception as e: - raise Exception(optim_state_dict) from e + raise Exception(optim_state_dict) from e # noqa: TRY002 return isinstance(key, str) diff --git a/torch/distributed/run.py b/torch/distributed/run.py index f2e7f8d942..3352111068 100644 --- a/torch/distributed/run.py +++ b/torch/distributed/run.py @@ 
-802,7 +802,7 @@ def config_from_args(args) -> Tuple[LaunchConfig, Union[Callable, str], List[str ranks = set(map(int, args.local_ranks_filter.split(","))) assert ranks except Exception as e: - raise Exception( + raise Exception( # noqa: TRY002 "--local_ranks_filter must be a comma-separated list of integers e.g. --local_ranks_filter=0,1,2" ) from e diff --git a/torch/fx/experimental/unification/utils.py b/torch/fx/experimental/unification/utils.py index d74799a714..56cde39319 100644 --- a/torch/fx/experimental/unification/utils.py +++ b/torch/fx/experimental/unification/utils.py @@ -82,7 +82,7 @@ def reverse_dict(d): def xfail(func): try: func() - raise Exception("XFailed test passed") # pragma:nocover + raise Exception("XFailed test passed") # pragma:nocover # noqa: TRY002 except Exception: pass diff --git a/torch/fx/passes/infra/pass_manager.py b/torch/fx/passes/infra/pass_manager.py index 0adc75a1af..44de7fcc0b 100644 --- a/torch/fx/passes/infra/pass_manager.py +++ b/torch/fx/passes/infra/pass_manager.py @@ -293,7 +293,7 @@ class PassManager: for p in self.passes[:i] ] msg = f"An error occurred when running the '{fn_name}' pass after the following passes: {prev_pass_names}" - raise Exception(msg) from e + raise Exception(msg) from e # noqa: TRY002 # If the graph no longer changes, then we can stop running these passes overall_modified = overall_modified or modified diff --git a/torch/hub.py b/torch/hub.py index 5eef08e83d..286dfbaa59 100644 --- a/torch/hub.py +++ b/torch/hub.py @@ -307,7 +307,7 @@ def _check_repo_is_trusted(repo_owner, repo_name, owner_name_branch, trust_repo, if is_trusted: print("The repository is already trusted.") elif response.lower() in ("n", "no", ""): - raise Exception("Untrusted repository.") + raise Exception("Untrusted repository.") # noqa: TRY002 else: raise ValueError(f"Unrecognized response {response}.") diff --git a/torch/jit/_fuser.py b/torch/jit/_fuser.py index 0ca9cb6860..2536827360 100644 --- a/torch/jit/_fuser.py +++ b/torch/jit/_fuser.py @@ -65,7 +65,7 @@ def fuser(name): torch._C._jit_set_nvfuser_enabled(False) torch._C._jit_set_llga_enabled(False) else: - raise Exception(f"unrecognized fuser option (name: {name})") + raise Exception(f"unrecognized fuser option (name: {name})") # noqa: TRY002 try: yield finally: diff --git a/torch/package/package_exporter.py b/torch/package/package_exporter.py index 37313487d7..e069a80c5f 100644 --- a/torch/package/package_exporter.py +++ b/torch/package/package_exporter.py @@ -949,7 +949,7 @@ class PackageExporter: if _gate_torchscript_serialization and isinstance( obj, torch.jit.RecursiveScriptModule ): - raise Exception( + raise Exception( # noqa: TRY002 "Serializing ScriptModules directly into a package is a beta feature. " "To use, set global " "`torch.package.package_exporter._gate_torchscript_serialization` to `False`." 
diff --git a/torch/profiler/_utils.py b/torch/profiler/_utils.py index aca0c950f5..caacfb8303 100644 --- a/torch/profiler/_utils.py +++ b/torch/profiler/_utils.py @@ -182,7 +182,7 @@ class BasicEvaluation: return event.start_ns() if hasattr(event, "start_time_ns"): return event.start_time_ns - raise Exception("Unknown Event Type") + raise Exception("Unknown Event Type") # noqa: TRY002 queue_depth_list: List[Interval] = [] all_events.sort(key=new_old_event_comparator) diff --git a/torch/testing/_internal/common_device_type.py b/torch/testing/_internal/common_device_type.py index e4dffdf7cd..048ae83f72 100644 --- a/torch/testing/_internal/common_device_type.py +++ b/torch/testing/_internal/common_device_type.py @@ -973,7 +973,7 @@ class ops(_TestParametrizer): except Exception as e: tracked_input = get_tracked_input() if PRINT_REPRO_ON_FAILURE and tracked_input is not None: - raise Exception( + raise Exception( # noqa: TRY002 f"Caused by {tracked_input.type_desc} " f"at index {tracked_input.index}: " f"{_serialize_sample(tracked_input.val)}") from e diff --git a/torch/testing/_internal/distributed/rpc/dist_autograd_test.py b/torch/testing/_internal/distributed/rpc/dist_autograd_test.py index 3e89279333..cf8d7aae58 100644 --- a/torch/testing/_internal/distributed/rpc/dist_autograd_test.py +++ b/torch/testing/_internal/distributed/rpc/dist_autograd_test.py @@ -201,7 +201,7 @@ class SimulateBackwardError(Function): @once_differentiable def backward(ctx, input): if SimulateBackwardError._simulate_error: - raise Exception("Simulate error on backward pass") + raise Exception("Simulate error on backward pass") # noqa: TRY002 else: return input diff --git a/torch/testing/_internal/distributed/rpc/rpc_test.py b/torch/testing/_internal/distributed/rpc/rpc_test.py index 88932eac4d..e1f9cd052f 100644 --- a/torch/testing/_internal/distributed/rpc/rpc_test.py +++ b/torch/testing/_internal/distributed/rpc/rpc_test.py @@ -236,11 +236,11 @@ def build_complex_tensors(): def non_cont_test(t_view, t_cont): if t_view.is_contiguous(): - raise Exception('t_view is contiguous!') + raise Exception('t_view is contiguous!') # noqa: TRY002 if not t_cont.is_contiguous(): - raise Exception('t_cont is not contiguous!') + raise Exception('t_cont is not contiguous!') # noqa: TRY002 if not torch.equal(t_view, t_cont): - raise Exception('t_view is not equal to t_cont!') + raise Exception('t_view is not equal to t_cont!') # noqa: TRY002 return t_view def my_function(a, b, c): diff --git a/torch/utils/bundled_inputs.py b/torch/utils/bundled_inputs.py index 8b578c45e7..201a000b30 100644 --- a/torch/utils/bundled_inputs.py +++ b/torch/utils/bundled_inputs.py @@ -104,7 +104,7 @@ def bundle_inputs( Tensors in lists or tuples will not. """ if not isinstance(model, torch.jit.ScriptModule): - raise Exception("Only ScriptModule is supported.") + raise Exception("Only ScriptModule is supported.") # noqa: TRY002 ignored_methods, ignored_attrs = _get_bundled_inputs_attributes_and_methods(model) clone = torch._C._hack_do_not_use_clone_module_with_class( # type: ignore[attr-defined] @@ -162,7 +162,7 @@ def augment_model_with_bundled_inputs( of each tuple are the args that make up one input. """ if not isinstance(model, torch.jit.ScriptModule): - raise Exception("Only ScriptModule is supported.") + raise Exception("Only ScriptModule is supported.") # noqa: TRY002 forward: Callable = model.forward @@ -235,13 +235,13 @@ def augment_many_model_functions_with_bundled_inputs( Tensors in lists or tuples will not. 
""" if not isinstance(model, torch.jit.ScriptModule): - raise Exception("Only ScriptModule is supported.") + raise Exception("Only ScriptModule is supported.") # noqa: TRY002 if not inputs: - raise Exception("Please provide inputs for at least 1 function") + raise Exception("Please provide inputs for at least 1 function") # noqa: TRY002 if hasattr(model, "get_all_bundled_inputs") or hasattr(model, "get_bundled_inputs_functions_and_info"): - raise Exception( + raise Exception( # noqa: TRY002 "Models can only be augmented with bundled inputs once. " "This Model seems to have already been augmented with " "bundled inputs. Please start afresh with one that " @@ -257,7 +257,7 @@ def augment_many_model_functions_with_bundled_inputs( if hasattr(function, "name"): function_name = function.name # type: ignore[attr-defined] else: - raise Exception( + raise Exception( # noqa: TRY002 'At least one of your functions has no attribute name please ensure all have one. m.foo.name = "foo"') @@ -270,12 +270,12 @@ def augment_many_model_functions_with_bundled_inputs( if hasattr(model, "_generate_bundled_inputs_for_" + function_name): if input_list is not None: - raise Exception( + raise Exception( # noqa: TRY002 f"inputs[{function_name}] is not None, but _generate_bundled_inputs_for_{function_name} is already defined" ) # Model author already defined _generate_bundled_inputs_for_<function_name>. elif input_list is None or len(input_list) == 0: - raise Exception( + raise Exception( # noqa: TRY002 f"inputs for {function_name} must be specified if " f"_generate_bundled_inputs_for_{function_name} is not already defined" ) @@ -369,7 +369,7 @@ def _inflate_expr( if isinstance(arg, InflatableArg): if arg.fmt_fn: if arg.fmt not in ["{}", ""]: - raise Exception( + raise Exception( # noqa: TRY002 f"Bundled input argument at position '{ref}' has " f"both arg.fmt_fn => \n{arg.fmt_fn} " f"\n and arg.fmt => {arg.fmt}. " @@ -400,7 +400,7 @@ def _inflate_expr( f"{ref}.contiguous(memory_format={fmt})", None) # Prevent big tensors from being bundled by default. # TODO: Provide more useful diagnostics. - raise Exception( + raise Exception( # noqa: TRY002 f"Bundled input argument at position '{ref}' is " f"a tensor with storage size {arg._typed_storage().size()}. " f"You probably don't want to bundle this as an input. 
" diff --git a/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py b/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py index 3596cc171e..9a03a8f00e 100644 --- a/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py +++ b/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py @@ -26,7 +26,7 @@ class PandasWrapper: @classmethod def create_dataframe(cls, data, columns): if not _with_pandas(): - raise Exception("DataFrames prototype requires pandas to function") + raise Exception("DataFrames prototype requires pandas to function") # noqa: TRY002 return _pandas.DataFrame(data, columns=columns) # type: ignore[union-attr] @classmethod @@ -44,31 +44,31 @@ class PandasWrapper: @classmethod def iterate(cls, data): if not _with_pandas(): - raise Exception("DataFrames prototype requires pandas to function") + raise Exception("DataFrames prototype requires pandas to function") # noqa: TRY002 yield from data.itertuples(index=False) @classmethod def concat(cls, buffer): if not _with_pandas(): - raise Exception("DataFrames prototype requires pandas to function") + raise Exception("DataFrames prototype requires pandas to function") # noqa: TRY002 return _pandas.concat(buffer) # type: ignore[union-attr] @classmethod def get_item(cls, data, idx): if not _with_pandas(): - raise Exception("DataFrames prototype requires pandas to function") + raise Exception("DataFrames prototype requires pandas to function") # noqa: TRY002 return data[idx: idx + 1] @classmethod def get_len(cls, df): if not _with_pandas(): - raise Exception("DataFrames prototype requires pandas to function") + raise Exception("DataFrames prototype requires pandas to function") # noqa: TRY002 return len(df.index) @classmethod def get_columns(cls, df): if not _with_pandas(): - raise Exception("DataFrames prototype requires pandas to function") + raise Exception("DataFrames prototype requires pandas to function") # noqa: TRY002 return list(df.columns.values.tolist()) diff --git a/torch/utils/data/datapipes/dataframe/dataframes.py b/torch/utils/data/datapipes/dataframe/dataframes.py index 02c824b11a..a93ea6ba2d 100644 --- a/torch/utils/data/datapipes/dataframe/dataframes.py +++ b/torch/utils/data/datapipes/dataframe/dataframes.py @@ -90,7 +90,7 @@ class Capture: def __getattr__(self, attrname): if attrname == 'kwarg' or attrname == 'kwargs': - raise Exception('no kwargs!') + raise Exception('no kwargs!') # noqa: TRY002 if attrname in ['__deepcopy__']: raise AttributeError result = CaptureGetAttr(self, attrname, ctx=self.ctx) @@ -244,7 +244,7 @@ class CaptureVariable(Capture): def __init__(self, value, ctx): if CaptureControl.disabled: - raise Exception('Attempting to create capture variable with capture off') + raise Exception('Attempting to create capture variable with capture off') # noqa: TRY002 self.ctx = ctx self.value = value self.name = f'var_{CaptureVariable.names_idx}' @@ -404,7 +404,7 @@ class CaptureDataFrameWithDataPipeOps(CaptureDataFrame): return self._dataframes_filter(*args, **kwargs) def collate(self, *args, **kwargs): - raise Exception("Can't collate unbatched DataFrames stream") + raise Exception("Can't collate unbatched DataFrames stream") # noqa: TRY002 def __getattr__(self, attrname): # ? 
if attrname in UNIMPLEMENTED_ATTR: diff --git a/torch/utils/data/datapipes/datapipe.py b/torch/utils/data/datapipes/datapipe.py index c6d9baf95a..1c99fe79e4 100644 --- a/torch/utils/data/datapipes/datapipe.py +++ b/torch/utils/data/datapipes/datapipe.py @@ -132,7 +132,7 @@ class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta): @classmethod def register_datapipe_as_function(cls, function_name, cls_to_register, enable_df_api_tracing=False): if function_name in cls.functions: - raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken") + raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken") # noqa: TRY002 def class_function(cls, enable_df_api_tracing, source_dp, *args, **kwargs): result_pipe = cls(source_dp, *args, **kwargs) @@ -174,13 +174,13 @@ class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta): @classmethod def set_getstate_hook(cls, hook_fn): if IterDataPipe.getstate_hook is not None and hook_fn is not None: - raise Exception("Attempt to override existing getstate_hook") + raise Exception("Attempt to override existing getstate_hook") # noqa: TRY002 IterDataPipe.getstate_hook = hook_fn @classmethod def set_reduce_ex_hook(cls, hook_fn): if IterDataPipe.reduce_ex_hook is not None and hook_fn is not None: - raise Exception("Attempt to override existing reduce_ex_hook") + raise Exception("Attempt to override existing reduce_ex_hook") # noqa: TRY002 IterDataPipe.reduce_ex_hook = hook_fn def __repr__(self): @@ -275,7 +275,7 @@ class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta): @classmethod def register_datapipe_as_function(cls, function_name, cls_to_register): if function_name in cls.functions: - raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken") + raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken") # noqa: TRY002 def class_function(cls, source_dp, *args, **kwargs): result_pipe = cls(source_dp, *args, **kwargs) @@ -310,13 +310,13 @@ class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta): @classmethod def set_getstate_hook(cls, hook_fn): if MapDataPipe.getstate_hook is not None and hook_fn is not None: - raise Exception("Attempt to override existing getstate_hook") + raise Exception("Attempt to override existing getstate_hook") # noqa: TRY002 MapDataPipe.getstate_hook = hook_fn @classmethod def set_reduce_ex_hook(cls, hook_fn): if MapDataPipe.reduce_ex_hook is not None and hook_fn is not None: - raise Exception("Attempt to override existing reduce_ex_hook") + raise Exception("Attempt to override existing reduce_ex_hook") # noqa: TRY002 MapDataPipe.reduce_ex_hook = hook_fn def __repr__(self): diff --git a/torch/utils/data/datapipes/iter/callable.py b/torch/utils/data/datapipes/iter/callable.py index 48875e40a6..9a67cc0592 100644 --- a/torch/utils/data/datapipes/iter/callable.py +++ b/torch/utils/data/datapipes/iter/callable.py @@ -136,7 +136,7 @@ def _collate_helper(conversion, item): # TODO(VitalyFedyunin): Verify that item is any sort of batch if len(item.items) > 1: # TODO(VitalyFedyunin): Compact all batch dataframes into one - raise Exception("Only supports one DataFrame per batch") + raise Exception("Only supports one DataFrame per batch") # noqa: TRY002 df = item[0] columns_name = df_wrapper.get_columns(df) tuple_names: List = [] @@ -144,12 +144,12 @@ def _collate_helper(conversion, item): for name in conversion.keys(): if name not in columns_name: - raise Exception("Conversion 
keys missmatch") + raise Exception("Conversion keys missmatch") # noqa: TRY002 for name in columns_name: if name in conversion: if not callable(conversion[name]): - raise Exception('Collate (DF)DataPipe requires callable as dict values') + raise Exception('Collate (DF)DataPipe requires callable as dict values') # noqa: TRY002 collation_fn = conversion[name] else: # TODO(VitalyFedyunin): Add default collation into df_wrapper @@ -157,7 +157,7 @@ def _collate_helper(conversion, item): import torcharrow.pytorch as tap # type: ignore[import] collation_fn = tap.rec.Default() except Exception as e: - raise Exception("unable to import default collation function from the TorchArrow") from e + raise Exception("unable to import default collation function from the TorchArrow") from e # noqa: TRY002 tuple_names.append(str(name)) value = collation_fn(df[name]) diff --git a/torch/utils/data/datapipes/iter/sharding.py b/torch/utils/data/datapipes/iter/sharding.py index 0b25d6baf7..f5bd3261fc 100644 --- a/torch/utils/data/datapipes/iter/sharding.py +++ b/torch/utils/data/datapipes/iter/sharding.py @@ -50,10 +50,10 @@ class ShardingFilterIterDataPipe(_ShardingIterDataPipe): raise ValueError(f"instance_id({instance_id}) should be smaller than num_of_instances({num_of_instances})") if sharding_group == SHARDING_PRIORITIES.DEFAULT: if len(self.groups) and SHARDING_PRIORITIES.DEFAULT not in self.groups: - raise Exception('ShardingFilter cannot mix DEFAULT and non DEFAULT groups') + raise Exception('ShardingFilter cannot mix DEFAULT and non DEFAULT groups') # noqa: TRY002 else: if SHARDING_PRIORITIES.DEFAULT in self.groups: - raise Exception('ShardingFilter cannot mix DEFAULT and non DEFAULT groups') + raise Exception('ShardingFilter cannot mix DEFAULT and non DEFAULT groups') # noqa: TRY002 self.groups[sharding_group] = (num_of_instances, instance_id) self._update_num_of_instances() diff --git a/torch/utils/model_dump/__init__.py b/torch/utils/model_dump/__init__.py index 5fed94a433..a8d491ed6b 100644 --- a/torch/utils/model_dump/__init__.py +++ b/torch/utils/model_dump/__init__.py @@ -181,8 +181,8 @@ def hierarchical_pickle(data): "__module_type__": typename, "state": hierarchical_pickle((msg,)), } - raise Exception(f"Can't prepare fake object of type for JS: {typename}") - raise Exception(f"Can't prepare data of type for JS: {type(data)}") + raise Exception(f"Can't prepare fake object of type for JS: {typename}") # noqa: TRY002 + raise Exception(f"Can't prepare data of type for JS: {type(data)}") # noqa: TRY002 def get_model_info( @@ -217,7 +217,7 @@ def get_model_info( if path_prefix is None: path_prefix = prefix elif prefix != path_prefix: - raise Exception(f"Mismatched prefixes: {path_prefix} != {prefix}") + raise Exception(f"Mismatched prefixes: {path_prefix} != {prefix}") # noqa: TRY002 zip_files.append(dict( filename=zi.filename, compression=zi.compress_type, @@ -411,4 +411,4 @@ def main(argv, *, stdout=None): page = burn_in_info(skeleton, info) output.write(page) else: - raise Exception("Invalid style") + raise Exception("Invalid style") # noqa: TRY002 diff --git a/torch/utils/show_pickle.py b/torch/utils/show_pickle.py index e83bed48e6..24ea1eb4e1 100644 --- a/torch/utils/show_pickle.py +++ b/torch/utils/show_pickle.py @@ -40,7 +40,7 @@ class FakeObject: printer._format(obj.state, stream, indent, allowance + 1, context, level + 1) stream.write(")") return - raise Exception("Need to implement") + raise Exception("Need to implement") # noqa: TRY002 class FakeClass: @@ -84,7 +84,7 @@ class 
DumpUnpickler(pickle._Unpickler): # type: ignore[name-defined] def load_binunicode(self): strlen, = struct.unpack("<I", self.read(4)) # type: ignore[attr-defined] if strlen > sys.maxsize: - raise Exception("String too long.") + raise Exception("String too long.") # noqa: TRY002 str_bytes = self.read(strlen) # type: ignore[attr-defined] obj: Any try: @@ -107,7 +107,7 @@ def main(argv, output_stream=None): if len(argv) != 2: # Don't spam stderr if not using stdout. if output_stream is not None: - raise Exception("Pass argv of length 2.") + raise Exception("Pass argv of length 2.") # noqa: TRY002 sys.stderr.write("usage: show_pickle PICKLE_FILE\n") sys.stderr.write(" PICKLE_FILE can be any of:\n") sys.stderr.write(" path to a pickle file\n") @@ -137,7 +137,7 @@ def main(argv, output_stream=None): found = True break if not found: - raise Exception(f"Could not find member matching {mname} in {zfname}") + raise Exception(f"Could not find member matching {mname} in {zfname}") # noqa: TRY002 if __name__ == "__main__": diff --git a/torch/utils/tensorboard/_caffe2_graph.py b/torch/utils/tensorboard/_caffe2_graph.py index cd2d371204..99cafef820 100644 --- a/torch/utils/tensorboard/_caffe2_graph.py +++ b/torch/utils/tensorboard/_caffe2_graph.py @@ -320,7 +320,7 @@ def _tf_device(device_option): return "/cpu:*" if device_option.device_type == caffe2_pb2.CUDA: return f"/gpu:{device_option.device_id}" - raise Exception("Unhandled device", device_option) + raise Exception("Unhandled device", device_option) # noqa: TRY002 def _add_tf_shape(attr_dict, ints): diff --git a/torch/utils/tensorboard/writer.py b/torch/utils/tensorboard/writer.py index d3853514eb..4bd12672f5 100644 --- a/torch/utils/tensorboard/writer.py +++ b/torch/utils/tensorboard/writer.py @@ -986,7 +986,7 @@ class SummaryWriter: "warning: Embedding dir exists, did you set global_step for add_embedding()?" ) else: - raise Exception( + raise Exception( # noqa: TRY002 f"Path: `{save_path}` exists, but is a file. Cannot proceed." ) else: diff --git a/torch/xpu/__init__.py b/torch/xpu/__init__.py index 203df178f8..3e7f43b87d 100644 --- a/torch/xpu/__init__.py +++ b/torch/xpu/__init__.py @@ -132,7 +132,7 @@ def _lazy_init(): f"XPU call failed lazily at initialization with error: {str(e)}\n\n" f"XPU call was originally invoked at:\n\n{''.join(orig_traceback)}" ) - raise Exception(msg) from e + raise Exception(msg) from e # noqa: TRY002 finally: delattr(_tls, "is_initializing") _initialized = True diff --git a/torchgen/api/unboxing.py b/torchgen/api/unboxing.py index 3cc20fe9d5..7ff0c59c77 100644 --- a/torchgen/api/unboxing.py +++ b/torchgen/api/unboxing.py @@ -117,7 +117,7 @@ def convert_arguments(f: NativeFunction) -> Tuple[List[Binding], List[str]]: for arg in args: # expecting only Argument if not isinstance(arg.argument, Argument): - raise Exception( + raise Exception( # noqa: TRY002 f"Unexpected argument type, expecting `Argument` but got {arg}" ) argument: Argument = arg.argument @@ -165,7 +165,7 @@ def argumenttype_ivalue_convert( ctype=ctype, ) else: - raise Exception(f"Cannot handle type {t}. arg_name: {arg_name}") + raise Exception(f"Cannot handle type {t}. 
arg_name: {arg_name}") # noqa: TRY002 return out_name, ctype, code, decl diff --git a/torchgen/executorch/api/custom_ops.py b/torchgen/executorch/api/custom_ops.py index 5d11f1300b..7e31025675 100644 --- a/torchgen/executorch/api/custom_ops.py +++ b/torchgen/executorch/api/custom_ops.py @@ -47,7 +47,9 @@ class ComputeNativeFunctionStub: # Returns an empty tensor ret_name = "at::Tensor()" else: - raise Exception(f"Can't handle this return type {f.func}") + raise Exception( # noqa: TRY002 + f"Can't handle this return type {f.func}" + ) # noqa: TRY002 elif len(f.func.arguments.out) == len(f.func.returns): # Returns a tuple of out arguments tensor_type = "at::Tensor &" diff --git a/torchgen/executorch/api/unboxing.py b/torchgen/executorch/api/unboxing.py index 74b5e580b1..50d69d34e9 100644 --- a/torchgen/executorch/api/unboxing.py +++ b/torchgen/executorch/api/unboxing.py @@ -57,7 +57,7 @@ class Unboxing: for arg in args: # expecting only Argument if not isinstance(arg.argument, Argument): - raise Exception( + raise Exception( # noqa: TRY002 f"Unexpected argument type, expecting `Argument` but got {arg}" ) argument: Argument = arg.argument @@ -99,7 +99,9 @@ class Unboxing: arg_name=arg_name, out_name=out_name, t=t, ctype=ctype ) else: - raise Exception(f"Cannot handle type {t}. arg_name: {arg_name}") + raise Exception( # noqa: TRY002 + f"Cannot handle type {t}. arg_name: {arg_name}" + ) # noqa: TRY002 return out_name, ctype, code, decl def _gen_code_base_type( diff --git a/torchgen/gen_executorch.py b/torchgen/gen_executorch.py index 4d21f3f64a..436630bb66 100644 --- a/torchgen/gen_executorch.py +++ b/torchgen/gen_executorch.py @@ -132,7 +132,7 @@ class ComputeFunction: # only valid remaining case is only function is in f.variants elif not (Variant.function in f.variants and Variant.method not in f.variants): - raise Exception( + raise Exception( # noqa: TRY002 f"Can't handle native function {f.func} with the following variant specification {f.variants}." ) @@ -228,7 +228,7 @@ class ComputeCodegenUnboxedKernels: if len(f.func.returns) == 0: if len(f.func.arguments.out) == 0: - raise Exception( + raise Exception( # noqa: TRY002 f"Can't handle native function {f.func} with no returns and no out yet." 
) out = f.func.arguments.out[0] diff --git a/torchgen/gen_lazy_tensor.py b/torchgen/gen_lazy_tensor.py index 4f1c3a8513..eed0e8de7a 100644 --- a/torchgen/gen_lazy_tensor.py +++ b/torchgen/gen_lazy_tensor.py @@ -138,7 +138,7 @@ def validate_shape_inference_header( decl for decl in expected_shape_infr_decls if decl not in shape_infr_decl_lines ] if missing_decls: - raise Exception( + raise Exception( # noqa: TRY002 f"""Missing shape inference function.\n Please add declare this function in {shape_inference_hdr}:\n and implement it in the corresponding shape_inference.cpp file.\n diff --git a/torchgen/selective_build/operator.py b/torchgen/selective_build/operator.py index feb4f08bb8..939d97ff94 100644 --- a/torchgen/selective_build/operator.py +++ b/torchgen/selective_build/operator.py @@ -61,7 +61,7 @@ class SelectiveBuildOperator: } if len(set(op_info.keys()) - allowed_keys) > 0: - raise Exception( + raise Exception( # noqa: TRY002 "Got unexpected top level keys: {}".format( ",".join(set(op_info.keys()) - allowed_keys), ) @@ -132,7 +132,7 @@ def combine_operators( lhs: "SelectiveBuildOperator", rhs: "SelectiveBuildOperator" ) -> "SelectiveBuildOperator": if str(lhs.name) != str(rhs.name): - raise Exception( + raise Exception( # noqa: TRY002 f"Expected both arguments to have the same name, but got '{str(lhs.name)}' and '{str(rhs.name)}' instead" ) diff --git a/torchgen/selective_build/selector.py b/torchgen/selective_build/selector.py index 4fdc513534..aa60349966 100644 --- a/torchgen/selective_build/selector.py +++ b/torchgen/selective_build/selector.py @@ -80,7 +80,7 @@ class SelectiveBuilder: } top_level_keys = set(data.keys()) if len(top_level_keys - valid_top_level_keys) > 0: - raise Exception( + raise Exception( # noqa: TRY002 "Got unexpected top level keys: {}".format( ",".join(top_level_keys - valid_top_level_keys), ) @@ -255,7 +255,7 @@ class SelectiveBuilder: break if not key_found: if "default" not in kernel_key: - raise Exception("Missing kernel for the model") + raise Exception("Missing kernel for the model") # noqa: TRY002 else: result_set.add("default") diff --git a/torchgen/shape_functions/gen_jit_shape_functions.py b/torchgen/shape_functions/gen_jit_shape_functions.py index c6336a6951..bdfd5c75b2 100644 --- a/torchgen/shape_functions/gen_jit_shape_functions.py +++ b/torchgen/shape_functions/gen_jit_shape_functions.py @@ -16,7 +16,7 @@ module_name = "torch.jit._shape_functions" err_msg = """Could not find shape functions file, please make sure you are in the root directory of the Pytorch git repo""" if not file_path.exists(): - raise Exception(err_msg) + raise Exception(err_msg) # noqa: TRY002 spec = importlib.util.spec_from_file_location(module_name, file_path) assert spec is not None
2.41.0
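For context: TRY002 is the tryceratops "create your own exception" rule (also shipped in Ruff), which flags raising the built-in Exception directly; the patch above silences it per call site with `# noqa: TRY002` rather than introducing project-specific exception classes. A minimal illustrative sketch of the two alternatives (not taken from the patch):

# Suppressing the lint at the call site, as the patch does:
def check_positive(x: int) -> None:
    if x <= 0:
        raise Exception(f"expected a positive value, got {x}")  # noqa: TRY002

# The alternative TRY002 nudges toward: a dedicated exception type.
class ValidationError(Exception):
    pass

def check_positive_strict(x: int) -> None:
    if x <= 0:
        raise ValidationError(f"expected a positive value, got {x}")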
1984237a0fb32b760c1b84d6d02d2f0f7ed293b
Mon, 22 Apr 2024 01:26:55 +0000
[PATCH 0442/1000] [Profiler] Unify the device (CUDA, XPU, PrivateUse1) in torch profiler post processing (#123247)
This PR unifies CUDA, XPU, and PrivateUse1 handling in the torch profiler. CUDA, XPU, and PrivateUse1 now share the string attribute `use_device` to distinguish one another, and a single device code path computes Kineto time durations and memory statistics during post processing. #suppress-api-compatibility-check Co-authored-by: Aaron Enye Shi <enye.shi@gmail.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123247 Approved by: https://github.com/aaronenyeshi
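A minimal sketch of how the unified path is exercised after this change (illustrative only, not part of the patch; assumes a CUDA build, and "cuda" can be swapped for "xpu" or "privateuseone" on those backends):

import torch
from torch.autograd import profiler

x = torch.randn(1024, 1024, device="cuda")
with profiler.profile(use_device="cuda", profile_memory=True) as prof:
    y = x @ x

# key_averages() now exposes device-agnostic fields; sort keys spelled with
# cuda/xpu/privateuse1 are remapped onto the shared device_* path.
averages = prof.key_averages()
print(averages.table(sort_by="self_cuda_time_total", row_limit=5))
for evt in averages:
    if evt.device_time_total > 0:
        print(evt.key, evt.device_time_total, evt.device_memory_usage)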
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py index e149ea379b..b9f01e0fbd 100644 --- a/test/profiler/test_profiler.py +++ b/test/profiler/test_profiler.py @@ -1080,7 +1080,7 @@ class TestProfiler(TestCase): stats = run_profiler(create_cuda_tensor) check_metrics( stats, - "cuda_memory_usage", + "device_memory_usage", allocs=[ "test_user_scope_alloc", "aten::to", @@ -1132,7 +1132,7 @@ class TestProfiler(TestCase): deallocs=["[memory]"], ) if torch.cuda.is_available(): - check_metrics(stats, "cuda_memory_usage", deallocs=["[memory]"]) + check_metrics(stats, "device_memory_usage", deallocs=["[memory]"]) @unittest.skipIf( IS_JETSON, "Jetson has a guard against OOM since host and gpu memory are shared" diff --git a/test/test_autograd.py b/test/test_autograd.py index c6fa124ce0..80af13df3d 100644 --- a/test/test_autograd.py +++ b/test/test_autograd.py @@ -4628,11 +4628,11 @@ Done""", self.assertEqual(avg.count, 4) self.assertEqual(avg.cpu_time_total, 30) self.assertEqual(avg.self_cpu_time_total, 30) - self.assertEqual(avg.cuda_time_total, 0) + self.assertEqual(avg.device_time_total, 0) # average stats self.assertEqual(avg.cpu_time, 7.5) - self.assertEqual(avg.cuda_time_total, 0) + self.assertEqual(avg.device_time_total, 0) def test_profiler_shapes(self): print("") diff --git a/torch/_C/_autograd.pyi b/torch/_C/_autograd.pyi index 92b21f96df..2c50a28bfb 100644 --- a/torch/_C/_autograd.pyi +++ b/torch/_C/_autograd.pyi @@ -15,6 +15,7 @@ from ._profiler import ( class DeviceType(Enum): CPU = ... CUDA = ... + XPU = ... MKLDNN = ... OPENGL = ... OPENCL = ... diff --git a/torch/_C/_profiler.pyi b/torch/_C/_profiler.pyi index e1481dd9c1..d19e72f573 100644 --- a/torch/_C/_profiler.pyi +++ b/torch/_C/_profiler.pyi @@ -39,6 +39,7 @@ class ActiveProfilerType(Enum): class ProfilerActivity(Enum): CPU = ... CUDA = ... + XPU = ... MTIA = ... PrivateUse1 = ... diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index 10d452d5fd..a2a80e1e5e 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -136,7 +136,7 @@ def do_bench_using_profiling(fn: Callable[[], Any], warmup=25, rep=100) -> float log.debug("profiling time breakdown") log.debug(actual_events.table(row_limit=-1)) - res = sum(event.cuda_time_total for event in actual_events) / 1000.0 / n_repeat + res = sum(event.device_time_total for event in actual_events) / 1000.0 / n_repeat log.debug("profiling results: %s ms", res) return res diff --git a/torch/autograd/profiler.py b/torch/autograd/profiler.py index ba020fb3cb..f233277b7e 100644 --- a/torch/autograd/profiler.py +++ b/torch/autograd/profiler.py @@ -7,7 +7,6 @@ from warnings import warn import torch import torch.cuda -from torch._C import _get_privateuse1_backend_name from torch._C._profiler import _ExperimentalConfig from torch.autograd import ( @@ -112,8 +111,12 @@ class profile: Args: enabled (bool, optional): Setting this to False makes this context manager a no-op. - use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API. - Adds approximately 4us of overhead to each tensor operation. + use_cuda (bool, optional): Enables timing of CUDA events as well + using the cudaEvent API. (will be deprecated) + + use_device (str, optional): Enables timing of device events. + Adds approximately 4us of overhead to each tensor operation when use cuda. + The valid devices options are 'cuda', 'xpu' and 'privateuseone'. 
record_shapes (bool, optional): If shapes recording is set, information about input dimensions will be collected. This allows one to see which @@ -161,9 +164,9 @@ class profile: .. warning: Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_), - one cannot use the profiler with ``use_cuda = True`` to benchmark + one cannot use the profiler with ``use_device = 'cuda'`` to benchmark DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading, - please use ``use_cuda = False`` or ``num_workers = 0``. + please use ``use_device = None`` or ``num_workers = 0``. Example: >>> # xdoctest: +SKIP @@ -207,9 +210,13 @@ class profile: if not self.enabled: return self.use_cuda = use_cuda - self.use_device: Optional[str] = ( - use_device if use_device != "privateuseone" else None - ) + if self.use_cuda: + warn( + "The attribute `use_cuda` will be deprecated soon, please use ``use_device = 'cuda'`` instead." + ) + self.use_device: Optional[str] = "cuda" + else: + self.use_device = use_device self.function_events: Optional[EventList] = None self.entered = False self.record_shapes = record_shapes @@ -233,17 +240,19 @@ class profile: use_kineto ), "Device-only events supported only with Kineto (use_kineto=True)" - if self.use_device == "cuda": - self.use_device = None - self.use_cuda = True - - if self.use_device and self.use_device != _get_privateuse1_backend_name(): - warn(f"{self.use_device} doesn't support profile.") + VALID_DEVICE_OPTIONS = ["cuda", "xpu", "privateuseone"] + if self.use_device not in VALID_DEVICE_OPTIONS: + warn(f"The {self.use_device} is not a valid device option.") self.use_device = None - if self.use_cuda and not torch.cuda.is_available(): + if self.use_device == "cuda" and not torch.cuda.is_available(): warn("CUDA is not available, disabling CUDA profiling") self.use_cuda = False + self.use_device = None + + if self.use_device == "xpu" and not torch.xpu.is_available(): + warn("XPU is not available, disabling XPU profiling") + self.use_device = None self.kineto_activities = set() if self.use_cpu: @@ -252,14 +261,18 @@ class profile: self.kineto_activities.add(ProfilerActivity.MTIA) self.profiler_kind = ProfilerState.KINETO - if self.use_cuda: + if self.use_device == "cuda": if not use_kineto or ProfilerActivity.CUDA not in _supported_activities(): assert self.use_cpu, "Legacy CUDA profiling requires use_cpu=True" self.profiler_kind = ProfilerState.KINETO_GPU_FALLBACK else: self.kineto_activities.add(ProfilerActivity.CUDA) - - if self.use_device: + elif self.use_device == "xpu": + assert ( + use_kineto and ProfilerActivity.XPU in _supported_activities() + ), "Legacy XPU profiling is not supported. Requires use_kineto=True on XPU devices." 
+ self.kineto_activities.add(ProfilerActivity.XPU) + elif self.use_device is not None and self.use_device != "privateuseone": if ( not use_kineto or ProfilerActivity.PrivateUse1 not in _supported_activities() @@ -315,8 +328,10 @@ class profile: def __exit__(self, exc_type, exc_val, exc_tb): if not self.enabled: return - if self.use_cuda: + if self.use_device == "cuda": torch.cuda.synchronize() + elif self.use_device == "xpu": + torch.xpu.synchronize() t0 = perf_counter_ns() self.kineto_results = _disable_profiler() @@ -332,7 +347,6 @@ class profile: self.function_events = EventList( parsed_results, - use_cuda=self.use_cuda, use_device=self.use_device, profile_memory=self.profile_memory, with_flops=self.with_flops, @@ -445,17 +459,11 @@ class profile: else 0 ) - def _cuda_memory_usage(mem_record): + def _device_memory_usage(mem_record): return ( mem_record.nbytes() - if mem_record.device_type() in [DeviceType.CUDA, DeviceType.HIP] - else 0 - ) - - def _privateuse1_memory_usage(mem_record): - return ( - mem_record.nbytes() - if mem_record.device_type() in [DeviceType.PrivateUse1] + if mem_record.device_type() + in [DeviceType.CUDA, DeviceType.PrivateUse1, DeviceType.HIP] else 0 ) @@ -471,16 +479,14 @@ class profile: abs_end_ns = kineto_event.start_ns() + kineto_event.duration_ns() cpu_memory_usage = 0 - cuda_memory_usage = 0 - privateuse1_memory_usage = 0 + device_memory_usage = 0 if kineto_event.device_type() == DeviceType.CPU: # find the corresponding memory allocation events for mem_record in mem_records_acc.in_interval( kineto_event.start_ns() / 1000, abs_end_ns / 1000 ): cpu_memory_usage += _cpu_memory_usage(mem_record[0]) - cuda_memory_usage += _cuda_memory_usage(mem_record[0]) - privateuse1_memory_usage += _privateuse1_memory_usage(mem_record[0]) + device_memory_usage += _device_memory_usage(mem_record[0]) mem_record[1] = True is_async = kineto_event.is_async() or ( @@ -505,8 +511,7 @@ class profile: scope=kineto_event.scope(), use_device=self.use_device, cpu_memory_usage=cpu_memory_usage, - cuda_memory_usage=cuda_memory_usage, - privateuse1_memory_usage=privateuse1_memory_usage, + device_memory_usage=device_memory_usage, is_async=is_async, sequence_nr=kineto_event.sequence_nr(), device_type=kineto_event.device_type(), @@ -516,12 +521,12 @@ class profile: ) max_evt_id = max(max_evt_id, fe.id) if fe.device_type == DeviceType.CPU and not fe.is_async: - if self.use_device: + if self.use_device == "privateuseone": privateuse1_time = kineto_event.privateuse1_elapsed_us() if privateuse1_time > 0: fe.append_kernel(fe.name, fe.device_index, privateuse1_time) fe.is_legacy = True - else: + elif self.use_device == "cuda": # Check if we have CUDA time as a fallback cuda_time = kineto_event.cuda_elapsed_us() if cuda_time > 0: @@ -534,7 +539,7 @@ class profile: device_corr_map[corr_id] = [] device_corr_map[corr_id].append(fe) - # associate CUDA kernels and CUDA runtime (CPU) with CPU events + # associate device kernels and device runtime (CPU) with CPU events for fe in function_events: if ( fe.device_type == DeviceType.CPU @@ -549,7 +554,7 @@ class profile: f_evt.time_range.end - f_evt.time_range.start, ) elif f_evt.device_type == DeviceType.CPU: - # make sure that 'thread' of a CPU Kineto (e.g. CUDA Runtime) event is associated + # make sure that 'thread' of a CPU Kineto (e.g. 
Device Runtime) event is associated # with the 'thread' of the corresponding linked PyTorch event to properly track # parents and children f_evt.thread = fe.thread @@ -569,8 +574,7 @@ class profile: scope=0, # RecordScope::FUNCTION use_device=self.use_device, cpu_memory_usage=_cpu_memory_usage(evt), - cuda_memory_usage=_cuda_memory_usage(evt), - privateuse1_memory_usage=_privateuse1_memory_usage(evt), + device_memory_usage=_device_memory_usage(evt), is_async=False, sequence_nr=-1, device_type=DeviceType.CPU, diff --git a/torch/autograd/profiler_legacy.py b/torch/autograd/profiler_legacy.py index 32700ffb1c..f72d366a36 100644 --- a/torch/autograd/profiler_legacy.py +++ b/torch/autograd/profiler_legacy.py @@ -93,7 +93,7 @@ class profile: parsed_results = _parse_legacy_records(records) self.function_events = EventList( parsed_results, - use_cuda=self.use_cuda, + use_device="cuda" if self.use_cuda else None, profile_memory=self.profile_memory, with_flops=self.with_flops, ) @@ -250,8 +250,9 @@ def _parse_legacy_records(thread_records): entry for entry in start.stack() if _filter_stack_entry(entry) ], scope=start.scope(), + use_device="cuda" if start.has_cuda() else None, cpu_memory_usage=cpu_memory_usage, - cuda_memory_usage=cuda_memory_usage, + device_memory_usage=cuda_memory_usage, is_async=is_async, is_remote=is_remote_event, sequence_nr=start.sequence_nr(), @@ -287,7 +288,7 @@ def _parse_legacy_records(thread_records): end_us=0, stack=[], cpu_memory_usage=record.cpu_memory_usage(), - cuda_memory_usage=record.cuda_memory_usage(), + device_memory_usage=record.cuda_memory_usage(), is_legacy=True, ) functions.append(fe) diff --git a/torch/autograd/profiler_util.py b/torch/autograd/profiler_util.py index 220132f15c..4c889678ad 100644 --- a/torch/autograd/profiler_util.py +++ b/torch/autograd/profiler_util.py @@ -26,12 +26,10 @@ class EventList(list): """A list of Events (for pretty printing).""" def __init__(self, *args, **kwargs): - use_cuda = kwargs.pop("use_cuda", True) use_device = kwargs.pop("use_device", None) profile_memory = kwargs.pop("profile_memory", False) with_flops = kwargs.pop("with_flops", False) super().__init__(*args, **kwargs) - self._use_cuda = use_cuda self._use_device = use_device self._profile_memory = profile_memory self._tree_built = False @@ -181,14 +179,16 @@ class EventList(list): Args: sort_by (str, optional): Attribute used to sort entries. By default they are printed in the same order as they were registered. - Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``, - ``cuda_time_total``, ``cpu_memory_usage``, ``cuda_memory_usage``, - ``self_cpu_memory_usage``, ``self_cuda_memory_usage``, ``count``. + Valid keys include: ``cpu_time``, ``cuda_time``, ``xpu_time``, + ``cpu_time_total``, ``cuda_time_total``, ``xpu_time_total``, + ``cpu_memory_usage``, ``cuda_memory_usage``, ``xpu_memory_usage``, + ``self_cpu_memory_usage``, ``self_cuda_memory_usage``, + ``self_xpu_memory_usage``, ``count``. top_level_events_only(bool, optional): Boolean flag to determine the selection of events to display. If true, the profiler will only display events at top level like top-level invocation of python `lstm`, python `add` or other functions, nested events like low-level - cpu/cuda ops events are omitted for profiler result readability. + cpu/cuda/xpu ops events are omitted for profiler result readability. Returns: A string containing the table. 
@@ -267,6 +267,7 @@ class EventList(list): return [ "self_cpu_time_total", "self_cuda_time_total", + "self_xpu_time_total", "self_privateuse1_time_total", ] @@ -280,7 +281,12 @@ class EventList(list): with open(path, "w") as f: for evt in self: if evt.stack and len(evt.stack) > 0: - metric_value = getattr(evt, metric) + metric_value = getattr( + evt, + metric.replace("cuda", "device") + .replace("xpu", "device") + .replace("privateuse1", "device"), + ) if int(metric_value) > 0: stack_str = "" for entry in reversed(evt.stack): @@ -325,7 +331,6 @@ class EventList(list): avg_list = EventList( stats.values(), - use_cuda=self._use_cuda, use_device=self._use_device, profile_memory=self._profile_memory, with_flops=self._with_flops, @@ -395,26 +400,23 @@ class FormattedTimesMixin: """ cpu_time_str = _attr_formatter("cpu_time") - cuda_time_str = _attr_formatter("cuda_time") - privateuse1_time_str = _attr_formatter("privateuse1_time") + device_time_str = _attr_formatter("device_time") cpu_time_total_str = _attr_formatter("cpu_time_total") - cuda_time_total_str = _attr_formatter("cuda_time_total") - privateuse1_time_total_str = _attr_formatter("privateuse1_time_total") + device_time_total_str = _attr_formatter("device_time_total") self_cpu_time_total_str = _attr_formatter("self_cpu_time_total") - self_cuda_time_total_str = _attr_formatter("self_cuda_time_total") - self_privateuse1_time_total_str = _attr_formatter("self_privateuse1_time_total") + self_device_time_total_str = _attr_formatter("self_device_time_total") @property def cpu_time(self): return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count # type: ignore[attr-defined] @property - def cuda_time(self): - return 0.0 if self.count == 0 else 1.0 * self.cuda_time_total / self.count # type: ignore[attr-defined] + def device_time(self): + return 0.0 if self.count == 0 else 1.0 * self.device_time_total / self.count # type: ignore[attr-defined] @property - def privateuse1_time(self): - return 0.0 if self.count == 0 else 1.0 * self.privateuse1_time_total / self.count # type: ignore[attr-defined] + def cuda_time(self): # To be deprecated + return self.device_time class Interval: @@ -448,8 +450,7 @@ class FunctionEvent(FormattedTimesMixin): scope=0, use_device=None, cpu_memory_usage=0, - cuda_memory_usage=0, - privateuse1_memory_usage=0, + device_memory_usage=0, is_async=False, is_remote=False, sequence_nr=-1, @@ -479,8 +480,7 @@ class FunctionEvent(FormattedTimesMixin): self.scope: int = scope self.use_device: Optional[str] = use_device self.cpu_memory_usage: int = cpu_memory_usage - self.cuda_memory_usage: int = cuda_memory_usage - self.privateuse1_memory_usage: int = privateuse1_memory_usage + self.device_memory_usage: int = device_memory_usage self.is_async: bool = is_async self.is_remote: bool = is_remote self.sequence_nr: int = sequence_nr @@ -530,20 +530,23 @@ class FunctionEvent(FormattedTimesMixin): ) @property - def self_cuda_memory_usage(self): + def self_device_memory_usage(self): if self.is_async or self.device_type != DeviceType.CPU: return 0 - return self.cuda_memory_usage - sum( - child.cuda_memory_usage for child in self.cpu_children + return self.device_memory_usage - sum( + child.device_memory_usage for child in self.cpu_children ) @property - def self_privateuse1_memory_usage(self): - if self.is_async or self.device_type != DeviceType.CPU: + def self_cuda_memory_usage(self): # To be deprecated + self.self_device_memory_usage + + @property + def cpu_time_total(self): + if self.device_type == DeviceType.CPU: + return 
self.time_range.elapsed_us() + else: return 0 - return self.privateuse1_memory_usage - sum( - child.privateuse1_memory_usage for child in self.cpu_children - ) @property def self_cpu_time_total(self): @@ -554,84 +557,50 @@ class FunctionEvent(FormattedTimesMixin): ) @property - def cuda_time_total(self): - if self.is_async or self.use_device: + def device_time_total(self): + if self.is_async or not self.use_device: return 0 if self.device_type == DeviceType.CPU: if not self.is_legacy: # account for the kernels in the children ops return sum(kinfo.duration for kinfo in self.kernels) + sum( - ch.cuda_time_total for ch in self.cpu_children + ch.device_time_total for ch in self.cpu_children ) else: # each legacy cpu events has a single (fake) kernel return sum(kinfo.duration for kinfo in self.kernels) else: - assert self.device_type == DeviceType.CUDA + assert self.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1] return self.time_range.elapsed_us() @property - def self_cuda_time_total(self): - if self.is_async or self.use_device: - return 0 - if self.device_type == DeviceType.CPU: - return self.cuda_time_total - sum( - child.cuda_time_total for child in self.cpu_children - ) - else: - assert self.device_type == DeviceType.CUDA - return self.cuda_time_total + def cuda_time_total(self): # To be deprecated + self.device_time_total @property - def cpu_time_total(self): - if self.device_type == DeviceType.CPU: - return self.time_range.elapsed_us() - else: - return 0 - - @property - def self_privateuse1_time_total(self): + def self_device_time_total(self): if self.is_async or not self.use_device: return 0 if self.device_type == DeviceType.CPU: - return self.privateuse1_time_total - sum( - child.privateuse1_time_total for child in self.cpu_children + return self.device_time_total - sum( + [child.device_time_total for child in self.cpu_children] ) else: - assert self.device_type == DeviceType.CUDA - return self.privateuse1_time_total + assert self.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1] + return self.device_time_total @property - def privateuse1_time_total(self): - if self.is_async or not self.use_device: - return 0 - if self.device_type == DeviceType.CPU: - if not self.is_legacy: - # account for the kernels in the children ops - return sum(kinfo.duration for kinfo in self.kernels) + sum( - ch.privateuse1_time_total for ch in self.cpu_children - ) - else: - # each legacy cpu events has a single (fake) kernel - return sum(kinfo.duration for kinfo in self.kernels) - else: - assert self.device_type == DeviceType.PrivateUse1 - return self.time_range.elapsed_us() + def self_cuda_time_total(self): # To be deprecated + self.self_device_time_total @property def key(self): return self.name def __repr__(self): - device_name = "cuda" if not self.use_device else self.use_device - device_time = ( - self.cuda_time_str if not self.use_device else self.privateuse1_time_str - ) - device_memory_usage = ( - self.cuda_memory_usage - if not self.use_device - else self.privateuse1_memory_usage - ) + device_name = self.use_device + device_time = self.device_time_str + device_memory_usage = self.device_memory_usage return ( f"<FunctionEvent id={self.id} name={self.name} device_type={self.device_type} node_id={self.node_id} " f"cpu_time={self.cpu_time_str} start_us={self.time_range.start} end_us={self.time_range.end} " @@ -653,20 +622,16 @@ class FunctionEventAvg(FormattedTimesMixin): self.is_remote: bool = False self.use_device: Optional[str] = None self.cpu_time_total: int = 0 - 
self.cuda_time_total: int = 0 - self.privateuse1_time_total: int = 0 + self.device_time_total: int = 0 self.self_cpu_time_total: int = 0 - self.self_cuda_time_total: int = 0 - self.self_privateuse1_time_total: int = 0 + self.self_device_time_total: int = 0 self.input_shapes: Optional[List[List[int]]] = None self.stack: Optional[List] = None self.scope: Optional[int] = None self.cpu_memory_usage: int = 0 - self.cuda_memory_usage: int = 0 - self.privateuse1_memory_usage: int = 0 + self.device_memory_usage: int = 0 self.self_cpu_memory_usage: int = 0 - self.self_cuda_memory_usage: int = 0 - self.self_privateuse1_memory_usage: int = 0 + self.self_device_memory_usage: int = 0 self.cpu_children: Optional[List[FunctionEvent]] = None self.cpu_parent: Optional[FunctionEvent] = None self.device_type: DeviceType = DeviceType.CPU @@ -694,17 +659,13 @@ class FunctionEventAvg(FormattedTimesMixin): assert isinstance(other, (FunctionEvent, FunctionEventAvg)) assert other.key == self.key self.cpu_time_total += other.cpu_time_total - self.cuda_time_total += other.cuda_time_total - self.privateuse1_time_total += other.privateuse1_time_total + self.device_time_total += other.device_time_total self.self_cpu_time_total += other.self_cpu_time_total - self.self_cuda_time_total += other.self_cuda_time_total - self.self_privateuse1_time_total += other.self_privateuse1_time_total + self.self_device_time_total += other.self_device_time_total self.cpu_memory_usage += other.cpu_memory_usage - self.cuda_memory_usage += other.cuda_memory_usage - self.privateuse1_memory_usage += other.privateuse1_memory_usage + self.device_memory_usage += other.device_memory_usage self.self_cpu_memory_usage += other.self_cpu_memory_usage - self.self_cuda_memory_usage += other.self_cuda_memory_usage - self.self_privateuse1_memory_usage += other.self_privateuse1_memory_usage + self.self_device_memory_usage += other.self_device_memory_usage self.count += other.count if self.flops is None: self.flops = other.flops @@ -717,19 +678,9 @@ class FunctionEventAvg(FormattedTimesMixin): def __repr__(self): device_name = "cuda" if not self.use_device else self.use_device - self_device_time = ( - self.self_cuda_time_total_str - if not self.use_device - else self.self_privateuse1_time_total_str - ) - device_time = ( - self.cuda_time_str if not self.use_device else self.privateuse1_time_str - ) - device_memory = ( - self.cuda_memory_usage - if not self.use_device - else self.privateuse1_memory_usage - ) + self_device_time = self.self_device_time_total_str + device_time = self.device_time_str + device_memory = self.device_memory_usage return ( f"<FunctionEventAvg key={self.key} self_cpu_time={self.self_cpu_time_total_str} cpu_time={self.cpu_time_str} " f" self_{device_name}_time={self_device_time} {device_name}_time={device_time} input_shapes={str(self.input_shapes)} " @@ -828,19 +779,14 @@ def _build_table( if len(events) == 0: return "" - has_cuda_time = any(event.self_cuda_time_total > 0 for event in events) - has_cuda_mem = any(event.self_cuda_memory_usage > 0 for event in events) - has_privateuse1_time = any( - event.self_privateuse1_time_total > 0 for event in events - ) - has_privateuse1_mem = any( - event.self_privateuse1_memory_usage > 0 for event in events - ) + has_device_time = any(event.self_device_time_total > 0 for event in events) + has_device_mem = any(event.self_device_memory_usage > 0 for event in events) use_device = events[0].use_device - if not use_device and (has_privateuse1_mem or has_privateuse1_time): - raise RuntimeError( - 
"use_device is None, but there is private device performance data." - ) + # Running on PrivateUse1 device with profiler but not enable + # ProfilerActivity.PrivateUse1 can also catch privateuse1 memory usage. + # Here only need to check has_privateuse1_time if not use_device. + if not use_device and has_device_time: + raise RuntimeError("use_device is None, but there is device performance data.") has_input_shapes = any( (event.input_shapes is not None and len(event.input_shapes) > 0) @@ -849,8 +795,16 @@ def _build_table( if sort_by is not None: events = EventList( - sorted(events, key=lambda evt: getattr(evt, sort_by), reverse=True), - use_cuda=has_cuda_time, + sorted( + events, + key=lambda evt: getattr( + evt, + sort_by.replace("cuda", "device") + .replace("xpu", "device") + .replace("privateuse1", "device"), + ), + reverse=True, + ), use_device=use_device, profile_memory=profile_memory, with_flops=with_flops, @@ -888,23 +842,14 @@ def _build_table( "CPU total", "CPU time avg", ] - if has_cuda_time: - headers.extend( - [ - "Self CUDA", - "Self CUDA %", - "CUDA total", - "CUDA time avg", - ] - ) - if has_privateuse1_time: - privateuse1 = use_device.upper() + device_name = use_device.upper() if use_device is not None else "None" + if has_device_time: headers.extend( [ - f"Self {privateuse1}", - f"Self {privateuse1} %", - f"{privateuse1} total", - f"{privateuse1} time avg", + f"Self {device_name}", + f"Self {device_name} %", + f"{device_name} total", + f"{device_name} time avg", ] ) if profile_memory: @@ -914,19 +859,11 @@ def _build_table( "Self CPU Mem", ] ) - if has_cuda_mem: + if has_device_mem: headers.extend( [ - "CUDA Mem", - "Self CUDA Mem", - ] - ) - if has_privateuse1_mem: - privateuse1 = use_device.upper() - headers.extend( - [ - f"{privateuse1} Mem", - f"Self {privateuse1} Mem", + f"{device_name} Mem", + f"Self {device_name} Mem", ] ) headers.append("# of Calls") @@ -1000,22 +937,16 @@ def _build_table( result.append(s) result.append("\n") # Yes, newline after the end as well - sum_self_cpu_time_total = sum(event.self_cpu_time_total for event in events) - sum_self_cuda_time_total = 0 - sum_self_privateuse1_time_total = 0 + sum_self_cpu_time_total = 0 + sum_self_device_time_total = 0 for evt in events: - if evt.device_type == DeviceType.CPU: + sum_self_cpu_time_total += evt.self_cpu_time_total + if evt.device_type == DeviceType.CPU and evt.is_legacy: # in legacy profiler, kernel info is stored in cpu events - if evt.is_legacy: - if not use_device: - sum_self_cuda_time_total += evt.self_cuda_time_total - else: - sum_self_privateuse1_time_total += evt.self_privateuse1_time_total - elif evt.device_type == DeviceType.CUDA: + sum_self_device_time_total += evt.self_device_time_total + elif evt.device_type in [DeviceType.CUDA, DeviceType.PrivateUse1]: # in kineto profiler, there're events with the correct device type (e.g. 
CUDA) - sum_self_cuda_time_total += evt.self_cuda_time_total - elif evt.device_type == DeviceType.PrivateUse1: - sum_self_privateuse1_time_total += evt.self_privateuse1_time_total + sum_self_device_time_total += evt.self_device_time_total # Actual printing if header is not None: @@ -1060,28 +991,16 @@ def _build_table( evt.cpu_time_total_str, # CPU total evt.cpu_time_str, # CPU time avg ] - if has_cuda_time: + if has_device_time: row_values.extend( [ - evt.self_cuda_time_total_str, - # CUDA time total % + evt.self_device_time_total_str, + # device time total % _format_time_share( - evt.self_cuda_time_total, sum_self_cuda_time_total + evt.self_device_time_total, sum_self_device_time_total ), - evt.cuda_time_total_str, - evt.cuda_time_str, # Cuda time avg - ] - ) - if has_privateuse1_time: - row_values.extend( - [ - evt.self_privateuse1_time_total_str, - # PrivateUse1 time total % - _format_time_share( - evt.self_privateuse1_time_total, sum_self_privateuse1_time_total - ), - evt.privateuse1_time_total_str, - evt.privateuse1_time_str, # PrivateUse1 time avg + evt.device_time_total_str, + evt.device_time_str, # device time avg ] ) if profile_memory: @@ -1093,22 +1012,13 @@ def _build_table( _format_memory(evt.self_cpu_memory_usage), ] ) - if has_cuda_mem: - row_values.extend( - [ - # CUDA Mem Total - _format_memory(evt.cuda_memory_usage), - # Self CUDA Mem Total - _format_memory(evt.self_cuda_memory_usage), - ] - ) - if has_privateuse1_mem: + if has_device_mem: row_values.extend( [ - # PrivateUse1 Mem Total - _format_memory(evt.privateuse1_memory_usage), - # Self PrivateUse1 Mem Total - _format_memory(evt.self_privateuse1_memory_usage), + # Device Mem Total + _format_memory(evt.device_memory_usage), + # Self Device Mem Total + _format_memory(evt.self_device_memory_usage), ] ) row_values.append( @@ -1144,10 +1054,9 @@ def _build_table( append(header_sep) append(f"Self CPU time total: {_format_time(sum_self_cpu_time_total)}") - if has_cuda_time: - append(f"Self CUDA time total: {_format_time(sum_self_cuda_time_total)}") - if has_privateuse1_time: + if has_device_time: append( - f"Self {use_device.upper()} time total: {_format_time(sum_self_privateuse1_time_total)}" + f"Self {use_device.upper() if use_device is not None else 'None'} " + f"time total: {_format_time(sum_self_device_time_total)}" ) return "".join(result) diff --git a/torch/csrc/profiler/kineto_shim.cpp b/torch/csrc/profiler/kineto_shim.cpp index 85f91bf8b2..41561c6f3e 100644 --- a/torch/csrc/profiler/kineto_shim.cpp +++ b/torch/csrc/profiler/kineto_shim.cpp @@ -342,6 +342,7 @@ c10::DeviceType deviceTypeFromActivity(libkineto::ActivityType activity_type) { case libkineto::ActivityType::USER_ANNOTATION: case libkineto::ActivityType::EXTERNAL_CORRELATION: case libkineto::ActivityType::CUDA_RUNTIME: + case libkineto::ActivityType::XPU_RUNTIME: case libkineto::ActivityType::CPU_INSTANT_EVENT: case libkineto::ActivityType::GLOW_RUNTIME: case libkineto::ActivityType::MTIA_RUNTIME: diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py index fc7a61bf45..20094e5814 100644 --- a/torch/profiler/profiler.py +++ b/torch/profiler/profiler.py @@ -12,7 +12,6 @@ from typing_extensions import Self import torch import torch.autograd.profiler as prof -from torch._C import _get_privateuse1_backend_name from torch._C._profiler import ( _add_execution_trace_observer, _disable_execution_trace_observer, @@ -71,8 +70,10 @@ class _KinetoProfile: Args: activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported 
values: - ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. - Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. + ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``, + ``torch.profiler.ProfilerActivity.XPU``. + Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA + or (when available) ProfilerActivity.XPU. record_shapes (bool): save information about operator's input shapes. profile_memory (bool): track tensor memory allocation/deallocation (see ``export_memory_timeline`` for more details). @@ -125,9 +126,13 @@ class _KinetoProfile: self.profiler: Optional[prof.profile] = None self.mem_tl: Optional[MemoryProfileTimeline] = None self.use_device = None - privateuse1_backend = _get_privateuse1_backend_name() - if privateuse1_backend != "privateuseone": - self.use_device = privateuse1_backend + if ProfilerActivity.CUDA in self.activities: + self.use_device = "cuda" + elif ProfilerActivity.XPU in self.activities: + self.use_device = "xpu" + else: + self.use_device = "privateuseone" + # user-defined metadata to be amended to the trace self.preset_metadata: Dict[str, str] = dict() @@ -143,7 +148,7 @@ class _KinetoProfile: use_cuda=(ProfilerActivity.CUDA in self.activities), use_cpu=(ProfilerActivity.CPU in self.activities), use_mtia=(ProfilerActivity.MTIA in self.activities), - use_device=None, + use_device=self.use_device, record_shapes=self.record_shapes, with_flops=self.with_flops, profile_memory=self.profile_memory, @@ -443,8 +448,10 @@ class profile(_KinetoProfile): Args: activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values: - ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. - Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. + ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``, + ``torch.profiler.ProfilerActivity.XPU``. + Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA + or (when available) ProfilerActivity.XPU. schedule (Callable): callable that takes step (int) as a single parameter and returns ``ProfilerAction`` value that specifies the profiler action to perform at each step. 
on_trace_ready (Callable): callable that is called at each step when ``schedule`` diff --git a/torch/testing/_internal/distributed/rpc/rpc_test.py b/torch/testing/_internal/distributed/rpc/rpc_test.py index e1f9cd052f..ee98aaa161 100644 --- a/torch/testing/_internal/distributed/rpc/rpc_test.py +++ b/torch/testing/_internal/distributed/rpc/rpc_test.py @@ -4605,22 +4605,22 @@ class CudaRpcTest(RpcAgentTestFixture): function_events = p.function_events for event in function_events: if event.is_async: - self.assertEqual(0, event.cuda_time_total) + self.assertEqual(0, event.device_time_total) self.assertEqual([], event.kernels) - self.assertEqual(0, event.cuda_time) + self.assertEqual(0, event.device_time) else: if event.node_id == 1: continue self.assertTrue(event.node_id in [dst_cuda_0, dst_cuda_1]) if get_name(event) in EXPECTED_REMOTE_EVENTS: - self.assertGreater(event.cuda_time_total, 0) + self.assertGreater(event.device_time_total, 0) self.assertEqual(1, len(event.kernels)) kernel = event.kernels[0] if event.node_id == dst_cuda_0: self.assertEqual(kernel.device, 0) if event.node_id == dst_cuda_1: self.assertEqual(kernel.device, 1) - self.assertGreater(event.cuda_time, 0) + self.assertGreater(event.device_time, 0) # Validate that EXPECTED_REMOTE_EVENTS is a subset of remotely profiled # events.
2.41.0
e24cc012b130869d0029280dcbb34efdd0032cc
Mon, 22 Apr 2024 01:34:22 +0000
[PATCH 0443/1000] fix Invalid call to aoti_torch_tensor_copy_ #123039 (#124037)
fixes #123039. In ABI-compatible mode, ExternKernelSchedulerNode generates code using `aoti_torch_tensor_copy_`, which requires an `AtenTensorHandle`, but the buffer allocation generates an ArrayRefTensor that allocates memory on the stack. To fix this issue, this PR prevents ExternKernelSchedulerNode from using stack memory allocation in ABI-compatible mode and creates an AtenTensorHandle instead. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124037 Approved by: https://github.com/desertfire
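A minimal illustrative sketch of the tightened check (the method name and the contiguity helper here are assumptions; the hunk below only shows the tail of the real predicate, and the actual change is just the added `not buffer.is_extern()` clause):

    # Hypothetical, simplified stand-in for CppWrapperCpu's stack-allocation check.
    # Buffers produced by extern kernels are excluded, so in ABI-compatible mode
    # aoti_torch_tensor_copy_ always receives an AtenTensorHandle instead of a
    # stack-allocated ArrayRefTensor.
    def can_stack_allocate_buffer(buffer) -> bool:
        return (
            buffer.is_contiguous()      # stand-in for the contiguity check in the diff
            and not buffer.is_extern()  # new condition: extern kernel outputs opt out
        )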
diff --git a/test/inductor/test_cuda_cpp_wrapper.py b/test/inductor/test_cuda_cpp_wrapper.py index b662e2438c..fa717ab835 100644 --- a/test/inductor/test_cuda_cpp_wrapper.py +++ b/test/inductor/test_cuda_cpp_wrapper.py @@ -109,9 +109,7 @@ if config.abi_compatible: test_failures_cuda_wrapper[ f"{test_name}_dynamic_shapes" ] = test_torchinductor.TestFailure(("cuda_wrapper",), is_skip=False) - skip_list = [ - "test_multi_device_cuda", - ] + skip_list = [] for test_name in skip_list: test_failures_cuda_wrapper[test_name] = test_torchinductor.TestFailure( ("cuda_wrapper",), is_skip=True diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py index 4c07930045..7fad66e7ac 100644 --- a/torch/_inductor/codegen/cpp_wrapper_cpu.py +++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py @@ -895,9 +895,11 @@ class CppWrapperCpu(WrapperCodeGen): @cache_on_self def get_output_refs(self): return [ - f"torch::tensor({x.codegen_reference(self.wrapper_call)})" - if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible - else x.codegen_reference(self.wrapper_call) + ( + f"torch::tensor({x.codegen_reference(self.wrapper_call)})" + if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible + else x.codegen_reference(self.wrapper_call) + ) for x in V.graph.graph_outputs ] @@ -1097,9 +1099,11 @@ class CppWrapperCpu(WrapperCodeGen): outputs_str = "output_tensors" else: outputs = [ - f"output_tensors[{i}]" - if self.output_is_tensor[i] - else f"output_tensors[{i}].item()" + ( + f"output_tensors[{i}]" + if self.output_is_tensor[i] + else f"output_tensors[{i}].item()" + ) for i in range(len(V.graph.graph_outputs)) ] outputs_str = f"[{', '.join(outputs)}]" @@ -1394,6 +1398,7 @@ class CppWrapperCpu(WrapperCodeGen): and ir.is_contiguous_strides_for_shape( buffer.get_stride(), buffer.get_size() ) + and not buffer.is_extern() ) def make_buffer_free(self, buffer):
2.41.0
7035cc11aa3aefe1a45a9ba6d0cb4d2a6f2e7c1
Sun, 21 Apr 2024 11:09:44 -0700
[PATCH 0444/1000] [inductor] Refactor runtime files into torch._inductor.runtime (part 1) (#124552)
I am planning to make the compile_worker process not import torch so it can start up much faster. This stack is prep for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124552 Approved by: https://github.com/yanboliang
diff --git a/test/inductor/test_cuda_repro.py b/test/inductor/test_cuda_repro.py index c1d57d91d7..952a2eacaf 100644 --- a/test/inductor/test_cuda_repro.py +++ b/test/inductor/test_cuda_repro.py @@ -381,7 +381,7 @@ class CudaReproTests(TestCase): https://github.com/pytorch/torchdynamo/issues/1670 """ from torch._C import _cuda_getCurrentRawStream as get_cuda_stream - from torch._inductor.triton_heuristics import ( + from torch._inductor.runtime.triton_heuristics import ( CachingAutotuner, grid, HeuristicType, diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py index 3000be0f88..c63b1c6494 100644 --- a/test/inductor/test_torchinductor.py +++ b/test/inductor/test_torchinductor.py @@ -9643,7 +9643,7 @@ if HAS_GPU and RUN_GPU and not TEST_WITH_ASAN: copy_tests(CommonTemplate, GPUTests, GPU_TYPE) class TritonCodeGenTests(TestCase): - from torch._inductor.triton_heuristics import CachingAutotuner + from torch._inductor.runtime.triton_heuristics import CachingAutotuner class NoOpCompilerBackend: def __init__(self): @@ -9695,7 +9695,7 @@ if HAS_GPU and RUN_GPU and not TEST_WITH_ASAN: for val in mod.__dict__.values(): if isinstance( - val, torch._inductor.triton_heuristics.CachingAutotuner + val, torch._inductor.runtime.triton_heuristics.CachingAutotuner ): kernels.append(val) diff --git a/test/inductor/test_triton_heuristics.py b/test/inductor/test_triton_heuristics.py index 1841454ab9..ab54164edd 100644 --- a/test/inductor/test_triton_heuristics.py +++ b/test/inductor/test_triton_heuristics.py @@ -16,8 +16,8 @@ except ImportError: raise unittest.SkipTest("requires triton") # noqa: TRY200 from torch._inductor import config +from torch._inductor.runtime.triton_heuristics import triton_config from torch._inductor.test_case import run_tests, TestCase -from torch._inductor.triton_heuristics import triton_config class TestTritonHeuristics(TestCase): diff --git a/test/test_public_bindings.py b/test/test_public_bindings.py index 65aa339aff..96ffb9d03e 100644 --- a/test/test_public_bindings.py +++ b/test/test_public_bindings.py @@ -333,7 +333,7 @@ class TestPublicBindings(TestCase): "torch.utils.tensorboard._caffe2_graph", "torch._inductor.codegen.cuda.cuda_template", "torch._inductor.codegen.cuda.gemm_template", - "torch._inductor.triton_helpers", + "torch._inductor.runtime.triton_helpers", "torch.ao.pruning._experimental.data_sparsifier.lightning.callbacks.data_sparsity", "torch.backends._coreml.preprocess", "torch.contrib._tensorboard_vis", diff --git a/torch/_inductor/codegen/cpp_wrapper_cuda.py b/torch/_inductor/codegen/cpp_wrapper_cuda.py index 69838dccad..39d37348d7 100644 --- a/torch/_inductor/codegen/cpp_wrapper_cuda.py +++ b/torch/_inductor/codegen/cpp_wrapper_cuda.py @@ -6,10 +6,10 @@ from typing import Any, List, Optional, TYPE_CHECKING import sympy from torch._inductor.codecache import get_cpp_wrapper_cubin_path_name +from torch._inductor.runtime.triton_heuristics import grid as default_grid from .. 
import config from ..codecache import CudaKernelParamCache -from ..triton_heuristics import grid as default_grid from ..virtualized import V from .aoti_hipify_utils import maybe_hipify_code_wrapper from .codegen_device_driver import cuda_kernel_driver, cuda_kernel_header diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index cf51b70c16..007e412e42 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -34,6 +34,7 @@ import torch.utils._pytree as pytree from torch._dynamo.utils import preserve_rng_state from torch._inductor.metrics import is_metric_table_enabled, log_kernel_metadata +from torch._inductor.runtime.triton_heuristics import AutotuneHint from torch._prims_common import is_integer_dtype from torch.utils._sympy.functions import FloorDiv, ModularIndexing from torch.utils._sympy.value_ranges import ValueRanges @@ -46,7 +47,6 @@ from ..dependencies import Dep, MemoryDep, StarDep, WeakDep from ..ir import IRNode, ReductionHint, TritonTemplateBuffer from ..optimize_indexing import indexing_dtype_strength_reduction from ..scheduler import BaseSchedulerNode, BaseScheduling, WhyNoFuse -from ..triton_heuristics import AutotuneHint from ..utils import ( cache_on_self, do_bench, @@ -120,10 +120,14 @@ def gen_common_triton_imports(): imports.splice( """ - from torch._inductor import triton_helpers, triton_heuristics + from torch._inductor.runtime import ( + triton_helpers, + triton_heuristics, + libdevice, + tl_math, + AutotuneHint, + ) from torch._inductor.ir import ReductionHint, TileHint - from torch._inductor.triton_helpers import libdevice, math as tl_math - from torch._inductor.triton_heuristics import AutotuneHint from torch._inductor.utils import instance_descriptor """ ) @@ -2652,7 +2656,7 @@ class TritonKernel(Kernel): from torch._dynamo.testing import rand_strided {} import torch - from torch._inductor.triton_heuristics import grid, split_scan_grid + from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid """.format( V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") ) diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py index 6f28ea7ea8..17c2f58a76 100644 --- a/torch/_inductor/codegen/wrapper.py +++ b/torch/_inductor/codegen/wrapper.py @@ -34,6 +34,7 @@ from torch.utils._sympy.singleton_int import SingletonInt from .. import codecache, config, ir from ..ir import ReinterpretView +from ..runtime import triton_heuristics from ..utils import ( cache_on_self, get_benchmark_name, @@ -521,10 +522,11 @@ class WrapperCodeGen(CodeGen): """ import triton import triton.language as tl - from torch._inductor.triton_heuristics import grid, split_scan_grid, start_graph, end_graph + from {} import grid, split_scan_grid, start_graph, end_graph {} """.format( - V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") + triton_heuristics.__name__, + V.graph.device_ops.import_get_raw_stream_as("get_raw_stream"), ) ) @@ -1266,9 +1268,9 @@ class WrapperCodeGen(CodeGen): def generate_reset_kernel_saved_flags(self): self.wrapper_call.splice( - """ + f""" for kernel in globals().values(): - if isinstance(kernel, torch._inductor.triton_heuristics.CachingAutotuner): + if isinstance(kernel, {triton_heuristics.__name__}.CachingAutotuner): kernel.cuda_kernel_saved = False """ ) @@ -1285,9 +1287,9 @@ class WrapperCodeGen(CodeGen): subsequent AOTInductor code generation and compilation. 
""" self.wrapper_call.splice( - """ + f""" for kernel in globals().values(): - if isinstance(kernel, torch._inductor.triton_heuristics.CachingAutotuner): + if isinstance(kernel, {triton_heuristics.__name__}.CachingAutotuner): if not kernel.cuda_kernel_saved: if len(kernel.launchers) == 0: kernel.precompile() diff --git a/torch/_inductor/runtime/__init__.py b/torch/_inductor/runtime/__init__.py new file mode 100644 index 0000000000..04f35cf3b4 --- /dev/null +++ b/torch/_inductor/runtime/__init__.py @@ -0,0 +1,12 @@ +from . import triton_helpers, triton_heuristics +from .triton_helpers import libdevice, math as tl_math +from .triton_heuristics import AutotuneHint + + +__all__ = [ + "triton_heuristics", + "triton_helpers", + "libdevice", + "tl_math", + "AutotuneHint", +] diff --git a/torch/_inductor/triton_helpers.py b/torch/_inductor/runtime/triton_helpers.py similarity index 96% rename from torch/_inductor/triton_helpers.py rename to torch/_inductor/runtime/triton_helpers.py index 61db5b1bc3..71b746bdf4 100644 --- a/torch/_inductor/triton_helpers.py +++ b/torch/_inductor/runtime/triton_helpers.py @@ -1,5 +1,18 @@ -import triton -import triton.language as tl +try: + import triton + import triton.language as tl +except ImportError: + + class triton: # type: ignore[no-redef] + @staticmethod + def jit(x): + return x + + class tl: # type: ignore[no-redef] + constexpr = None # type: ignore[var-annotated] + math = None # type: ignore[var-annotated] + extra = None # type: ignore[var-annotated] + # In the latest triton, math functions were shuffled around into different modules: # https://github.com/openai/triton/pull/3172 diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py similarity index 99% rename from torch/_inductor/triton_heuristics.py rename to torch/_inductor/runtime/triton_heuristics.py index cf68fb020c..85860fd05c 100644 --- a/torch/_inductor/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -20,14 +20,13 @@ import torch import torch.autograd.profiler as autograd_profiler from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device from torch._dynamo.utils import dynamo_timed, get_first_attr -from torch.utils._triton import has_triton_package -from . import config -from .codecache import cache_dir, CudaKernelParamCache -from .coordinate_descent_tuner import CoordescTuner +from torch._inductor import config +from torch._inductor.codecache import cache_dir, CudaKernelParamCache +from torch._inductor.coordinate_descent_tuner import CoordescTuner -from .ir import ReductionHint, TileHint -from .utils import ( +from torch._inductor.ir import ReductionHint, TileHint +from torch._inductor.utils import ( ceildiv, conditional_product, create_bandwidth_info_str, @@ -37,6 +36,7 @@ from .utils import ( next_power_of_2, triton_config_to_hashable, ) +from torch.utils._triton import has_triton_package log = logging.getLogger(__name__) @@ -614,7 +614,7 @@ class CachingAutotuner(KernelInterface): return do_bench(kernel_call, rep=40, fast_flush=True) def clone_args(self, *args, **kwargs) -> Tuple[List[Any], Dict[str, Any]]: - from .compile_fx import clone_preserve_strides + from ..compile_fx import clone_preserve_strides # clone inplace buffers to avoid autotune contaminating them if # the kernel does in-place stores. 
avoid cloning other buffers because diff --git a/torch/_inductor/wrapper_benchmark.py b/torch/_inductor/wrapper_benchmark.py index c0205659ef..81a07fcf8d 100644 --- a/torch/_inductor/wrapper_benchmark.py +++ b/torch/_inductor/wrapper_benchmark.py @@ -49,7 +49,7 @@ def get_kernel_category(kernel_mod): def get_triton_kernel(mod): - from torch._inductor.triton_heuristics import CachingAutotuner + from torch._inductor.runtime.triton_heuristics import CachingAutotuner cand_list = [ v
2.41.0
4d47f5bbb07bed98b1eb8313607be6e94686269
Sun, 21 Apr 2024 11:09:44 -0700
[PATCH 0445/1000] [inductor] Refactor runtime files into torch._inductor.runtime (part 2) (#124553)
I am planning to make the compile_worker process not import torch so it can start up much faster. This stack is prep for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124553 Approved by: https://github.com/yanboliang ghstack dependencies: #124552
diff --git a/test/inductor/test_cuda_repro.py b/test/inductor/test_cuda_repro.py index 952a2eacaf..684f3cef8f 100644 --- a/test/inductor/test_cuda_repro.py +++ b/test/inductor/test_cuda_repro.py @@ -381,12 +381,8 @@ class CudaReproTests(TestCase): https://github.com/pytorch/torchdynamo/issues/1670 """ from torch._C import _cuda_getCurrentRawStream as get_cuda_stream - from torch._inductor.runtime.triton_heuristics import ( - CachingAutotuner, - grid, - HeuristicType, - ) - from torch._inductor.utils import instance_descriptor + from torch._inductor.runtime.hints import HeuristicType, instance_descriptor + from torch._inductor.runtime.triton_heuristics import CachingAutotuner, grid def autotune(configs, meta): def decorator(fn): diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index 007e412e42..67d69efe7a 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -34,7 +34,7 @@ import torch.utils._pytree as pytree from torch._dynamo.utils import preserve_rng_state from torch._inductor.metrics import is_metric_table_enabled, log_kernel_metadata -from torch._inductor.runtime.triton_heuristics import AutotuneHint +from torch._inductor.runtime.hints import AutotuneHint from torch._prims_common import is_integer_dtype from torch.utils._sympy.functions import FloorDiv, ModularIndexing from torch.utils._sympy.value_ranges import ValueRanges @@ -44,8 +44,9 @@ from ..._dynamo.utils import counters from .. import config, ir, scheduler from ..codecache import code_hash, get_path, PyCodeCache from ..dependencies import Dep, MemoryDep, StarDep, WeakDep -from ..ir import IRNode, ReductionHint, TritonTemplateBuffer +from ..ir import IRNode, TritonTemplateBuffer from ..optimize_indexing import indexing_dtype_strength_reduction +from ..runtime.hints import ReductionHint from ..scheduler import BaseSchedulerNode, BaseScheduling, WhyNoFuse from ..utils import ( cache_on_self, @@ -120,15 +121,9 @@ def gen_common_triton_imports(): imports.splice( """ - from torch._inductor.runtime import ( - triton_helpers, - triton_heuristics, - libdevice, - tl_math, - AutotuneHint, - ) - from torch._inductor.ir import ReductionHint, TileHint - from torch._inductor.utils import instance_descriptor + from torch._inductor.runtime import triton_helpers, triton_heuristics + from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math + from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor """ ) return imports.getvalue() diff --git a/torch/_inductor/codegen/triton_split_scan.py b/torch/_inductor/codegen/triton_split_scan.py index fba8717328..c6b851dc42 100644 --- a/torch/_inductor/codegen/triton_split_scan.py +++ b/torch/_inductor/codegen/triton_split_scan.py @@ -2,7 +2,8 @@ import functools from typing import Optional, Set -from torch._inductor import config, ir +import torch._inductor.runtime.hints +from torch._inductor import config from torch._inductor.codegen.triton import ( IterationRangesRoot, @@ -36,7 +37,7 @@ class TritonSplitScanKernel(TritonKernel): *groups, index_dtype: str, mutations: Optional[Set[str]] = None, - reduction_hint=ir.ReductionHint.DEFAULT, + reduction_hint=torch._inductor.runtime.hints.ReductionHint.DEFAULT, min_elem_per_thread=0, ): super().__init__( diff --git a/torch/_inductor/codegen/triton_utils.py b/torch/_inductor/codegen/triton_utils.py index c8a7d92e3c..630f55ee94 100644 --- a/torch/_inductor/codegen/triton_utils.py +++ b/torch/_inductor/codegen/triton_utils.py @@ -5,7 
+5,8 @@ import sympy import torch from .. import config -from ..utils import _type_of, instance_descriptor +from ..runtime.hints import instance_descriptor +from ..utils import _type_of from ..virtualized import V from .common import KernelArgType, SizeArg, TensorArg, WorkspaceArg diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py index ea7c7b0808..7e7cce890b 100644 --- a/torch/_inductor/ir.py +++ b/torch/_inductor/ir.py @@ -8,7 +8,6 @@ import re import textwrap import traceback from contextlib import nullcontext -from enum import Enum from functools import partial from typing import ( Any, @@ -61,6 +60,7 @@ from .dependencies import ( var_builder, ) from .ops_handler import OpCounterCSE +from .runtime.hints import ReductionHint from .utils import ( argsort, cache_on_self, @@ -533,18 +533,6 @@ class Scatter(Pointwise): ) -class ReductionHint(Enum): - INNER = 0 - OUTER = 1 - OUTER_TINY = 2 - DEFAULT = 3 - - -class TileHint(Enum): - SQUARE = 0 - DEFAULT = 1 - - REDUCTION_COMBINE_FN = { "any": ops_wrapper("logical_or"), "max": ops_wrapper("maximum"), diff --git a/torch/_inductor/runtime/__init__.py b/torch/_inductor/runtime/__init__.py index 04f35cf3b4..e69de29bb2 100644 --- a/torch/_inductor/runtime/__init__.py +++ b/torch/_inductor/runtime/__init__.py @@ -1,12 +0,0 @@ -from . import triton_helpers, triton_heuristics -from .triton_helpers import libdevice, math as tl_math -from .triton_heuristics import AutotuneHint - - -__all__ = [ - "triton_heuristics", - "triton_helpers", - "libdevice", - "tl_math", - "AutotuneHint", -] diff --git a/torch/_inductor/runtime/hints.py b/torch/_inductor/runtime/hints.py new file mode 100644 index 0000000000..082e7fcc89 --- /dev/null +++ b/torch/_inductor/runtime/hints.py @@ -0,0 +1,82 @@ +import collections +from dataclasses import fields +from enum import auto, Enum + + +class ReductionHint(Enum): + INNER = 0 + OUTER = 1 + OUTER_TINY = 2 + DEFAULT = 3 + + +class TileHint(Enum): + SQUARE = 0 + DEFAULT = 1 + + +# Attempt to import AttrsDescriptor from Triton +try: + from triton.compiler.compiler import AttrsDescriptor + + attrs_descriptor_available = True + # Determine if 'ids_of_folded_args' is a valid field for AttrsDescriptor + attr_desc_fields = {f.name for f in fields(AttrsDescriptor)} + ids_of_folded_args_available = "ids_of_folded_args" in attr_desc_fields + divisible_by_8_available = "divisible_by_8" in attr_desc_fields +except ImportError: + attrs_descriptor_available = False + +# Define `instance_descriptor` function with clear conditional handling +if attrs_descriptor_available: + + def instance_descriptor( + divisible_by_16=None, + equal_to_1=None, + ids_of_folded_args=None, + divisible_by_8=None, + ): + # Prepare the arguments for AttrsDescriptor + kwargs = { + "divisible_by_16": divisible_by_16, + "equal_to_1": equal_to_1, + } + + # Conditionally add 'ids_of_folded_args' if it's available in AttrsDescriptor + if ids_of_folded_args_available: + kwargs["ids_of_folded_args"] = ids_of_folded_args + if divisible_by_8_available: + kwargs["divisible_by_8"] = divisible_by_8 + + # Instantiate AttrsDescriptor with the prepared arguments + return AttrsDescriptor(**kwargs) + +else: + # Define a namedtuple as a fallback when AttrsDescriptor is not available + instance_descriptor = collections.namedtuple( # type: ignore[no-redef] + "instance_descriptor", + ["divisible_by_16", "equal_to_1", "ids_of_folded_args", "divisible_by_8"], + defaults=[tuple(), tuple(), tuple(), tuple()], + ) + + +_NUM_THREADS_PER_WARP = 32 + + +class HeuristicType(Enum): + 
PERSISTENT_REDUCTION = auto() + POINTWISE = auto() + REDUCTION = auto() + SPLIT_SCAN = auto() + TEMPLATE = auto() + USER_AUTOTUNE = auto() + + +class AutotuneHint(Enum): + ELEMENTS_PER_WARP_32 = 0 + + # Triton codegen tries to codegen set of AutotuneHints. + # Enum.__repr__ looks like "<AutotuneHint.ELEMENTS_PER_WARP_32: 0>"" + # which isn't valid python. + # Enum.__str__ will just return "AutotuneHint.ELEMENTS_PER_WARP_32". + __repr__ = Enum.__str__ diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index 85860fd05c..cb29da2f59 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -12,7 +12,6 @@ import os.path import re import threading import time -from enum import auto, Enum from typing import Any, Callable, Dict, List, Optional, Set, Tuple import torch @@ -24,8 +23,6 @@ from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config from torch._inductor.codecache import cache_dir, CudaKernelParamCache from torch._inductor.coordinate_descent_tuner import CoordescTuner - -from torch._inductor.ir import ReductionHint, TileHint from torch._inductor.utils import ( ceildiv, conditional_product, @@ -37,6 +34,13 @@ from torch._inductor.utils import ( triton_config_to_hashable, ) from torch.utils._triton import has_triton_package +from .hints import ( + _NUM_THREADS_PER_WARP, + AutotuneHint, + HeuristicType, + ReductionHint, + TileHint, +) log = logging.getLogger(__name__) @@ -59,28 +63,6 @@ else: ASTSource = None -_NUM_THREADS_PER_WARP = 32 - - -class HeuristicType(Enum): - PERSISTENT_REDUCTION = auto() - POINTWISE = auto() - REDUCTION = auto() - SPLIT_SCAN = auto() - TEMPLATE = auto() - USER_AUTOTUNE = auto() - - -class AutotuneHint(Enum): - ELEMENTS_PER_WARP_32 = 0 - - # Triton codegen tries to codegen set of AutotuneHints. - # Enum.__repr__ looks like "<AutotuneHint.ELEMENTS_PER_WARP_32: 0>"" - # which isn't valid python. - # Enum.__str__ will just return "AutotuneHint.ELEMENTS_PER_WARP_32". 
- __repr__ = Enum.__str__ - - def autotune_hints_to_configs( hints: Set[AutotuneHint], size_hints, block_size: int ) -> List[Config]: diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index a2a80e1e5e..daddaaf04d 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -21,7 +21,6 @@ import tempfile import textwrap import time import unittest -from dataclasses import fields from datetime import datetime from io import StringIO from typing import ( @@ -689,51 +688,6 @@ def output_node(gm: torch.fx.GraphModule): return last_node -# Attempt to import AttrsDescriptor from Triton -try: - from triton.compiler.compiler import AttrsDescriptor - - attrs_descriptor_available = True - # Determine if 'ids_of_folded_args' is a valid field for AttrsDescriptor - attr_desc_fields = {f.name for f in fields(AttrsDescriptor)} - ids_of_folded_args_available = "ids_of_folded_args" in attr_desc_fields - divisible_by_8_available = "divisible_by_8" in attr_desc_fields -except ImportError: - attrs_descriptor_available = False - -# Define `instance_descriptor` function with clear conditional handling -if attrs_descriptor_available: - - def instance_descriptor( - divisible_by_16=None, - equal_to_1=None, - ids_of_folded_args=None, - divisible_by_8=None, - ): - # Prepare the arguments for AttrsDescriptor - kwargs = { - "divisible_by_16": divisible_by_16, - "equal_to_1": equal_to_1, - } - - # Conditionally add 'ids_of_folded_args' if it's available in AttrsDescriptor - if ids_of_folded_args_available: - kwargs["ids_of_folded_args"] = ids_of_folded_args - if divisible_by_8_available: - kwargs["divisible_by_8"] = divisible_by_8 - - # Instantiate AttrsDescriptor with the prepared arguments - return AttrsDescriptor(**kwargs) - -else: - # Define a namedtuple as a fallback when AttrsDescriptor is not available - instance_descriptor = collections.namedtuple( # type: ignore[no-redef] - "instance_descriptor", - ["divisible_by_16", "equal_to_1", "ids_of_folded_args", "divisible_by_8"], - defaults=[tuple(), tuple(), tuple(), tuple()], - ) - - _registered_caches: List[Any] = []
2.41.0
cf28b0ad59b1912d5783688b0f25f18b46efeb3
Sun, 21 Apr 2024 11:09:45 -0700
[PATCH 0446/1000] [inductor] Refactor runtime files into torch._inductor.runtime (part 3) (#124557)
I am planning to make the compile_worker process not import torch so it can start up much faster. This stack is prep for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124557 Approved by: https://github.com/yanboliang ghstack dependencies: #124552, #124553
diff --git a/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py b/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py index 3eebb8ca66..3738f041fe 100644 --- a/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py +++ b/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py @@ -1,6 +1,6 @@ import torch from torch._inductor import ir -from torch._inductor.utils import do_bench +from torch._inductor.runtime.runtime_utils import do_bench def to_channels_last(x): diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py index 4c89d978fd..db8f3aaaba 100644 --- a/test/inductor/test_aot_inductor.py +++ b/test/inductor/test_aot_inductor.py @@ -13,8 +13,8 @@ from torch._dynamo.testing import rand_strided, same from torch._dynamo.utils import counters from torch._inductor import config from torch._inductor.exc import CppWrapperCodeGenError +from torch._inductor.runtime.runtime_utils import cache_dir from torch._inductor.test_case import TestCase -from torch._inductor.utils import cache_dir from torch.export import Dim, export from torch.testing import FileCheck diff --git a/test/inductor/test_codecache.py b/test/inductor/test_codecache.py index 55a2233f15..96ed0d7022 100644 --- a/test/inductor/test_codecache.py +++ b/test/inductor/test_codecache.py @@ -19,8 +19,9 @@ from torch._inductor.codecache import ( TensorMetadata, TensorMetadataAndValues, ) +from torch._inductor.runtime.runtime_utils import cache_dir from torch._inductor.test_case import run_tests, TestCase -from torch._inductor.utils import cache_dir, fresh_inductor_cache +from torch._inductor.utils import fresh_inductor_cache from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( diff --git a/test/inductor/test_inductor_utils.py b/test/inductor/test_inductor_utils.py index 0c11ac0511..f86dd9219e 100644 --- a/test/inductor/test_inductor_utils.py +++ b/test/inductor/test_inductor_utils.py @@ -4,11 +4,11 @@ import functools import logging import torch +from torch._inductor.runtime.runtime_utils import do_bench from torch._inductor.test_case import run_tests, TestCase -from torch._inductor.utils import do_bench, do_bench_using_profiling - +from torch._inductor.utils import do_bench_using_profiling log = logging.getLogger(__name__) diff --git a/test/inductor/test_padding.py b/test/inductor/test_padding.py index 2270c33291..d9bf81e663 100644 --- a/test/inductor/test_padding.py +++ b/test/inductor/test_padding.py @@ -12,7 +12,8 @@ from torch._dynamo.testing import rand_strided, reduce_to_scalar_loss from torch._dynamo.utils import maybe_cprofile from torch._inductor import config, ir, metrics from torch._inductor.fx_passes import pad_mm as pad_mm_pass -from torch._inductor.utils import do_bench, run_and_get_code +from torch._inductor.runtime.runtime_utils import do_bench +from torch._inductor.utils import run_and_get_code from torch.testing._internal.inductor_utils import HAS_CUDA DO_PERF_TEST = os.environ.get("DO_PERF_TEST") == "1" diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py index aef3d18f35..f45f33ffde 100644 --- a/torch/_inductor/autotune_process.py +++ b/torch/_inductor/autotune_process.py @@ -35,7 +35,7 @@ if TYPE_CHECKING: from torch._inductor.select_algorithm import TritonTemplateCaller from . 
import config -from .utils import do_bench +from .runtime.runtime_utils import do_bench from .virtualized import V CUDA_VISIBLE_DEVICES = "CUDA_VISIBLE_DEVICES" diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index 3111901a1c..a5474b0616 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -59,7 +59,8 @@ from torch._dynamo.device_interface import ( from torch._dynamo.utils import counters, dynamo_timed from torch._inductor import config, exc, metrics from torch._inductor.codegen.cuda import cuda_env -from torch._inductor.utils import cache_dir, clear_on_fresh_inductor_cache, is_linux +from torch._inductor.runtime.runtime_utils import cache_dir +from torch._inductor.utils import clear_on_fresh_inductor_cache, is_linux from torch._subclasses.fake_tensor import ( extract_tensor_metadata, FakeTensor, diff --git a/torch/_inductor/codegen/cuda/cutlass_utils.py b/torch/_inductor/codegen/cuda/cutlass_utils.py index 134ebb93fe..40daf6da1c 100644 --- a/torch/_inductor/codegen/cuda/cutlass_utils.py +++ b/torch/_inductor/codegen/cuda/cutlass_utils.py @@ -8,10 +8,10 @@ from typing import Any, List, Optional import sympy import torch - -from ...codecache import cache_dir from ...config import cuda as inductor_cuda_config from ...ir import Layout + +from ...runtime.runtime_utils import cache_dir from .cuda_env import get_cuda_arch, get_cuda_version log = logging.getLogger(__name__) diff --git a/torch/_inductor/codegen/multi_kernel.py b/torch/_inductor/codegen/multi_kernel.py index e03ca8eca9..e4fc396c64 100644 --- a/torch/_inductor/codegen/multi_kernel.py +++ b/torch/_inductor/codegen/multi_kernel.py @@ -6,7 +6,8 @@ from torch._inductor.metrics import get_metric_table, is_metric_table_enabled from .. import config from ..codecache import PyCodeCache, TritonFuture -from ..utils import cache_on_self, do_bench +from ..runtime.runtime_utils import do_bench +from ..utils import cache_on_self from ..virtualized import V from .common import TensorArg diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index 67d69efe7a..c03e6c6954 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -47,24 +47,26 @@ from ..dependencies import Dep, MemoryDep, StarDep, WeakDep from ..ir import IRNode, TritonTemplateBuffer from ..optimize_indexing import indexing_dtype_strength_reduction from ..runtime.hints import ReductionHint +from ..runtime.runtime_utils import ( + do_bench, + get_max_y_grid, + green_text, + next_power_of_2, + yellow_text, +) from ..scheduler import BaseSchedulerNode, BaseScheduling, WhyNoFuse from ..utils import ( cache_on_self, - do_bench, get_dtype_size, get_fused_kernel_name, get_kernel_metadata, - get_max_y_grid, - green_text, is_welford_reduction, - next_power_of_2, Placeholder, sympy_dot, sympy_index_symbol, sympy_product, sympy_subs, unique, - yellow_text, ) from ..virtualized import _ops as ops, OpsHandler, ReductionType, StoreMode, V from ..wrapper_benchmark import get_kernel_category_by_source_code diff --git a/torch/_inductor/coordinate_descent_tuner.py b/torch/_inductor/coordinate_descent_tuner.py index baf293d9f5..2511800bc1 100644 --- a/torch/_inductor/coordinate_descent_tuner.py +++ b/torch/_inductor/coordinate_descent_tuner.py @@ -4,7 +4,7 @@ import logging from typing import Callable, Optional from torch.utils._triton import has_triton -from .utils import red_text, triton_config_to_hashable +from .runtime.runtime_utils import red_text, triton_config_to_hashable if 
has_triton(): import triton diff --git a/torch/_inductor/fx_passes/pad_mm.py b/torch/_inductor/fx_passes/pad_mm.py index 40948dc461..ea4d45e389 100644 --- a/torch/_inductor/fx_passes/pad_mm.py +++ b/torch/_inductor/fx_passes/pad_mm.py @@ -2,6 +2,7 @@ import functools from typing import List, Optional, Union import torch +import torch._inductor.runtime.runtime_utils from torch import Tensor from torch._inductor import utils from torch._subclasses.fake_tensor import FakeTensor @@ -241,7 +242,7 @@ def should_pad_bench( return False do_bench = functools.partial( - utils.do_bench, + torch._inductor.runtime.runtime_utils.do_bench, warmup=5, ) diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py index 7e7cce890b..05d6fd3587 100644 --- a/torch/_inductor/ir.py +++ b/torch/_inductor/ir.py @@ -61,6 +61,7 @@ from .dependencies import ( ) from .ops_handler import OpCounterCSE from .runtime.hints import ReductionHint +from .runtime.runtime_utils import do_bench from .utils import ( argsort, cache_on_self, @@ -68,7 +69,6 @@ from .utils import ( convert_shape_to_inductor, convert_shape_to_symint, developer_warning, - do_bench, get_kernel_metadata, is_dynamic, is_gpu, diff --git a/torch/_inductor/kernel/mm_common.py b/torch/_inductor/kernel/mm_common.py index 12a280cb91..e04f87c523 100644 --- a/torch/_inductor/kernel/mm_common.py +++ b/torch/_inductor/kernel/mm_common.py @@ -9,7 +9,8 @@ from torch._inductor.select_algorithm import realize_inputs from torch._inductor.virtualized import V from .. import config as inductor_config -from ..utils import ceildiv as cdiv, next_power_of_2 +from ..runtime.runtime_utils import next_power_of_2 +from ..utils import ceildiv as cdiv log = logging.getLogger(__name__) diff --git a/torch/_inductor/runtime/runtime_utils.py b/torch/_inductor/runtime/runtime_utils.py new file mode 100644 index 0000000000..948ad0e5cf --- /dev/null +++ b/torch/_inductor/runtime/runtime_utils.py @@ -0,0 +1,142 @@ +from __future__ import annotations + +import functools +import getpass +import inspect +import operator +import os +import re +import tempfile + +import torch + + +def conditional_product(*args): + return functools.reduce(operator.mul, [x for x in args if x]) + + +def ceildiv(numer: int, denom: int) -> int: + return -(numer // -denom) + + +def next_power_of_2(n: int) -> int: + """Return the smallest power of 2 greater than or equal to n""" + n -= 1 + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n |= n >> 32 + n += 1 + return n + + +def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int: + """ + Return the total number of bytes the arguments of tensor type takes. + + For in/out args, tensor sizes are counted twice: once for reading and + once for writing. + + The first num_in_out_args arguments are in out tensors. + """ + return sum( + arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args)) + for i, arg in enumerate(args) + if isinstance(arg, torch.Tensor) + ) + + +def triton_config_to_hashable(cfg): + """ + Convert triton config to a tuple that can uniquely identify it. We can use + the return value as a dictionary key. 
+ """ + items = sorted(cfg.kwargs.items()) + items.append(("num_warps", cfg.num_warps)) + items.append(("num_stages", cfg.num_stages)) + return tuple(items) + + +def create_bandwidth_info_str(ms, num_gb, gb_per_s, prefix="", suffix="", color=True): + info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}" + slow = ms > 0.012 and gb_per_s < 650 + return red_text(info_str) if color and slow else info_str + + +def get_max_y_grid(): + return 65535 + + +def do_bench(*args, **kwargs): + @functools.lru_cache(None) + def load_triton(): + try: + # NB: Lazily load triton, as importing triton is slow + # see https://github.com/openai/triton/issues/1599 + from triton.testing import do_bench as triton_do_bench + except ImportError as exc: + raise NotImplementedError("requires Triton") from exc + + # triton PR https://github.com/openai/triton/pull/1513 change the + # quantile fields name from 'percentiles' to 'quantiles' + # and change the default value from (0.5, 0.2, 0.8) to None. + # This may break inductor since a caller expects a tuple may get a item. + # + # Add a wrapper to maintain the same behavior for inductor. + # Maybe we should have own implementation of this function? + return triton_do_bench, ( + "quantiles" + if inspect.signature(triton_do_bench).parameters.get("quantiles") + is not None + else "percentiles" + ) + + triton_do_bench, quantile_field_name = load_triton() + + if quantile_field_name not in kwargs: + kwargs[quantile_field_name] = (0.5, 0.2, 0.8) + return triton_do_bench(*args, **kwargs)[0] + + +def cache_dir() -> str: + cache_dir = os.environ.get("TORCHINDUCTOR_CACHE_DIR") + if cache_dir is None: + sanitized_username = re.sub(r'[\\/:*?"<>|]', "_", getpass.getuser()) + os.environ["TORCHINDUCTOR_CACHE_DIR"] = cache_dir = os.path.join( + tempfile.gettempdir(), + "torchinductor_" + sanitized_username, + ) + os.makedirs(cache_dir, exist_ok=True) + return cache_dir + + +HAS_COLORAMA = True +try: + import colorama +except ImportError: + HAS_COLORAMA = False + + +def _color_text(msg, color): + if not HAS_COLORAMA: + return msg + + return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET + + +def green_text(msg): + return _color_text(msg, "green") + + +def yellow_text(msg): + return _color_text(msg, "yellow") + + +def red_text(msg): + return _color_text(msg, "red") + + +def blue_text(msg): + return _color_text(msg, "blue") diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index cb29da2f59..89be8f36d4 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -21,9 +21,17 @@ from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config -from torch._inductor.codecache import cache_dir, CudaKernelParamCache from torch._inductor.coordinate_descent_tuner import CoordescTuner -from torch._inductor.utils import ( +from .hints import ( + _NUM_THREADS_PER_WARP, + AutotuneHint, + HeuristicType, + ReductionHint, + TileHint, +) + +from .runtime_utils import ( + cache_dir, ceildiv, conditional_product, create_bandwidth_info_str, @@ -33,20 +41,13 @@ from torch._inductor.utils import ( next_power_of_2, triton_config_to_hashable, ) -from torch.utils._triton import has_triton_package -from .hints import ( - _NUM_THREADS_PER_WARP, - AutotuneHint, - HeuristicType, - ReductionHint, - TileHint, -) - -log = logging.getLogger(__name__) - -if 
has_triton_package(): +try: import triton +except ImportError: + triton = None + +if triton is not None: from triton import Config from triton.runtime.autotuner import OutOfResources from triton.runtime.jit import KernelInterface @@ -57,12 +58,14 @@ if has_triton_package(): ASTSource = None else: Config = object - triton = None KernelInterface = object OutOfResources = object ASTSource = None +log = logging.getLogger(__name__) + + def autotune_hints_to_configs( hints: Set[AutotuneHint], size_hints, block_size: int ) -> List[Config]: @@ -681,6 +684,8 @@ class CachingAutotuner(KernelInterface): "meta": launcher.config.kwargs, } + from torch._inductor.codecache import CudaKernelParamCache + if torch.version.hip is None: CudaKernelParamCache.set(key, params, launcher.bin.asm["cubin"]) else: diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py index 32f734ba8b..2783e3edfb 100644 --- a/torch/_inductor/scheduler.py +++ b/torch/_inductor/scheduler.py @@ -35,6 +35,7 @@ from .codegen.common import get_scheduling_for_device, Kernel from .comm_analysis import estimate_nccl_collective_runtime from .dependencies import Dep, MemoryDep, StarDep, WeakDep from .ir import ComputedBuffer, MultiOutput, MultiOutputLayout +from .runtime.runtime_utils import green_text, red_text from .sizevars import SimplifyIndexing from .utils import ( cache_on_self, @@ -44,11 +45,9 @@ from .utils import ( get_device_tflops, get_dtype_size, get_gpu_dram_gbps, - green_text, is_collective, is_gpu, is_wait, - red_text, sympy_product, ) from .virtualized import V diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index 59bbf05da7..e6304f604f 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -35,14 +35,8 @@ from .codegen.triton import ( from .codegen.triton_utils import config_of, signature_to_meta from .exc import CUDACompileError from .ir import ChoiceCaller, PrimitiveInfoType -from .utils import ( - do_bench, - get_dtype_size, - Placeholder, - sympy_dot, - sympy_product, - unique, -) +from .runtime.runtime_utils import do_bench +from .utils import get_dtype_size, Placeholder, sympy_dot, sympy_product, unique from .virtualized import V log = logging.getLogger(__name__) diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index daddaaf04d..a7be602f87 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -5,7 +5,6 @@ import contextlib import dataclasses import enum import functools -import getpass import inspect import io import itertools @@ -14,7 +13,6 @@ import math import operator import os import platform -import re import shutil import sys import tempfile @@ -51,6 +49,7 @@ from torch.autograd.profiler_util import EventList from torch.fx.passes.shape_prop import ShapeProp from torch.utils._sympy.functions import CeilDiv, CleanDiv, FloorDiv, ModularIndexing from . 
import config +from .runtime.runtime_utils import ceildiv as runtime_ceildiv log = logging.getLogger(__name__) @@ -140,37 +139,6 @@ def do_bench_using_profiling(fn: Callable[[], Any], warmup=25, rep=100) -> float return res -def do_bench(*args, **kwargs): - @functools.lru_cache(None) - def load_triton(): - try: - # NB: Lazily load triton, as importing triton is slow - # see https://github.com/openai/triton/issues/1599 - from triton.testing import do_bench as triton_do_bench - except ImportError as exc: - raise NotImplementedError("requires Triton") from exc - - # triton PR https://github.com/openai/triton/pull/1513 change the - # quantile fields name from 'percentiles' to 'quantiles' - # and change the default value from (0.5, 0.2, 0.8) to None. - # This may break inductor since a caller expects a tuple may get a item. - # - # Add a wrapper to maintain the same behavior for inductor. - # Maybe we should have own implementation of this function? - return triton_do_bench, ( - "quantiles" - if inspect.signature(triton_do_bench).parameters.get("quantiles") - is not None - else "percentiles" - ) - - triton_do_bench, quantile_field_name = load_triton() - - if quantile_field_name not in kwargs: - kwargs[quantile_field_name] = (0.5, 0.2, 0.8) - return triton_do_bench(*args, **kwargs)[0] - - @functools.lru_cache(None) def has_torchvision_roi_align() -> bool: try: @@ -183,10 +151,6 @@ def has_torchvision_roi_align() -> bool: return False -def conditional_product(*args): - return functools.reduce(operator.mul, [x for x in args if x]) - - def decode_device(device: Union[Optional[torch.device], str]) -> torch.device: if device is None: return torch.tensor(0.0).device # default device @@ -222,20 +186,7 @@ def ceildiv( assert isinstance(numer, int) and isinstance( denom, int ), f"{numer}: {type(numer)}, {denom}: {type(denom)}" - return -(numer // -denom) - - -def next_power_of_2(n: int) -> int: - """Return the smallest power of 2 greater than or equal to n""" - n -= 1 - n |= n >> 1 - n |= n >> 2 - n |= n >> 4 - n |= n >> 8 - n |= n >> 16 - n |= n >> 32 - n += 1 - return n + return runtime_ceildiv(numer, denom) def _type_of(key): @@ -703,20 +654,6 @@ def clear_on_fresh_inductor_cache(obj: Any): return obj -@clear_on_fresh_inductor_cache -@functools.lru_cache(None) -def cache_dir() -> str: - cache_dir = os.environ.get("TORCHINDUCTOR_CACHE_DIR") - if cache_dir is None: - sanitized_username = re.sub(r'[\\/:*?"<>|]', "_", getpass.getuser()) - cache_dir = os.path.join( - tempfile.gettempdir(), - "torchinductor_" + sanitized_username, - ) - os.makedirs(cache_dir, exist_ok=True) - return cache_dir - - @contextlib.contextmanager def fresh_inductor_cache(cache_entries=None): """ @@ -1141,28 +1078,6 @@ def developer_warning(msg): log.info(msg) -def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int: - """ - Return the total number of bytes the arguments of tensor type takes. - - For in/out args, tensor sizes are counted twice: once for reading and - once for writing. - - The first num_in_out_args arguments are in out tensors. 
- """ - return sum( - arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args)) - for i, arg in enumerate(args) - if isinstance(arg, torch.Tensor) - ) - - -def create_bandwidth_info_str(ms, num_gb, gb_per_s, prefix="", suffix="", color=True): - info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}" - slow = ms > 0.012 and gb_per_s < 650 - return red_text(info_str) if color and slow else info_str - - def get_benchmark_name(): """ An experimental API used only when config.benchmark_kernel is true. @@ -1229,17 +1144,6 @@ def maybe_profile(should_profile, *args, **kwargs): yield -def triton_config_to_hashable(cfg): - """ - Convert triton config to a tuple that can uniquely identify it. We can use - the return value as a dictionary key. - """ - items = sorted(cfg.kwargs.items()) - items.append(("num_warps", cfg.num_warps)) - items.append(("num_stages", cfg.num_stages)) - return tuple(items) - - def parallel_num_threads(): threads = config.cpp.threads if threads < 1: @@ -1247,36 +1151,6 @@ def parallel_num_threads(): return threads -HAS_COLORAMA = True -try: - import colorama -except ImportError: - HAS_COLORAMA = False - - -def _color_text(msg, color): - if not HAS_COLORAMA: - return msg - - return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET - - -def green_text(msg): - return _color_text(msg, "green") - - -def yellow_text(msg): - return _color_text(msg, "yellow") - - -def red_text(msg): - return _color_text(msg, "red") - - -def blue_text(msg): - return _color_text(msg, "blue") - - @functools.lru_cache(None) def get_device_tflops(dtype): from triton.testing import get_max_simd_tflops, get_max_tensorcore_tflops @@ -1320,10 +1194,6 @@ def reduction_num_outputs(reduction_type): return 3 if is_welford_reduction(reduction_type) else 1 -def get_max_y_grid(): - return 65535 - - def is_linux() -> bool: return platform.system() == "Linux" diff --git a/torch/_inductor/wrapper_benchmark.py b/torch/_inductor/wrapper_benchmark.py index 81a07fcf8d..31b81bba4a 100644 --- a/torch/_inductor/wrapper_benchmark.py +++ b/torch/_inductor/wrapper_benchmark.py @@ -4,7 +4,7 @@ from collections import defaultdict import torch from torch.autograd import DeviceType -from .utils import create_bandwidth_info_str, do_bench, get_num_bytes +from .runtime.runtime_utils import create_bandwidth_info_str, do_bench, get_num_bytes _kernel_category_choices = [ "foreach",
2.41.0
ea2a0951005c4bcb2491556a8548319c6cccfdb
Sun, 21 Apr 2024 11:09:45 -0700
[PATCH 0447/1000] [inductor] Refactor runtime files into torch._inductor.runtime (part 4) (#124559)
I am planning to make the compile_worker process not import torch so it can start up much faster. This stack is prep for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124559 Approved by: https://github.com/yanboliang ghstack dependencies: #124552, #124553, #124557
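For context, a minimal sketch of the guarded-import style these runtime refactors move toward, so the module can be imported in a lightweight worker process that has no triton (and, eventually, no real torch). This is a simplified illustration, not the exact patch code, and the helper name below is made up:

try:
    import triton
except ImportError:
    triton = None


def has_triton_available() -> bool:
    # True only if triton imported successfully in this process.
    return triton is not None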
diff --git a/test/inductor/test_coordinate_descent_tuner.py b/test/inductor/test_coordinate_descent_tuner.py index 5b9f35fa9c..8f57cab4d3 100644 --- a/test/inductor/test_coordinate_descent_tuner.py +++ b/test/inductor/test_coordinate_descent_tuner.py @@ -18,7 +18,7 @@ except ImportError: raise unittest.SkipTest("requires triton") # noqa: TRY200 from torch._inductor import config -from torch._inductor.coordinate_descent_tuner import CoordescTuner +from torch._inductor.runtime.coordinate_descent_tuner import CoordescTuner config.benchmark_kernel = True config.coordinate_descent_tuning = True diff --git a/torch/_inductor/coordinate_descent_tuner.py b/torch/_inductor/runtime/coordinate_descent_tuner.py similarity index 98% rename from torch/_inductor/coordinate_descent_tuner.py rename to torch/_inductor/runtime/coordinate_descent_tuner.py index 2511800bc1..83f4973a1f 100644 --- a/torch/_inductor/coordinate_descent_tuner.py +++ b/torch/_inductor/runtime/coordinate_descent_tuner.py @@ -3,15 +3,14 @@ import itertools import logging from typing import Callable, Optional -from torch.utils._triton import has_triton -from .runtime.runtime_utils import red_text, triton_config_to_hashable +from torch._inductor.runtime.runtime_utils import red_text, triton_config_to_hashable -if has_triton(): +try: import triton -else: +except ImportError: triton = None -from . import config as inductor_config +from torch._inductor import config as inductor_config log = logging.getLogger(__name__) diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index 89be8f36d4..1b0467553b 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -21,7 +21,7 @@ from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config -from torch._inductor.coordinate_descent_tuner import CoordescTuner +from .coordinate_descent_tuner import CoordescTuner from .hints import ( _NUM_THREADS_PER_WARP, AutotuneHint, @@ -29,7 +29,6 @@ from .hints import ( ReductionHint, TileHint, ) - from .runtime_utils import ( cache_dir, ceildiv,
2.41.0
ac30bc32ad300d70391ec552e5738d6ed66f9a5
Sun, 21 Apr 2024 11:09:46 -0700
[PATCH 0448/1000] [inductor] Refactor runtime files into torch._inductor.runtime (part 5) (#124560)
I am planning to make the compile_worker process not import torch so it can start up much faster. This stack is prep for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124560 Approved by: https://github.com/yanboliang ghstack dependencies: #124552, #124553, #124557, #124559
diff --git a/torch/_inductor/runtime/coordinate_descent_tuner.py b/torch/_inductor/runtime/coordinate_descent_tuner.py index 83f4973a1f..f280765aec 100644 --- a/torch/_inductor/runtime/coordinate_descent_tuner.py +++ b/torch/_inductor/runtime/coordinate_descent_tuner.py @@ -3,7 +3,7 @@ import itertools import logging from typing import Callable, Optional -from torch._inductor.runtime.runtime_utils import red_text, triton_config_to_hashable +from .runtime_utils import red_text, triton_config_to_hashable try: import triton diff --git a/torch/_inductor/runtime/runtime_utils.py b/torch/_inductor/runtime/runtime_utils.py index 948ad0e5cf..c0fdf65ec9 100644 --- a/torch/_inductor/runtime/runtime_utils.py +++ b/torch/_inductor/runtime/runtime_utils.py @@ -140,3 +140,24 @@ def red_text(msg): def blue_text(msg): return _color_text(msg, "blue") + + +def get_first_attr(obj, *attrs): + """ + Return the first available attribute or throw an exception if none is present. + """ + for attr in attrs: + if hasattr(obj, attr): + return getattr(obj, attr) + + raise AssertionError(f"{obj} does not has any of the attributes: {attrs}") + + +try: + dynamo_timed = torch._dynamo.utils.dynamo_timed +except AttributeError: # Compile workers only have a mock version of torch + + def dynamo_timed(original_function=None, phase_name=None): + if original_function: + return original_function + return dynamo_timed diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index 1b0467553b..f65cd6eaa5 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -16,9 +16,7 @@ from typing import Any, Callable, Dict, List, Optional, Set, Tuple import torch -import torch.autograd.profiler as autograd_profiler from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device -from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config from .coordinate_descent_tuner import CoordescTuner @@ -35,6 +33,8 @@ from .runtime_utils import ( conditional_product, create_bandwidth_info_str, do_bench, + dynamo_timed, + get_first_attr, get_max_y_grid, get_num_bytes, next_power_of_2, @@ -61,6 +61,13 @@ else: OutOfResources = object ASTSource = None +try: + autograd_profiler = torch.autograd.profiler +except AttributeError: # Compile workers only have a mock version of torch + + class autograd_profiler: # type: ignore[no-redef] + _is_profiler_enabled = False + log = logging.getLogger(__name__)
2.41.0
17c0af149855b5924a59170a18abecca97e2ce0
Sun, 21 Apr 2024 11:09:47 -0700
[PATCH 0449/1000] [inductor] Remove config check for 3D tiling (#124569)
This makes the check per-kernel (applied only when the kernel actually uses 3D tiling), rather than relying on the global config. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124569 Approved by: https://github.com/yanboliang ghstack dependencies: #124552, #124553, #124557, #124559, #124560
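Roughly, the decision now lives on the kernel's tiling root rather than in config.triton.max_tiles. A hedged sketch follows; the class and method names are simplified stand-ins, not the real inductor classes, though the condition mirrors the diff below:

class IterationRangesRootSketch:
    def __init__(self, grid_dim, numel, has_zdim):
        self.grid_dim = grid_dim   # which triton grid axis this root maps to
        self.numel = numel         # extent along this axis (may be symbolic)
        self.has_zdim = has_zdim   # True when the kernel really uses a z grid

    def spills_y_into_z(self, max_y_grid=65535):
        # Fold y-grid overflow into z only when no real z dimension exists
        # and the y extent may exceed the hardware limit.
        return (
            self.grid_dim == 1
            and not self.has_zdim
            and not (isinstance(self.numel, int) and self.numel <= max_y_grid)
        )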
diff --git a/test/inductor/test_triton_heuristics.py b/test/inductor/test_triton_heuristics.py index ab54164edd..2094797f9f 100644 --- a/test/inductor/test_triton_heuristics.py +++ b/test/inductor/test_triton_heuristics.py @@ -78,11 +78,6 @@ class TestTritonHeuristics(TestCase): def test_artificial_grid_cpp_wrapper(self): self._test_artificial_zgrid() - @config.patch("triton.max_tiles", 3) - def test_artificial_grid_max_tiles(self): - with self.assertRaisesRegex(Exception, "Generated y grid"): - self._test_artificial_zgrid() - if __name__ == "__main__": if IS_LINUX and HAS_GPU: diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index c03e6c6954..fb1f460628 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -1017,6 +1017,7 @@ class IterationRangesRoot(IterationRanges): is_loop: bool, tensor_dim: Optional[int], grid_dim: Optional[int], + has_zdim: bool, ): if pid_cache is None: pid_cache = {} @@ -1044,6 +1045,7 @@ class IterationRangesRoot(IterationRanges): self.tensor_dim = tensor_dim # Index of corresponding dimension in the triton grid self.grid_dim = grid_dim + self.has_zdim = has_zdim def __repr__(self): return f"IterationRangesRoot({self.name!r}, {self.numel}, ...)" @@ -1135,7 +1137,7 @@ class IterationRangesRoot(IterationRanges): # z grid is only exercised when max_tiles == 3 (off by default). if ( self.grid_dim == 1 - and config.triton.max_tiles <= 2 + and not self.has_zdim and not (isinstance(self.numel, int) and self.numel <= get_max_y_grid()) ): key = f"{key} * (tl.program_id({self.grid_dim + 1}) + 1)" @@ -1415,6 +1417,7 @@ class TritonKernel(Kernel): is_loop=is_reduction and not self.persistent_reduction, tensor_dim=tensor_dim, grid_dim=grid_dim, + has_zdim="z" in active_prefixes, ) ) for tree in self.range_trees: diff --git a/torch/_inductor/codegen/triton_split_scan.py b/torch/_inductor/codegen/triton_split_scan.py index c6b851dc42..8df904946e 100644 --- a/torch/_inductor/codegen/triton_split_scan.py +++ b/torch/_inductor/codegen/triton_split_scan.py @@ -73,6 +73,7 @@ class TritonSplitScanKernel(TritonKernel): is_loop=False, tensor_dim=tensor_dim, grid_dim=grid_dim, + has_zdim=False, ) ) for tree in self.range_trees: diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index f65cd6eaa5..ff6388e30e 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -1647,21 +1647,19 @@ def grid(*numels): return numel return ceildiv(numel, block) - max_grid_dims = config.triton.max_tiles - def grid_fn(meta): x_grid = get_grid_dim(xnumel, meta.get("XBLOCK", 1)) y_grid = get_grid_dim(ynumel, meta.get("YBLOCK", None)) - MAX_Y_GRID = get_max_y_grid() - if znumel is None and max_grid_dims <= 2: - div = ceildiv(y_grid, MAX_Y_GRID) + max_y_grid = get_max_y_grid() + if znumel is None: + div = ceildiv(y_grid, max_y_grid) y_grid = y_grid // div z_grid = div else: z_grid = get_grid_dim(znumel, meta.get("ZBLOCK", None)) torch._check( - y_grid <= MAX_Y_GRID, + y_grid <= max_y_grid, lambda: f"Generated y grid beyond 2^16 ({y_grid}) not supported with z dimension present. File issue", )
2.41.0
af12447f85dfede191a113c052e58fa7b21a8b3
Sun, 21 Apr 2024 11:09:47 -0700
[PATCH 0450/1000] [inductor] Use compile time config values in runtime (#124561)
This removes usage of torch._inductor.config from `torch._inductor.runtime`, fixing two issues: 1) if configs change after compilation, we should use the compile-time values; 2) in compile workers, we want to use the parent process's config. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124561 Approved by: https://github.com/yanboliang ghstack dependencies: #124552, #124553, #124557, #124559, #124560, #124569
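The pattern, as a minimal sketch (assumption: the keys below are a small illustrative subset of what inductor_meta_common() actually records): config values are snapshotted into an inductor_meta dict at codegen time, and the runtime heuristics read only that dict, with defaults, instead of importing torch._inductor.config.

import torch


def build_inductor_meta(config):
    # Compile time, parent process: snapshot the values the runtime needs.
    return {
        "are_deterministic_algorithms_enabled": torch.are_deterministic_algorithms_enabled(),
        "autotune_pointwise": config.triton.autotune_pointwise,
        "coordinate_descent_tuning": config.coordinate_descent_tuning,
        "spill_threshold": config.triton.spill_threshold,
    }


def disable_pointwise_autotuning(inductor_meta):
    # Runtime, possibly a compile worker with a mocked torch: dict reads only.
    if inductor_meta.get("are_deterministic_algorithms_enabled"):
        return True
    return not inductor_meta.get("autotune_pointwise", True)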
diff --git a/test/inductor/test_coordinate_descent_tuner.py b/test/inductor/test_coordinate_descent_tuner.py index 8f57cab4d3..70618c06e9 100644 --- a/test/inductor/test_coordinate_descent_tuner.py +++ b/test/inductor/test_coordinate_descent_tuner.py @@ -5,6 +5,7 @@ import unittest from unittest import mock import torch +from torch._inductor.runtime.hints import TRITON_MAX_BLOCK from torch._inductor.test_case import run_tests, TestCase from torch.testing._internal.common_utils import IS_LINUX @@ -104,7 +105,7 @@ class TestCoordinateDescentTuner(TestCase): tuner = CoordescTuner(size_hints=size_hints) - max_block = config.triton.max_block + max_block = TRITON_MAX_BLOCK self.assertFalse(tuner.value_too_large("XBLOCK", max_block["X"])) self.assertTrue(tuner.value_too_large("XBLOCK", max_block["X"] * 2)) self.assertFalse(tuner.value_too_large("RBLOCK", max_block["R"])) diff --git a/test/inductor/test_triton_heuristics.py b/test/inductor/test_triton_heuristics.py index 2094797f9f..3a85211f81 100644 --- a/test/inductor/test_triton_heuristics.py +++ b/test/inductor/test_triton_heuristics.py @@ -16,6 +16,7 @@ except ImportError: raise unittest.SkipTest("requires triton") # noqa: TRY200 from torch._inductor import config +from torch._inductor.runtime.hints import TRITON_MAX_BLOCK from torch._inductor.runtime.triton_heuristics import triton_config from torch._inductor.test_case import run_tests, TestCase @@ -30,7 +31,7 @@ class TestTritonHeuristics(TestCase): key = f"{label}BLOCK" if key not in cfg.kwargs: continue - self.assertTrue(cfg.kwargs[key] <= config.triton.max_block[label]) + self.assertTrue(cfg.kwargs[key] <= TRITON_MAX_BLOCK[label]) def _test_artificial_zgrid(self): def forward(primals_1, primals_2, primals_5): diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index fb1f460628..4950f5e802 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -46,7 +46,7 @@ from ..codecache import code_hash, get_path, PyCodeCache from ..dependencies import Dep, MemoryDep, StarDep, WeakDep from ..ir import IRNode, TritonTemplateBuffer from ..optimize_indexing import indexing_dtype_strength_reduction -from ..runtime.hints import ReductionHint +from ..runtime.hints import ReductionHint, TRITON_MAX_BLOCK from ..runtime.runtime_utils import ( do_bench, get_max_y_grid, @@ -252,7 +252,7 @@ class BlockPtrOptions: and not V.graph.sizevars.statically_known_equals(self.strides[i], 0) # type: ignore[arg-type] and not V.graph.sizevars.statically_known_multiple_of( self.shape[i], - config.triton.max_block[self.block_shape[i][0]], # type: ignore[arg-type] + TRITON_MAX_BLOCK[self.block_shape[i][0]], # type: ignore[arg-type] ) and not (V.kernel.no_x_dim and self.block_shape[i] == "XBLOCK") ): @@ -1771,9 +1771,9 @@ class TritonKernel(Kernel): continue # Masks are superfluous if numel is a multiple of BLOCK # (We use the fact that BLOCK is required by triton to be a power of 2) - if tree.prefix.upper() not in config.triton.max_block: + if tree.prefix.upper() not in TRITON_MAX_BLOCK: continue - max_block = config.triton.max_block[tree.prefix.upper()] + max_block = TRITON_MAX_BLOCK[tree.prefix.upper()] # Optional optimization: if block divides numel exactly, we will # never need to do a masked load to handle stragglers at the end. # It's faster to avoid masking at all. 
But it is sound to always @@ -2732,6 +2732,42 @@ class TritonKernel(Kernel): return "reduction" return "pointwise" + @staticmethod + def inductor_meta_common(): + inductor_meta = { + "backend_hash": torch.utils._triton.triton_hash_with_backend(), + "are_deterministic_algorithms_enabled": torch.are_deterministic_algorithms_enabled(), + "assert_indirect_indexing": config.assert_indirect_indexing, + "autotune_local_cache": config.autotune_local_cache, + "autotune_pointwise": config.triton.autotune_pointwise, + "autotune_remote_cache": config.autotune_remote_cache, + "dynamic_scale_rblock": config.dynamic_scale_rblock, + "max_autotune": config.max_autotune, + "max_autotune_pointwise": config.max_autotune_pointwise, + "min_split_scan_rblock": config.triton.min_split_scan_rblock, + "spill_threshold": config.triton.spill_threshold, + "store_cubin": config.triton.store_cubin, + } + if torch.version.hip is not None: + inductor_meta["is_hip"] = True + if config.is_fbcode(): + inductor_meta["is_fbcode"] = True + if config.profile_bandwidth: + inductor_meta["profile_bandwidth"] = config.profile_bandwidth + inductor_meta["profile_bandwidth_regex"] = config.profile_bandwidth_regex + inductor_meta["profile_bandwidth_output"] = config.profile_bandwidth_output + if config.coordinate_descent_tuning: + inductor_meta[ + "coordinate_descent_tuning" + ] = config.coordinate_descent_tuning + inductor_meta[ + "coordinate_descent_search_radius" + ] = config.coordinate_descent_search_radius + inductor_meta[ + "coordinate_descent_check_all_directions" + ] = config.coordinate_descent_check_all_directions + return inductor_meta + def codegen_kernel(self, name=None): code = IndentedBuffer() @@ -2807,8 +2843,9 @@ class TritonKernel(Kernel): "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), "mutated_arg_names": mutated_args, "no_x_dim": self.no_x_dim, - "backend_hash": torch.utils._triton.triton_hash_with_backend(), + **self.inductor_meta_common(), } + num_gb = None if config.benchmark_kernel or config.profile_bandwidth: num_gb = self.estimate_kernel_num_bytes() / 1e9 diff --git a/torch/_inductor/codegen/triton_foreach.py b/torch/_inductor/codegen/triton_foreach.py index 449af125d8..a0acdcdae0 100644 --- a/torch/_inductor/codegen/triton_foreach.py +++ b/torch/_inductor/codegen/triton_foreach.py @@ -5,8 +5,6 @@ from typing import Dict, List, Tuple from sympy import Integer -import torch - from .. 
import metrics from ..scheduler import SchedulerNode from ..utils import ceildiv, Placeholder @@ -166,7 +164,7 @@ class ForeachKernel(Kernel): triton_meta["configs"] = [config_of(signature)] inductor_meta = { "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), - "backend_hash": torch.utils._triton.triton_hash_with_backend(), + **TritonKernel.inductor_meta_common(), } return f""" @triton_heuristics.foreach( diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py index 17c2f58a76..18d4ccaf3e 100644 --- a/torch/_inductor/codegen/wrapper.py +++ b/torch/_inductor/codegen/wrapper.py @@ -1129,13 +1129,13 @@ class WrapperCodeGen(CodeGen): compile_wrapper = IndentedBuffer() compile_wrapper.writeline(f"async_compile.triton({original_name!r}, '''") - from .triton import gen_common_triton_imports + from .triton import gen_common_triton_imports, TritonKernel compile_wrapper.splice(gen_common_triton_imports()) inductor_meta = { "kernel_name": name, - "backend_hash": torch.utils._triton.triton_hash_with_backend(), + **TritonKernel.inductor_meta_common(), } configs = [ @@ -1264,7 +1264,7 @@ class WrapperCodeGen(CodeGen): self.wrapper_call.writeline("start_graph()") def generate_end_graph(self): - self.wrapper_call.writeline("end_graph()") + self.wrapper_call.writeline(f"end_graph({config.profile_bandwidth_output!r})") def generate_reset_kernel_saved_flags(self): self.wrapper_call.splice( diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py index ea9f8955b1..b5b8e16684 100644 --- a/torch/_inductor/config.py +++ b/torch/_inductor/config.py @@ -657,18 +657,6 @@ class triton: # hint to Triton when arguments are divisible by 16 divisible_by_16 = True - # theses are not enforced, but they are used by asserts in triton_heuristics.py - # NOTE: mobilevit_s in timm_models required X to be set to the higher value 2048 - - # Max RBLOCK will be large for multi-kernel since we do more aggressive - # persistent reduction. - max_block = { - "X": 2048, - "Y": 1024, - "Z": 1024, - "R": 4096 * (16 if multi_kernel else 1), - } - # Minimum RBLOCK to be used for a TritonSplitScanKernel # NOTE: This also indirectly controls the size of workspace buffer required min_split_scan_rblock = 256 diff --git a/torch/_inductor/runtime/coordinate_descent_tuner.py b/torch/_inductor/runtime/coordinate_descent_tuner.py index f280765aec..b5d10478a0 100644 --- a/torch/_inductor/runtime/coordinate_descent_tuner.py +++ b/torch/_inductor/runtime/coordinate_descent_tuner.py @@ -3,6 +3,8 @@ import itertools import logging from typing import Callable, Optional +from .hints import TRITON_MAX_BLOCK + from .runtime_utils import red_text, triton_config_to_hashable try: @@ -10,8 +12,6 @@ try: except ImportError: triton = None -from torch._inductor import config as inductor_config - log = logging.getLogger(__name__) @@ -44,32 +44,35 @@ class CoordescTuner: i.e., there are multiple local optima.. 
""" - def __init__(self, is_mm=False, name="unknown", size_hints=None): + def __init__( + self, is_mm=False, name="unknown", size_hints=None, inductor_meta=None + ): self.is_mm = is_mm # we will tune num_stages for mm self.cached_benchmark_results = {} self.name = name self.size_hints = size_hints + self.inductor_meta = inductor_meta or {} def get_xmax(self): - xmax = inductor_config.triton.max_block["X"] + xmax = TRITON_MAX_BLOCK["X"] if self.size_hints and len(self.size_hints) > 0: xmax = min(xmax, self.size_hints[0]) return xmax def get_ymax(self): - ymax = inductor_config.triton.max_block["Y"] + ymax = TRITON_MAX_BLOCK["Y"] if self.size_hints and len(self.size_hints) > 1: ymax = min(ymax, self.size_hints[1]) return ymax def get_zmax(self): - zmax = inductor_config.triton.max_block["Z"] + zmax = TRITON_MAX_BLOCK["Z"] if self.size_hints and len(self.size_hints) > 2: zmax = min(zmax, self.size_hints[2]) return zmax def get_rmax(self): - rmax = inductor_config.triton.max_block["R"] + rmax = TRITON_MAX_BLOCK["R"] if self.size_hints and len(self.size_hints) > 0: rmax = min(rmax, self.size_hints[-1]) # the last one is for reduction return rmax @@ -194,7 +197,7 @@ class CoordescTuner: candidate_values = self.get_neighbour_values( field, old_value, - radius=inductor_config.coordinate_descent_search_radius, + radius=self.inductor_meta.get("coordinate_descent_search_radius", 1), include_self=True, ) candidate_values_list.append(candidate_values) @@ -286,7 +289,9 @@ class CoordescTuner: improved = True best_config, best_timing = candidate_config, candidate_timing - if not improved and inductor_config.coordinate_descent_check_all_directions: + if not improved and self.inductor_meta.get( + "coordinate_descent_check_all_directions" + ): old_best_timing = best_timing improved, best_config, best_timing = self.check_all_tuning_directions( func, best_config, best_timing diff --git a/torch/_inductor/runtime/hints.py b/torch/_inductor/runtime/hints.py index 082e7fcc89..5b2b53ebff 100644 --- a/torch/_inductor/runtime/hints.py +++ b/torch/_inductor/runtime/hints.py @@ -3,6 +3,15 @@ from dataclasses import fields from enum import auto, Enum +# NOTE: if these fail asserts submit a PR to increase them +TRITON_MAX_BLOCK = { + "X": 2048, + "Y": 1024, + "Z": 1024, + "R": 4096 * 16, # * 16 is multi-kernel only +} + + class ReductionHint(Enum): INNER = 0 OUTER = 1 diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index ff6388e30e..1b042d4f4a 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -17,15 +17,15 @@ from typing import Any, Callable, Dict, List, Optional, Set, Tuple import torch from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device - -from torch._inductor import config from .coordinate_descent_tuner import CoordescTuner + from .hints import ( _NUM_THREADS_PER_WARP, AutotuneHint, HeuristicType, ReductionHint, TileHint, + TRITON_MAX_BLOCK, ) from .runtime_utils import ( cache_dir, @@ -111,12 +111,12 @@ def autotune_hints_to_configs( return configs -def disable_pointwise_autotuning(): +def disable_pointwise_autotuning(inductor_meta): # Autotuning can give different benchmarking results from run to run, and # therefore we disable autotuning when use_deterministic flag is on. 
- if torch.are_deterministic_algorithms_enabled(): + if inductor_meta.get("are_deterministic_algorithms_enabled"): return True - return not config.triton.autotune_pointwise + return not inductor_meta.get("autotune_pointwise", True) class CachingAutotuner(KernelInterface): @@ -179,7 +179,10 @@ class CachingAutotuner(KernelInterface): self.size_hints = size_hints self.coordesc_tuner = CoordescTuner( - is_mm=False, name=self.fn.__name__, size_hints=size_hints + is_mm=False, + name=self.fn.__name__, + size_hints=size_hints, + inductor_meta=self.inductor_meta, ) self.filename = filename @@ -220,11 +223,11 @@ class CachingAutotuner(KernelInterface): self.triton_meta["device"] ) if ( - config.dynamic_scale_rblock + self.inductor_meta.get("dynamic_scale_rblock", True) and self.heuristic_type == HeuristicType.REDUCTION and self.size_hints is not None # Disable for AMDGPU as Triton is not ready to return n_regs for a compiled_binary. - and torch.version.hip is None + and not self.inductor_meta.get("is_hip") # Disable for Intel GPU as Triton is not ready to return n_regs for a compiled_binary. and self.device_type != "xpu" and device_prop.major >= 8 @@ -307,9 +310,9 @@ class CachingAutotuner(KernelInterface): compile_meta["constants"][self.fn.arg_names.index(k)] = v compile_meta["num_warps"] = cfg.num_warps compile_meta["num_stages"] = cfg.num_stages - compile_meta["debug"] = ( - config.assert_indirect_indexing and torch.version.hip is None - ) + compile_meta["debug"] = self.inductor_meta.get( + "assert_indirect_indexing", True + ) and not self.inductor_meta.get("is_hip", False) # Setting device_type="hip" required on ROCm to pass down to triton compile_meta["device_type"] = ( @@ -561,7 +564,7 @@ class CachingAutotuner(KernelInterface): launcher.n_regs = getattr(binary, "n_regs", None) launcher.n_spills = getattr(binary, "n_spills", None) launcher.shared = binary_shared - launcher.store_cubin = config.triton.store_cubin + launcher.store_cubin = self.inductor_meta.get("store_cubin", False) # store this global variable to avoid the high overhead of reading it when calling run if launcher.store_cubin: launcher.fn = self.fn @@ -576,7 +579,9 @@ class CachingAutotuner(KernelInterface): # control over the kernel code; (ii) there is empirical evidence that # for some (complicated) custom Triton kernels, a register-spilling # config may yield the best latency. - if not self.custom_kernel and launcher.n_spills > config.triton.spill_threshold: + if not self.custom_kernel and launcher.n_spills > self.inductor_meta.get( + "spill_threshold", 16 + ): log.debug( "Skip config %s because of register spilling: %d", launcher.config, @@ -766,10 +771,9 @@ class CachingAutotuner(KernelInterface): if len(self.launchers) > 1: self.autotune_to_one_config(*args, grid=grid, **kwargs) - if ( - not getattr(self.launchers[0].config, "found_by_coordesc", False) - and config.coordinate_descent_tuning - ): + if not getattr( + self.launchers[0].config, "found_by_coordesc", False + ) and self.inductor_meta.get("coordinate_descent_tuning", False): self.launchers = [ self.coordinate_descent_tuning( self.launchers[0], *args, grid=grid, **kwargs @@ -844,7 +848,7 @@ def start_graph(): collected_calls.clear() -def end_graph(): +def end_graph(output_file): if len(collected_calls) == 0: return overall_time = sum(call[0] for call in collected_calls) @@ -856,7 +860,6 @@ def end_graph(): ) print(summary_str) print() - output_file = config.profile_bandwidth_output if output_file is not None: # sort perf numbers in descending order, i.e. 
placing the # most runtime-heavy kernels at the top of the list @@ -939,6 +942,7 @@ def load_cached_autotuning( best_config, configs_hash: str, configs: List[Config], + inductor_meta: Dict[str, Any], ): if best_config is None: return None @@ -948,7 +952,9 @@ def load_cached_autotuning( # Remove time taken for comparison best_config.pop("time_taken_ms", None) - if config.coordinate_descent_tuning and best_config.pop("found_by_coordesc", False): + if inductor_meta.get("coordinate_descent_tuning") and best_config.pop( + "found_by_coordesc", False + ): num_warps = best_config.pop("num_warps") num_stages = best_config.pop("num_stages") triton_config = Config(best_config, num_warps=num_warps, num_stages=num_stages) @@ -968,12 +974,12 @@ def load_cached_autotuning( return matching_configs[0] -def should_use_remote_autotune_cache(): - if config.autotune_remote_cache: +def should_use_remote_autotune_cache(inductor_meta): + if inductor_meta.get("autotune_remote_cache"): return True - if not config.is_fbcode(): + if not inductor_meta.get("is_fbcode"): return False - if torch.version.hip is not None: + if inductor_meta.get("is_hip"): return False from triton.runtime.fb_memcache import MEMCACHE_VERSION @@ -1002,22 +1008,24 @@ def cached_autotune( inductor_meta = {} if inductor_meta is None else inductor_meta # on disk caching logic and/or remote caching - if filename is not None and (len(configs) > 1 or config.coordinate_descent_tuning): + if filename is not None and ( + len(configs) > 1 or inductor_meta.get("coordinate_descent_tuning") + ): configs_hash = hash_configs(configs) cache_filename = None remote_cache = None remote_cache_key = None - if config.autotune_local_cache: + if inductor_meta.get("autotune_local_cache", True): cache_filename = os.path.splitext(filename)[0] + ".best_config" - if should_use_remote_autotune_cache(): + if should_use_remote_autotune_cache(inductor_meta): backend_hash = inductor_meta.get("backend_hash", None) if backend_hash is not None: key = backend_hash + configs_hash + "autotune-best-config-v2" key = hashlib.sha256(key.encode("utf-8")).hexdigest() try: - if config.is_fbcode(): + if inductor_meta.get("is_fbcode"): remote_cache = triton.runtime.fb_memcache.FbMemcacheRemoteAutotuneCacheBackend( key ) @@ -1040,7 +1048,9 @@ def cached_autotune( elif remote_cache is not None and remote_cache_key is not None: best_config = remote_cache.get(remote_cache_key) - best_config = load_cached_autotuning(best_config, configs_hash, configs) + best_config = load_cached_autotuning( + best_config, configs_hash, configs, inductor_meta + ) if best_config: configs = [best_config] @@ -1081,12 +1091,12 @@ def cached_autotune( assert tconfig.kwargs["XBLOCK"] == 1 tconfig.kwargs.pop("XBLOCK") - if config.profile_bandwidth: + if inductor_meta.get("profile_bandwidth"): return DebugAutotuner( fn, triton_meta=triton_meta, inductor_meta=inductor_meta, - regex_filter=config.profile_bandwidth_regex, + regex_filter=inductor_meta["profile_bandwidth_regex"], configs=configs, save_cache_hook=save_cache_hook, mutated_arg_names=mutated_arg_names, @@ -1134,7 +1144,7 @@ def check_config(cfg, *, xnumel=None, ynumel=None, znumel=None): f"TritonKernel.indexing assumes numel == 1 => BLOCK == 1" f" but {label.lower()}numel=={numel} and {label}BLOCK={block} (cfg={cfg})." 
) - max_block = config.triton.max_block[label] + max_block = TRITON_MAX_BLOCK[label] max_block_str = f'config.triton.max_block["{label}"]' assert max_block % block == 0, ( f"TritonKernel.indexing assumes {label}BLOCK divides {max_block_str}" @@ -1186,13 +1196,13 @@ def triton_config( # if we are below original block size, scale up where we can; # or if the calculated grid size is larger than the limit, we bump up the corresponding dimension - while x < min(size_hints[0], config.triton.max_block["X"]) and ( + while x < min(size_hints[0], TRITON_MAX_BLOCK["X"]) and ( x * maxGridSize[0] < size_hints[0] or conditional_product(x, y, z) < target ): x *= 2 while ( y - and y < min(size_hints[1], config.triton.max_block["Y"]) + and y < min(size_hints[1], TRITON_MAX_BLOCK["Y"]) and ( y * maxGridSize[1] < size_hints[1] or conditional_product(x, y, z) < target ) @@ -1200,7 +1210,7 @@ def triton_config( y *= 2 while ( z - and z < min(size_hints[2], config.triton.max_block["Z"]) + and z < min(size_hints[2], TRITON_MAX_BLOCK["Z"]) and ( z * maxGridSize[2] < size_hints[2] or conditional_product(x, y, z) < target ) @@ -1263,9 +1273,7 @@ def triton_config_reduction(size_hints, x, r, num_stages=1, num_warps=None) -> C num_warps = conditional_product(x, r) // 128 num_warps = next_power_of_2(min(max(num_warps, 2), 8)) check_config(cfg, xnumel=size_hints[0]) - assert ( - r <= config.triton.max_block["R"] - ), f"increase config.triton.MAX_BLOCK['r'] to {r}" + assert r <= TRITON_MAX_BLOCK["R"], f"increase TRITON_MAX_BLOCK['r'] to {r}" return Config(cfg, num_warps=num_warps, num_stages=num_stages) @@ -1296,9 +1304,7 @@ def triton_config_tiled_reduction(size_hints, x, y, r, num_stages=1): cfg = {"XBLOCK": x, "YBLOCK": y, "RBLOCK": r} num_warps = next_power_of_2(min(max(conditional_product(x, y, r) // 256, 1), 8)) check_config(cfg, xnumel=size_hints[0], ynumel=size_hints[1]) - assert ( - r <= config.triton.max_block["R"] - ), f"increase config.triton.MAX_BLOCK['r'] to {r}" + assert r <= TRITON_MAX_BLOCK["R"], f"increase TRITON_MAX_BLOCK['r'] to {r}" return Config(cfg, num_warps=num_warps, num_stages=num_stages) @@ -1328,8 +1334,9 @@ def pointwise( ) if len(size_hints) == 1: - if disable_pointwise_autotuning() and not ( - config.max_autotune or config.max_autotune_pointwise + if disable_pointwise_autotuning(inductor_meta) and not ( + inductor_meta.get("max_autotune") + or inductor_meta.get("max_autotune_pointwise") ): return cached_autotune( size_hints, @@ -1357,8 +1364,11 @@ def pointwise( filename=filename, ) if len(size_hints) == 2: - if (disable_pointwise_autotuning() or tile_hint == TileHint.SQUARE) and not ( - config.max_autotune or config.max_autotune_pointwise + if ( + disable_pointwise_autotuning(inductor_meta) or tile_hint == TileHint.SQUARE + ) and not ( + inductor_meta.get("max_autotune") + or inductor_meta.get("max_autotune_pointwise") ): return cached_autotune( size_hints, @@ -1385,7 +1395,7 @@ def pointwise( heuristic_type=HeuristicType.POINTWISE, ) if len(size_hints) == 3: - if disable_pointwise_autotuning(): + if disable_pointwise_autotuning(inductor_meta): return cached_autotune( size_hints, [triton_config_with_settings(size_hints, 16, 16, 16)], @@ -1428,7 +1438,7 @@ def _reduction_configs( tiny_config = triton_config_reduction( size_hints, 2 * (256 // rnumel) if rnumel <= 256 else 1, min(rnumel, 2048) ) - if config.max_autotune or config.max_autotune_pointwise: + if inductor_meta.get("max_autotune") or inductor_meta.get("max_autotune_pointwise"): pass # skip all these cases elif reduction_hint == 
ReductionHint.INNER: return [contiguous_config] @@ -1436,7 +1446,7 @@ def _reduction_configs( return [outer_config] elif reduction_hint == ReductionHint.OUTER_TINY: return [tiny_config] - if disable_pointwise_autotuning(): + if disable_pointwise_autotuning(inductor_meta): return [triton_config_reduction(size_hints, 32, 128)] return [ contiguous_config, @@ -1515,7 +1525,7 @@ def persistent_reduction( # we don't need RBLOCK for persistent reduction c.kwargs.pop("RBLOCK") - if disable_pointwise_autotuning(): + if disable_pointwise_autotuning(inductor_meta): configs = configs[:1] return cached_autotune( @@ -1542,14 +1552,13 @@ def split_scan( size_hints = [1, *size_hints[1:]] assert triton_meta is not None - rnumel = size_hints[-1] if len(size_hints) != 2: raise NotImplementedError(f"size_hints: {size_hints}") configs = _reduction_configs(size_hints=size_hints, inductor_meta=inductor_meta) # Fixup configs to enforce the minimum RBLOCK size - min_rblock = config.triton.min_split_scan_rblock + min_rblock = inductor_meta.get("min_split_scan_rblock", 256) for cfg in configs: if cfg.kwargs["RBLOCK"] < min_rblock: cfg.kwargs["RBLOCK"] = min_rblock diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index e6304f604f..b50950bae4 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -162,7 +162,7 @@ class TritonTemplateKernel(TritonKernel): inductor_meta = { "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), - "backend_hash": torch.utils._triton.triton_hash_with_backend(), + **TritonKernel.inductor_meta_common(), } if config.profile_bandwidth or config.benchmark_kernel: num_gb = self.estimate_kernel_num_bytes() / 1e9
2.41.0
4bce27f0d12bf7226b37dfe365660aad456701a
Mon, 22 Apr 2024 07:20:10 +0000
[PATCH 0451/1000] Revert "fix Invalid call to aoti_torch_tensor_copy_ #123039 (#124037)"
This reverts commit 6e24cc012b130869d0029280dcbb34efdd0032cc. Reverted https://github.com/pytorch/pytorch/pull/124037 on behalf of https://github.com/jeanschmidt because it appears to have introduced a regression in pull / linux-focal-cuda12.1-py3.10-gcc9 / test (default, 3, 5, linux.4xlarge.nvidia.gpu) ([comment](https://github.com/pytorch/pytorch/pull/124037#issuecomment-2068659093))
diff --git a/test/inductor/test_cuda_cpp_wrapper.py b/test/inductor/test_cuda_cpp_wrapper.py index fa717ab835..b662e2438c 100644 --- a/test/inductor/test_cuda_cpp_wrapper.py +++ b/test/inductor/test_cuda_cpp_wrapper.py @@ -109,7 +109,9 @@ if config.abi_compatible: test_failures_cuda_wrapper[ f"{test_name}_dynamic_shapes" ] = test_torchinductor.TestFailure(("cuda_wrapper",), is_skip=False) - skip_list = [] + skip_list = [ + "test_multi_device_cuda", + ] for test_name in skip_list: test_failures_cuda_wrapper[test_name] = test_torchinductor.TestFailure( ("cuda_wrapper",), is_skip=True diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py index 7fad66e7ac..4c07930045 100644 --- a/torch/_inductor/codegen/cpp_wrapper_cpu.py +++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py @@ -895,11 +895,9 @@ class CppWrapperCpu(WrapperCodeGen): @cache_on_self def get_output_refs(self): return [ - ( - f"torch::tensor({x.codegen_reference(self.wrapper_call)})" - if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible - else x.codegen_reference(self.wrapper_call) - ) + f"torch::tensor({x.codegen_reference(self.wrapper_call)})" + if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible + else x.codegen_reference(self.wrapper_call) for x in V.graph.graph_outputs ] @@ -1099,11 +1097,9 @@ class CppWrapperCpu(WrapperCodeGen): outputs_str = "output_tensors" else: outputs = [ - ( - f"output_tensors[{i}]" - if self.output_is_tensor[i] - else f"output_tensors[{i}].item()" - ) + f"output_tensors[{i}]" + if self.output_is_tensor[i] + else f"output_tensors[{i}].item()" for i in range(len(V.graph.graph_outputs)) ] outputs_str = f"[{', '.join(outputs)}]" @@ -1398,7 +1394,6 @@ class CppWrapperCpu(WrapperCodeGen): and ir.is_contiguous_strides_for_shape( buffer.get_stride(), buffer.get_size() ) - and not buffer.is_extern() ) def make_buffer_free(self, buffer):
2.41.0
2bd1abc62c17c349e1d53d512f3e0964ad1d77a
Mon, 22 Apr 2024 00:48:20 +0200
[PATCH 0452/1000] [Inductor Cutlass backend] Tolerate dynamic shapes (#121497)
Previously, when the Cutlass backend was enabled, using dynamic shapes could lead to exceptions during JIT compilation. With this change, guards are in place to simply disable the Cutlass backend if dynamic dimensions are involved. In addition, if no choices for a GEMM are available from the selected backends, an ATen kernel is used as a fallback, even if the ATen backend is not enabled. Test: CI; additional unit test in test_cutlass_backend.py Pull Request resolved: https://github.com/pytorch/pytorch/pull/121497 Approved by: https://github.com/jansel
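The control flow, reduced to a hedged sketch: select_gemm_kernel below is a hypothetical helper, not the real inductor API, while NoValidChoicesError mirrors the exception added in select_algorithm.py in this patch.

class NoValidChoicesError(RuntimeError):
    pass


def select_gemm_kernel(choices, autotune, aten_fallback):
    # An empty choice list (e.g. CUTLASS disabled because of dynamic shapes
    # and no other backend enabled), or a tuning run in which every choice
    # turns out invalid, both resolve to the ATen kernel.
    if not choices:
        return aten_fallback
    try:
        return autotune(choices)
    except NoValidChoicesError:
        return aten_fallback


def fail_all(_choices):
    raise NoValidChoicesError("no choice compiled successfully")


assert select_gemm_kernel([], fail_all, "aten_mm") == "aten_mm"
assert select_gemm_kernel(["cutlass_gemm_0"], fail_all, "aten_mm") == "aten_mm"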
diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py index db8f3aaaba..1575aaa573 100644 --- a/test/inductor/test_aot_inductor.py +++ b/test/inductor/test_aot_inductor.py @@ -6,6 +6,7 @@ import tempfile import types import unittest from typing import Dict, Tuple +from unittest import skip import torch import torch._inductor @@ -396,6 +397,7 @@ class AOTInductorTestsTemplate: ) self.check_model(Model(), example_inputs) + @skip("Test was marked as expected failure, but does not fail always anymore.") def test_dynamic_smem_above_default_limit(self): class Model(torch.nn.Module): def forward(self, x, y): @@ -2675,7 +2677,6 @@ def fail_non_abi_compatible_cuda(is_skip=False): # test_failures, xfail by default, set is_skip=True to skip CPU_TEST_FAILURES = { "test_add_complex": fail_stack_allocation(is_skip=True), - "test_addmm_multiple_dynamic": fail_with_and_without_stack_allocation(), "test_bmm_multiple_dynamic": fail_with_and_without_stack_allocation(), # FIXME: failed with Segfault while exiting the Python runtime "test_duplicate_constant_folding": fail_with_and_without_stack_allocation( @@ -2685,7 +2686,6 @@ CPU_TEST_FAILURES = { "test_dynamic_cat": fail_minimal_arrayref_interface(), # https://github.com/pytorch/pytorch/issues/122978 "test_dynamic_scalar": fail_stack_allocation(is_skip=True), - "test_dynamic_smem_above_default_limit": fail_with_and_without_stack_allocation(), # https://github.com/pytorch/pytorch/issues/122980 "test_fft_c2c": fail_stack_allocation(is_skip=True), # TODO: test_freezing_abi_compatible_cpu somehow fails on CI but not locally, @@ -2920,14 +2920,10 @@ copy_tests( "non_abi_compatible_cpu", # test_failures, xfail by default, set is_skip=True to skip { - "test_addmm_multiple_dynamic": TestFailure(("non_abi_compatible_cpu",)), "test_bmm_multiple_dynamic": TestFailure(("non_abi_compatible_cpu",)), "test_duplicate_constant_folding": TestFailure( ("non_abi_compatible_cpu",), is_skip=True ), - "test_dynamic_smem_above_default_limit": TestFailure( - ("non_abi_compatible_cpu",) - ), # TODO: test_freezing_non_abi_compatible_cpu somehow fails on CI but not locally, # NotImplementedError: Cannot access storage of OpaqueTensorImpl "test_freezing": TestFailure(("non_abi_compatible_cpu",), is_skip=True), diff --git a/test/inductor/test_cutlass_backend.py b/test/inductor/test_cutlass_backend.py index 0bcd895b00..d43d71afd4 100644 --- a/test/inductor/test_cutlass_backend.py +++ b/test/inductor/test_cutlass_backend.py @@ -130,7 +130,7 @@ class TestCutlassBackend(TestCase): # TODO: Enable dynamic test cases when dynamic support is added. @unittest.skipIf(not SM75OrLater, "need sm_75") @unittest.skipIf(config.is_fbcode(), "fbcode requires different CUTLASS path setup") - @parametrize("dynamic", (False,)) + @parametrize("dynamic", (False, True)) @parametrize("max_autotune_gemm_backends", ("CUTLASS", "ATen,Triton,CUTLASS")) @unittest.mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()}) def test_max_autotune_cutlass_backend_regular_mm( @@ -398,12 +398,7 @@ class TestCutlassBackend(TestCase): # Broadcast first dim. compare_results(4096, 25728, 2048, 2.0, 0.4, [2048]) # Broadcast last dim. - if not SM90OrLater and max_autotune_gemm_backends == "CUTLASS": - with self.assertRaisesRegex(RuntimeError, "No choices to select"): - # CUTLASS2 doesn't support Bias last-dim broadcast. 
- compare_results(4096, 25728, 2048, 2.0, 0.4, [4096, 1]) - else: - compare_results(4096, 25728, 2048, 2.0, 0.4, [4096, 1]) + compare_results(4096, 25728, 2048, 2.0, 0.4, [4096, 1]) # TODO: Enable dynamic test cases when dynamic support is added. @unittest.skipIf(not SM80OrLater, "need sm_80") diff --git a/torch/_inductor/kernel/mm.py b/torch/_inductor/kernel/mm.py index f325a5a0b0..3b786d9e6c 100644 --- a/torch/_inductor/kernel/mm.py +++ b/torch/_inductor/kernel/mm.py @@ -6,10 +6,13 @@ import torch from torch._inductor.virtualized import V from .. import config as inductor_config from ..codegen.cuda.gemm_template import CUTLASSGemmTemplate +from ..codegen.wrapper import WrapperCodeGen +from ..ir import FlexibleLayout from ..lowering import register_lowering from ..select_algorithm import ( autotune_select_algorithm, ExternKernelChoice, + NoValidChoicesError, TritonTemplate, ) from ..utils import ( @@ -124,10 +127,18 @@ aten_bias_addmm = ExternKernelChoice(bias_addmm, None) def tuned_mm(mat1, mat2, *, layout=None): m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout) - # options to tune from - choices = [aten_mm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else [] + aten_layout = layout + if not use_max_autotune(): + aten_layout = FlexibleLayout( + device=layout.device, dtype=layout.dtype, size=layout.size + ) - if use_triton_template(layout): + # options to tune from + choices = ( + [aten_mm.bind((mat1, mat2), aten_layout)] if use_aten_gemm_kernels() else [] + ) + static_shape, is_nonzero = _is_static_problem([mat1, mat2], layout) + if is_nonzero and use_triton_template(layout): for config in mm_configs(m, n, k): mm_template.maybe_append_choice( choices, @@ -136,26 +147,38 @@ def tuned_mm(mat1, mat2, *, layout=None): **mm_options(config, m, n, k, layout), ) - if use_cutlass_template(layout, m, n, k): - CUTLASSGemmTemplate.add_cutlass_gemm_choices( - choices, layout, [mat1, mat2], fuseable=True, non_fuseable=True - ) - - from torch._inductor.ir import FixedLayout, FlexibleLayout - - if ( - len(choices) == 1 - and use_aten_gemm_kernels() - and isinstance(layout, FixedLayout) - ): - # If we are not autotuning, we can swap to a FlexibleLayout - # in order to get fusion optimizations to kick in, e.g. 
ConcatFusion - layout = FlexibleLayout( - device=layout.device, dtype=layout.dtype, size=layout.size - ) - choices = [aten_mm.bind((mat1, mat2), layout)] - - return autotune_select_algorithm("mm", choices, [mat1, mat2], layout) + if static_shape and is_nonzero and use_cutlass_template(layout, m, n, k): + CUTLASSGemmTemplate.add_cutlass_gemm_choices(choices, layout, [mat1, mat2]) + + if len(choices) == 0 and not use_aten_gemm_kernels(): + log.warning("No choices for GEMM, using ATen backend as fallback") + choices.append(aten_mm.bind((mat1, mat2), aten_layout)) + try: + return autotune_select_algorithm("mm", choices, [mat1, mat2], layout) + except NoValidChoicesError: + log.warning("All choices for GEMM were invalid, using ATen backend as fallback") + return aten_mm.bind((mat1, mat2), aten_layout).output_node() + + +def _is_static_problem(inputs_tensors, layout): + # checks whether all input tensors and the output layout + # have a static shape by attempting to convert the dimensions + # to int + static_shape = True + static_size = WrapperCodeGen.statically_known_list_of_ints_or_none(layout.size) + if static_size is None: + nonzero = True + for s in layout.size: + sz = WrapperCodeGen.statically_known_int_or_none(s) + if sz is not None and sz == 0: + nonzero = False + break + return False, nonzero + numel = 1 + for dim in static_size: + numel *= dim + nonzero = numel > 0 + return static_shape, nonzero @register_lowering(aten._int_mm, type_promotion_kind=None) @@ -163,22 +186,22 @@ def tuned_int_mm(mat1, mat2, *, layout=None): m, n, k, layout, mat1, mat2 = mm_args( mat1, mat2, layout=layout, out_dtype=torch.int32 ) + static_shape, is_nonzero = _is_static_problem([mat1, mat2], layout) + use_cutlass = static_shape and is_nonzero and use_cutlass_template(layout, m, n, k) + choices = ( [aten__int_mm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else [] ) # TODO: Re-enable eager mode implementation once cuBLAS is fixed - if m * n != 0 and ( - use_cutlass_template(layout, m, n, k) - or use_triton_template(layout, enable_int32=True) - ): + if use_cutlass or use_triton_template(layout, enable_int32=True): choices = [] - if m * n != 0 and use_cutlass_template(layout, m, n, k): + if use_cutlass: CUTLASSGemmTemplate.add_cutlass_gemm_choices( choices, layout, [mat1, mat2], fuseable=True, non_fuseable=True ) - if m * n != 0 and use_triton_template(layout, enable_int32=True): + if is_nonzero and use_triton_template(layout, enable_int32=True): for config in int8_mm_configs(m, n, k): mm_template.maybe_append_choice( choices, @@ -186,13 +209,20 @@ def tuned_int_mm(mat1, mat2, *, layout=None): layout=layout, **mm_options(config, m, n, k, layout), ) - return autotune_select_algorithm("int_mm", choices, [mat1, mat2], layout) + try: + return autotune_select_algorithm("int_mm", choices, [mat1, mat2], layout) + except NoValidChoicesError: + log.warning("All choices for GEMM were invalid, using ATen backend as fallback") + choices = [aten__int_mm.bind((mat1, mat2), layout)] + return autotune_select_algorithm("int_mm", choices, [mat1, mat2], layout) @register_lowering(aten.addmm, type_promotion_kind=None) def tuned_addmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None): + ordered_kwargs_for_cpp_kernel = ("beta", "alpha") m, n, k, layout, mat1, mat2, inp_expanded = mm_args(mat1, mat2, inp, layout=layout) - if m * n == 0 or not use_max_autotune(): + static_shape, is_nonzero = _is_static_problem([inp, mat1, mat2], layout) + if (not is_nonzero) or (not use_max_autotune()): # Use a FlexibleLayout if we are not 
autotuning. # This allows padding strides for the output. from torch._inductor.ir import FixedLayout, FlexibleLayout @@ -242,7 +272,7 @@ def tuned_addmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None): ), ) - if use_triton_template(layout): + if is_nonzero and use_triton_template(layout): for config in mm_configs(m, n, k): mm_template.maybe_append_choice( choices, @@ -253,20 +283,57 @@ def tuned_addmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None): epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta), ) - if use_cutlass_template(layout, m, n, k): + if static_shape and is_nonzero and use_cutlass_template(layout, m, n, k): CUTLASSGemmTemplate.add_cutlass_gemm_choices( choices, layout, [mat1, mat2, inp_expanded], alpha=alpha, beta=beta, - input_reorder=[2, 0, 1], - fuseable=False, ) - return autotune_select_algorithm( - "addmm", choices, [inp_expanded, mat1, mat2], layout - ) + use_aten = use_aten_gemm_kernels() + if len(choices) == 0 and not use_aten: + log.warning("No choices for GEMM, using ATen backend as fallback") + use_aten = True + + if use_aten: + choices.append( + aten_addmm.bind( + (inp_expanded, mat1, mat2), + layout, + ordered_kwargs_for_cpp_kernel, + alpha=alpha, + beta=beta, + ) + ) + + if ( + inp_expanded.get_stride()[0] == 0 + and inp_expanded.get_device().type == "cuda" + and inductor_config.triton.autotune_cublasLt + ): + # unexpand inp to make sure fused addmm from cublasLt is used + choices.insert( + 0, + aten_bias_addmm.bind( + (inp_expanded, mat1, mat2), layout, alpha=alpha, beta=beta + ), + ) + try: + return autotune_select_algorithm( + "addmm", choices, [inp_expanded, mat1, mat2], layout + ) + except NoValidChoicesError: + log.warning("All choices for GEMM were invalid, using ATen backend as fallback") + fallback_choice = aten_addmm.bind( + (inp, mat1, mat2), + layout, + ordered_kwargs_for_cpp_kernel, + alpha=alpha, + beta=beta, + ) + return fallback_choice.output_node() def fallback_mixed_mm(mat1, mat2, *, out): @@ -284,6 +351,7 @@ def _is_sm7x_or_older_gpu(index: Optional[int]) -> bool: def tuned_mixed_mm(mat1, mat2, mat2_dtype): m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=None) + static_shape, is_nonzero = _is_static_problem([mat1, mat2], layout) fallback = aten_fallback_mixed_mm.bind((mat1, mat2), layout) @@ -307,14 +375,13 @@ def tuned_mixed_mm(mat1, mat2, mat2_dtype): **mm_options(config, m, n, k, layout, b_prologue_cast_type), ) - if m * n != 0 and use_cutlass_template(layout, m, n, k): + if static_shape and is_nonzero and use_cutlass_template(layout, m, n, k): CUTLASSGemmTemplate.add_cutlass_gemm_choices( choices, layout, [mat1, mat2], fuseable=True, non_fuseable=True ) if skip_triton and not choices: choices = [fallback] - return autotune_select_algorithm("mixed_mm", choices, [mat1, mat2], layout) diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index b50950bae4..5360c41765 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -880,6 +880,10 @@ class ErrorFromChoice(RuntimeError): self.choice = choice +class NoValidChoicesError(RuntimeError): + pass + + class AlgorithmSelectorCache(PersistentCache): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)
2.41.0
d4bef624842a73ff3c47bee0155568c21f73c08
Thu, 18 Apr 2024 12:50:31 +0000
[PATCH 0453/1000] Intel GPU oneDNN upstreaming: Conv primitive integration (#117512)
# Motivation This PR is part of RFC #114848. It depends on the oneDNN compilation work in #117098 and the basic integration support in #117112; some runtime support is needed from #116019. This PR provides the oneDNN integration code for Convolution- and Deconvolution-related operators. All aten convolution operators (conv, deconv, and conv-pointwise fusion) go through this layer before executing the oneDNN primitive. The integration code is responsible for providing the correct memory descriptors for the primitive, together with the primitive attribute description. With this PR landed, we add the Conv-related operators along with their registration. Co-authored-by: xiaolil1 <xiaoli.liu@intel.com> Co-authored-by: lei,zhenyuan <zhenyuan.lei@intel.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/117512 Approved by: https://github.com/EikanWang, https://github.com/malfet
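For orientation, a hedged usage sketch: this is the ordinary ATen convolution API, which the integration layer added here lowers onto oneDNN convolution primitives for the XPU device. It assumes a PyTorch build with Intel GPU support; the torch.xpu availability check is part of that assumption.

import torch
import torch.nn.functional as F

# Illustrative only: requires an XPU-enabled PyTorch build and an Intel GPU.
if torch.xpu.is_available():
    x = torch.randn(8, 3, 32, 32, device="xpu")
    w = torch.randn(16, 3, 3, 3, device="xpu")
    # Dispatches through the oneDNN Conv integration (memory descriptors,
    # primitive attributes, post-ops) described in this PR.
    y = F.conv2d(x, w, stride=1, padding=1)
    print(y.shape)  # torch.Size([8, 16, 32, 32])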
diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/Conv.cpp b/aten/src/ATen/native/mkldnn/xpu/detail/Conv.cpp new file mode 100644 index 0000000000..6e1cf0e222 --- /dev/null +++ b/aten/src/ATen/native/mkldnn/xpu/detail/Conv.cpp @@ -0,0 +1,451 @@ +#include <c10/xpu/XPUFunctions.h> + +#include <ATen/ATen.h> +#include <ATen/core/grad_mode.h> +#include <ATen/record_function.h> +#include <c10/core/MemoryFormat.h> + +#include <ATen/native/mkldnn/xpu/detail/Attr.h> +#include <ATen/native/mkldnn/xpu/detail/Utils.h> + +#include <oneapi/dnnl/dnnl.hpp> + +namespace at::native::onednn { + +constexpr int src_batch_size_dim = 0; +constexpr int weight_dst_channels_dim = 0; + +static inline dnnl::memory::dims conv_dst_tz( + int64_t ndim, + IntArrayRef src_size, + IntArrayRef weight_size, + IntArrayRef padding_front_top_left, + IntArrayRef padding_back_bottom_right, + IntArrayRef stride, + IntArrayRef dilation) { + bool has_dilation = dilation.size() > 0; + dnnl::memory::dims dst_size(ndim); + dst_size[0] = src_size[src_batch_size_dim]; + dst_size[1] = weight_size[weight_dst_channels_dim]; + for (int d = 2; d < ndim; ++d) { + auto dilate = has_dilation ? dilation[d - 2] : 1; + auto kernel = dilate * (weight_size[d] - 1) + 1; + dst_size[d] = + (src_size[d] + + (padding_front_top_left[d - 2] + padding_back_bottom_right[d - 2]) - + kernel) / + stride[d - 2] + + 1; + } + return dst_size; +} + +static inline dnnl::memory::dims compatible_dilation(IntArrayRef& dilation) { + dnnl::memory::dims ret = dilation.vec(); + for (auto it = ret.begin(); it != ret.end(); it++) { + *it -= 1; + } + return ret; +} + +static inline dnnl::memory::format_tag conv_src_fmt( + const int64_t ndim, + const bool is_channels_last = false) { + if (!is_channels_last) { + return (ndim == 3) + ? dnnl::memory::format_tag::ncw + : ((ndim == 4) ? dnnl::memory::format_tag::nchw + : ((ndim == 5) ? dnnl::memory::format_tag::ncdhw + : dnnl::memory::format_tag::undef)); + } else { + return (ndim == 3) + ? dnnl::memory::format_tag::nwc + : ((ndim == 4) ? dnnl::memory::format_tag::nhwc + : ((ndim == 5) ? dnnl::memory::format_tag::ndhwc + : dnnl::memory::format_tag::undef)); + } +} + +static inline dnnl::memory::format_tag conv_weight_fmt( + const int64_t ndim, + const bool grouped = false, + const bool is_channels_last = false) { + if (!is_channels_last) { + return (ndim == 3) + ? (grouped ? dnnl::memory::format_tag::goiw : dnnl::memory::format_tag::oiw) + : (ndim == 4) + ? (grouped ? dnnl::memory::format_tag::goihw : dnnl::memory::format_tag::oihw) + : ((ndim == 5) ? (grouped ? dnnl::memory::format_tag::goidhw + : dnnl::memory::format_tag::oidhw) + : dnnl::memory::format_tag::undef); + } else { + return (ndim == 3) + ? (grouped ? dnnl::memory::format_tag::gowi : dnnl::memory::format_tag::owi) + : (ndim == 4) + ? (grouped ? dnnl::memory::format_tag::gohwi : dnnl::memory::format_tag::ohwi) + : ((ndim == 5) ? (grouped ? dnnl::memory::format_tag::godhwi + : dnnl::memory::format_tag::odhwi) + : dnnl::memory::format_tag::undef); + } +} + +static inline dnnl::memory::dims compatible_weight_dims( + const int64_t ndim, + const int64_t groups, + const int64_t oc, + const int64_t ic, + const IntArrayRef wsizes) { + if (ndim == 3) { + auto kw = wsizes[2]; + return (groups != 1) ? dnnl::memory::dims({groups, oc / groups, ic / groups, kw}) + : dnnl::memory::dims({oc, ic, kw}); + } else if (ndim == 4) { + auto kh = wsizes[2]; + auto kw = wsizes[3]; + return (groups != 1) + ? 
dnnl::memory::dims({groups, oc / groups, ic / groups, kh, kw}) + : dnnl::memory::dims({oc, ic, kh, kw}); + } else if (ndim == 5) { + auto kd = wsizes[2]; + auto kh = wsizes[3]; + auto kw = wsizes[4]; + return (groups != 1) + ? dnnl::memory::dims({groups, oc / groups, ic / groups, kd, kh, kw}) + : dnnl::memory::dims({oc, ic, kd, kh, kw}); + } + + return {}; +} + +static std::tuple< + dnnl::memory::desc, + dnnl::memory::desc, + dnnl::memory::desc> + conv_get_md( + const at::Tensor& src, + const at::Tensor& weight, + const at::Tensor& dst, + int64_t groups, + bool is_channels_last) { + // create memory desc from the src/weight/dst tensors + dnnl::memory::desc src_usr_md, weight_usr_md, dst_usr_md; + auto ndim = src.ndimension(); + auto fmt_src = + conv_src_fmt(ndim, is_channels_last); + + auto src_size = src.sizes().vec(); + auto src_data_t = get_onednn_dtype_include_double(src); + src_usr_md = dnnl::memory::desc(src_size, src_data_t, fmt_src); + + auto dst_size = dst.sizes().vec(); + auto dst_data_t = get_onednn_dtype_include_double(dst); + dst_usr_md = dnnl::memory::desc(dst_size, dst_data_t, fmt_src); + + auto ic = src.size(1); + auto oc = dst.size(1); + auto wei_data_t = get_onednn_dtype_include_double(weight); + dnnl::memory::dims weight_size = + compatible_weight_dims(ndim, groups, oc, ic, weight.sizes()); + auto fmt_weight = conv_weight_fmt( + ndim, + groups != 1, + is_channels_last); + weight_usr_md = dnnl::memory::desc(weight_size, wei_data_t, fmt_weight); + + return {src_usr_md, weight_usr_md, dst_usr_md}; +} + +sycl::event convolution( + at::Tensor& dst, + const at::Tensor& src, + const at::Tensor& weight, + const at::Tensor& bia, + IntArrayRef padding_front_top_left, + IntArrayRef padding_back_bottom_right, + IntArrayRef stride, + IntArrayRef dilation, + int64_t groups, + Attr& attr, + const std::vector<sycl::event>& deps) { + auto engine = + GpuEngineManager::Instance().get_engine({c10::kXPU, c10::xpu::current_device()}); + auto stream = GpuStreamManager::Instance().get_stream(); + + bool is_channels_last = use_channels_last_for_conv(src, weight, false); + + // create usr_md for tensors, and md for conv primitive + dnnl::memory::desc src_md, weight_md, dst_md; + std::tie(src_md, weight_md, dst_md) = conv_get_md(src, weight, dst, groups, is_channels_last); + + auto bia_fmt = dnnl::memory::format_tag::x; + auto bia_md = bia.defined() + ? 
dnnl::memory::desc( + {dst.size(1)}, get_onednn_dtype_include_double(bia), bia_fmt) + : dnnl::memory::desc(); + + // create conv primitive descriptor + dnnl::memory::dims _stride = stride.vec(); + dnnl::memory::dims _padding_front_top_left = padding_front_top_left.vec(); + dnnl::memory::dims _padding_back_bottom_right = padding_back_bottom_right.vec(); + dnnl::memory::dims _dilation = compatible_dilation(dilation); + + // extract post ops + dnnl::primitive_attr pattr; + dnnl::post_ops po = attr.extract_post_ops(dst); + pattr.set_post_ops(po); + + pattr.set_scratchpad_mode(dnnl::scratchpad_mode::user); + + #if ONEDNN_SUPPORT_DETERMINISTIC + if(at::globalContext().deterministicAlgorithms()) + pattr.set_deterministic(true); + #endif + + auto conv_fwd_pd = dnnl::convolution_forward::primitive_desc( + engine, + dnnl::prop_kind::forward, + dnnl::algorithm::convolution_direct, + src_md, + weight_md, + bia_md, + dst_md, + _stride, + _dilation, + _padding_front_top_left, + _padding_back_bottom_right, + pattr); + + dnnl::memory src_m, weight_m, dst_m, bia_m; + at::Tensor src_blocked, weight_blocked, dst_blocked = dst; + + src_m = make_onednn_memory(src_md, engine, src.data_ptr()); + weight_m = make_onednn_memory(weight_md, engine, weight.data_ptr()); + dst_m = make_onednn_memory(dst_md, engine, dst.data_ptr()); + + + std::unordered_map<int, dnnl::memory> args; + if (bia.defined()) { + bia_m = make_onednn_memory(bia_md, engine, bia.data_ptr()); + args.insert({DNNL_ARG_BIAS, bia_m}); + } + auto expected_dst_md = conv_fwd_pd.dst_desc(); + if (attr.with_binary()) + attr.construct_post_binary(conv_fwd_pd, args); + + args.insert({DNNL_ARG_SRC, src_m}); + args.insert({DNNL_ARG_WEIGHTS, weight_m}); + args.insert({DNNL_ARG_DST, dst_m}); + + size_t scratchpad_size = conv_fwd_pd.scratchpad_desc().get_size(); + at::Tensor scratchpad_tensor = at::empty( + {static_cast<int64_t>(scratchpad_size)}, src.options().dtype(at::kByte), c10::nullopt); + auto scratchpad_m = make_onednn_memory( + conv_fwd_pd.scratchpad_desc(), engine, scratchpad_tensor.data_ptr()); + args.insert({DNNL_ARG_SCRATCHPAD, scratchpad_m}); + + auto conv_forward = dnnl::convolution_forward(conv_fwd_pd); + auto conv_fwd_event = dnnl::sycl_interop::execute(conv_forward, stream, args, deps); + + return conv_fwd_event; +} + +sycl::event convolution_backward_weights( + at::Tensor& diff_weight, + at::Tensor& diff_bia, + const at::Tensor& diff_dst, + const at::Tensor& src, + IntArrayRef diff_weight_aten_size, + IntArrayRef padding_front_top_left, + IntArrayRef padding_back_bottom_right, + IntArrayRef stride, + IntArrayRef dilation, + int64_t groups, + const std::vector<sycl::event>& deps) { + auto engine = + GpuEngineManager::Instance().get_engine({c10::kXPU, c10::xpu::current_device()}); + auto stream = GpuStreamManager::Instance().get_stream(); + + bool is_channels_last = use_channels_last_for_conv(src, diff_dst, /*is_transposed=*/false); + + // create dnnl::memory desc + dnnl::memory::desc src_md, weight_md, dst_md; + std::tie(src_md, weight_md, dst_md) = + conv_get_md(src, diff_weight, diff_dst, groups, is_channels_last); + dnnl::memory::format_tag bia_fmt = dnnl::memory::format_tag::x; + auto bia_md = diff_bia.defined() + ? 
dnnl::memory::desc({diff_dst.size(1)}, src_md.get_data_type(), bia_fmt) + : dnnl::memory::desc(); + + // create fwd primitive hint + dnnl::memory::dims _stride = stride.vec(); + dnnl::memory::dims _padding_front_top_left = padding_front_top_left.vec(); + dnnl::memory::dims _padding_back_bottom_right = padding_back_bottom_right.vec(); + dnnl::memory::dims _dilation = compatible_dilation(dilation); + dnnl::primitive_attr pattr; + + #if ONEDNN_SUPPORT_DETERMINISTIC + if(at::globalContext().deterministicAlgorithms()) + pattr.set_deterministic(true); + #endif + + pattr.set_scratchpad_mode(dnnl::scratchpad_mode::user); + auto conv_fwd_pd = dnnl::convolution_forward::primitive_desc( + engine, + dnnl::prop_kind::forward, + dnnl::algorithm::convolution_direct, + src_md, + weight_md, + bia_md, + dst_md, + _stride, + _dilation, + _padding_front_top_left, + _padding_back_bottom_right, + pattr); + + // create bwd weight primitive + auto conv_bwd_w_pd = dnnl::convolution_backward_weights::primitive_desc( + engine, + dnnl::algorithm::convolution_direct, + src_md, + weight_md, + bia_md, + dst_md, + _stride, + _dilation, + _padding_front_top_left, + _padding_back_bottom_right, + conv_fwd_pd, + pattr); + + // create bwd memory + at::Tensor expected_src, expected_diff_dst, expected_diff_weight; + dnnl::memory src_m, diff_dst_m, diff_weight_m; + + src_m = make_onednn_memory(src_md, engine, src.data_ptr()); + diff_dst_m = make_onednn_memory(dst_md, engine, diff_dst.data_ptr()); + diff_weight_m = make_onednn_memory(weight_md, engine, diff_weight.data_ptr()); + + // insert args + std::unordered_map<int, dnnl::memory> args; + args.insert({DNNL_ARG_DIFF_DST, diff_dst_m}); + args.insert({DNNL_ARG_SRC, src_m}); + args.insert({DNNL_ARG_DIFF_WEIGHTS, diff_weight_m}); + if (diff_bia.defined()) { + dnnl::memory diff_bia_m = + make_onednn_memory(bia_md, engine, diff_bia.data_ptr()); + args.insert({DNNL_ARG_DIFF_BIAS, diff_bia_m}); + } + + size_t scratchpad_size = conv_bwd_w_pd.scratchpad_desc().get_size(); + at::Tensor scratchpad_tensor = at::empty( + {static_cast<int64_t>(scratchpad_size)}, src.options().dtype(at::kByte), c10::nullopt); + auto scratchpad_m = make_onednn_memory( + conv_bwd_w_pd.scratchpad_desc(), engine, scratchpad_tensor.data_ptr()); + args.insert({DNNL_ARG_SCRATCHPAD, scratchpad_m}); + + // execute primitive + auto conv_bwd_w = dnnl::convolution_backward_weights(conv_bwd_w_pd); + sycl::event conv_bwd_w_event = dnnl::sycl_interop::execute(conv_bwd_w, stream, args, deps); + + return conv_bwd_w_event; +} + +sycl::event convolution_backward_data( + at::Tensor& diff_src, + const at::Tensor& diff_dst, + const at::Tensor& weight, + IntArrayRef padding_front_top_left, + IntArrayRef padding_back_bottom_right, + IntArrayRef stride, + IntArrayRef dilation, + int64_t groups, + bool bias_defined, + const std::vector<sycl::event>& deps) { + auto engine = + GpuEngineManager::Instance().get_engine({c10::kXPU, c10::xpu::current_device()}); + auto stream = GpuStreamManager::Instance().get_stream(); + + bool is_channels_last = use_channels_last_for_conv(diff_dst, weight, /*is_transposed=*/false); + + // create memory desc + dnnl::memory::desc src_md, weight_md, dst_md; + std::tie(src_md, weight_md, dst_md) = + conv_get_md(diff_src, weight, diff_dst, groups, is_channels_last); + dnnl::memory::format_tag bia_fmt = dnnl::memory::format_tag::x; + auto bia_md = bias_defined + ? 
dnnl::memory::desc({diff_dst.size(1)}, weight_md.get_data_type(), bia_fmt) + : dnnl::memory::desc(); + + // create fwd primitive desc hint + dnnl::primitive_attr pattr; + + #if ONEDNN_SUPPORT_DETERMINISTIC + if(at::globalContext().deterministicAlgorithms()) + pattr.set_deterministic(true); + #endif + + pattr.set_scratchpad_mode(dnnl::scratchpad_mode::user); + dnnl::memory::dims _stride = stride.vec(); + dnnl::memory::dims _padding_front_top_left = padding_front_top_left.vec(); + dnnl::memory::dims _padding_back_bottom_right = padding_back_bottom_right.vec(); + dnnl::memory::dims _dilation = compatible_dilation(dilation); + auto conv_forward_pd = dnnl::convolution_forward::primitive_desc( + engine, + dnnl::prop_kind::forward, + dnnl::algorithm::convolution_direct, + src_md, + weight_md, + bia_md, + dst_md, + _stride, + _dilation, + _padding_front_top_left, + _padding_back_bottom_right, + pattr); + + auto conv_backward_data_pd = dnnl::convolution_backward_data::primitive_desc( + engine, + dnnl::algorithm::convolution_direct, + src_md, + weight_md, + dst_md, + _stride, + _dilation, + _padding_front_top_left, + _padding_back_bottom_right, + conv_forward_pd, + pattr); + + // create memory + at::Tensor expected_src, expected_wei, expected_dst; + dnnl::memory diff_dst_m, wei_m, diff_src_m; + + diff_src_m = make_onednn_memory(src_md, engine, diff_src.data_ptr()); + wei_m = make_onednn_memory(weight_md, engine, weight.data_ptr()); + diff_dst_m = make_onednn_memory(dst_md, engine, diff_dst.data_ptr()); + + + // insert args + std::unordered_map<int, dnnl::memory> args; + size_t scratchpad_size = conv_backward_data_pd.scratchpad_desc().get_size(); + at::Tensor scratchpad_tensor = at::empty( + {static_cast<int64_t>(scratchpad_size)}, diff_dst.options().dtype(at::kByte), c10::nullopt); + auto scratchpad_memory = make_onednn_memory( + conv_backward_data_pd.scratchpad_desc(), + engine, + scratchpad_tensor.data_ptr()); + args.insert({DNNL_ARG_SCRATCHPAD, scratchpad_memory}); + args.insert({DNNL_ARG_DIFF_DST, diff_dst_m}); + args.insert({DNNL_ARG_WEIGHTS, wei_m}); + args.insert({DNNL_ARG_DIFF_SRC, diff_src_m}); + + // execute primitive + auto conv_backward_data = + dnnl::convolution_backward_data(conv_backward_data_pd); + auto conv_backward_data_event = dnnl::sycl_interop::execute(conv_backward_data, stream, args, deps); + return conv_backward_data_event; + +} + +} // namespace at::native::onednn diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/Deconv.cpp b/aten/src/ATen/native/mkldnn/xpu/detail/Deconv.cpp new file mode 100644 index 0000000000..b8465c62c7 --- /dev/null +++ b/aten/src/ATen/native/mkldnn/xpu/detail/Deconv.cpp @@ -0,0 +1,435 @@ +#include <c10/xpu/XPUFunctions.h> +#include <ATen/ATen.h> + +#include <oneapi/dnnl/dnnl.hpp> +#include <ATen/native/mkldnn/xpu/detail/oneDNNContext.h> +#include <ATen/native/mkldnn/xpu/detail/Utils.h> +#include <ATen/native/mkldnn/xpu/detail/Attr.h> + +namespace at::native::onednn { + +static inline dnnl::memory::dims deconv_compatible_dilation(IntArrayRef& dilation) { + dnnl::memory::dims ret = dilation.vec(); + for (auto it = ret.begin(); it != ret.end(); it++) { + *it -= 1; + } + return ret; +} + +static inline std::vector<int64_t> compatible_groups_deconv_strides( + const at::Tensor& weight, + dnnl::memory::dims group_size) { + std::vector<int64_t> strides = weight.strides().vec(); + strides[0] = weight.strides()[1]; + strides[1] = weight.strides()[0]; + strides.insert(strides.begin(), group_size[2] * weight.strides()[0]); + return strides; +} + 
+dnnl::memory::dims deconv_dst_size( + IntArrayRef src_size, + IntArrayRef weight_size, + IntArrayRef padding, + IntArrayRef stride, + IntArrayRef dilation, + IntArrayRef dst_padding, + int64_t groups) { + auto dim = src_size.size(); + dnnl::memory::dims dst_size(dim); + auto kernel_size = weight_size.slice(2); + + dst_size[0] = src_size[0]; + dst_size[1] = weight_size[1] * groups; + for (size_t d = 2; d < dim; ++d) { + dst_size[d] = (src_size[d] - 1) * stride[d - 2] - 2 * padding[d - 2] + + (dilation[d - 2] * (kernel_size[d - 2] - 1) + 1) + dst_padding[d - 2]; + } + return dst_size; +} + +static inline dnnl::memory::format_tag deconv_src_fmt( + const int64_t ndim, + const bool is_channels_last = false) { + // 3D: n/c/w (n/w/c) [a/b/c (a/c/b)] + // 4D: n/c/h/w (n/h/w/c) [a/b/c/d (a/c/d/b)] + // 5D: n/c/d/h/w (n/d/h/w/c) [a/b/c/d/e (a/c/d/e/b)] + if (!is_channels_last) { + return (ndim == 3) + ? dnnl::memory::format_tag::ncw + : ((ndim == 4) ? dnnl::memory::format_tag::nchw + : ((ndim == 5) ? dnnl::memory::format_tag::ncdhw + : dnnl::memory::format_tag::undef)); + } else { + return (ndim == 3) + ? dnnl::memory::format_tag::nwc + : ((ndim == 4) ? dnnl::memory::format_tag::nhwc + : ((ndim == 5) ? dnnl::memory::format_tag::ndhwc + : dnnl::memory::format_tag::undef)); + } +} + +static inline std::vector<int64_t> deconv_weight_fmt( + const at::Tensor& weight, + const int64_t ndim, + dnnl::memory::dims weight_size, + const bool grouped = false, + const bool is_channels_last = false) { + // 3D fmt: (g)i/o/w ((g)i/w/o) [b/a/c (b/c/a)] + // 4D fmt: (g)i/o/h/w ((g)i/h/w/o) [b/a/c/d (b/c/d/a)] + // 5D fmt: (g)i/o/d/h/w ((g)i/d/h/w/o) [b/a/c/d/e (b/c/d/e/a)] + auto strides_ = weight.strides().vec(); + std::vector<int64_t> strides; + if (grouped) { + strides = compatible_groups_deconv_strides(weight, weight_size); + } else { + strides = strides_; + std::swap(strides[0], strides[1]); + } + return strides; +} + +static inline dnnl::memory::dims deconv_compatible_weight_dims( + int64_t ndim, + int64_t groups, + int64_t oc, + int64_t ic, + IntArrayRef weight_size) { + if (ndim == 3) { + auto kw = weight_size[2]; + return (groups != 1) ? dnnl::memory::dims({groups, oc / groups, ic / groups, kw}) + : dnnl::memory::dims({oc, ic, kw}); + } else if (ndim == 4) { + auto kh = weight_size[2]; + auto kw = weight_size[3]; + return (groups != 1) + ? dnnl::memory::dims({groups, oc / groups, ic / groups, kh, kw}) + : dnnl::memory::dims({oc, ic, kh, kw}); + } else if (ndim == 5) { + auto kd = weight_size[2]; + auto kh = weight_size[3]; + auto kw = weight_size[4]; + return (groups != 1) + ? 
dnnl::memory::dims({groups, oc / groups, ic / groups, kd, kh, kw}) + : dnnl::memory::dims({oc, ic, kd, kh, kw}); + } else { + TORCH_CHECK(0, "unsupported dimension in xpu oneDNN deconvolution..."); + } +} + +static std::tuple< + dnnl::memory::desc, + dnnl::memory::desc, + dnnl::memory::desc> +deconv_get_plain_md( + const at::Tensor& src, + const at::Tensor& weight, + const at::Tensor& dst, + int64_t groups, + bool is_channels_last_suggested) { + auto ndim = src.ndimension(); + auto src_data_t = get_onednn_dtype_include_double(src); + auto fmt_src = deconv_src_fmt(ndim, is_channels_last_suggested); + auto src_usr_md = dnnl::memory::desc(src.sizes().vec(), src_data_t, fmt_src); + + auto dst_data_t = get_onednn_dtype_include_double(dst); + auto dst_usr_md = dnnl::memory::desc(dst.sizes().vec(), dst_data_t, fmt_src); + + auto ic = src.size(1); + auto oc = dst.size(1); + dnnl::memory::dims weight_size = + deconv_compatible_weight_dims(ndim, groups, oc, ic, weight.sizes()); + auto weight_dt = get_onednn_dtype_include_double(weight); + auto fmt_weight = deconv_weight_fmt( + weight, ndim, weight_size, groups != 1, is_channels_last_suggested); + dnnl::memory::desc weight_usr_md = dnnl::memory::desc(weight_size, weight_dt, fmt_weight); + + return {src_usr_md, weight_usr_md, dst_usr_md}; +} + +sycl::event deconvolution( + at::Tensor& dst, + const at::Tensor& src, + const at::Tensor& weight, + const at::Tensor& bia, + IntArrayRef stride, + IntArrayRef padding, + IntArrayRef dst_padding, + IntArrayRef dilation, + int64_t groups, + Attr& attr, + const std::vector<sycl::event>& deps) { + auto engine = + GpuEngineManager::Instance().get_engine({c10::kXPU, c10::xpu::current_device()}); + auto stream = GpuStreamManager::Instance().get_stream(); + + bool is_channels_last_suggested = use_channels_last_for_conv(src, weight, /*is_transposed=*/true); + + // create usr_md for tensors, and md for conv primitive + dnnl::memory::desc src_md, weight_md, dst_md; + + std::tie(src_md, weight_md, dst_md) = + deconv_get_plain_md(src, weight, dst, groups, is_channels_last_suggested); + + dnnl::memory::format_tag bia_fmt = dnnl::memory::format_tag::x; + auto bia_md = bia.defined() + ? 
dnnl::memory::desc( + {dst.size(1)}, get_onednn_dtype_include_double(bia), bia_fmt) + : dnnl::memory::desc(); + + // create primitive desc + dnnl::memory::dims _stride = stride.vec(); + dnnl::memory::dims _padding = padding.vec(); + dnnl::memory::dims _dilation = deconv_compatible_dilation(dilation); + + // construct primitive attr + dnnl::primitive_attr pattr; + dnnl::post_ops po = attr.extract_post_ops(dst); + pattr.set_post_ops(po); + #if ONEDNN_SUPPORT_DETERMINISTIC + if(at::globalContext().deterministicAlgorithms()) + pattr.set_deterministic(true); + #endif + + pattr.set_scratchpad_mode(dnnl::scratchpad_mode::user); + + auto deconv_fwd_pd = dnnl::deconvolution_forward::primitive_desc( + engine, + dnnl::prop_kind::forward, + dnnl::algorithm::deconvolution_direct, + src_md, + weight_md, + bia_md, + dst_md, + _stride, + _dilation, + _padding, + _padding, + pattr); + + dnnl::memory src_m, weight_m, dst_m, bia_m; + at::Tensor src_blocked, weight_blocked, dst_blocked = dst; + + src_m = make_onednn_memory(src_md, engine, src.data_ptr()); + weight_m = make_onednn_memory(weight_md, engine, weight.data_ptr()); + dst_m = make_onednn_memory(dst_md, engine, dst.data_ptr()); + + std::unordered_map<int, dnnl::memory> args; + args.insert({DNNL_ARG_SRC, src_m}); + args.insert({DNNL_ARG_WEIGHTS, weight_m}); + args.insert({DNNL_ARG_DST, dst_m}); + + if (bia.defined()) { + auto bia_m = make_onednn_memory(bia_md, engine, bia.data_ptr()); + args.insert({DNNL_ARG_BIAS, bia_m}); + } + if (attr.with_binary()) + attr.construct_post_binary(deconv_fwd_pd, args); + + size_t scratchpad_size = deconv_fwd_pd.scratchpad_desc().get_size(); + at::Tensor scratchpad_tensor = at::empty( + {static_cast<int64_t>(scratchpad_size)}, src.options().dtype(at::kByte), c10::nullopt); + auto scratchpad_m = make_onednn_memory( + deconv_fwd_pd.scratchpad_desc(), engine, scratchpad_tensor.data_ptr()); + args.insert({DNNL_ARG_SCRATCHPAD, scratchpad_m}); + + auto deconv_fwd = dnnl::deconvolution_forward(deconv_fwd_pd); + sycl::event deconv_event = dnnl::sycl_interop::execute(deconv_fwd, stream, args, deps); + return deconv_event; + +} + +sycl::event deconvolution_backward_data( + at::Tensor& diff_src, + const at::Tensor& diff_dst, + const at::Tensor& weight, + IntArrayRef stride, + IntArrayRef padding, + IntArrayRef dilation, + int64_t groups, + bool bias_defined, + const std::vector<sycl::event>& deps) { + auto engine = + GpuEngineManager::Instance().get_engine({c10::kXPU, c10::xpu::current_device()}); + auto stream = GpuStreamManager::Instance().get_stream(); + + bool is_channels_last_suggested = + use_channels_last_for_conv(diff_dst, weight, /*is_transposed=*/true); + // create memory desc + dnnl::memory::desc src_md, weight_md, dst_md; + std::tie(src_md, weight_md, dst_md) = + deconv_get_plain_md( + diff_src, weight, diff_dst, groups, is_channels_last_suggested); + + dnnl::memory::format_tag bia_fmt = dnnl::memory::format_tag::x; + auto bias_md = bias_defined + ? 
dnnl::memory::desc({diff_dst.size(1)}, weight_md.get_data_type(), bia_fmt) + : dnnl::memory::desc(); + + // create fwd primitive desc hint + dnnl::primitive_attr pattr; + pattr.set_scratchpad_mode(dnnl::scratchpad_mode::user); + #if ONEDNN_SUPPORT_DETERMINISTIC + if(at::globalContext().deterministicAlgorithms()) + pattr.set_deterministic(true); + #endif + + dnnl::memory::dims _stride = stride.vec(); + dnnl::memory::dims _padding = padding.vec(); + dnnl::memory::dims _dilation = deconv_compatible_dilation(dilation); + auto deconv_fwd_pd = dnnl::deconvolution_forward::primitive_desc( + engine, + dnnl::prop_kind::forward, + dnnl::algorithm::deconvolution_direct, + src_md, + weight_md, + bias_md, + dst_md, + _stride, + _dilation, + _padding, + _padding, + pattr); + + // create bwd primitive desc + auto deconv_backward_data_pd = dnnl::deconvolution_backward_data::primitive_desc( + engine, + dnnl::algorithm::deconvolution_direct, + src_md, + weight_md, + dst_md, + _stride, + _dilation, + _padding, + _padding, + deconv_fwd_pd); + + // create memory + dnnl::memory diff_dst_m, wei_m, diff_src_m; + + diff_src_m = make_onednn_memory(src_md, engine, diff_src.data_ptr()); + wei_m = make_onednn_memory(weight_md, engine, weight.data_ptr()); + diff_dst_m = make_onednn_memory(dst_md, engine, diff_dst.data_ptr()); + + // insert args + std::unordered_map<int, dnnl::memory> args; + size_t scratchpad_size = deconv_backward_data_pd.scratchpad_desc().get_size(); + at::Tensor scratchpad_tensor = at::empty( + {static_cast<int64_t>(scratchpad_size)}, diff_dst.options().dtype(at::kByte), c10::nullopt); + auto scratchpad_memory = make_onednn_memory( + deconv_backward_data_pd.scratchpad_desc(), + engine, + scratchpad_tensor.data_ptr()); + args.insert({DNNL_ARG_SCRATCHPAD, scratchpad_memory}); + args.insert({DNNL_ARG_DIFF_DST, diff_dst_m}); + args.insert({DNNL_ARG_WEIGHTS, wei_m}); + args.insert({DNNL_ARG_DIFF_SRC, diff_src_m}); + + // execute primitive + auto deconv_backward_data = + dnnl::deconvolution_backward_data(deconv_backward_data_pd); + sycl::event deconv_bwd_data_event = dnnl::sycl_interop::execute(deconv_backward_data, stream, args, deps); + return deconv_bwd_data_event; + +} + +sycl::event deconvolution_backward_weights( + at::Tensor& diff_weight, + at::Tensor& diff_bia, + const at::Tensor& diff_dst, + const at::Tensor& src, + IntArrayRef stride, + IntArrayRef padding, + IntArrayRef dilation, + int64_t groups, + const std::vector<sycl::event>& deps) { + auto engine = + GpuEngineManager::Instance().get_engine({c10::kXPU, c10::xpu::current_device()}); + auto stream = GpuStreamManager::Instance().get_stream(); + + bool is_channels_last_suggested = + use_channels_last_for_conv(src, diff_dst, /*is_transposed=*/true); + + // create memory desc + dnnl::memory::desc src_md, weight_md, dst_md; + std::tie(src_md, weight_md, dst_md) = deconv_get_plain_md( + src, diff_weight, diff_dst, groups, is_channels_last_suggested); + + dnnl::memory::format_tag bia_fmt = dnnl::memory::format_tag::x; + auto bia_md = diff_bia.defined() + ? 
dnnl::memory::desc({diff_dst.size(1)}, src_md.get_data_type(), bia_fmt) + : dnnl::memory::desc(); + + // create fwd primitive desc hint + dnnl::memory::dims _stride = stride.vec(); + dnnl::memory::dims _padding = padding.vec(); + dnnl::memory::dims _dilation = deconv_compatible_dilation(dilation); + dnnl::primitive_attr pattr; + + #if ONEDNN_SUPPORT_DETERMINISTIC + if(at::globalContext().deterministicAlgorithms()) + pattr.set_deterministic(true); + #endif + pattr.set_scratchpad_mode(dnnl::scratchpad_mode::user); + auto deconv_fwd_pd = dnnl::deconvolution_forward::primitive_desc( + engine, + dnnl::prop_kind::forward, + dnnl::algorithm::deconvolution_direct, + src_md, + weight_md, + bia_md, + dst_md, + _stride, + _dilation, + _padding, + _padding, + pattr); + + auto deconv_bwd_w_pd = dnnl::deconvolution_backward_weights::primitive_desc( + engine, + dnnl::algorithm::deconvolution_direct, + src_md, + weight_md, + bia_md, + dst_md, + _stride, + _dilation, + _padding, + _padding, + deconv_fwd_pd, + pattr); + + // create bwd dnnl::memory + dnnl::memory src_m, diff_dst_m, diff_weight_m; + + src_m = make_onednn_memory(src_md, engine, src.data_ptr()); + diff_dst_m = make_onednn_memory(dst_md, engine, diff_dst.data_ptr()); + diff_weight_m = make_onednn_memory(weight_md, engine, diff_weight.data_ptr()); + + // insert args + std::unordered_map<int, dnnl::memory> args; + args.insert({DNNL_ARG_DIFF_DST, diff_dst_m}); + args.insert({DNNL_ARG_SRC, src_m}); + args.insert({DNNL_ARG_DIFF_WEIGHTS, diff_weight_m}); + + if (diff_bia.defined()) { + dnnl::memory diff_bia_m = + make_onednn_memory(bia_md, engine, diff_bia.data_ptr()); + args.insert({DNNL_ARG_DIFF_BIAS, diff_bia_m}); + } + + size_t scratchpad_size = deconv_bwd_w_pd.scratchpad_desc().get_size(); + at::Tensor scratchpad_tensor = at::empty( + {static_cast<int64_t>(scratchpad_size)}, src.options().dtype(at::kByte), c10::nullopt); + auto scratchpad_m = make_onednn_memory( + deconv_bwd_w_pd.scratchpad_desc(), engine, scratchpad_tensor.data_ptr()); + args.insert({DNNL_ARG_SCRATCHPAD, scratchpad_m}); + + // execute primitive + auto deconv_bwd_w = dnnl::deconvolution_backward_weights(deconv_bwd_w_pd); + + sycl::event deconv_bwd_w_event = dnnl::sycl_interop::execute(deconv_bwd_w, stream, args, deps); + return deconv_bwd_w_event; + +} + +} // namespace at::native::onednn diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp b/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp index 73a37d275b..8dd3dc329c 100644 --- a/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp +++ b/aten/src/ATen/native/mkldnn/xpu/detail/Utils.cpp @@ -349,4 +349,32 @@ bool binary_valid( return false; } +static inline bool is_channels_last(at::MemoryFormat fmt){ + return (at::MemoryFormat::ChannelsLast == fmt) || (at::MemoryFormat::ChannelsLast3d == fmt); +} + +static inline bool is_smf_channels_last(const Tensor& t){ + return is_channels_last(t.suggest_memory_format()); +} + +bool use_channels_last_for_conv( + const at::Tensor& src, + const at::Tensor& weight, + bool is_transpose){ + + if (!src.defined() || src.is_sparse()) { + // suggest channels_first + return false; + } + + auto suggest_channels_last_format = + (is_smf_channels_last(src) || is_smf_channels_last(weight)); + if (suggest_channels_last_format) { + // suggest channels_last + return true; + } + + return false; +} + } diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/Utils.h b/aten/src/ATen/native/mkldnn/xpu/detail/Utils.h index 1fcb669d53..2929d3159e 100644 --- a/aten/src/ATen/native/mkldnn/xpu/detail/Utils.h +++ 
b/aten/src/ATen/native/mkldnn/xpu/detail/Utils.h @@ -53,4 +53,9 @@ bool binary_valid( const at::Tensor& other, bool is_fusion = false); +bool use_channels_last_for_conv( + const at::Tensor& src, + const at::Tensor& weight, + bool is_transpose); + } // namespace at::native::onednn diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h index a34edfff36..54669d490f 100644 --- a/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h +++ b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h @@ -5,7 +5,6 @@ #include <ATen/native/mkldnn/xpu/detail/Attr.h> #include <ATen/native/mkldnn/xpu/detail/Utils.h> - namespace at::native::onednn{ TORCH_API sycl::event matmul( @@ -17,4 +16,77 @@ TORCH_API sycl::event matmul( Attr attr, const std::vector<sycl::event>& deps = {}); +TORCH_API sycl::event convolution( + at::Tensor& dst, + const at::Tensor& src, + const at::Tensor& weight, + const at::Tensor& bia, + IntArrayRef padding_front_top_left, + IntArrayRef padding_back_bottom_right, + IntArrayRef stride, + IntArrayRef dilation, + int64_t groups, + Attr& attr, + const std::vector<sycl::event>& deps = {}); + +TORCH_API sycl::event convolution_backward_weights( + at::Tensor& diff_weight, + at::Tensor& diff_bia, + const at::Tensor& diff_dst, + const at::Tensor& src, + IntArrayRef diff_weight_aten_size, + IntArrayRef padding_front_top_left, + IntArrayRef padding_back_bottom_right, + IntArrayRef stride, + IntArrayRef dilation, + int64_t groups, + const std::vector<sycl::event>& deps = {}); + +TORCH_API sycl::event convolution_backward_data( + at::Tensor& diff_src, + const at::Tensor& diff_dst, + const at::Tensor& weight, + IntArrayRef padding_front_top_left, + IntArrayRef padding_back_bottom_right, + IntArrayRef stride, + IntArrayRef dilation, + int64_t groups, + bool bias_defined, + const std::vector<sycl::event>& deps = {}); + +TORCH_API sycl::event deconvolution( + at::Tensor& dst, + const at::Tensor& src, + const at::Tensor& weight, + const at::Tensor& bia, + IntArrayRef stride, + IntArrayRef padding, + IntArrayRef dst_padding, + IntArrayRef dilation, + int64_t groups, + Attr& attr, + const std::vector<sycl::event>& deps = {}); + +TORCH_API sycl::event deconvolution_backward_data( + at::Tensor& diff_src, + const at::Tensor& diff_dst, + const at::Tensor& weight, + IntArrayRef stride, + IntArrayRef padding, + IntArrayRef dilation, + int64_t groups, + bool bias_defined, + const std::vector<sycl::event>& deps = {}); + +TORCH_API sycl::event deconvolution_backward_weights( + at::Tensor& diff_weight, + at::Tensor& diff_bia, + const at::Tensor& diff_dst, + const at::Tensor& src, + IntArrayRef stride, + IntArrayRef padding, + IntArrayRef dilation, + int64_t groups, + const std::vector<sycl::event>& deps = {}); + } // namespace at::native::onednn
2.41.0
d7b5d32b67a1c976addbabc772f4791acab9e7a
Mon, 22 Apr 2024 07:57:02 +0000
[PATCH 0454/1000] Intel GPU oneDNN Upstreaming: Convolution operators support (#117529)
# Motivation This PR is a part of RFC #114848. This PR depends on the oneDNN compilation in #117098, the basic integration support in #117112, and the Conv integration code in #117512. Some runtime support is also needed from #116019. This PR implements the convolution and deconvolution operators for XPU that should be defined in the `aten` libraries, including their backward passes. With this PR, the conv-related operators should be functionally ready. Co-authored-by: xiaolil1 <xiaoli.liu@intel.com> Co-authored-by: lei,zhenyuan <zhenyuan.lei@intel.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/117529 Approved by: https://github.com/EikanWang, https://github.com/malfet ghstack dependencies: #117512
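A minimal usage sketch of what this commit enables (not part of the patch; it assumes a PyTorch build with XPU/oneDNN support and an available `xpu` device). On such a build, `nn.Conv2d` forward and backward dispatch to the `convolution_overrideable` / `convolution_backward_overrideable` kernels registered in the diff below:

```python
# Sketch only (assumption: PyTorch built with XPU/oneDNN support and an
# `xpu` device visible). Forward dispatches to convolution_overrideable,
# backward to convolution_backward_overrideable, both registered by this patch.
import torch
import torch.nn as nn

device = "xpu"
conv = nn.Conv2d(8, 16, kernel_size=3, padding=1).to(device)
x = torch.randn(2, 8, 32, 32, device=device, requires_grad=True)

y = conv(x)            # forward: lowered to onednn::convolution
y.sum().backward()     # backward: onednn::convolution_backward_{data,weights}
print(y.shape)         # torch.Size([2, 16, 32, 32])
print(x.grad.shape)    # torch.Size([2, 8, 32, 32])
```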
diff --git a/aten/src/ATen/native/mkldnn/xpu/Conv.cpp b/aten/src/ATen/native/mkldnn/xpu/Conv.cpp new file mode 100644 index 0000000000..1b5054256e --- /dev/null +++ b/aten/src/ATen/native/mkldnn/xpu/Conv.cpp @@ -0,0 +1,729 @@ +#include <vector> + +#include <ATen/core/ATen_fwd.h> +#include <ATen/core/interned_strings.h> +#include <ATen/ops/full.h> +#include <ATen/ops/neg.h> +#include <c10/core/Scalar.h> +#include <c10/util/Exception.h> +#include <c10/util/Optional.h> +#include <ATen/native/utils/ParamUtils.h> +#include <ATen/native/mkldnn/xpu/detail/oneDNN.h> +#include <torch/library.h> + +using namespace dnnl; +using namespace at::native; +using namespace at::native::onednn; + +namespace at::native { +namespace xpu { +namespace impl { + +struct ConvParams { + std::vector<int64_t> stride; + std::vector<int64_t> padding; + std::vector<int64_t> dilation; + bool transposed; + std::vector<int64_t> output_padding; + int groups; + bool benchmark; + bool deterministic; + + bool is_strided() const; + bool is_dilated() const; + bool is_padded() const; + bool is_output_padding_neg() const; + bool is_output_padding_big() const; + bool is_padding_neg() const; + bool is_stride_nonpos() const; + void view1d_as_2d(); + bool use_cpu_depthwise3x3_winograd( + const at::Tensor& input, + const at::Tensor& weight) const; + bool is_depthwise(const at::Tensor& input, const at::Tensor& weight) const; +}; + +std::ostream& operator<<(std::ostream& out, const ConvParams& params) { + out << "ConvParams {" + << " stride = " << IntArrayRef{params.stride} + << " padding = " << IntArrayRef{params.padding} + << " dilation = " << IntArrayRef{params.dilation} + << " transposed = " << params.transposed + << " output_padding = " << IntArrayRef{params.output_padding} + << " groups = " << params.groups << " benchmark = " << params.benchmark + << " deterministic = " << params.deterministic << "}"; + return out; +} + +bool ConvParams::is_strided() const { + bool is_strided = false; + for (int s : stride) { + is_strided |= (s != 1); + } + return is_strided; +} + +bool ConvParams::is_dilated() const { + bool is_dilated = false; + for (int d : dilation) { + is_dilated |= (d != 1); + } + return is_dilated; +} + +bool ConvParams::is_padded() const { + bool is_padded = false; + for (int p : padding) { + is_padded |= (p != 0); + } + return is_padded; +} + +bool ConvParams::is_output_padding_neg() const { + bool is_non_neg = false; + for (int p : output_padding) { + is_non_neg |= (p < 0); + } + return is_non_neg; +} + +bool ConvParams::is_output_padding_big() const { + bool is_big = false; + for (size_t i = 0; i < output_padding.size(); i++) { + is_big |= + (output_padding[i] >= stride[i] || output_padding[i] >= dilation[i]); + } + return is_big; +} + +bool ConvParams::is_padding_neg() const { + bool is_non_neg = false; + for (int p : padding) { + is_non_neg |= (p < 0); + } + return is_non_neg; +} + +bool ConvParams::is_stride_nonpos() const { + bool is_nonpos = false; + for (int s : stride) { + is_nonpos |= (s <= 0); + } + return is_nonpos; +} + +void ConvParams::view1d_as_2d() { + if (stride.size() == 1) { + stride.insert(stride.begin(), 1); + padding.insert(padding.begin(), 0); + dilation.insert(dilation.begin(), 1); + output_padding.insert(output_padding.begin(), 0); + } +} + +bool ConvParams::use_cpu_depthwise3x3_winograd( + const at::Tensor& input, + const at::Tensor& weight) const { + return false; +} + +bool ConvParams::is_depthwise(const at::Tensor& input, const at::Tensor& weight) + const { + return !transposed && 
input.ndimension() == 4 && input.size(1) == groups && + groups > 1 && // no point if there is only a single group + weight.size(0) % input.size(1) == + 0; // output channels must be a multiple of input channels +} + +static void check_shape_forward( + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& bias, + const ConvParams& params, + bool input_is_mkldnn) { + int64_t k = input.ndimension(); + int64_t weight_dim = weight.ndimension(); + std::vector<int64_t> weight_sizes(weight_dim); + if ((weight_dim == k + 1) && input_is_mkldnn) { + weight_sizes[0] = weight.size(0) * weight.size(1); + std::copy_n(weight.sizes().cbegin() + 2, k - 1, weight_sizes.begin() + 1); + weight_dim = k; + } else { + std::copy_n(weight.sizes().cbegin(), weight_dim, weight_sizes.begin()); + } + int64_t groups = params.groups; + auto padding = params.padding; + auto output_padding = params.output_padding; + auto stride = params.stride; + auto dilation = params.dilation; + bool transposed = params.transposed; + + TORCH_CHECK(!params.is_padding_neg(), "negative padding is not supported"); + TORCH_CHECK( + !params.is_output_padding_neg(), + "negative output_padding is not supported"); + TORCH_CHECK( + !params.is_stride_nonpos(), "non-positive stride is not supported"); + + TORCH_CHECK( + weight_dim == k, + "Expected ", + weight_dim, + "-dimensional input for ", + weight_dim, + "-dimensional weight ", + weight_sizes, + ", but got ", + k, + "-dimensional input of size ", + input.sizes(), + " instead"); + TORCH_CHECK( + weight_sizes[0] >= groups, + "Given groups=", + groups, + ", expected weight to be at least ", + groups, + " at dimension 0, but got weight of size ", + weight_sizes, + " instead"); + TORCH_CHECK( + weight_sizes[0] % groups == 0, + "Given groups=", + groups, + ", expected weight to be divisible by ", + groups, + " at dimension 0, but got weight of size ", + weight_sizes, + " instead"); + + if (!transposed) { + std::vector<int64_t> input_shape; + std::vector<int64_t> kernel_shape; + bool kernel_size_correct = true; + + TORCH_CHECK( + input.size(1) == (weight_sizes[1] * groups), + "Given groups=", + groups, + ", weight of size ", + weight_sizes, + ", expected input", + input.sizes(), + " to have ", + (weight_sizes[1] * groups), + " channels, but got ", + input.size(1), + " channels instead"); + TORCH_CHECK( + !bias.defined() || + (bias.ndimension() == 1 && bias.size(0) == weight_sizes[0]), + "Given weight of size ", + weight_sizes, + ", expected bias to be 1-dimensional with ", + weight_sizes[0], + " elements", + ", but got bias of size ", + bias.sizes(), + " instead"); + + for (int i = 2; i < k; ++i) { + input_shape.push_back(input.size(i) + 2 * padding[i - 2]); + kernel_shape.push_back(dilation[i - 2] * (weight_sizes[i] - 1) + 1); + if (input_shape.back() < kernel_shape.back()) { + kernel_size_correct = false; + } + } + + TORCH_CHECK( + input_shape.size() == kernel_shape.size(), + "Inconsistent shape between Input and Kernel"); + + if (!kernel_size_correct) { + std::ostringstream input_ss; + std::ostringstream kernel_ss; + std::ostringstream output_ss; + std::string separator = ""; + + for (int i = 0, len = input_shape.size(); i < len; ++i) { + input_ss << separator << input_shape[i]; + kernel_ss << separator << kernel_shape[i]; + separator = " x "; + } + + TORCH_CHECK( + 0, + "Calculated padded input size per channel: (", + input_ss.str(), + "). " + "Kernel size: (", + kernel_ss.str(), + "). 
Kernel size can't be greater than actual input size"); + } + } else { + TORCH_CHECK( + input.size(1) == weight_sizes[0], + "Given transposed=", + transposed, + ", weight of size ", + weight_sizes, + ", expected input", + input.sizes(), + " to have ", + weight_sizes[0], + " channels, but got ", + input.size(1), + " channels instead"); + TORCH_CHECK( + !bias.defined() || + (bias.ndimension() == 1 && + bias.size(0) == weight_sizes[1] * groups), + "Given transposed=", + transposed, + ", weight of size ", + weight_sizes, + ", expected bias to be 1-dimensional with ", + weight_sizes[1] * groups, + " elements", + ", but got bias of size ", + bias.sizes(), + " instead"); + } +} + +static at::Tensor view4d(const at::Tensor& tensor) { + TORCH_CHECK( + tensor.ndimension() == 3, + "expected 3D tensor, got tensor with ", + tensor.ndimension(), + " dimensions instead"); + return tensor.unsqueeze(2); +} + +static at::Tensor view3d(const at::Tensor& tensor) { + TORCH_CHECK( + tensor.ndimension() == 4, + "expected 4D tensor, got tensor with ", + tensor.ndimension(), + " dimensions instead"); + return tensor.squeeze(2); +} + +Attr get_onednn_conv_sum_attr( + const Tensor& input_r, + const Tensor& weight_r, + IntArrayRef stride_, + IntArrayRef padding_, + IntArrayRef dilation_, + Tensor& accumu, + double scale, + Tensor& output, + bool& is_fused, + Attr attr = Attr(), + bool force_inplace = false) { + is_fused = true; + if (scale == 0.f) + return attr; + + auto ndim = input_r.ndimension(); + auto output_size = conv_dst_size( + ndim, + input_r.sizes(), + weight_r.sizes(), + padding_, + padding_, + stride_, + dilation_); + MemoryFormat mem_fmt = at::MemoryFormat::Contiguous; + auto input_fmt = input_r.suggest_memory_format(); + auto input_is_cl = (input_fmt == at::MemoryFormat::ChannelsLast || input_fmt == at::MemoryFormat::ChannelsLast3d); + auto weight_fmt = weight_r.suggest_memory_format(); + auto weight_is_cl = (weight_fmt == at::MemoryFormat::ChannelsLast || weight_fmt == at::MemoryFormat::ChannelsLast3d); + + bool propagate_channels_last = input_is_cl || weight_is_cl; + if (propagate_channels_last) + mem_fmt = get_cl_tag_by_ndim(ndim); + + Tensor out = at::empty(output_size, input_r.options().memory_format(mem_fmt)); + if (!onednn::binary_valid(out, accumu)) { + is_fused = false; + return attr; + } + + // For post-sum and post-binary-add, onednn needs sum/binary scale=1.f + // Thus we need the following transformation + // conv(src, wei) + scale * accumu + // scale * (1/scale * conv(src, wei) + sum (or binary)) + if (scale != 1.f) + attr.append_post_eltwise( + /* scale */ 1.f, + /* alpha */ 1.f / scale, + /* beta */ 0.f, + attr.kind_with_linear); + + if (force_inplace) { + // If sizes are the same, post sum is used. + output = accumu; + attr.append_post_sum(/* sum_scale */ 1.f); + } else { + // If sizes are different, post binary is used. 
+ attr.append_post_binary(attr.kind_with_binary_add, accumu); + } + + if (scale != 1.f) + attr.append_post_eltwise( + /* scale */ 1.f, + /* alpha */ scale, + /* beta */ 0.f, + attr.kind_with_linear); + + return attr; +} + +} // namespace impl + +using namespace impl; + +Tensor _convolution_out( + Tensor& output_r, + const Tensor& input_r, + const Tensor& weight_r, + const Tensor& bias_r, + IntArrayRef stride_, + IntArrayRef padding_, + IntArrayRef dilation_, + bool transposed_, + IntArrayRef output_padding_, + int64_t groups_, + Attr attr, + IntArrayRef pad_nd = IntArrayRef({})) { + auto ndim = input_r.ndimension(); + TORCH_CHECK( + 3 == ndim || 4 == ndim || 5 == ndim, + "convolution only supports 3D, 4D, 5D tensor"); + // get computation format for Conv/TransposedConv + bool is_channels_last_suggested = use_channels_last_for_conv(input_r, weight_r, transposed_); + + Tensor input = input_r, weight = weight_r; + // PyTorch does not support ChannelsLast1D case, + // thus we need the transformation here + if (ndim == 3) { + input = view4d(input_r); + weight = view4d(weight_r); + } + // ensure the input/weight/bias/output are congituous in desired format + at::MemoryFormat mfmt = is_channels_last_suggested + ? get_cl_tag_by_ndim(input.ndimension()) + : at::MemoryFormat::Contiguous; + auto bias = bias_r.defined() ? bias_r.contiguous() : bias_r; + input = input.contiguous(mfmt); + weight = weight.contiguous(mfmt); + + auto k = weight.ndimension(); + if (k == input.ndimension() + 1) { + k = input.ndimension(); + } + int64_t dim = k - 2; + TORCH_CHECK(dim > 0, "weight should have at least three dimensions"); + + ConvParams params; + if (ndim == 3) { + // PyTorch does not support ChannelsLast1D case, + // thus we need the transformation here + params.stride = stride_.vec(); + params.padding = padding_.vec(); + params.dilation = dilation_.vec(); + params.transposed = transposed_; + params.output_padding = output_padding_.vec(); + params.groups = groups_; + params.view1d_as_2d(); + } else { + params.stride = expand_param_if_needed(stride_, "stride", dim); + // PyTorch default Conv padding should be a single integer value + // or a list of values to match the conv dimensions + // conv2d, the number of padding values should be 1 or 2 + // conv3d, the number of padding values should be 1 or 3 + // the padding value will be padded into both side of Conv input (D, H, W) + params.padding = expand_param_if_needed(padding_, "padding", dim); + params.dilation = expand_param_if_needed(dilation_, "dilation", dim); + params.transposed = transposed_; + params.output_padding = + expand_param_if_needed(output_padding_, "output_padding", dim); + params.groups = groups_; + } + check_shape_forward(input, weight, bias, params, true); + + Tensor output; + if (transposed_) { + // create output and propagate memory format + if (!output_r.defined()) { + auto dst_tz = deconv_dst_size( + input.sizes(), + weight.sizes(), + params.padding, + params.stride, + params.dilation, + params.output_padding, + params.groups); + output = at::empty(dst_tz, input.options(), mfmt); + } + + onednn::deconvolution( + output, + input, + weight, + bias, + params.stride, + params.padding, + params.output_padding, + params.dilation, + params.groups, + attr); + } else { + // oneDNN supports padding the two sides of src with different values + // the padding order should be front_top_left and back_bottom_right + auto padding_front_top_left = params.padding; + auto padding_back_bottom_right = params.padding; + + // PyTorch constant_pad_nd: + // can 
pad different value to the two sides of Conv input (W, H, D) + // (padding_left, padding_right, + // padding_top, padding_bottom, + // padding_front, padding_back) + if (pad_nd.vec().size() > 0) { + for (int i = 0; i < dim; ++i) { + padding_front_top_left[i] += pad_nd[2 * dim - 2 * i - 2]; // 4, 2, 0 + padding_back_bottom_right[i] += pad_nd[2 * dim - 2 * i - 1]; // 5, 3, 1 + } + } + + // create output and propagate memory format + if (! output_r.defined()) { + auto dst_tz = conv_dst_size( + input.ndimension(), + input.sizes(), + weight.sizes(), + padding_front_top_left, + padding_back_bottom_right, + params.stride, + params.dilation); + output = at::empty(dst_tz, input.options(), mfmt); + } + onednn::convolution( + output, + input, + weight, + bias, + padding_front_top_left, + padding_back_bottom_right, + params.stride, + params.dilation, + params.groups, + attr); + } + + if (ndim == 3) { + output = view3d(output); + } + if (output_r.defined() && !output_r.is_same(output)) { + output_r.copy_(output); + } else { + output_r = output; + } + return output_r; +} + +Tensor _convolution( + const Tensor& input_r, + const Tensor& weight_r, + const Tensor& bias_r, + IntArrayRef stride_, + IntArrayRef padding_, + IntArrayRef dilation_, + bool transposed_, + IntArrayRef output_padding_, + int64_t groups_, + Attr attr) { + Tensor output_r; + return _convolution_out( + output_r, + input_r, + weight_r, + bias_r, + stride_, + padding_, + dilation_, + transposed_, + output_padding_, + groups_, + attr); +} + +Tensor convolution_overrideable( + const Tensor& input_r, + const Tensor& weight_r, + const c10::optional<at::Tensor>& bias_r_opt, + IntArrayRef stride_, + IntArrayRef padding_, + IntArrayRef dilation_, + bool transposed_, + IntArrayRef output_padding_, + int64_t groups_) { + c10::MaybeOwned<Tensor> bias_r_maybe_owned = + at::borrow_from_optional_tensor(bias_r_opt); + const Tensor& bias_r = *bias_r_maybe_owned; + return _convolution( + input_r, + weight_r, + bias_r, + stride_, + padding_, + dilation_, + transposed_, + output_padding_, + groups_, + Attr()); +} + +std::tuple<Tensor, Tensor, Tensor> convolution_backward_overrideable( + const Tensor& grad_output, + const Tensor& input, + const Tensor& weight, + IntArrayRef stride, + IntArrayRef padding, + IntArrayRef dilation, + bool transposed, + IntArrayRef output_padding, + int64_t groups, + std::array<bool, 3> output_mask) { + auto ndim = input.ndimension(); + TORCH_CHECK( + 3 == ndim || 4 == ndim || 5 == ndim, + "convolution bwd only supports 3D, 4D, 5D tensor"); + TORCH_CHECK( + grad_output.scalar_type() == ScalarType::Float || + grad_output.scalar_type() == ScalarType::BFloat16 || + grad_output.scalar_type() == ScalarType::Double || + grad_output.scalar_type() == ScalarType::Half, + "so far only support float, bfloat16, half and double convolution backward in XPU backend, your data type is ", + grad_output.scalar_type()); + + bool is_channels_last_suggested = use_channels_last_for_conv(input, weight, transposed); + + Tensor grad_output_, input_, weight_; + IntArrayRef stride_, padding_, dilation_, output_padding_; + bool transposed_; + int64_t groups_; + ConvParams params; + if (3 == ndim) { + grad_output_ = view4d(grad_output); + input_ = view4d(input); + weight_ = view4d(weight); + params.stride = stride.vec(); + params.padding = padding.vec(); + params.dilation = dilation.vec(); + params.transposed = transposed; + params.output_padding = output_padding.vec(); + params.groups = groups; + params.view1d_as_2d(); + stride_ = params.stride; + padding_ 
= params.padding; + dilation_ = params.dilation; + transposed_ = params.transposed; + output_padding_ = params.output_padding; + groups_ = params.groups; + } else { + grad_output_ = grad_output; + input_ = input; + weight_ = weight; + stride_ = stride; + padding_ = padding; + dilation_ = dilation; + transposed_ = transposed; + output_padding_ = output_padding; + groups_ = groups; + } + + // ensure the tensors are contiguous + auto mfmt = is_channels_last_suggested ? get_cl_tag_by_ndim(input_.ndimension()) + : at::MemoryFormat::Contiguous; + grad_output_ = grad_output_.contiguous(mfmt); + weight_ = weight_.contiguous(mfmt); + input_ = input_.contiguous(mfmt); + + auto opt = grad_output_.options(); + Tensor grad_input = at::empty(input_.sizes(), opt, mfmt); + Tensor grad_weight = at::empty(weight_.sizes(), opt, mfmt); + Tensor grad_bias; + if (output_mask[2]) + grad_bias = at::empty({grad_output_.size(1)}, opt); + + if (output_mask[0]) { + if (input.numel() > 0) { + if (transposed_) { + onednn::deconvolution_backward_data( + grad_input, + grad_output_, + weight_, + stride_, + padding_, + dilation_, + groups_, + output_mask[2]); + } else { + onednn::convolution_backward_data( + grad_input, + grad_output_, + weight_, + padding_, + padding_, + stride_, + dilation_, + groups_, + output_mask[2]); + } + } + } + if (output_mask[1] || output_mask[2]) { + if (input.numel() > 0) { + if (transposed_) { + onednn::deconvolution_backward_weights( + grad_weight, + grad_bias, + grad_output_, + input_, + stride_, + padding_, + dilation_, + groups_); + } else { + onednn::convolution_backward_weights( + grad_weight, + grad_bias, + grad_output_, + input_, + weight_.sizes(), + padding_, + padding_, + stride_, + dilation_, + groups_); + } + } + } + + if (3 == ndim) { + if (output_mask[0]) + grad_input = view3d(grad_input); + grad_weight = view3d(grad_weight); + } + return std::tuple<Tensor, Tensor, Tensor>{grad_input, grad_weight, grad_bias}; +} + +TORCH_LIBRARY_IMPL(aten, XPU, m){ + m.impl("convolution_overrideable", TORCH_FN(convolution_overrideable)); + m.impl("convolution_backward_overrideable", TORCH_FN(convolution_backward_overrideable)); +} + +} // namespace xpu +} // namespace at::native diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/Conv.cpp b/aten/src/ATen/native/mkldnn/xpu/detail/Conv.cpp index 6e1cf0e222..87ddd0af34 100644 --- a/aten/src/ATen/native/mkldnn/xpu/detail/Conv.cpp +++ b/aten/src/ATen/native/mkldnn/xpu/detail/Conv.cpp @@ -15,7 +15,7 @@ namespace at::native::onednn { constexpr int src_batch_size_dim = 0; constexpr int weight_dst_channels_dim = 0; -static inline dnnl::memory::dims conv_dst_tz( +dnnl::memory::dims conv_dst_size( int64_t ndim, IntArrayRef src_size, IntArrayRef weight_size, diff --git a/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h index 54669d490f..0c219fc8c6 100644 --- a/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h +++ b/aten/src/ATen/native/mkldnn/xpu/detail/oneDNN.h @@ -89,4 +89,22 @@ TORCH_API sycl::event deconvolution_backward_weights( int64_t groups, const std::vector<sycl::event>& deps = {}); +dnnl::memory::dims conv_dst_size( + int64_t ndim, + IntArrayRef src_tz, + IntArrayRef wgh_tz, + IntArrayRef padding_front_top_left, + IntArrayRef padding_back_bottom_right, + IntArrayRef stride, + IntArrayRef dilation); + +dnnl::memory::dims deconv_dst_size( + IntArrayRef src_size, + IntArrayRef wgh_size, + IntArrayRef padding, + IntArrayRef stride, + IntArrayRef dilation, + IntArrayRef dst_padding, + int64_t groups); + } 
// namespace at::native::onednn diff --git a/test/xpu/test_conv.py b/test/xpu/test_conv.py new file mode 100644 index 0000000000..a0b1368c8e --- /dev/null +++ b/test/xpu/test_conv.py @@ -0,0 +1,1247 @@ +# Owner(s): ["module: intel"] + +import itertools +import math +import unittest +from itertools import product + +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +from torch.testing import make_tensor +from torch.testing._internal.common_cuda import tf32_is_not_fp32 +from torch.testing._internal.common_device_type import ( + dtypes, + instantiate_device_type_tests, +) +from torch.testing._internal.common_dtype import floating_types_and +from torch.testing._internal.common_nn import _test_module_empty_input, NNTestCase +from torch.testing._internal.common_utils import ( + dtype2prec_DONTUSE, + gradcheck, + gradgradcheck, + parametrize as parametrize_test, + run_tests, + set_default_dtype, + TEST_SCIPY, + TEST_WITH_ROCM, +) + +AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() +if TEST_SCIPY: + import scipy.ndimage + import scipy.signal + + +class TestConvolutionNNDeviceType(NNTestCase): + def run_conv_double_back_test( + self, + kern, + stride, + padding, + chan_in, + chan_out, + batch_size, + inp_size, + dilation, + no_weight, + groups=1, + use_xpu=False, + use_bias=True, + dtype=torch.double, + ): + device = torch.device("xpu" if use_xpu else "cpu") + x = torch.randn( + batch_size, + chan_in, + inp_size, + inp_size, + device=device, + dtype=dtype, + requires_grad=True, + ) + weight = torch.randn( + chan_out, + chan_in // groups, + kern, + kern, + device=device, + dtype=dtype, + requires_grad=not no_weight, + ) + if use_bias: + bias = torch.randn(chan_out, device=device, dtype=dtype, requires_grad=True) + else: + bias = None + + def func(*inputs): + if use_bias: + lx, lweight, lbias = inputs + else: + lx, lweight = inputs + lbias = None + out = F.conv2d(lx, lweight, lbias, stride, padding, dilation, groups) + return out + + if use_bias: + inputs = x, weight, bias + else: + inputs = x, weight + + dummy_out = func(*inputs) + grad_y = torch.randn_like( + dummy_out, device=device, dtype=dtype, requires_grad=True + ) + + if dtype == torch.float: + (g,) = torch.autograd.grad(dummy_out.sum(), x, create_graph=True) + return g.requires_grad + + return gradgradcheck(func, inputs, (grad_y,)) + + @dtypes(*floating_types_and(torch.half, torch.bfloat16)) + def test_Conv2d_large_workspace(self, device, dtype): + sizes = [ + (1, 256, 109, 175), + (1, 256, 80, 128), + (1, 256, 120, 192), + ] + + def run_test(benchmark): + conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to(device, dtype) + for size in sizes: + x = torch.randn(size, device=device, dtype=dtype) + out = conv(x.detach().clone().requires_grad_()) + out.backward(torch.ones_like(out)) + + run_test(benchmark=False) + run_test(benchmark=True) + + @dtypes(torch.half, torch.float) + def test_ConvTranspose2d_large_output_padding(self, device, dtype): + net1 = torch.nn.ConvTranspose2d( + 128, 64, kernel_size=3, stride=2, padding=1, output_padding=1 + ).to(device=device, dtype=dtype) + net2 = torch.nn.ConvTranspose2d( + 64, 32, kernel_size=3, stride=2, padding=1, output_padding=1 + ).to(device=device, dtype=dtype) + net3 = torch.nn.ConvTranspose2d( + 32, 3, kernel_size=3, stride=2, padding=1, output_padding=1 + ).to(device=device, dtype=dtype) + x = torch.rand(1, 128, 6, 6, device=device, dtype=dtype, requires_grad=True) + x = net1(x) + x = net2(x) + x = net3(x) + 
x.backward(torch.randn_like(x)) + + @dtypes(torch.float, torch.double, torch.half) + def test_Conv2d_depthwise_naive_groups(self, device, dtype): + if dtype == torch.half and "xpu" in device: + self.skipTest( + "The accuracy issue of dtype fp16 would be fixed in oneDNN v3.4" + ) + for depth_multiplier in [1, 2]: + m = nn.Conv2d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to( + device, dtype + ) + i = ( + torch.randn(2, 2, 6, 6, device=device, dtype=dtype) + .div_(2) + .requires_grad_() + ) + output = m(i) + grad_output = ( + torch.randn(2, 2 * depth_multiplier, 4, 4, device=device, dtype=dtype) + / 2 + ) + output.backward(grad_output) + + offset = 1 * depth_multiplier + + m1 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype) + m1.weight.data = m.weight.data[:offset].clone() + m1.bias.data = m.bias.data[:offset].clone() + i1 = i.detach()[:, :1].clone().requires_grad_() + output1 = m1(i1) + output1.backward(grad_output[:, :offset].contiguous()) + + m2 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype) + m2.weight.data.copy_(m.weight.data[offset:]) + m2.bias.data.copy_(m.bias.data[offset:]) + i2 = i.detach()[:, 1:].clone().requires_grad_() + output2 = m2(i2) + output2.backward(grad_output[:, offset:].contiguous()) + + self.assertEqual( + output, + torch.cat([output1, output2], 1), + atol=dtype2prec_DONTUSE[dtype], + rtol=0, + ) + self.assertEqual( + i.grad.data, + torch.cat([i1.grad.data, i2.grad.data], 1), + atol=dtype2prec_DONTUSE[dtype], + rtol=0, + ) + self.assertEqual( + m.bias.grad.data, + torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0), + atol=dtype2prec_DONTUSE[dtype], + rtol=0, + ) + self.assertEqual( + m.weight.grad.data, + torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0), + atol=dtype2prec_DONTUSE[dtype], + rtol=0, + ) + + @dtypes(torch.float, torch.double, torch.half) + def test_Conv3d_depthwise_naive_groups(self, device, dtype): + if dtype == torch.half and "xpu" in device: + self.skipTest( + "The accuracy issue of dtype fp16 would be fixed in oneDNN v3.4" + ) + for depth_multiplier in [1, 2]: + m = nn.Conv3d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to( + device, dtype + ) + i = ( + torch.randn(2, 2, 6, 6, 6, device=device, dtype=dtype) + .div_(2) + .requires_grad_() + ) + output = m(i) + grad_output = ( + torch.randn( + 2, 2 * depth_multiplier, 4, 4, 4, device=device, dtype=dtype + ) + / 2 + ) + output.backward(grad_output) + + offset = 1 * depth_multiplier + + m1 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype) + m1.weight.data = m.weight.data[:offset].clone() + m1.bias.data = m.bias.data[:offset].clone() + i1 = i.detach()[:, :1].clone().requires_grad_() + output1 = m1(i1) + output1.backward(grad_output[:, :offset].contiguous()) + + m2 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype) + m2.weight.data.copy_(m.weight.data[offset:]) + m2.bias.data.copy_(m.bias.data[offset:]) + i2 = i.detach()[:, 1:].clone().requires_grad_() + output2 = m2(i2) + output2.backward(grad_output[:, offset:].contiguous()) + atol, rtol = (3e-4, 3e-2) + + self.assertEqual( + output, torch.cat([output1, output2], 1), atol=atol, rtol=rtol + ) + self.assertEqual( + i.grad.data, + torch.cat([i1.grad.data, i2.grad.data], 1), + atol=dtype2prec_DONTUSE[dtype], + rtol=0, + ) + self.assertEqual( + m.bias.grad.data, + torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0), + atol=dtype2prec_DONTUSE[dtype], + rtol=0, + ) + self.assertEqual( + m.weight.grad.data, + torch.cat([m1.weight.grad.data, 
m2.weight.grad.data], 0), + atol=atol, + rtol=rtol, + ) + + @dtypes(torch.float, torch.double, torch.half) + def test_noncontig_conv_grad(self, device, dtype): + module = nn.Conv2d(3, 5, kernel_size=3, padding=1).to(device, dtype) + input = torch.randn( + 2, 3, 10, 10, dtype=dtype, device=device, requires_grad=True + ) + output = module(input) + + grad = torch.randn(2, 2, 5, 10, 10, dtype=dtype, device=device)[:, 1] + assert not grad.is_contiguous() + output.backward(grad, retain_graph=True) + self.assertIsNotNone(input.grad) + result = input.grad.data.clone() + input.grad.data.zero_() + + output.backward(grad.contiguous()) + self.assertEqual( + result, input.grad.data, atol=dtype2prec_DONTUSE[dtype], rtol=0 + ) + + @dtypes(torch.double) + def test_conv_double_backward(self, device, dtype): + with torch.backends.cudnn.flags(enabled=True, deterministic=True): + batch_size = 1 + for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 9, [1])]: + for stride, padding, chan_in, chan_out, dilation in product( + [1], [2], [2], [3], dilations + ): + no_weight = stride == 2 + result = self.run_conv_double_back_test( + kern, + stride, + padding, + chan_in, + chan_out, + batch_size, + inp_size, + dilation, + no_weight, + use_xpu=True, + dtype=dtype, + ) + self.assertTrue(result, "Conv double backward test failed") + + def test_conv_double_backward_no_bias(self): + kern, stride = 3, 2 + chan_in, chan_out = 2, 4 + batch_size, inp_size = 2, 5 + padding, dilation = 1, 1 + no_weight, use_bias = False, True + result = self.run_conv_double_back_test( + kern, + stride, + padding, + chan_in, + chan_out, + batch_size, + inp_size, + dilation, + no_weight, + use_bias=use_bias, + ) + self.assertTrue(result, "Conv double backward test failed") + + def test_conv_double_backward_groups(self): + kern, stride, padding = 3, 1, 2 + chan_in, chan_out = 2, 4 + batch_size, inp_size, dilation = 2, 6, 1 + no_weight = False + groups = 2 + result = self.run_conv_double_back_test( + kern, + stride, + padding, + chan_in * groups, + chan_out * groups, + batch_size, + inp_size, + dilation, + no_weight, + groups=groups, + ) + self.assertTrue(result, "Conv double backward test failed") + + def test_conv_double_backward_stride(self): + batch_size = 2 + for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1])]: + for stride, padding, chan_in, chan_out, dilation in product( + [2], [0, 1], [1], [2], dilations + ): + no_weight = False + self.run_conv_double_back_test( + kern, + stride, + padding, + chan_in, + chan_out, + batch_size, + inp_size, + dilation, + no_weight, + ) + + @dtypes(torch.float) + def test_conv1d_same_padding(self, device, dtype): + test_args = [ + range(50, 55), + [1, 2, 3, 8], + range(1, 4), + [1], + ] + for in_size, k_size, dilation, stride in itertools.product(*test_args): + x = torch.rand(1, 1, in_size, device=device, dtype=dtype) + y = torch.rand(1, 1, k_size, device=device, dtype=dtype) + z = F.conv1d(x, y, padding="same", dilation=dilation, stride=stride) + self.assertEqual(z.size(2), int(math.ceil(in_size / stride))) + + x = torch.rand(1, 1, 12, device=device, dtype=dtype) + y = torch.rand(1, 1, 3, device=device, dtype=dtype) + expect = F.conv1d(x, y, padding=1) + actual = F.conv1d(x, y, padding="same") + self.assertEqual(expect, actual) + + x = torch.rand(1, 1, 12, device=device, dtype=dtype) + y = torch.rand(1, 1, 4, device=device, dtype=dtype) + expect = F.conv1d(x, y, padding=3, dilation=2) + actual = F.conv1d(x, y, padding="same", dilation=2) + self.assertEqual(expect, actual) + + expect = F.conv1d(x, 
y, padding=5, dilation=3)[..., 1:] + actual = F.conv1d(x, y, padding="same", dilation=3) + self.assertEqual(expect, actual) + + @dtypes(torch.float) + def test_conv3d_same_padding(self, device, dtype): + rtol, atol = None, None + x = torch.rand(1, 1, 10, 11, 12, device=device, dtype=dtype) + y = torch.rand(1, 1, 1, 2, 5, device=device, dtype=dtype) + expect = F.conv3d(x, y, padding=(0, 1, 2))[..., :, 1:, :] + actual = F.conv3d(x, y, padding="same") + self.assertEqual(expect, actual, rtol=rtol, atol=atol) + + expect = F.conv3d(x, y, padding=(0, 1, 4), dilation=2) + actual = F.conv3d(x, y, padding="same", dilation=2) + self.assertEqual(expect, actual, rtol=rtol, atol=atol) + + y = torch.rand(1, 1, 4, 4, 4, device=device, dtype=dtype) + expect = F.conv3d(x, y, padding=5, dilation=3)[..., 1:, 1:, 1:] + actual = F.conv3d(x, y, padding="same", dilation=3) + self.assertEqual(expect, actual, rtol=rtol, atol=atol) + + @dtypes(torch.float) + def test_conv1d_valid_padding(self, device, dtype): + x = torch.rand(1, 1, 10, device=device, dtype=dtype) + y = torch.rand(1, 1, 4, device=device, dtype=dtype) + expect = F.conv1d(x, y) + actual = F.conv1d(x, y, padding="valid") + self.assertEqual(expect, actual) + + @dtypes(torch.float) + def test_conv2d_valid_padding(self, device, dtype): + x = torch.rand(1, 1, 1, 10, device=device, dtype=dtype) + y = torch.rand(1, 1, 1, 4, device=device, dtype=dtype) + expect = F.conv2d(x, y) + actual = F.conv2d(x, y, padding="valid") + self.assertEqual(expect, actual) + + @dtypes(torch.float) + def test_conv3d_valid_padding(self, device, dtype): + x = torch.rand(1, 1, 1, 1, 10, dtype=dtype, device=device) + y = torch.rand(1, 1, 1, 1, 4, dtype=dtype, device=device) + expect = F.conv3d(x, y) + actual = F.conv3d(x, y, padding="valid") + self.assertEqual(expect, actual) + + @dtypes(torch.float) + def test_conv1d_same_padding_backward(self, device, dtype): + x = torch.rand(1, 1, 12, dtype=dtype, device=device, requires_grad=True) + y = torch.rand(1, 1, 4, dtype=dtype, device=device, requires_grad=True) + + z = F.conv1d(x, y, padding=3, dilation=2) + z.sum().abs().backward() + gx_expect, gy_expect = x.grad, y.grad + x.grad, y.grad = None, None + + z = F.conv1d(x, y, padding="same", dilation=2) + z.sum().abs().backward() + self.assertEqual(gx_expect, x.grad) + self.assertEqual(gy_expect, y.grad) + x.grad, y.grad = None, None + + z = F.conv1d(x, y, padding=2)[..., 1:] + z.sum().abs().backward() + gx_expect, gy_expect = x.grad, y.grad + x.grad, y.grad = None, None + + z = F.conv1d(x, y, padding="same") + z.sum().abs().backward() + self.assertEqual(gx_expect, x.grad) + self.assertEqual(gy_expect, y.grad) + + @dtypes(torch.float) + def test_conv2d_same_padding_backward(self, device, dtype): + x = torch.rand(1, 1, 10, 11, device=device, dtype=dtype, requires_grad=True) + y = torch.rand(1, 1, 4, 5, device=device, dtype=dtype, requires_grad=True) + + z = F.conv2d(x, y, padding=(3, 4), dilation=2) + z.sum().abs().backward() + gx_expect, gy_expect = x.grad, y.grad + x.grad, y.grad = None, None + + z = F.conv2d(x, y, padding="same", dilation=2) + z.sum().abs().backward() + self.assertEqual(gx_expect, x.grad) + self.assertEqual(gy_expect, y.grad) + x.grad, y.grad = None, None + + y = torch.rand(1, 1, 4, 4, device=device, dtype=dtype, requires_grad=True) + z = F.conv2d(x, y, padding=2)[..., 1:, 1:] + z.sum().abs().backward() + gx_expect, gy_expect = x.grad, y.grad + x.grad, y.grad = None, None + + z = F.conv2d(x, y, padding="same") + z.sum().abs().backward() + self.assertEqual(gx_expect, 
x.grad) + self.assertEqual(gy_expect, y.grad) + + @dtypes(torch.double) + def test_conv3d_same_padding_backward(self, device, dtype): + x = torch.rand(1, 1, 1, 11, 12, dtype=dtype, device=device, requires_grad=True) + y = torch.rand(1, 1, 1, 2, 5, dtype=dtype, device=device, requires_grad=True) + z = F.conv3d(x, y, padding=(0, 1, 4), dilation=2) + z.sum().abs().backward() + gx_expect, gy_expect = x.grad, y.grad + x.grad, y.grad = None, None + + z = F.conv3d(x, y, padding="same", dilation=2) + z.sum().abs().backward() + self.assertEqual(gx_expect, x.grad) + self.assertEqual(gy_expect, y.grad) + x.grad, y.grad = None, None + gradcheck( + lambda x, y: F.conv3d(x, y, padding="same", dilation=2), + (x, y), + check_forward_ad=True, + nondet_tol=1e-5, + ) + gradgradcheck( + lambda x, y: F.conv3d(x, y, padding="same", dilation=2), + (x, y), + check_fwd_over_rev=True, + ) + + y = torch.rand(1, 1, 1, 4, 4, dtype=dtype, device=device, requires_grad=True) + z = F.conv3d(x, y, padding=2)[..., 1:, 1:] + z.sum().abs().backward() + gx_expect, gy_expect = x.grad, y.grad + x.grad, y.grad = None, None + + z = F.conv3d(x, y, padding="same") + z.sum().abs().backward() + self.assertEqual(gx_expect, x.grad) + self.assertEqual(gy_expect, y.grad) + gradcheck( + lambda x, y: F.conv3d(x, y, padding="same"), + (x, y), + check_forward_ad=True, + nondet_tol=1e-5, + ) + gradgradcheck( + lambda x, y: F.conv3d(x, y, padding="same"), + (x, y), + check_fwd_over_rev=True, + ) + + @dtypes(torch.float) + def test_conv1d_valid_padding_backward(self, device, dtype): + x = torch.rand(1, 1, 10, dtype=dtype, device=device, requires_grad=True) + y = torch.rand(1, 1, 4, dtype=dtype, device=device, requires_grad=True) + F.conv1d(x, y, padding=0).sum().abs().backward() + gx_expect, gy_expect = x.grad, y.grad + x.grad, y.grad = None, None + F.conv1d(x, y, padding="valid").sum().abs().backward() + gx_actual, gy_actual = x.grad, y.grad + self.assertEqual(gx_expect, gx_actual) + self.assertEqual(gy_expect, gy_actual) + + @unittest.skipIf(not TEST_SCIPY, "Scipy required for the test.") + @dtypes(torch.float) + @parametrize_test("mode", ("valid", "same")) + def test_conv1d_vs_scipy(self, device, dtype, mode): + t = make_tensor((1, 10), device=device, dtype=dtype) + feat_dim = t.shape[1] + weight_even = make_tensor((1, 1, 4), device=device, dtype=dtype) + weight_odd = make_tensor((1, 1, 5), device=device, dtype=dtype) + + def _test(t, weight, mode): + t_a = t.view(-1).cpu().numpy() + w_a = weight.view(-1).cpu().numpy() + expected = scipy.signal.convolve(t_a, w_a, mode=mode) + + kwargs = {"padding": mode} + if mode == "same": + p = weight.shape[2] // 2 + t = torch.nn.functional.pad(t, (p, p)) + kwargs.pop("padding") + + weight_flipped = torch.flip(weight, (2,)) + actual = torch.nn.functional.conv1d(t, weight_flipped, **kwargs).squeeze(0) + if mode == "same": + actual = actual[:feat_dim] + + self.assertEqual(actual, expected, atol=2e-5, rtol=2e-5) + + with set_default_dtype(torch.float): + _test(t, weight_even, mode) + _test(t, weight_odd, mode) + + @unittest.skipIf(not TEST_SCIPY, "Scipy required for the test.") + @dtypes(torch.float) + @parametrize_test("mode", ("valid", "same")) + def test_conv2d_vs_scipy(self, device, dtype, mode): + t = make_tensor((1, 5, 10), device=device, dtype=dtype) + weight_even = make_tensor((1, 1, 2, 4), device=device, dtype=dtype) + weight_odd = make_tensor((1, 1, 3, 5), device=device, dtype=dtype) + + def _test(t, weight, mode): + t_a = t.squeeze(0).cpu().numpy() + w_a = weight.squeeze(0).squeeze(0).cpu().numpy() 
+ expected = scipy.signal.convolve2d(t_a, w_a, mode=mode) + + kwargs = {"padding": mode} + if mode == "same": + left_right_pad = weight.shape[3] // 2 + top_bottom_pad = weight.shape[2] // 2 + p = (left_right_pad, left_right_pad, top_bottom_pad, top_bottom_pad) + t = torch.nn.functional.pad(t, p) + kwargs.pop("padding") + + weight_flipped = torch.flip(weight, (2, 3)) + actual = torch.nn.functional.conv2d(t, weight_flipped, **kwargs).squeeze(0) + if mode == "same": + actual = actual[:5, :10] + + self.assertEqual(actual, expected, rtol=2e-5, atol=5e-6) + + with set_default_dtype(torch.float): + _test(t, weight_even, mode) + _test(t, weight_odd, mode) + + @unittest.skipIf(not TEST_SCIPY, "Scipy required for the test.") + @dtypes(torch.float) + @parametrize_test("mode", ("valid", "same")) + def test_conv3d_vs_scipy(self, device, dtype, mode): + t = make_tensor((1, 5, 5, 10), device=device, dtype=dtype) + weight_even = make_tensor((1, 1, 2, 2, 4), device=device, dtype=dtype) + weight_odd = make_tensor((1, 1, 2, 3, 5), device=device, dtype=dtype) + + def _test(t, weight, mode): + t_a = t.squeeze(0).cpu().numpy() + w_a = weight.squeeze(0).squeeze(0).cpu().numpy() + expected = scipy.signal.convolve(t_a, w_a, mode=mode) + kwargs = {"padding": mode} + if mode == "same": + left_right_pad = weight.shape[4] // 2 + top_bottom_pad = weight.shape[3] // 2 + front_back_pad = weight.shape[2] // 2 + p = ( + left_right_pad, + left_right_pad, + top_bottom_pad, + top_bottom_pad, + front_back_pad, + front_back_pad, + ) + t = torch.nn.functional.pad(t, p) + kwargs.pop("padding") + weight_flipped = torch.flip(weight, (2, 3, 4)) + actual = torch.nn.functional.conv3d(t, weight_flipped, **kwargs).squeeze(0) + if mode == "same": + actual = actual[:5, :5, :10] + self.assertEqual(actual, expected, rtol=2e-5, atol=5e-6) + + with set_default_dtype(torch.float): + _test(t, weight_even, mode) + _test(t, weight_odd, mode) + + @dtypes(torch.float) + def test_conv2d_valid_padding_backward(self, device, dtype): + x = torch.rand(1, 1, 1, 10, device=device, dtype=dtype, requires_grad=True) + y = torch.rand(1, 1, 1, 4, device=device, dtype=dtype, requires_grad=True) + F.conv2d(x, y, padding=0).sum().abs().backward() + gx_expect, gy_expect = x.grad, y.grad + x.grad, y.grad = None, None + F.conv2d(x, y, padding="valid").sum().abs().backward() + gx_actual, gy_actual = x.grad, y.grad + self.assertEqual(gx_expect, gx_actual) + self.assertEqual(gy_expect, gy_actual) + + @dtypes(torch.double) + def test_conv3d_valid_padding_backward(self, device, dtype): + x = torch.rand(1, 1, 1, 1, 10, dtype=dtype, device=device, requires_grad=True) + y = torch.rand(1, 1, 1, 1, 4, dtype=dtype, device=device, requires_grad=True) + F.conv3d(x, y, padding=0).sum().abs().backward() + gx_expect, gy_expect = x.grad, y.grad + x.grad, y.grad = None, None + + F.conv3d(x, y, padding="valid").sum().abs().backward() + gx_actual, gy_actual = x.grad, y.grad + self.assertEqual(gx_expect, gx_actual) + self.assertEqual(gy_expect, gy_actual) + gradcheck( + lambda x, y: F.conv3d(x, y, padding="valid"), + (x, y), + check_forward_ad=True, + ) + gradgradcheck( + lambda x, y: F.conv3d(x, y, padding="valid"), + (x, y), + check_fwd_over_rev=True, + ) + + @parametrize_test("N", range(2, 4), name_fn=lambda N: f"ConvTranspose{N}d") + def test_conv_transpose_with_output_size_and_no_batch_dim(self, device, N): + inp = torch.randn((1, 15, 13) if N == 2 else (1, 15, 13, 13), device=device) + output_size = (1, 240, 200) if N == 2 else (1, 240, 200, 200) + ConvTransposeNd = getattr(nn, 
f"ConvTranspose{N}d") + m = ConvTransposeNd( + 1, 1, kernel_size=16, stride=16, padding=7, bias=False, device=device + ) + output = m(inp, output_size=output_size) + self.assertEqual(output.shape, output_size) + + @dtypes(torch.float) + def test_conv_empty_channel(self, device, dtype): + in_channels = 0 + mod = torch.nn.Conv1d(in_channels, 8, 2, stride=2, dtype=dtype).to(device) + inp = torch.randn(2, 0, 15, device=device, dtype=dtype) + _test_module_empty_input(self, mod, inp, check_size=False) + + with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"): + inp = torch.randn(2, 1, 0, device=device, dtype=dtype) + mod(inp) + + mod = torch.nn.Conv2d(in_channels, 33, 3, stride=2, dtype=dtype).to(device) + inp = torch.randn(2, 0, 50, 100, device=device, dtype=dtype) + _test_module_empty_input(self, mod, inp, check_size=False) + + with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"): + inp = torch.randn(2, 1, 40, 0, device=device, dtype=dtype) + mod(inp) + + mod = torch.nn.Conv3d(in_channels, 33, 3, stride=2, dtype=dtype).to(device) + inp = torch.randn(2, 0, 50, 20, 40, device=device, dtype=dtype) + _test_module_empty_input(self, mod, inp, check_size=False) + + with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"): + inp = torch.randn(2, 1, 50, 0, 40, device=device, dtype=dtype) + mod(inp) + + def test_group_conv_empty(self, device): + mod = torch.nn.Conv2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to( + device + ) + inp = torch.randn(0, 4, 4, 4, device=device) + _test_module_empty_input(self, mod, inp, check_size=False) + + def test_group_convTranspose_empty(self, device): + mod = torch.nn.ConvTranspose2d( + 4, 4, stride=2, kernel_size=3, padding=1, groups=4 + ).to(device) + inp = torch.randn(0, 4, 4, 4, device=device) + _test_module_empty_input(self, mod, inp, check_size=False) + + def test_convTranspose_empty(self, device): + mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1).to( + device + ) + inp = torch.randn(0, 4, 4, 4, device=device) + _test_module_empty_input(self, mod, inp, check_size=False) + + def test_conv_large_nosplit(self, device): + dtype = torch.half + conv1 = nn.Conv2d(2, 2, 8, 8).to(device).to(dtype) + input_large = torch.randn(1, 2, 1024, 1024 * 1024, dtype=dtype, device=device) + conv1(input_large) + conv2 = torch.nn.Conv2d(1, 1024, 1, 1).to(device).to(dtype) + input_large = torch.randn(1, 1, 2048, 1024, dtype=dtype, device=device) + conv2(input_large) + + def test_conv_noncontig_weights(self, device): + for dim in (1, 2, 3): + for grouped in (False, True): + nc = 3 + groups = 3 if grouped else 1 + w = torch.randn([3] * dim, device=device) + w = w.expand([nc, int(nc / groups)] + list(w.shape)) + w = w.detach().requires_grad_() + x = torch.randn( + [1, nc] + ([5] * dim), device=device, requires_grad=True + ) + y = getattr(F, f"conv{dim}d")(x, w, groups=groups) + y.sum().backward() + y = getattr(F, f"conv_transpose{dim}d")(x, w, groups=groups) + y.sum().backward() + + def test_conv_noncontig_weights_and_bias(self, device): + for bias in [True, False]: + conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=bias).to( + device, torch.float + ) + input_nc = torch.randn( + (1, 3, 224, 224, 2), device=device, dtype=torch.float + )[:, :, :, :, 1] + input_c = input_nc.contiguous() + weight_nc = torch.randn((64, 3, 7, 7, 2), device=device, dtype=torch.float)[ + :, :, :, :, 1 + ] + conv1.weight = nn.Parameter(weight_nc) + weight_c = conv1.weight.contiguous() + if bias: + bias_nc = torch.randn((64, 
2), device=device, dtype=torch.float)[:, 1] + conv1.bias = nn.Parameter(bias_nc) + bias_c = conv1.bias.contiguous() + out1 = conv1(input_nc) + conv1.weight = nn.Parameter(weight_c) + if bias: + conv1.bias = nn.Parameter(bias_c) + out2 = conv1(input_c) + self.assertEqual(out1, out2) + + def test_conv_transposed_large(self, device): + dtype = torch.half if self.device_type == "cuda" else torch.float + conv = nn.ConvTranspose2d(1, 1, 1, 1, bias=False).to(device).to(dtype) + input_large = torch.randn(4096, 1, 512, 1024, dtype=dtype, device=device) + ret = conv(input_large) + maxdiff0 = ( + (ret.narrow(0, 0, 1024) - conv(input_large.narrow(0, 0, 1024))) + .abs_() + .max() + .item() + ) + maxdiff1 = ( + (ret.narrow(0, 1024, 1024) - conv(input_large.narrow(0, 1024, 1024))) + .abs_() + .max() + .item() + ) + maxdiff2 = ( + (ret.narrow(0, 2048, 1024) - conv(input_large.narrow(0, 2048, 1024))) + .abs_() + .max() + .item() + ) + maxdiff3 = ( + (ret.narrow(0, 3072, 1024) - conv(input_large.narrow(0, 3072, 1024))) + .abs_() + .max() + .item() + ) + self.assertEqual(maxdiff0, 0) + self.assertEqual(maxdiff1, 0) + self.assertEqual(maxdiff2, 0) + self.assertEqual(maxdiff3, 0) + + def test_conv_large(self, device): + dtype = torch.half if self.device_type == "cuda" else torch.float + conv = nn.Conv2d(2, 2, 8, 8, bias=False).to(device).to(dtype) + input_large = torch.randn(4097, 2, 512, 512, dtype=dtype, device=device) + ret = conv(input_large) + self.assertEqual(ret[:2048], conv(input_large[:2048])) + self.assertEqual(ret[2048:4096], conv(input_large[2048:4096])) + self.assertEqual(ret[4096:], conv(input_large[4096:])) + + conv.zero_grad() + ret.view(4097, -1).max(dim=1).values.sum().backward() + del ret + grad1 = conv.weight.grad.detach().clone() + conv.zero_grad() + conv(input_large[:2048]).view(2048, -1).max(dim=1).values.sum().backward() + conv(input_large[2048:4096]).view(2048, -1).max(dim=1).values.sum().backward() + conv(input_large[4096:]).view(1, -1).max(dim=1).values.sum().backward() + grad2 = conv.weight.grad.detach().clone() + scale = 1 / grad2.abs().mean() + grad1 = grad1 * scale + grad2 = grad2 * scale + self.assertEqual(grad1, grad2, atol=5e-2, rtol=5e-3) + + def test_Conv2d_size_1_kernel(self, device): + x_cpu = torch.randn(2, 3, 5, 5) + conv_cpu = torch.nn.Conv2d(3, 3, kernel_size=1) + y_cpu = conv_cpu(x_cpu) + y = torch.rand_like(y_cpu) + y_cpu.backward(y) + + with cudnn.flags(enabled=False): + conv_cuda = torch.nn.Conv2d(3, 3, kernel_size=1).to(device) + conv_cuda.bias.data.copy_(conv_cpu.bias.data) + conv_cuda.weight.data.copy_(conv_cpu.weight.data) + y_cuda = conv_cuda(x_cpu.to(device)) + y_cuda.backward(y.to(device)) + + self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False) + self.assertEqual( + conv_cpu.bias.grad.data, + conv_cuda.bias.grad.data, + atol=1e-5, + rtol=0, + exact_device=False, + ) + self.assertEqual( + conv_cpu.weight.grad.data, + conv_cuda.weight.grad.data, + atol=1e-5, + rtol=0, + exact_device=False, + ) + + def test_ConvTranspose2d_size_1_kernel(self, device): + x_cpu = torch.randn(2, 3, 5, 5) + conv_cpu = torch.nn.ConvTranspose2d(3, 3, kernel_size=1) + y_cpu = conv_cpu(x_cpu) + y = torch.rand_like(y_cpu) + y_cpu.backward(y) + conv_cuda = torch.nn.ConvTranspose2d(3, 3, kernel_size=1).to(device) + conv_cuda.bias.data.copy_(conv_cpu.bias.data) + conv_cuda.weight.data.copy_(conv_cpu.weight.data) + y_cuda = conv_cuda(x_cpu.to(device)) + y_cuda.backward(y.to(device)) + + self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False) + 
self.assertEqual( + conv_cpu.bias.grad.data, + conv_cuda.bias.grad.data, + atol=1e-5, + rtol=0, + exact_device=False, + ) + self.assertEqual( + conv_cpu.weight.grad.data, + conv_cuda.weight.grad.data, + atol=1e-5, + rtol=0, + exact_device=False, + ) + + def test_ConvTranspose3d_size_1_kernel(self, device): + with set_default_dtype(torch.double): + x_cpu = torch.randn(2, 3, 3, 5, 5) + conv_cpu = torch.nn.ConvTranspose3d(3, 3, kernel_size=1) + y_cpu = conv_cpu(x_cpu) + y = torch.rand_like(y_cpu) + y_cpu.backward(y) + conv_cuda = torch.nn.ConvTranspose3d(3, 3, kernel_size=1).to(device) + conv_cuda.bias.data.copy_(conv_cpu.bias.data) + conv_cuda.weight.data.copy_(conv_cpu.weight.data) + y_cuda = conv_cuda(x_cpu.to(device)) + y_cuda.backward(y.to(device)) + + self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False) + self.assertEqual( + conv_cpu.bias.grad.data, + conv_cuda.bias.grad.data, + atol=1e-5, + rtol=0, + exact_device=False, + ) + self.assertEqual( + conv_cpu.weight.grad.data, + conv_cuda.weight.grad.data, + atol=1e-5, + rtol=0, + exact_device=False, + ) + + @dtypes(torch.float) + def test_Conv2d_naive_groups(self, device, dtype): + m = nn.Conv2d(4, 4, kernel_size=3, groups=2).to(device, dtype) + i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True) + output = m(i) + grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype) + output.backward(grad_output) + + m1 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype) + m1.weight.data.copy_(m.weight.data[:2]) + m1.bias.data.copy_(m.bias.data[:2]) + i1 = i.data[:, :2].contiguous().requires_grad_(True) + output1 = m1(i1) + output1.backward(grad_output[:, :2].contiguous()) + + m2 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype) + m2.weight.data.copy_(m.weight.data[2:]) + m2.bias.data.copy_(m.bias.data[2:]) + i2 = i.data[:, 2:].contiguous().requires_grad_(True) + output2 = m2(i2) + output2.backward(grad_output[:, 2:].contiguous()) + + self.assertEqual(output, torch.cat([output1, output2], 1)) + self.assertEqual( + i.grad.data, + torch.cat([i1.grad.data, i2.grad.data], 1), + atol=dtype2prec_DONTUSE[dtype], + rtol=0, + ) + self.assertEqual( + m.bias.grad.data, + torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0), + atol=dtype2prec_DONTUSE[dtype], + rtol=0, + ) + self.assertEqual( + m.weight.grad.data, + torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0), + atol=dtype2prec_DONTUSE[dtype], + rtol=0, + ) + + @dtypes(torch.double) + def test_Conv2d_backward_depthwise(self, device, dtype): + x = torch.randn(2, 2, 4, 20, device=device, dtype=dtype, requires_grad=True) + weight = torch.randn(2, 1, 3, 5, device=device, dtype=dtype, requires_grad=True) + + def conv2d_depthwise(x, weight): + return torch.nn.functional.conv2d( + x, weight, bias=None, stride=(1, 10), groups=2 + ) + + torch.autograd.gradcheck(conv2d_depthwise, (x, weight)) + + @dtypes(torch.half, torch.float) + def test_conv_cudnn_nhwc(self, device, dtype): + def helper(n, c, h, w, out_channels, kernel_size, groups): + input = torch.randint(-3, 3, (n, c, h, w), dtype=dtype, device=device).to( + memory_format=torch.channels_last + ) + input.requires_grad_() + conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups).to( + device=device, dtype=dtype, memory_format=torch.channels_last + ) + for p in conv.parameters(): + p.data = torch.randint_like(p, -3, 3) + + ref_input = input.detach().clone().contiguous().double().requires_grad_() + ref_conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups) + 
ref_conv.load_state_dict(conv.state_dict()) + ref_conv = ref_conv.to( + device=device, dtype=torch.double, memory_format=torch.contiguous_format + ) + + out = conv(input) + ref_out = ref_conv(ref_input) + + grad = torch.randint_like(out, -3, 3) + ref_grad = grad.detach().clone().double().contiguous() + + out.backward(grad) + ref_out.backward(ref_grad) + + self.assertTrue(out.is_contiguous(memory_format=torch.channels_last)) + self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last)) + self.assertTrue( + conv.weight.grad.is_contiguous(memory_format=torch.channels_last) + ) + + self.assertTrue(ref_out.is_contiguous()) + self.assertTrue(ref_input.grad.is_contiguous()) + self.assertTrue(ref_conv.weight.grad.is_contiguous()) + + self.assertEqual(out, ref_out, exact_dtype=False) + self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False) + self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False) + self.assertEqual(input.grad, ref_input.grad, exact_dtype=False) + + helper(2, 8, 4, 4, out_channels=4, kernel_size=3, groups=1) + helper(2, 8, 4, 4, out_channels=8, kernel_size=3, groups=8) + helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=1) + helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=16) + + @dtypes(torch.half, torch.float) + def test_conv_cudnn_ndhwc(self, device, dtype): + def helper(n, c, d, h, w, out_channels, kernel_size, groups): + input = torch.randint( + -2, 2, (n, c, d, h, w), dtype=dtype, device=device + ).to(memory_format=torch.channels_last_3d) + input.requires_grad_() + conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups).to( + device=device, dtype=dtype, memory_format=torch.channels_last_3d + ) + for p in conv.parameters(): + p.data = torch.randint_like(p, -2, 2) + + ref_input = input.detach().clone().contiguous().double().requires_grad_() + ref_conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups) + ref_conv.load_state_dict(conv.state_dict()) + ref_conv = ref_conv.to( + device=device, dtype=torch.double, memory_format=torch.contiguous_format + ) + + out = conv(input) + ref_out = ref_conv(ref_input) + + grad = torch.randint_like(out, -2, 2) + ref_grad = grad.detach().clone().double().contiguous() + + out.backward(grad) + ref_out.backward(ref_grad) + + self.assertTrue(out.is_contiguous(memory_format=torch.channels_last_3d)) + self.assertTrue( + input.grad.is_contiguous(memory_format=torch.channels_last_3d) + ) + self.assertTrue( + conv.weight.grad.is_contiguous(memory_format=torch.channels_last_3d) + ) + + self.assertTrue(ref_out.is_contiguous()) + self.assertTrue(ref_input.grad.is_contiguous()) + self.assertTrue(ref_conv.weight.grad.is_contiguous()) + + self.assertEqual(out, ref_out, exact_dtype=False) + self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False) + self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False) + self.assertEqual(input.grad, ref_input.grad, exact_dtype=False) + + helper(2, 8, 4, 4, 4, out_channels=4, kernel_size=3, groups=1) + helper(2, 8, 4, 4, 4, out_channels=8, kernel_size=3, groups=8) + helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=1) + helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=16) + + def _run_conv( + self, + layer, + device, + inp, + grad, + ref_conv, + ref_input, + ref_out, + input_format, + weight_format, + grad_format, + output_format, + ): + conv = ( + layer(inp.size(1), grad.size(1), ref_conv.weight.size(2)).float().to(device) + ) + conv.load_state_dict(ref_conv.state_dict()) + 
weight_data = ( + conv.weight.detach().clone().contiguous(memory_format=weight_format) + ) + conv.weight.data = weight_data.resize_( + weight_data.size(), memory_format=weight_format + ) + input = inp.clone().contiguous(memory_format=input_format) + input.resize_(input.size(), memory_format=input_format) + input = input.requires_grad_() + grad = grad.contiguous(memory_format=grad_format) + grad.resize_(grad.size(), memory_format=grad_format) + out = conv(input) + out.backward(grad) + self.assertTrue(out.is_contiguous(memory_format=output_format)) + self.assertEqual(out, ref_out) + self.assertEqual(conv.weight.grad, ref_conv.weight.grad) + self.assertEqual(conv.bias.grad, ref_conv.bias.grad) + self.assertEqual(input.grad, ref_input.grad) + + def _test_conv_cudnn_nhwc_nchw(self, layer, n, c, h, w, k, filter_size, device): + data = torch.randint(1, 10, (n, c, h, w), dtype=torch.float32, device=device) + ref_input = data.clone().contiguous().requires_grad_(True) + ref_conv = layer(c, k, filter_size).float().to(device) + ref_out = ref_conv(ref_input) + grad = torch.randint(1, 10, ref_out.size(), dtype=torch.float32, device=device) + ref_out.backward(grad) + + for w_f in [torch.contiguous_format, torch.channels_last]: + for g_f in [torch.contiguous_format, torch.channels_last]: + for input_format in [torch.contiguous_format, torch.channels_last]: + output_format = torch.contiguous_format + if input_format == torch.channels_last: + output_format = torch.channels_last + if w_f == torch.channels_last: + output_format = torch.channels_last + self._run_conv( + layer, + device, + data, + grad, + ref_conv, + ref_input, + ref_out, + input_format, + w_f, + g_f, + output_format, + ) + + @dtypes(torch.float, torch.double) + def test_conv_cudnn_nhwc_support(self, device, dtype): + input = torch.randn( + (1, 16, 1, 1), dtype=dtype, device=device, requires_grad=True + ) + weight = torch.randn( + (8, 16, 3, 3), dtype=dtype, device=device, requires_grad=True + ) + weight = weight.to(memory_format=torch.channels_last) + o = torch.conv2d(input, weight, None, (2, 1), (1, 1), (1, 1), 1) + self.assertTrue(o.is_contiguous(memory_format=torch.channels_last)) + o.sum().backward() + + @dtypes(torch.float) + def test_conv2d_no_grad(self, device, dtype): + for batch in [1, 2, 3]: + for groups in [1, 2, 4]: + input = torch.rand(batch, groups, 8, 8, dtype=dtype, device=device) + m = nn.Conv2d( + groups, + 8, + kernel_size=(3, 3), + groups=groups, + dtype=dtype, + device=device, + ) + with torch.no_grad(): + output_ng = m(input) + output = m(input) + self.assertEqual(output, output_ng, rtol=1e-2, atol=1e-5) + + def test_conv_double_backward_strided_with_3D_input_and_weight(self, device): + input = torch.randn(2, 3, 6, device=device) + weight = torch.randn(3, 3, 3, device=device) + bias = torch.randn(3, device=device) + stride = (2,) + padding = (1,) + dilation = (1,) + transposed = False + output_padding = (0,) + groups = 1 + output = torch.ops.aten.convolution( + input, + weight, + bias, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + ) + + ggI = torch.randn(input.shape, device=device) + ggW = torch.randn(weight.shape, device=device) + ggB = torch.randn(bias.shape, device=device) + gO = torch.randn(output.shape, device=device) + output_mask = [True, True, True] + ( + grad_grad_output, + grad_input, + grad_weight, + ) = torch.ops.aten._convolution_double_backward( + ggI, + ggW, + ggB, + gO, + weight, + input, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + 
output_mask, + ) + + self.assertEqual(grad_grad_output.shape, gO.shape) + self.assertEqual(grad_input.shape, input.shape) + self.assertEqual(grad_weight.shape, weight.shape) + + +instantiate_device_type_tests(TestConvolutionNNDeviceType, globals(), only_for="xpu") + +if __name__ == "__main__": + run_tests()
2.41.0
5037c389c7731820fd04b5217260997888acb2b
Mon, 22 Apr 2024 11:30:15 +0200
[PATCH 0455/1000] [Inductor cutlass backend] Fix tests: skipIfRocm always skips when used as a class annotation (#123930)
I previously added @skipIfRocm as a class annotation within test/inductor/test_cutlass_backend.py - it turns out this annotation always skips if applied at class level, so I need to skip the Cutlass tests on ROCm differently. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123930 Approved by: https://github.com/jansel ghstack dependencies: #121497
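As an illustrative aside, here is a minimal Python sketch of the module-level gating the diff below switches to; the names (HAS_CUDA, torch.version.hip) are taken from the hunks, and anything beyond them is an assumption, not part of the patch:

import torch
from torch.testing._internal.inductor_utils import HAS_CUDA

# torch.version.hip is None on CUDA builds and a version string on ROCm builds,
# so this clears the capability flag up front on ROCm. Tests gated on the flag
# (e.g. via unittest.skipIf - an assumption about the surrounding file) then
# skip themselves, instead of decorating the whole class with @skipIfRocm,
# which the commit message notes always skips when applied at class level.
HAS_CUDA = HAS_CUDA and not torch.version.hip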
diff --git a/test/inductor/test_cutlass_backend.py b/test/inductor/test_cutlass_backend.py index d43d71afd4..5dc93b7325 100644 --- a/test/inductor/test_cutlass_backend.py +++ b/test/inductor/test_cutlass_backend.py @@ -14,7 +14,6 @@ from torch.testing._internal.common_cuda import SM75OrLater, SM80OrLater, SM90Or from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, - skipIfRocm, ) from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA @@ -27,6 +26,11 @@ _CUTLASS_DIR = os.path.join(os.path.dirname(__file__), "../../third_party/cutlas log = logging.getLogger(__name__) +HAS_CUDA = HAS_CUDA and not torch.version.hip +SM75OrLater = SM75OrLater and not torch.version.hip +SM80OrLater = SM80OrLater and not torch.version.hip +SM90OrLater = SM90OrLater and not torch.version.hip + def _get_path_without_sccache() -> str: """ @@ -37,7 +41,6 @@ def _get_path_without_sccache() -> str: return ":".join(path_envs) -@skipIfRocm @instantiate_parametrized_tests class TestCutlassBackend(TestCase): def setUp(self):
2.41.0
77ab8a4c0d483bc0166a7eaa482a57a5c42c381
Mon, 22 Apr 2024 14:44:44 +0000
[PATCH 0456/1000] Revert "[Environment Variable][1/N] Use thread-safe env variable API in c10 (#119449)"
This reverts commit a56e057814565b2ae33b2106b4d0136179aa18f8. Reverted https://github.com/pytorch/pytorch/pull/119449 on behalf of https://github.com/jeanschmidt due to Broken internal signals, @albanD please help get this sorted :) ([comment](https://github.com/pytorch/pytorch/pull/119449#issuecomment-2069716129))
diff --git a/c10/core/impl/alloc_cpu.cpp b/c10/core/impl/alloc_cpu.cpp index def4c3a3a9..9b7ae22f9f 100644 --- a/c10/core/impl/alloc_cpu.cpp +++ b/c10/core/impl/alloc_cpu.cpp @@ -3,7 +3,6 @@ #include <c10/core/alignment.h> #include <c10/util/Flags.h> #include <c10/util/Logging.h> -#include <c10/util/env.h> #include <c10/util/irange.h> #include <c10/util/numa.h> @@ -54,8 +53,8 @@ void memset_junk(void* data, size_t num) { #if defined(__linux__) && !defined(__ANDROID__) static inline bool is_thp_alloc_enabled() { static bool value = [&] { - auto env = c10::utils::check_env("THP_MEM_ALLOC_ENABLE"); - return env.has_value() ? env.value() : 0; + const char* ptr = std::getenv("THP_MEM_ALLOC_ENABLE"); + return ptr != nullptr ? std::atoi(ptr) : 0; }(); return value; } diff --git a/c10/cuda/CUDAAllocatorConfig.cpp b/c10/cuda/CUDAAllocatorConfig.cpp index ca38dfd6a4..1f81ed47b6 100644 --- a/c10/cuda/CUDAAllocatorConfig.cpp +++ b/c10/cuda/CUDAAllocatorConfig.cpp @@ -234,7 +234,7 @@ size_t CUDAAllocatorConfig::parseAllocatorConfig( return i; } -void CUDAAllocatorConfig::parseArgs(const std::optional<std::string>& env) { +void CUDAAllocatorConfig::parseArgs(const char* env) { // If empty, set the default values m_max_split_size = std::numeric_limits<size_t>::max(); m_roundup_power2_divisions.assign(kRoundUpPowerOfTwoIntervals, 0); @@ -242,16 +242,16 @@ void CUDAAllocatorConfig::parseArgs(const std::optional<std::string>& env) { bool used_cudaMallocAsync = false; bool used_native_specific_option = false; - if (!env.has_value()) { + if (env == nullptr) { return; } { std::lock_guard<std::mutex> lock(m_last_allocator_settings_mutex); - m_last_allocator_settings = env.value(); + m_last_allocator_settings = env; } std::vector<std::string> config; - lexArgs(env.value().c_str(), config); + lexArgs(env, config); for (size_t i = 0; i < config.size(); i++) { std::string_view config_item_view(config[i]); diff --git a/c10/cuda/CUDAAllocatorConfig.h b/c10/cuda/CUDAAllocatorConfig.h index db5c9e1c8f..3106fc1b46 100644 --- a/c10/cuda/CUDAAllocatorConfig.h +++ b/c10/cuda/CUDAAllocatorConfig.h @@ -2,7 +2,6 @@ #include <c10/cuda/CUDAMacros.h> #include <c10/util/Exception.h> -#include <c10/util/env.h> #include <atomic> #include <cstddef> @@ -73,13 +72,14 @@ class C10_CUDA_API CUDAAllocatorConfig { static CUDAAllocatorConfig& instance() { static CUDAAllocatorConfig* s_instance = ([]() { auto inst = new CUDAAllocatorConfig(); - inst->parseArgs(c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF")); + const char* env = getenv("PYTORCH_CUDA_ALLOC_CONF"); + inst->parseArgs(env); return inst; })(); return *s_instance; } - void parseArgs(const std::optional<std::string>& env); + void parseArgs(const char* env); private: CUDAAllocatorConfig(); diff --git a/c10/cuda/CUDACachingAllocator.cpp b/c10/cuda/CUDACachingAllocator.cpp index afac5272b6..c472e82ce2 100644 --- a/c10/cuda/CUDACachingAllocator.cpp +++ b/c10/cuda/CUDACachingAllocator.cpp @@ -8,7 +8,6 @@ #include <c10/util/CallOnce.h> #include <c10/util/ScopeExit.h> #include <c10/util/UniqueVoidPtr.h> -#include <c10/util/env.h> #include <c10/util/flat_hash_map.h> #include <c10/util/hash.h> #include <c10/util/irange.h> @@ -2832,7 +2831,7 @@ class DeviceCachingAllocator { // errors, since the caching allocator foils cuda-memcheck. 
bool forceUncachedAllocator() { static bool force_uncached = - c10::utils::has_env("PYTORCH_NO_CUDA_MEMORY_CACHING"); + getenv("PYTORCH_NO_CUDA_MEMORY_CACHING") != nullptr; return force_uncached; } @@ -3364,9 +3363,9 @@ struct BackendStaticInitializer { // version checks, to CUDAAllocatorConfig's runtime doublecheck. If this // works, maybe we should move all of CUDAAllocatorConfig here? CUDAAllocator* parseEnvForBackend() { - const auto val = c10::utils::get_env("PYTORCH_CUDA_ALLOC_CONF"); - if (val.has_value()) { - const std::string& config = val.value(); + const char* val = getenv("PYTORCH_CUDA_ALLOC_CONF"); + if (val != nullptr) { + const std::string config(val); std::regex exp("[\\s,]+"); std::sregex_token_iterator it(config.begin(), config.end(), exp, -1); diff --git a/c10/cuda/CUDADeviceAssertionHost.cpp b/c10/cuda/CUDADeviceAssertionHost.cpp index ec41e6230f..1d52af7812 100644 --- a/c10/cuda/CUDADeviceAssertionHost.cpp +++ b/c10/cuda/CUDADeviceAssertionHost.cpp @@ -3,7 +3,6 @@ #include <c10/cuda/CUDAFunctions.h> #include <c10/util/Backtrace.h> #include <c10/util/Exception.h> -#include <c10/util/env.h> #include <c10/util/irange.h> #include <cuda_runtime.h> @@ -81,8 +80,8 @@ bool dsa_check_if_all_devices_support_managed_memory() { } bool env_flag_set(const char* env_var_name) { - const auto env_flag = c10::utils::check_env(env_var_name); - return env_flag.has_value() && env_flag.value(); + const char* const env_string = std::getenv(env_var_name); + return (env_string == nullptr) ? false : std::strcmp(env_string, "0"); } /// Deleter for UVM/managed memory pointers diff --git a/c10/cuda/CUDAMiscFunctions.cpp b/c10/cuda/CUDAMiscFunctions.cpp index 9ef724813e..11ea775366 100644 --- a/c10/cuda/CUDAMiscFunctions.cpp +++ b/c10/cuda/CUDAMiscFunctions.cpp @@ -1,14 +1,12 @@ #include <c10/cuda/CUDAMiscFunctions.h> -#include <c10/util/env.h> +#include <cstdlib> namespace c10::cuda { -// NOLINTNEXTLINE(bugprone-exception-escape,-warnings-as-errors) const char* get_cuda_check_suffix() noexcept { - static auto device_blocking_flag = - c10::utils::check_env("CUDA_LAUNCH_BLOCKING"); + static char* device_blocking_flag = getenv("CUDA_LAUNCH_BLOCKING"); static bool blocking_enabled = - (device_blocking_flag.has_value() && device_blocking_flag.value()); + (device_blocking_flag && atoi(device_blocking_flag)); if (blocking_enabled) { return ""; } else { diff --git a/c10/test/util/DeadlockDetection_test.cpp b/c10/test/util/DeadlockDetection_test.cpp index 05ae154e22..35c4953f6d 100644 --- a/c10/test/util/DeadlockDetection_test.cpp +++ b/c10/test/util/DeadlockDetection_test.cpp @@ -1,8 +1,9 @@ #include <c10/util/DeadlockDetection.h> -#include <c10/util/env.h> #include <gtest/gtest.h> +#include <cstdlib> + using namespace ::testing; using namespace c10::impl; @@ -22,7 +23,7 @@ TEST(DeadlockDetection, basic) { #ifndef _WIN32 TEST(DeadlockDetection, disable) { - c10::utils::set_env("TORCH_DISABLE_DEADLOCK_DETECTION", "1"); + setenv("TORCH_DISABLE_DEADLOCK_DETECTION", "1", 1); DummyPythonGILHooks hooks; SetPythonGILHooks(&hooks); SetPythonGILHooks(&hooks); diff --git a/c10/util/DeadlockDetection.cpp b/c10/util/DeadlockDetection.cpp index 4b00d24534..320fa7873c 100644 --- a/c10/util/DeadlockDetection.cpp +++ b/c10/util/DeadlockDetection.cpp @@ -1,5 +1,6 @@ #include <c10/util/DeadlockDetection.h> -#include <c10/util/env.h> + +#include <cstdlib> namespace c10::impl { @@ -7,7 +8,7 @@ namespace { PythonGILHooks* python_gil_hooks = nullptr; bool disable_detection() { - return 
c10::utils::has_env("TORCH_DISABLE_DEADLOCK_DETECTION"); + return std::getenv("TORCH_DISABLE_DEADLOCK_DETECTION") != nullptr; } } // namespace diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp index 17459f69fa..e9c9e9c2f3 100644 --- a/c10/util/Logging.cpp +++ b/c10/util/Logging.cpp @@ -1,7 +1,6 @@ #include <c10/util/Backtrace.h> #include <c10/util/Flags.h> #include <c10/util/Logging.h> -#include <c10/util/env.h> #ifdef FBCODE_CAFFE2 #include <folly/synchronization/SanitizeThread.h> #endif @@ -11,6 +10,7 @@ #endif #include <algorithm> +#include <cstdlib> #include <iostream> // Common code that we use regardless of whether we use glog or not. @@ -94,8 +94,8 @@ using DDPUsageLoggerType = std::function<void(const DDPLoggingData&)>; namespace { bool IsAPIUsageDebugMode() { - auto val = c10::utils::get_env("PYTORCH_API_USAGE_STDERR"); - return val.has_value() && !val.value().empty(); // any non-empty value + const char* val = getenv("PYTORCH_API_USAGE_STDERR"); + return val && *val; // any non-empty value } void APIUsageDebug(const string& event) { @@ -438,10 +438,10 @@ namespace c10::detail { namespace { void setLogLevelFlagFromEnv() { - auto level_env = c10::utils::get_env("TORCH_CPP_LOG_LEVEL"); + const char* level_str = std::getenv("TORCH_CPP_LOG_LEVEL"); // Not set, fallback to the default level (i.e. WARNING). - std::string level{level_env.has_value() ? level_env.value() : ""}; + std::string level{level_str != nullptr ? level_str : ""}; if (level.empty()) { return; } diff --git a/c10/util/env.cpp b/c10/util/env.cpp deleted file mode 100644 index 865c6b9497..0000000000 --- a/c10/util/env.cpp +++ /dev/null @@ -1,108 +0,0 @@ -#include <c10/util/Exception.h> -#include <c10/util/env.h> -#include <fmt/format.h> -#include <cstdlib> -#include <shared_mutex> - -namespace c10::utils { - -static std::shared_mutex env_mutex; - -// Set an environment variable. -void set_env(const char* name, const char* value, bool overwrite) { - std::lock_guard lk(env_mutex); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif -#ifdef _MSC_VER - if (!overwrite) { - // NOLINTNEXTLINE(concurrency-mt-unsafe) - if (std::getenv(name) != nullptr) { - return; - } - } - auto full_env_variable = fmt::format("{}={}", name, value); - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto err = putenv(full_env_variable.c_str()); - TORCH_INTERNAL_ASSERT( - err == 0, - "putenv failed for environment \"", - name, - "\", the error is: ", - err); -#else - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto err = setenv(name, value, static_cast<int>(overwrite)); - TORCH_INTERNAL_ASSERT( - err == 0, - "setenv failed for environment \"", - name, - "\", the error is: ", - err); -#endif -#ifdef _MSC_VER -#pragma warning(pop) -#endif - return; -} - -// Checks an environment variable is set. 
-bool has_env(const char* name) noexcept { - std::shared_lock lk(env_mutex); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto envar = std::getenv(name); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - return envar != nullptr; -} - -// Reads an environment variable and returns the content if it is set -std::optional<std::string> get_env(const char* name) noexcept { - std::shared_lock lk(env_mutex); -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning(disable : 4996) -#endif - // NOLINTNEXTLINE(concurrency-mt-unsafe) - auto envar = std::getenv(name); -#ifdef _MSC_VER -#pragma warning(pop) -#endif - if (envar != nullptr) { - return std::string(envar); - } - return std::nullopt; -} - -// Reads an environment variable and returns -// - optional<true>, if set equal to "1" -// - optional<false>, if set equal to "0" -// - nullopt, otherwise -// -// NB: -// Issues a warning if the value of the environment variable is not 0 or 1. -std::optional<bool> check_env(const char* name) { - auto env_opt = get_env(name); - if (env_opt.has_value()) { - if (*env_opt == "0") { - return false; - } - if (*env_opt == "1") { - return true; - } - TORCH_WARN( - "Ignoring invalid value for boolean flag ", - name, - ": ", - *env_opt, - "valid values are 0 or 1."); - } - return std::nullopt; -} -} // namespace c10::utils diff --git a/c10/util/env.h b/c10/util/env.h index 04b7585861..3db116c7db 100644 --- a/c10/util/env.h +++ b/c10/util/env.h @@ -1,20 +1,11 @@ #pragma once -#include <c10/macros/Export.h> +#include <c10/util/Exception.h> +#include <cstdlib> +#include <cstring> #include <optional> -#include <string> namespace c10::utils { - -// Set an environment variable. -C10_API void set_env( - const char* name, - const char* value, - bool overwrite = true); - -// Checks an environment variable is set. -C10_API bool has_env(const char* name) noexcept; - // Reads an environment variable and returns // - optional<true>, if set equal to "1" // - optional<false>, if set equal to "0" @@ -22,10 +13,29 @@ C10_API bool has_env(const char* name) noexcept; // // NB: // Issues a warning if the value of the environment variable is not 0 or 1. -C10_API std::optional<bool> check_env(const char* name); - -// Reads the value of an environment variable if it is set. -// However, check_env should be used if the value is assumed to be a flag. -C10_API std::optional<std::string> get_env(const char* name) noexcept; - +inline std::optional<bool> check_env(const char* name) { +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + auto envar = std::getenv(name); +#ifdef _MSC_VER +#pragma warning(pop) +#endif + if (envar) { + if (strcmp(envar, "0") == 0) { + return false; + } + if (strcmp(envar, "1") == 0) { + return true; + } + TORCH_WARN( + "Ignoring invalid value for boolean flag ", + name, + ": ", + envar, + "valid values are 0 or 1."); + } + return std::nullopt; +} } // namespace c10::utils diff --git a/c10/util/tempfile.cpp b/c10/util/tempfile.cpp index f106885a88..28c3c7f14f 100644 --- a/c10/util/tempfile.cpp +++ b/c10/util/tempfile.cpp @@ -1,5 +1,4 @@ #include <c10/util/Exception.h> -#include <c10/util/env.h> #include <c10/util/tempfile.h> #include <fmt/format.h> @@ -23,11 +22,10 @@ static std::string make_filename(std::string_view name_prefix) { // We see if any of these environment variables is set and use their value, or // else default the temporary directory to `/tmp`. 
- std::string tmp_directory = "/tmp"; + const char* tmp_directory = "/tmp"; for (const char* variable : {"TMPDIR", "TMP", "TEMP", "TEMPDIR"}) { - auto path_opt = c10::utils::get_env(variable); - if (path_opt.has_value()) { - tmp_directory = path_opt.value(); + if (const char* path = getenv(variable)) { + tmp_directory = path; break; } }
2.41.0
ede882c0b1d5ccc95b0c82ca5e206eb2dfb2911
Mon, 22 Apr 2024 15:38:22 +0000
[PATCH 0458/1000] preferred blas library; cublaslt gemm implementation (#122106)
Following the example of PyTorch's support for a preferred linalg library (cusolver or magma), this PR introduces a preferred BLAS library selector: either cublas or cublaslt for CUDA, and hipblas or hipblaslt for ROCm via normal hipification of sources. The default BLAS implementation remains cublas (hipblas on ROCm). cublaslt or hipblaslt can be enabled via the environment variable TORCH_BLAS_PREFER_CUBLASLT=1 (or its alias TORCH_BLAS_PREFER_HIPBLASLT=1), or by calling `torch.backends.cuda.preferred_blas_library(backend="cublaslt")` (alias `backend="hipblaslt"`). Pull Request resolved: https://github.com/pytorch/pytorch/pull/122106 Approved by: https://github.com/lezcano
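A hedged usage sketch of the two enablement paths described above; the environment variable is assumed to be read when torch first initializes its global context, and the matmul at the end is only an illustrative way to trigger a GEMM, not part of the PR:

import os

# Path 1: set the preference before importing torch
# (alias: TORCH_BLAS_PREFER_HIPBLASLT on ROCm).
os.environ["TORCH_BLAS_PREFER_CUBLASLT"] = "1"

import torch

# Path 2: switch at runtime through the backends API this PR adds
# ("hipblaslt" is accepted as an alias).
torch.backends.cuda.preferred_blas_library(backend="cublaslt")

# Float/half GEMMs on CUDA should now dispatch to the cublasLt path
# added in the diff below, e.g.:
x = torch.randn(64, 64, device="cuda")
y = torch.randn(64, 64, device="cuda")
z = x @ y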
diff --git a/aten/src/ATen/BlasBackend.h b/aten/src/ATen/BlasBackend.h new file mode 100644 index 0000000000..7f8c321ad9 --- /dev/null +++ b/aten/src/ATen/BlasBackend.h @@ -0,0 +1,27 @@ +#pragma once + +#include <c10/util/Exception.h> + +#include <ostream> +#include <string> + +namespace at { + +enum class BlasBackend : int8_t { Cublas, Cublaslt }; + +inline std::string BlasBackendToString(at::BlasBackend backend) { + switch (backend) { + case BlasBackend::Cublas: + return "at::BlasBackend::Cublas"; + case BlasBackend::Cublaslt: + return "at::BlasBackend::Cublaslt"; + default: + TORCH_CHECK(false, "Unknown blas backend"); + } +} + +inline std::ostream& operator<<(std::ostream& stream, at::BlasBackend backend) { + return stream << BlasBackendToString(backend); +} + +} // namespace at diff --git a/aten/src/ATen/Context.cpp b/aten/src/ATen/Context.cpp index 9cd8244c6f..7aa7542749 100644 --- a/aten/src/ATen/Context.cpp +++ b/aten/src/ATen/Context.cpp @@ -263,6 +263,23 @@ void Context::setLinalgPreferredBackend(at::LinalgBackend b) { } } +at::BlasBackend Context::blasPreferredBackend() const { + return blas_preferred_backend; +} + +void Context::setBlasPreferredBackend(at::BlasBackend b) { + TORCH_CHECK((b != at::BlasBackend::Cublaslt) || hasCuBLASLt(), + "Cannot set preferred backend to cuBLASLt if PyTorch has not been compiled with cuBLASLt."); + if (b != at::BlasBackend::Cublas) { + TORCH_WARN_ONCE( + "torch.backends.cuda.preferred_blas_library is an experimental feature. " + "If you see any error or unexpected behavior when this flag is set " + "please file an issue on GitHub." + ); + } + blas_preferred_backend = b; +} + bool Context::allowFP16ReductionCuBLAS() const { return allow_fp16_reduction_cublas; } diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h index 931cd86e77..5c5036caa9 100644 --- a/aten/src/ATen/Context.h +++ b/aten/src/ATen/Context.h @@ -1,5 +1,6 @@ #pragma once +#include <ATen/BlasBackend.h> #include <ATen/CPUGeneratorImpl.h> #include <ATen/DeviceAccelerator.h> #include <ATen/LinalgBackend.h> @@ -120,6 +121,9 @@ class TORCH_API Context { static bool hasCuSOLVER() { return detail::getCUDAHooks().hasCuSOLVER(); } + static bool hasCuBLASLt() { + return detail::getCUDAHooks().hasCuBLASLt(); + } static bool hasHIP() { return detail::getHIPHooks().hasHIP(); } @@ -208,6 +212,9 @@ class TORCH_API Context { at::LinalgBackend linalgPreferredBackend() const; void setLinalgPreferredBackend(at::LinalgBackend); + at::BlasBackend blasPreferredBackend() const; + void setBlasPreferredBackend(at::BlasBackend); + // Note [Enabling Deterministic Operations] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Operations in PyTorch that normally act nondeterministically, but have an @@ -371,6 +378,11 @@ class TORCH_API Context { c10::utils::check_env("TORCH_LINALG_PREFER_CUSOLVER") == true ? at::LinalgBackend::Cusolver : at::LinalgBackend::Default; + at::BlasBackend blas_preferred_backend = + (c10::utils::check_env("TORCH_BLAS_PREFER_CUBLASLT") == true || + c10::utils::check_env("TORCH_BLAS_PREFER_HIPBLASLT") == true) + ? 
at::BlasBackend::Cublaslt + : at::BlasBackend::Cublas; #ifdef C10_MOBILE bool release_original_weights = true; #else diff --git a/aten/src/ATen/cuda/CUDABlas.cpp b/aten/src/ATen/cuda/CUDABlas.cpp index 0bce4b9a99..f9ac77b53e 100644 --- a/aten/src/ATen/cuda/CUDABlas.cpp +++ b/aten/src/ATen/cuda/CUDABlas.cpp @@ -236,8 +236,289 @@ namespace at::cuda::blas { CUDABLAS_NONNEGINT_CHECK(bgemm<Dtype>, num_batches); \ } while (0) +#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700) + +#if defined(USE_ROCM) && ROCM_VERSION >= 50700 && ROCM_VERSION < 60000 +// only for rocm 5.7 where we first supported hipblaslt, it was difficult +// to hipify correctly without this change. +#define hipDataType hipblasDatatype_t +#endif + +// hipblaslt custom types were a temporary work-around +#if defined(USE_ROCM) && ROCM_VERSION >= 60000 && defined(HIPBLASLT_CUSTOM_DATA_TYPE) +hipblasltDatatype_t hipToLt(hipDataType type) { + switch (type) { + case HIP_R_32F: return HIPBLASLT_R_32F; + case HIP_R_64F: return HIPBLASLT_R_64F; + case HIP_R_16F: return HIPBLASLT_R_16F; + case HIP_R_8I: return HIPBLASLT_R_8I; + case HIP_C_32F: return HIPBLASLT_C_32F; + case HIP_C_64F: return HIPBLASLT_C_64F; + case HIP_C_16F: return HIPBLASLT_C_16F; + case HIP_C_8I: return HIPBLASLT_C_8I; + case HIP_R_8U: return HIPBLASLT_R_8U; + case HIP_C_8U: return HIPBLASLT_C_8U; + case HIP_R_32I: return HIPBLASLT_R_32I; + case HIP_C_32I: return HIPBLASLT_C_32I; + case HIP_R_32U: return HIPBLASLT_R_32U; + case HIP_C_32U: return HIPBLASLT_C_32U; + case HIP_R_16BF: return HIPBLASLT_R_16B; + case HIP_C_16BF: return HIPBLASLT_C_16B; + default: TORCH_CHECK(false, "unknown hipDataType"); + } +} +#define HIPTOLT(type) hipToLt(type) +#else +#define HIPTOLT(type) type +#endif + +#if defined(USE_ROCM) && ROCM_VERSION >= 60000 && defined(HIPBLASLT_CUSTOM_COMPUTE_TYPE) +hipblasLtComputeType_t hipblasToLt(hipblasComputeType_t type) { + switch (type) { + case HIPBLAS_COMPUTE_32F: return HIPBLASLT_COMPUTE_F32; + case HIPBLAS_COMPUTE_32F_FAST_16F: return HIPBLASLT_COMPUTE_F32_FAST_F16; + case HIPBLAS_COMPUTE_32F_FAST_TF32: return HIPBLASLT_COMPUTE_F32_FAST_XF32; + case HIPBLAS_COMPUTE_64F: return HIPBLASLT_COMPUTE_F64; + case HIPBLAS_COMPUTE_32I: return HIPBLASLT_COMPUTE_I32; + default: TORCH_CHECK(false, "unknown hipblasComputeType_t"); + } +} +#define HIPCOMPTOLT(type) hipblasToLt(type) +#else +#define HIPCOMPTOLT(type) type +#endif + +namespace { +// Following the pattern of CuSparseDescriptor +// Defined here for now because this is the only place cublas_lt interface is +// used but can be moved to a header once cublas_lt interface is used in +// multiple places. 
+template <typename T, cublasStatus_t (*destructor)(T*)> +struct CuBlasLtDeleter { + void operator()(T* x) { + if (x != nullptr) { + TORCH_CUDABLAS_CHECK(destructor(x)); + } + } +}; + +template <typename T, cublasStatus_t (*destructor)(T*)> +class CuBlasLtDescriptor { + public: + T* descriptor() const { + return descriptor_.get(); + } + T* descriptor() { + return descriptor_.get(); + } + + protected: + std::unique_ptr<T, CuBlasLtDeleter<T, destructor>> descriptor_; +}; + +class CuBlasLtMatmulDescriptor : public CuBlasLtDescriptor< + cublasLtMatmulDescOpaque_t, + &cublasLtMatmulDescDestroy> { + public: + CuBlasLtMatmulDescriptor( + cublasComputeType_t compute_type, + cudaDataType_t scale_type) { + cublasLtMatmulDesc_t raw_descriptor = nullptr; + TORCH_CUDABLAS_CHECK( + cublasLtMatmulDescCreate(&raw_descriptor, HIPCOMPTOLT(compute_type), HIPTOLT(scale_type))); + descriptor_.reset(raw_descriptor); + } + template <typename T> + inline void setAttribute(cublasLtMatmulDescAttributes_t attr, const T value) { + TORCH_CUDABLAS_CHECK(::cublasLtMatmulDescSetAttribute(descriptor(), attr, &value, sizeof(T))); + } +}; + +class CuBlasLtMatrixLayout : public CuBlasLtDescriptor< + cublasLtMatrixLayoutOpaque_t, + &cublasLtMatrixLayoutDestroy> { + public: + CuBlasLtMatrixLayout( + cudaDataType_t type, + uint64_t rows, + uint64_t cols, + int64_t ld, + bool t = false) { + cublasLtMatrixLayout_t raw_descriptor = nullptr; + TORCH_CUDABLAS_CHECK( + cublasLtMatrixLayoutCreate(&raw_descriptor, HIPTOLT(type), t ? cols : rows, t ? rows : cols, ld)); + descriptor_.reset(raw_descriptor); + } + template <typename T> + inline void setAttribute(cublasLtMatrixLayoutAttribute_t attr, const T value) { + TORCH_CUDABLAS_CHECK(::cublasLtMatrixLayoutSetAttribute(descriptor(), attr, &value, sizeof(T))); + } +}; + +class CuBlasLtMatmulPreference : public CuBlasLtDescriptor< + cublasLtMatmulPreferenceOpaque_t, + &cublasLtMatmulPreferenceDestroy> { + public: + CuBlasLtMatmulPreference() { + cublasLtMatmulPreference_t raw_descriptor = nullptr; + TORCH_CUDABLAS_CHECK(cublasLtMatmulPreferenceCreate(&raw_descriptor)); + descriptor_.reset(raw_descriptor); + } + template <typename T> + inline void setAttribute(cublasLtMatmulPreferenceAttributes_t attr, const T value) { + TORCH_CUDABLAS_CHECK(::cublasLtMatmulPreferenceSetAttribute(descriptor(), attr, &value, sizeof(T))); + } +}; +} // namespace + +#endif + +template <typename Dtype> +inline void bgemm_internal_cublaslt(CUDABLAS_BGEMM_ARGTYPES(Dtype)) { +#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700) + cudaDataType_t abcType = CUDA_R_32F; + cublasComputeType_t computeType = CUBLAS_COMPUTE_32F; + cudaDataType_t scaleType = CUDA_R_32F; + if constexpr (std::is_same_v<Dtype, double>) { + abcType = CUDA_R_64F; + computeType = CUBLAS_COMPUTE_64F; + scaleType = CUDA_R_64F; + } else if constexpr (std::is_same_v<Dtype, float>) { +#ifndef USE_ROCM + if (at::globalContext().allowTF32CuBLAS()) { + computeType = CUBLAS_COMPUTE_32F_FAST_TF32; + } +#endif + } else if constexpr (std::is_same_v<Dtype, c10::complex<double>>) { + abcType = CUDA_C_64F; + computeType = CUBLAS_COMPUTE_64F; + scaleType = CUDA_C_64F; + } else if constexpr (std::is_same_v<Dtype, c10::complex<float>>) { + abcType = CUDA_C_32F; + scaleType = CUDA_C_32F; + } else if constexpr (std::is_same_v<Dtype, at::Half>) { + abcType = CUDA_R_16F; + } else if constexpr (std::is_same_v<Dtype, at::BFloat16>) { + abcType = CUDA_R_16BF; + } else { + AT_ERROR("at::cuda::blas::bgemm_internal_cublaslt: not 
implemented for ", typeid(Dtype).name()); + } + + globalContext().alertCuBLASConfigNotDeterministic(); + cublasLtHandle_t ltHandle = at::cuda::getCurrentCUDABlasLtHandle(); + cublasOperation_t opa = _cublasOpFromChar(transa); + cublasOperation_t opb = _cublasOpFromChar(transb); + _cublasAdjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); + + CuBlasLtMatmulDescriptor computeDesc(computeType, scaleType); + computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_TRANSA, opa); + computeDesc.setAttribute(CUBLASLT_MATMUL_DESC_TRANSB, opb); + CuBlasLtMatrixLayout Adesc(abcType, m, k, lda, opa == CUBLAS_OP_T); + CuBlasLtMatrixLayout Bdesc(abcType, k, n, ldb, opb == CUBLAS_OP_T); + CuBlasLtMatrixLayout Cdesc(abcType, m, n, ldc); + + if (num_batches > 1) { + int num_batches_as_int = static_cast<int>(num_batches); + Adesc.setAttribute(CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, num_batches_as_int); + Bdesc.setAttribute(CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, num_batches_as_int); + Cdesc.setAttribute(CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, num_batches_as_int); + Adesc.setAttribute(CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, stridea); + Bdesc.setAttribute(CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, strideb); + Cdesc.setAttribute(CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET, stridec); + } + + CuBlasLtMatmulPreference preference; + // See https://github.com/pytorch/pytorch/issues/73328 for reasoning behind + // setting this to 1M. + size_t workspaceSize = _getWorkspaceSize(); + preference.setAttribute(CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, workspaceSize); + +#ifndef USE_ROCM + uint32_t a_alignment = _getAlignment(reinterpret_cast<uintptr_t>(a)); + uint32_t b_alignment = _getAlignment(reinterpret_cast<uintptr_t>(b)); + uint32_t c_alignment = _getAlignment(reinterpret_cast<uintptr_t>(c)); + preference.setAttribute(CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_A_BYTES, a_alignment); + preference.setAttribute(CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_B_BYTES, b_alignment); + preference.setAttribute(CUBLASLT_MATMUL_PREF_MIN_ALIGNMENT_C_BYTES, c_alignment); +#endif + + auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); + auto workspace = allocator.allocate(workspaceSize); + TORCH_CHECK(workspace.get() != nullptr, "OOM trying to allocate workspace for cublaslt"); + + cublasLtMatmulHeuristicResult_t heuristicResult = {}; + int returnedResult = 0; + TORCH_CUDABLAS_CHECK(cublasLtMatmulAlgoGetHeuristic( + ltHandle, + computeDesc.descriptor(), + Adesc.descriptor(), + Bdesc.descriptor(), + Cdesc.descriptor(), + Cdesc.descriptor(), + preference.descriptor(), + 1, + &heuristicResult, + &returnedResult)); + if (returnedResult == 0) { + TORCH_CUDABLAS_CHECK(CUBLAS_STATUS_NOT_SUPPORTED); + } + + cublasStatus_t cublasStatus = cublasLtMatmul( + ltHandle, + computeDesc.descriptor(), + &alpha, + a, + Adesc.descriptor(), + b, + Bdesc.descriptor(), + &beta, + c, + Cdesc.descriptor(), + c, + Cdesc.descriptor(), + &heuristicResult.algo, + workspace.mutable_get(), + workspaceSize, + at::cuda::getCurrentCUDAStream()); + TORCH_CHECK( + cublasStatus == CUBLAS_STATUS_SUCCESS, + "CUDA error: ", + at::cuda::blas::_cublasGetErrorEnum(cublasStatus), + " when calling cublasLtMatmul with transpose_mat1 ", + (opa == CUBLAS_OP_T), + " transpose_mat2 ", + (opb == CUBLAS_OP_T), + " m ", + m, + " n ", + n, + " k ", + k, + " lda ", + lda, + " ldb ", + ldb, + " ldc ", + ldc, + " abcType ", + abcType, + " computeType ", + computeType, + " scaleType ", + scaleType); +#else + AT_ERROR("at::cuda::blas::bgemm_internal_cublaslt: not implemented for ", typeid(Dtype).name()); 
+#endif +} + + +template <typename Dtype> +inline void bgemm_internal_cublas(CUDABLAS_BGEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::bgemm_internal_cublas: not implemented for ", typeid(Dtype).name()); +} + template <> -void bgemm_internal<double>(CUDABLAS_BGEMM_ARGTYPES(double)) { +void bgemm_internal_cublas<double>(CUDABLAS_BGEMM_ARGTYPES(double)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); @@ -250,7 +531,7 @@ void bgemm_internal<double>(CUDABLAS_BGEMM_ARGTYPES(double)) { } template <> -void bgemm_internal<float>(CUDABLAS_BGEMM_ARGTYPES(float)) { +void bgemm_internal_cublas<float>(CUDABLAS_BGEMM_ARGTYPES(float)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); @@ -263,7 +544,7 @@ void bgemm_internal<float>(CUDABLAS_BGEMM_ARGTYPES(float)) { } template <> -void bgemm_internal<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<double>)) { +void bgemm_internal_cublas<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<double>)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); @@ -278,7 +559,7 @@ void bgemm_internal<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<d } template <> -void bgemm_internal<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<float>)) { +void bgemm_internal_cublas<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<float>)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); @@ -293,7 +574,7 @@ void bgemm_internal<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<fl } template <> -void bgemm_internal<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half)) { +void bgemm_internal_cublas<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); @@ -340,7 +621,7 @@ void bgemm_internal<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half)) { } template <> -void bgemm_internal<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)) { +void bgemm_internal_cublas<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); BGEMM_CHECK_ARGVALUES(at::BFloat16); @@ -366,6 +647,87 @@ void bgemm_internal<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)) { CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } +template <> +void bgemm_internal<double>(CUDABLAS_BGEMM_ARGTYPES(double)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { +#ifdef USE_ROCM + // hipblaslt does not support double gemm yet + bgemm_internal_cublas<double>(CUDABLAS_BGEMM_ARGS(double)); +#else + bgemm_internal_cublaslt<double>(CUDABLAS_BGEMM_ARGS(double)); +#endif + } + else { + bgemm_internal_cublas<double>(CUDABLAS_BGEMM_ARGS(double)); + } +} + +template <> +void bgemm_internal<float>(CUDABLAS_BGEMM_ARGTYPES(float)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { + bgemm_internal_cublaslt<float>(CUDABLAS_BGEMM_ARGS(float)); + } + else { + bgemm_internal_cublas<float>(CUDABLAS_BGEMM_ARGS(float)); + } +} + +template <> +void 
bgemm_internal<c10::complex<double>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<double>)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { +#ifdef USE_ROCM + // hipblaslt does not support complex<double> gemm yet + bgemm_internal_cublas<c10::complex<double>>(CUDABLAS_BGEMM_ARGS(c10::complex<double>)); +#else + bgemm_internal_cublaslt<c10::complex<double>>(CUDABLAS_BGEMM_ARGS(c10::complex<double>)); +#endif + } + else { + bgemm_internal_cublas<c10::complex<double>>(CUDABLAS_BGEMM_ARGS(c10::complex<double>)); + } +} + +template <> +void bgemm_internal<c10::complex<float>>(CUDABLAS_BGEMM_ARGTYPES(c10::complex<float>)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { +#ifdef USE_ROCM + // hipblaslt does not support complex<float> gemm yet + bgemm_internal_cublas<c10::complex<float>>(CUDABLAS_BGEMM_ARGS(c10::complex<float>)); +#else + bgemm_internal_cublaslt<c10::complex<float>>(CUDABLAS_BGEMM_ARGS(c10::complex<float>)); +#endif + } + else { + bgemm_internal_cublas<c10::complex<float>>(CUDABLAS_BGEMM_ARGS(c10::complex<float>)); + } +} + +template <> +void bgemm_internal<at::Half>(CUDABLAS_BGEMM_ARGTYPES(at::Half)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { + bgemm_internal_cublaslt<at::Half>(CUDABLAS_BGEMM_ARGS(at::Half)); + } + else { + bgemm_internal_cublas<at::Half>(CUDABLAS_BGEMM_ARGS(at::Half)); + } +} + +template <> +void bgemm_internal<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { + bgemm_internal_cublaslt<at::BFloat16>(CUDABLAS_BGEMM_ARGS(at::BFloat16)); + } + else { + bgemm_internal_cublas<at::BFloat16>(CUDABLAS_BGEMM_ARGS(at::BFloat16)); + } +} + template <typename DType> inline void bgemm_tunable(CUDABLAS_BGEMM_ARGTYPES(DType)) { tunable::GemmStridedBatchedParams<DType> params; @@ -477,8 +839,19 @@ void bgemm<at::BFloat16>(CUDABLAS_BGEMM_ARGTYPES(at::BFloat16)) { } } +template <typename Dtype> +inline void gemm_internal_cublaslt(CUDABLAS_GEMM_ARGTYPES(Dtype)) { + // forward to bgemm implementation but set strides and batches to 0 + bgemm_internal_cublaslt(transa, transb, m, n, k, alpha, a, lda, 0, b, ldb, 0, beta, c, ldc, 0, 0); +} + +template <typename Dtype> +inline void gemm_internal_cublas(CUDABLAS_GEMM_ARGTYPES(Dtype)) { + AT_ERROR("at::cuda::blas::gemm_internal_cublas: not implemented for ", typeid(Dtype).name()); +} + template <> -void gemm_internal<double>(CUDABLAS_GEMM_ARGTYPES(double)) { +void gemm_internal_cublas<double>(CUDABLAS_GEMM_ARGTYPES(double)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); @@ -491,7 +864,7 @@ void gemm_internal<double>(CUDABLAS_GEMM_ARGTYPES(double)) { } template <> -void gemm_internal<float>(CUDABLAS_GEMM_ARGTYPES(float)) { +void gemm_internal_cublas<float>(CUDABLAS_GEMM_ARGTYPES(float)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); @@ -504,7 +877,7 @@ void gemm_internal<float>(CUDABLAS_GEMM_ARGTYPES(float)) { } template <> -void gemm_internal<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<double>)) { +void gemm_internal_cublas<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<double>)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = 
at::cuda::getCurrentCUDABlasHandle(); @@ -519,7 +892,7 @@ void gemm_internal<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<dou } template <> -void gemm_internal<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<float>)) { +void gemm_internal_cublas<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<float>)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); @@ -534,7 +907,7 @@ void gemm_internal<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<floa } template <> -void gemm_internal<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half)) { +void gemm_internal_cublas<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half)) { // See Note [Writing Nondeterministic Operations] globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); @@ -630,7 +1003,7 @@ void gemm_internal<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half)) { } template <> -void gemm_internal<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)) { +void gemm_internal_cublas<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)) { globalContext().alertCuBLASConfigNotDeterministic(); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cublasOperation_t opa = _cublasOpFromChar(transa); @@ -674,6 +1047,87 @@ void gemm_internal<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)) { TORCH_CUDABLAS_CHECK(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); } +template <> +void gemm_internal<double>(CUDABLAS_GEMM_ARGTYPES(double)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { +#ifdef USE_ROCM + // hipblaslt does not support double gemm yet + gemm_internal_cublas<double>(CUDABLAS_GEMM_ARGS(double)); +#else + gemm_internal_cublaslt<double>(CUDABLAS_GEMM_ARGS(double)); +#endif + } + else { + gemm_internal_cublas<double>(CUDABLAS_GEMM_ARGS(double)); + } +} + +template <> +void gemm_internal<float>(CUDABLAS_GEMM_ARGTYPES(float)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { + gemm_internal_cublaslt<float>(CUDABLAS_GEMM_ARGS(float)); + } + else { + gemm_internal_cublas<float>(CUDABLAS_GEMM_ARGS(float)); + } +} + +template <> +void gemm_internal<c10::complex<double>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<double>)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { +#ifdef USE_ROCM + // hipblaslt does not support complex gemm yet + gemm_internal_cublas<c10::complex<double>>(CUDABLAS_GEMM_ARGS(c10::complex<double>)); +#else + gemm_internal_cublaslt<c10::complex<double>>(CUDABLAS_GEMM_ARGS(c10::complex<double>)); +#endif + } + else { + gemm_internal_cublas<c10::complex<double>>(CUDABLAS_GEMM_ARGS(c10::complex<double>)); + } +} + +template <> +void gemm_internal<c10::complex<float>>(CUDABLAS_GEMM_ARGTYPES(c10::complex<float>)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { +#ifdef USE_ROCM + // hipblaslt does not support complex gemm yet + gemm_internal_cublas<c10::complex<float>>(CUDABLAS_GEMM_ARGS(c10::complex<float>)); +#else + gemm_internal_cublaslt<c10::complex<float>>(CUDABLAS_GEMM_ARGS(c10::complex<float>)); +#endif + } + else { + gemm_internal_cublas<c10::complex<float>>(CUDABLAS_GEMM_ARGS(c10::complex<float>)); + } +} + +template <> +void gemm_internal<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { + gemm_internal_cublaslt<at::Half>(CUDABLAS_GEMM_ARGS(at::Half)); + } 
+ else { + gemm_internal_cublas<at::Half>(CUDABLAS_GEMM_ARGS(at::Half)); + } +} + +template <> +void gemm_internal<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)) +{ + if (at::globalContext().blasPreferredBackend() == BlasBackend::Cublaslt) { + gemm_internal_cublaslt<at::BFloat16>(CUDABLAS_GEMM_ARGS(at::BFloat16)); + } + else { + gemm_internal_cublas<at::BFloat16>(CUDABLAS_GEMM_ARGS(at::BFloat16)); + } +} + template <typename DType> inline void gemm_tunable(CUDABLAS_GEMM_ARGTYPES(DType)) { tunable::GemmParams<DType> params; @@ -783,135 +1237,6 @@ void gemm<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)) { #if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700) -#if defined(USE_ROCM) && ROCM_VERSION >= 50700 && ROCM_VERSION < 60000 -// only for rocm 5.7 where we first supported hipblaslt, it was difficult -// to hipify correctly without this change. -#define hipDataType hipblasDatatype_t -#endif - -// hipblaslt custom types were a temporary work-around -#if defined(USE_ROCM) && ROCM_VERSION >= 60000 && defined(HIPBLASLT_CUSTOM_DATA_TYPE) -hipblasltDatatype_t hipToLt(hipDataType type) { - switch (type) { - case HIP_R_32F: return HIPBLASLT_R_32F; - case HIP_R_64F: return HIPBLASLT_R_64F; - case HIP_R_16F: return HIPBLASLT_R_16F; - case HIP_R_8I: return HIPBLASLT_R_8I; - case HIP_C_32F: return HIPBLASLT_C_32F; - case HIP_C_64F: return HIPBLASLT_C_64F; - case HIP_C_16F: return HIPBLASLT_C_16F; - case HIP_C_8I: return HIPBLASLT_C_8I; - case HIP_R_8U: return HIPBLASLT_R_8U; - case HIP_C_8U: return HIPBLASLT_C_8U; - case HIP_R_32I: return HIPBLASLT_R_32I; - case HIP_C_32I: return HIPBLASLT_C_32I; - case HIP_R_32U: return HIPBLASLT_R_32U; - case HIP_C_32U: return HIPBLASLT_C_32U; - case HIP_R_16BF: return HIPBLASLT_R_16B; - case HIP_C_16BF: return HIPBLASLT_C_16B; - default: TORCH_CHECK(false, "unknown hipDataType"); - } -} -#define HIPTOLT(type) hipToLt(type) -#else -#define HIPTOLT(type) type -#endif - -#if defined(USE_ROCM) && ROCM_VERSION >= 60000 && defined(HIPBLASLT_CUSTOM_COMPUTE_TYPE) -hipblasLtComputeType_t hipblasToLt(hipblasComputeType_t type) { - switch (type) { - case HIPBLAS_COMPUTE_32F: return HIPBLASLT_COMPUTE_F32; - case HIPBLAS_COMPUTE_32F_FAST_16F: return HIPBLASLT_COMPUTE_F32_FAST_F16; - case HIPBLAS_COMPUTE_32F_FAST_TF32: return HIPBLASLT_COMPUTE_F32_FAST_XF32; - case HIPBLAS_COMPUTE_64F: return HIPBLASLT_COMPUTE_F64; - case HIPBLAS_COMPUTE_32I: return HIPBLASLT_COMPUTE_I32; - default: TORCH_CHECK(false, "unknown hipblasComputeType_t"); - } -} -#define HIPCOMPTOLT(type) hipblasToLt(type) -#else -#define HIPCOMPTOLT(type) type -#endif - -namespace { -// Following the pattern of CuSparseDescriptor -// Defined here for now because this is the only place cublas_lt interface is -// used but can be moved to a header once cublas_lt interface is used in -// multiple places. 
-template <typename T, cublasStatus_t (*destructor)(T*)> -struct CuBlasLtDeleter { - void operator()(T* x) { - if (x != nullptr) { - TORCH_CUDABLAS_CHECK(destructor(x)); - } - } -}; - -template <typename T, cublasStatus_t (*destructor)(T*)> -class CuBlasLtDescriptor { - public: - T* descriptor() const { - return descriptor_.get(); - } - T* descriptor() { - return descriptor_.get(); - } - - protected: - std::unique_ptr<T, CuBlasLtDeleter<T, destructor>> descriptor_; -}; - -class CuBlasLtMatmulDescriptor : public CuBlasLtDescriptor< - cublasLtMatmulDescOpaque_t, - &cublasLtMatmulDescDestroy> { - public: - CuBlasLtMatmulDescriptor( - cublasComputeType_t compute_type, - cudaDataType_t scale_type) { - cublasLtMatmulDesc_t raw_descriptor = nullptr; - TORCH_CUDABLAS_CHECK( - cublasLtMatmulDescCreate(&raw_descriptor, HIPCOMPTOLT(compute_type), HIPTOLT(scale_type))); - descriptor_.reset(raw_descriptor); - } - template <typename T> - inline void setAttribute(cublasLtMatmulDescAttributes_t attr, const T value) { - TORCH_CUDABLAS_CHECK(::cublasLtMatmulDescSetAttribute(descriptor(), attr, &value, sizeof(T))); - } -}; - -class CuBlasLtMatrixLayout : public CuBlasLtDescriptor< - cublasLtMatrixLayoutOpaque_t, - &cublasLtMatrixLayoutDestroy> { - public: - CuBlasLtMatrixLayout( - cudaDataType_t type, - uint64_t rows, - uint64_t cols, - int64_t ld, - bool t = false) { - cublasLtMatrixLayout_t raw_descriptor = nullptr; - TORCH_CUDABLAS_CHECK( - cublasLtMatrixLayoutCreate(&raw_descriptor, HIPTOLT(type), t ? cols : rows, t ? rows : cols, ld)); - descriptor_.reset(raw_descriptor); - } -}; - -class CuBlasLtMatmulPreference : public CuBlasLtDescriptor< - cublasLtMatmulPreferenceOpaque_t, - &cublasLtMatmulPreferenceDestroy> { - public: - CuBlasLtMatmulPreference() { - cublasLtMatmulPreference_t raw_descriptor = nullptr; - TORCH_CUDABLAS_CHECK(cublasLtMatmulPreferenceCreate(&raw_descriptor)); - descriptor_.reset(raw_descriptor); - } - template <typename T> - inline void setAttribute(cublasLtMatmulPreferenceAttributes_t attr, const T value) { - TORCH_CUDABLAS_CHECK(::cublasLtMatmulPreferenceSetAttribute(descriptor(), attr, &value, sizeof(T))); - } -}; -} // namespace - template <typename Dtype> void gemm_and_bias( bool transpose_mat1, @@ -997,6 +1322,7 @@ void gemm_and_bias( auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); auto workspace = allocator.allocate(workspaceSize); + TORCH_CHECK(workspace.get() != nullptr, "OOM trying to allocate workspace for cublaslt"); cublasLtMatmulHeuristicResult_t heuristicResult = {}; int returnedResult = 0; @@ -1182,6 +1508,7 @@ if (isFloat8Type(result_dtype)) { size_t workspaceSize = _getWorkspaceSize(); auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); auto workspace = allocator.allocate(workspaceSize); + TORCH_CHECK(workspace.get() != nullptr, "OOM trying to allocate workspace for cublaslt"); CuBlasLtMatmulPreference preference; preference.setAttribute(CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, workspaceSize); diff --git a/aten/src/ATen/cuda/detail/CUDAHooks.cpp b/aten/src/ATen/cuda/detail/CUDAHooks.cpp index 03e881f4e7..d3b80af2e8 100644 --- a/aten/src/ATen/cuda/detail/CUDAHooks.cpp +++ b/aten/src/ATen/cuda/detail/CUDAHooks.cpp @@ -184,6 +184,16 @@ bool CUDAHooks::hasCuSOLVER() const { #endif } +bool CUDAHooks::hasCuBLASLt() const { +#if defined(CUDART_VERSION) + return true; +#elif AT_ROCM_ENABLED() && defined(ROCM_VERSION) && ROCM_VERSION >= 50700 + return true; +#else + return false; +#endif +} + bool CUDAHooks::hasROCM() const { // Currently, this is 
same as `compiledWithMIOpen`. // But in future if there are ROCm builds without MIOpen, diff --git a/aten/src/ATen/cuda/detail/CUDAHooks.h b/aten/src/ATen/cuda/detail/CUDAHooks.h index dddeab1e26..2002bd1b77 100644 --- a/aten/src/ATen/cuda/detail/CUDAHooks.h +++ b/aten/src/ATen/cuda/detail/CUDAHooks.h @@ -27,6 +27,7 @@ struct CUDAHooks : public at::CUDAHooksInterface { bool hasMAGMA() const override; bool hasCuDNN() const override; bool hasCuSOLVER() const override; + bool hasCuBLASLt() const override; bool hasROCM() const override; const at::cuda::NVRTC& nvrtc() const override; DeviceIndex current_device() const override; diff --git a/aten/src/ATen/detail/CUDAHooksInterface.h b/aten/src/ATen/detail/CUDAHooksInterface.h index e0e96fe14d..860e49ff3d 100644 --- a/aten/src/ATen/detail/CUDAHooksInterface.h +++ b/aten/src/ATen/detail/CUDAHooksInterface.h @@ -101,6 +101,10 @@ struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface { return false; } + virtual bool hasCuBLASLt() const { + return false; + } + virtual bool hasROCM() const { return false; } diff --git a/docs/source/backends.rst b/docs/source/backends.rst index 5943f12f62..ef3c720e83 100644 --- a/docs/source/backends.rst +++ b/docs/source/backends.rst @@ -66,6 +66,8 @@ torch.backends.cuda Clears a cuFFT plan cache. +.. autofunction:: torch.backends.cuda.preferred_blas_library + .. autofunction:: torch.backends.cuda.preferred_linalg_library .. autoclass:: torch.backends.cuda.SDPAParams diff --git a/test/test_linalg.py b/test/test_linalg.py index 1216094118..5053b5e8fe 100644 --- a/test/test_linalg.py +++ b/test/test_linalg.py @@ -18,7 +18,7 @@ from torch.testing._internal.common_utils import \ TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, iter_indices, make_fullrank_matrices_with_distinct_singular_values, freeze_rng_state, IS_ARM64, IS_SANDCASTLE, TEST_OPT_EINSUM, parametrize, skipIfTorchDynamo, - setLinalgBackendsToDefaultFinally) + setBlasBackendsToDefaultFinally, setLinalgBackendsToDefaultFinally) from torch.testing._internal.common_device_type import \ (instantiate_device_type_tests, dtypes, has_cusolver, has_hipsolver, onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride, @@ -43,6 +43,15 @@ assert torch.get_default_dtype() is torch.float32 if TEST_SCIPY: import scipy +def blaslt_supported_device(): + if torch.cuda.is_available(): + if torch.version.hip: + for arch in ['gfx90a', 'gfx94']: + if arch in torch.cuda.get_device_properties(0).gcnArchName: + return True + else: + return True + return False @unittest.skipIf(IS_ARM64, "Issue with numpy version on arm") class TestLinalg(TestCase): @@ -4418,33 +4427,48 @@ class TestLinalg(TestCase): @dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul just supported on CPU @dtypes(torch.int64, torch.float, torch.complex64) + @setBlasBackendsToDefaultFinally def test_matmul_small_brute_force_1d_Nd(self, device, dtype): - make_arg = partial(make_tensor, device=device, dtype=dtype) + for backend in ["cublas", "cublaslt"]: + if torch.device(device).type == 'cuda': + torch.backends.cuda.preferred_blas_library(backend) + + make_arg = partial(make_tensor, device=device, dtype=dtype) - for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(1), (True, False), (True, False)): - x = make_arg(size_x, noncontiguous=nctg_x) - y = make_arg(size_y, noncontiguous=nctg_y) - self.check_single_matmul(x, y) + for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(1), (True, False), (True, False)): + x = make_arg(size_x, noncontiguous=nctg_x) + y = 
make_arg(size_y, noncontiguous=nctg_y) + self.check_single_matmul(x, y) @dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul just supported on CPU @dtypes(torch.int64, torch.float, torch.complex64) + @setBlasBackendsToDefaultFinally def test_matmul_small_brute_force_2d_Nd(self, device, dtype): - make_arg = partial(make_tensor, device=device, dtype=dtype) + for backend in ["cublas", "cublaslt"]: + if torch.device(device).type == 'cuda': + torch.backends.cuda.preferred_blas_library(backend) + + make_arg = partial(make_tensor, device=device, dtype=dtype) - for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(2), (True, False), (True, False)): - x = make_arg(size_x, noncontiguous=nctg_x) - y = make_arg(size_y, noncontiguous=nctg_y) - self.check_single_matmul(x, y) + for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(2), (True, False), (True, False)): + x = make_arg(size_x, noncontiguous=nctg_x) + y = make_arg(size_y, noncontiguous=nctg_y) + self.check_single_matmul(x, y) @dtypesIfCUDA(torch.float, torch.complex64) # Integer matmul just supported on CPU @dtypes(torch.int64, torch.float, torch.complex64) + @setBlasBackendsToDefaultFinally def test_matmul_small_brute_force_3d_Nd(self, device, dtype): - make_arg = partial(make_tensor, device=device, dtype=dtype) + for backend in ["cublas", "cublaslt"]: + if torch.device(device).type == 'cuda': + torch.backends.cuda.preferred_blas_library(backend) + + make_arg = partial(make_tensor, device=device, dtype=dtype) - for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(3), (True, False), (True, False)): - x = make_arg(size_x, noncontiguous=nctg_x) - y = make_arg(size_y, noncontiguous=nctg_y) - self.check_single_matmul(x, y) + for (size_x, size_y), nctg_x, nctg_y in product(self.gen_sizes_matmul(3), (True, False), (True, False)): + x = make_arg(size_x, noncontiguous=nctg_x) + y = make_arg(size_y, noncontiguous=nctg_y) + self.check_single_matmul(x, y) @dtypes(torch.float, torch.complex64) def test_matmul_out_kernel_errors_with_autograd(self, device, dtype): @@ -7961,6 +7985,27 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2: self.assertEqual(out_ref, out1.cpu()) self.assertEqual(out1, out2) + @onlyCUDA + @unittest.skipIf(not blaslt_supported_device(), "blasLt not supported on current device") + @setBlasBackendsToDefaultFinally + def test_preferred_blas_library(self): + # The main purpose of this test is to make sure these "backend" calls work normally without raising exceptions. + m1 = torch.randint(2, 5, (2048, 2400), device='cuda', dtype=torch.float) + m2 = torch.randint(2, 5, (128, 2400), device='cuda', dtype=torch.float) + + torch.backends.cuda.preferred_blas_library('cublaslt') + out1 = torch.nn.functional.linear(m1, m2) + + torch.backends.cuda.preferred_blas_library('cublas') + out2 = torch.nn.functional.linear(m1, m2) + + # Although blas preferred flags doesn't affect CPU currently, + # we set this to make sure the flag can switch back to default normally. + out_ref = torch.nn.functional.linear(m1.cpu(), m2.cpu()) + + self.assertEqual(out1, out2) + self.assertEqual(out_ref, out2.cpu()) + def test_permute_matmul(self): a = torch.ones([2, 5, 24, 24]) b = torch.ones([3, 2, 5, 24, 24]) diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in index 0cccc6ff92..dd7047b6b6 100644 --- a/torch/_C/__init__.pyi.in +++ b/torch/_C/__init__.pyi.in @@ -1213,6 +1213,13 @@ class _LinalgBackend: class BatchNormBackend(Enum): ... 
+def _get_blas_preferred_backend() -> torch._C._BlasBackend: ... +def _set_blas_preferred_backend(arg: torch._C._BlasBackend): ... + +class _BlasBackend: + Cublas: _BlasBackend + Cublaslt: _BlasBackend + class ConvBackend(Enum): ... class Tag(Enum): diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py index ba25524915..4f90f5533d 100644 --- a/torch/_dynamo/trace_rules.py +++ b/torch/_dynamo/trace_rules.py @@ -564,6 +564,7 @@ torch_c_binding_in_graph_functions = dict.fromkeys( "torch._C._get_autograd_fallback_mode", "torch._C._get_backcompat_broadcast_warn", "torch._C._get_backcompat_keepdim_warn", + "torch._C._get_blas_preferred_backend", "torch._C._get_caught_jit_exception_class_name", "torch._C._get_caught_jit_exception_original_msg", "torch._C._get_constant_bool_symnode", @@ -1092,6 +1093,7 @@ torch_c_binding_in_graph_functions = dict.fromkeys( "torch._C._set_autograd_fallback_mode", "torch._C._set_backcompat_broadcast_warn", "torch._C._set_backcompat_keepdim_warn", + "torch._C._set_blas_preferred_backend", "torch._C._set_cached_tensors_enabled", "torch._C._set_check_sparse_tensor_invariants", "torch._C._set_conj", @@ -2385,6 +2387,7 @@ torch_non_c_binding_in_graph_functions = dict.fromkeys( "torch.backends.cuda.mem_efficient_sdp_enabled", "torch.backends.cuda.cudnn_sdp_enabled", "torch.backends.cuda.enable_cudnn_sdp", + "torch.backends.cuda.preferred_blas_library", "torch.backends.cuda.preferred_linalg_library", "torch.backends.cuda.sdp_kernel", "torch.backends.cudnn._init", diff --git a/torch/backends/cuda/__init__.py b/torch/backends/cuda/__init__.py index da0cea5c6c..6d079bdf06 100644 --- a/torch/backends/cuda/__init__.py +++ b/torch/backends/cuda/__init__.py @@ -12,6 +12,7 @@ __all__ = [ "cuFFTPlanCacheManager", "cuBLASModule", "preferred_linalg_library", + "preferred_blas_library", "cufft_plan_cache", "matmul", "SDPBackend", @@ -207,6 +208,56 @@ def preferred_linalg_library( return torch._C._get_linalg_preferred_backend() +_BlasBackends = { + "cublas": torch._C._BlasBackend.Cublas, + "cublaslt": torch._C._BlasBackend.Cublaslt, + "hipblaslt": torch._C._BlasBackend.Cublaslt, # alias +} +_BlasBackends_str = ", ".join(_BlasBackends.keys()) + + +def preferred_blas_library( + backend: Union[None, str, torch._C._BlasBackend] = None +) -> torch._C._BlasBackend: + r""" + Override the library PyTorch uses for BLAS operations. Choose between cuBLAS and cuBLASLt. + + .. warning:: This flag is experimental and subject to change. + + When PyTorch runs a CUDA BLAS operation it defaults to cuBLAS even if both cuBLAS and cuBLASLt are available. + For PyTorch built for ROCm, hipBLAS and hipBLASLt may offer different performance. + This flag (a :class:`str`) allows overriding which BLAS library to use. + + * If `"cublas"` is set then cuBLAS will be used wherever possible. + * If `"cublaslt"` is set then cuBLASLt will be used wherever possible. + * When no input is given, this function returns the currently preferred library. + * User may use the environment variable TORCH_BLAS_PREFER_CUBLASLT=1 to set the preferred library to cuBLASLt + globally. + This flag only sets the initial value of the preferred library and the preferred library + may still be overridden by this function call later in your script. + + Note: When a library is preferred other libraries may still be used if the preferred library + doesn't implement the operation(s) called. + This flag may achieve better performance if PyTorch's library selection is incorrect + for your application's inputs. 
+ + """ + if backend is None: + pass + elif isinstance(backend, str): + if backend not in _BlasBackends: + raise RuntimeError( + "Unknown input value. " f"Choose from: {_BlasBackends_str}." + ) + torch._C._set_blas_preferred_backend(_BlasBackends[backend]) + elif isinstance(backend, torch._C._BlasBackend): + torch._C._set_blas_preferred_backend(backend) + else: + raise RuntimeError("Unknown input value type.") + + return torch._C._get_blas_preferred_backend() + + from torch._C import _SDPAParams as SDPAParams, _SDPBackend as SDPBackend # Set the __module__ attribute diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp index 9343a48813..cad0da5b74 100644 --- a/torch/csrc/Module.cpp +++ b/torch/csrc/Module.cpp @@ -8,6 +8,7 @@ #endif #include <ATen/ATen.h> +#include <ATen/BlasBackend.h> #include <ATen/DLConvertor.h> #include <ATen/ExpandUtils.h> #include <ATen/LegacyVmapMode.h> @@ -1933,6 +1934,17 @@ Call this whenever a new thread is created in order to propagate values from return at::globalContext().linalgPreferredBackend(); }); + py::enum_<at::BlasBackend>(py_module, "_BlasBackend") + .value("Cublas", at::BlasBackend::Cublas) + .value("Cublaslt", at::BlasBackend::Cublaslt); + + py_module.def("_set_blas_preferred_backend", [](at::BlasBackend b) { + at::globalContext().setBlasPreferredBackend(b); + }); + py_module.def("_get_blas_preferred_backend", []() { + return at::globalContext().blasPreferredBackend(); + }); + py_module.def( "_construct_storage_from_data_pointer", [](int64_t data_ptr, c10::Device device, size_t size_bytes) { diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py index c750e04bbc..2f6c0f8e2b 100644 --- a/torch/testing/_internal/common_utils.py +++ b/torch/testing/_internal/common_utils.py @@ -1632,6 +1632,19 @@ def setLinalgBackendsToDefaultFinally(fn): return _fn +# Reverts the blas backend back to default to make sure potential failures in one +# test do not affect other tests +def setBlasBackendsToDefaultFinally(fn): + @wraps(fn) + def _fn(*args, **kwargs): + _preferred_backend = torch.backends.cuda.preferred_blas_library() + try: + fn(*args, **kwargs) + finally: + torch.backends.cuda.preferred_blas_library(_preferred_backend) + return _fn + + # Context manager for setting deterministic flag and automatically # resetting it to its original value class DeterministicGuard: diff --git a/torch/utils/hipify/cuda_to_hip_mappings.py b/torch/utils/hipify/cuda_to_hip_mappings.py index 6652240938..10f09e81eb 100644 --- a/torch/utils/hipify/cuda_to_hip_mappings.py +++ b/torch/utils/hipify/cuda_to_hip_mappings.py @@ -7326,6 +7326,11 @@ CUDA_IDENTIFIER_MAP = collections.OrderedDict( ("cublasLtMatrixLayout_t", ("hipblasLtMatrixLayout_t", CONV_MATH_FUNC, API_BLAS)), ("cublasLtMatrixLayoutOpaque_t", ("hipblasLtMatrixLayoutOpaque_t", CONV_MATH_FUNC, API_BLAS)), ("cublasLtMatrixLayoutAttribute_t", ("hipblasLtMatrixLayoutAttribute_t", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatrixLayoutCreate", ("hipblasLtMatrixLayoutCreate", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatrixLayoutDestroy", ("hipblasLtMatrixLayoutDestroy", CONV_MATH_FUNC, API_BLAS)), + ("cublasLtMatrixLayoutSetAttribute", ("hipblasLtMatrixLayoutSetAttribute", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT", ("HIPBLASLT_MATRIX_LAYOUT_BATCH_COUNT", CONV_MATH_FUNC, API_BLAS)), + ("CUBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET", ("HIPBLASLT_MATRIX_LAYOUT_STRIDED_BATCH_OFFSET", CONV_MATH_FUNC, API_BLAS)), ("cublasLtMatmulPreference_t", 
("hipblasLtMatmulPreference_t", CONV_MATH_FUNC, API_BLAS)), ("cublasLtMatmulPreferenceOpaque_t", ("hipblasLtMatmulPreferenceOpaque_t", CONV_MATH_FUNC, API_BLAS)), ("cublasLtMatmulPreferenceAttributes_t", ("hipblasLtMatmulPreferenceAttributes_t", CONV_MATH_FUNC, API_BLAS)), @@ -7333,8 +7338,6 @@ CUDA_IDENTIFIER_MAP = collections.OrderedDict( ("CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES", ("HIPBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES", CONV_MATH_FUNC, API_BLAS)), ("cublasLtMatmulAlgo_t", ("hipblasLtMatmulAlgo_t", CONV_MATH_FUNC, API_BLAS)), ("cublasLtMatmulHeuristicResult_t", ("hipblasLtMatmulHeuristicResult_t", CONV_MATH_FUNC, API_BLAS)), - ("cublasLtMatrixLayoutCreate", ("hipblasLtMatrixLayoutCreate", CONV_MATH_FUNC, API_BLAS)), - ("cublasLtMatrixLayoutDestroy", ("hipblasLtMatrixLayoutDestroy", CONV_MATH_FUNC, API_BLAS)), ("cublasLtCreate", ("hipblasLtCreate", CONV_MATH_FUNC, API_BLAS)), ("cublasLtDestroy", ("hipblasLtDestroy", CONV_MATH_FUNC, API_BLAS)), ("cublasLtMatmulDescCreate", ("hipblasLtMatmulDescCreate", CONV_MATH_FUNC, API_BLAS)),
2.41.0
c2ac4476ce2d1a41ea808e6738c52724d4e9cfc
Mon, 22 Apr 2024 15:53:38 +0000
[PATCH 0459/1000] Allow ONNX models without parameters (#121904)
Currently, if initializers are available, they are included in the ONNX model. If they are not available, the model is serialized without them. However, there are times in which the initializers are available, but the user prefers not to include them in the model, say for visualizing it on Netron or because the initializers will be specified along with the inputs in the ONNX runtime of choice. This PR allows users to pass `include_initializers` to the `ONNXProgram.save()` API. Fixes #100996 Pull Request resolved: https://github.com/pytorch/pytorch/pull/121904 Approved by: https://github.com/titaiwangms
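A minimal usage sketch of the new `include_initializers` flag (the toy module, input, and file name below are illustrative, not taken from the patch):

import torch

model = torch.nn.Linear(4, 2)
x = torch.randn(1, 4)

onnx_program = torch.onnx.dynamo_export(model, x)
# Serialize the graph without weights, e.g. for visualizing it on Netron;
# weights can instead be supplied via `model_state` or by the runtime.
onnx_program.save("model_no_weights.onnx", include_initializers=False)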
diff --git a/test/onnx/test_fx_to_onnx.py b/test/onnx/test_fx_to_onnx.py index 2f81818909..f5de6aca08 100644 --- a/test/onnx/test_fx_to_onnx.py +++ b/test/onnx/test_fx_to_onnx.py @@ -762,6 +762,100 @@ class TestFxToOnnx(pytorch_test_common.ExportTestCase): onnx_program.save(tmp_onnx_file.name) onnx.checker.check_model(tmp_onnx_file.name, full_check=True) + @common_utils.parametrize( + "include_initializer", + [ + common_utils.subtest( + True, + name="include_initializer", + ), + common_utils.subtest( + False, + name="dont_include_initializer", + ), + ], + ) + @common_utils.parametrize( + "use_fake_mode", + [ + common_utils.subtest( + True, + name="use_fake_mode", + ), + common_utils.subtest( + False, + name="no_fake_mode", + ), + ], + ) + @common_utils.parametrize( + "use_exported_program", + [ + common_utils.subtest( + True, + name="use_exported_program", + ), + common_utils.subtest( + False, + name="no_exported_program", + ), + ], + ) + def test_save_with_without_initializer( + self, include_initializer, use_fake_mode, use_exported_program + ): + class MNISTModel(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 32, 3, 1, bias=False) + self.conv2 = nn.Conv2d(32, 64, 3, 1, bias=False) + self.fc1 = nn.Linear(9216, 128, bias=False) + self.fc2 = nn.Linear(128, 10, bias=False) + + def forward(self, tensor_x: torch.Tensor): + tensor_x = self.conv1(tensor_x) + tensor_x = F.sigmoid(tensor_x) + tensor_x = self.conv2(tensor_x) + tensor_x = F.sigmoid(tensor_x) + tensor_x = F.max_pool2d(tensor_x, 2) + tensor_x = torch.flatten(tensor_x, 1) + tensor_x = self.fc1(tensor_x) + tensor_x = F.sigmoid(tensor_x) + tensor_x = self.fc2(tensor_x) + output = F.log_softmax(tensor_x, dim=1) + return output + + state_dict = MNISTModel().state_dict() + if use_fake_mode: + with torch.onnx.enable_fake_mode() as ctx: + model = MNISTModel() + tensor_x = torch.rand((64, 1, 28, 28), dtype=torch.float32) + if use_exported_program: + model = torch.export.export(model, args=(tensor_x,)) + export_options = torch.onnx.ExportOptions(fake_context=ctx) + else: + model = MNISTModel() + tensor_x = torch.rand((64, 1, 28, 28), dtype=torch.float32) + if use_exported_program: + model = torch.export.export(model, args=(tensor_x,)) + export_options = torch.onnx.ExportOptions() + + onnx_program = torch.onnx.dynamo_export( + model, tensor_x, export_options=export_options + ) + with tempfile.NamedTemporaryFile(suffix=".onnx") as tmp_onnx_file: + onnx_program.save( + tmp_onnx_file.name, + include_initializers=include_initializer, + model_state=state_dict if include_initializer else None, + ) + onnx_model = onnx.load(tmp_onnx_file.name) + self.assertEqual( + (include_initializer and len(onnx_model.graph.initializer) > 0) + or (not include_initializer and len(onnx_model.graph.initializer) == 0), + True, + ) + def test_export_with_print(self): class PrintModule(torch.nn.Module): def forward(self, x): diff --git a/torch/onnx/_internal/exporter.py b/torch/onnx/_internal/exporter.py index de14e42d6b..7831a362ae 100644 --- a/torch/onnx/_internal/exporter.py +++ b/torch/onnx/_internal/exporter.py @@ -1010,6 +1010,7 @@ class ONNXProgram: self, destination: Union[str, io.BufferedIOBase], *, + include_initializers: bool = True, model_state: Optional[Union[Dict[str, Any], str]] = None, serializer: Optional[ONNXProgramSerializer] = None, ) -> None: @@ -1021,12 +1022,18 @@ class ONNXProgram: If `destination` is a string, besides saving the ONNX model into a file, model weights are also stored in separate files in the same 
directory as the ONNX model. E.g. for `destination="/path/model.onnx"`, the initializers are saved in "/path/" folder along with "onnx.model". + include_initializers: Whether to include initializers in the ONNX graph as external data. + Cannot be combined with `model_state_dict`. model_state: The state_dict of the PyTorch model containing all weights on it. It can be either a string with the path to a checkpoint or a dictionary with the actual model state. The supported file formats are the same as those supported by `torch.load` and `safetensors.safe_open`. Required when :func:`enable_fake_mode` is used but real initializers are needed on the ONNX graph. serializer: The serializer to use. If not specified, the model will be serialized as Protobuf. """ + + assert ( + include_initializers is True or model_state is None + ), "Cannot specify both `include_initializers=False` and `model_state`." if serializer is None: if isinstance(destination, str): serializer = LargeProtobufONNXProgramSerializer(destination) @@ -1035,21 +1042,27 @@ class ONNXProgram: # Add initializers when symbolic tracing is enabled _model_state_files: List[Union[str, io.BytesIO, Dict[str, Any]]] = [] - if model_state is not None: - assert isinstance( - model_state, (dict, str) - ), "model_state must be a path to the model's state_dict or the actual state_dict" - # NOTE: For dict, there can be performance penalty or high memory usage that might lead to OOM - # if the dict wasn't loaded with torch.load(..., mmap=True, map_location="cpu") - _model_state_files.append(model_state) - elif self._fake_context and self._fake_context.state_dict_paths: - # Load state from previous model.load_state_dict() call within enable_fake_mode() context - for path in self._fake_context.state_dict_paths: - if path in _model_state_files: - # ignore duplicate - continue - if os.path.exists(path): # type: ignore[arg-type] - _model_state_files.append(path) + if include_initializers: + if model_state is not None: + assert isinstance( + model_state, (dict, str) + ), "model_state must be a path to the model's state_dict or the actual state_dict" + # NOTE: For dict, there can be performance penalty or high memory usage that might lead to OOM + # if the dict wasn't loaded with torch.load(..., mmap=True, map_location="cpu") + _model_state_files.append(model_state) + elif self._fake_context and self._fake_context.state_dict_paths: + # Load state from previous model.load_state_dict() call within enable_fake_mode() context + for path in self._fake_context.state_dict_paths: + if path in _model_state_files: + # ignore duplicate + continue + if os.path.exists(path): # type: ignore[arg-type] + _model_state_files.append(path) + else: + # self.model_proto.graph.initializer.clear() not available in older protobuf versions + initializer_count = len(self.model_proto.graph.initializer) + for _ in range(initializer_count): + del self.model_proto.graph.initializer[0] if _model_state_files: if not isinstance(destination, str):
2.41.0
ee514e6283768f3e1d034281e0f74cf30165d93
Mon, 22 Apr 2024 16:45:01 +0000
[PATCH 0460/1000] [CI] Upgrade xpu driver to LTS_803.29 (#123920)
Upgrade the XPU driver from 647.21 to LTS 803.29. Works for #114850. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123920 Approved by: https://github.com/chuanqi129, https://github.com/EikanWang, https://github.com/huydhn
diff --git a/.ci/docker/common/install_xpu.sh b/.ci/docker/common/install_xpu.sh index 813a7c4e27..d98ad2049b 100644 --- a/.ci/docker/common/install_xpu.sh +++ b/.ci/docker/common/install_xpu.sh @@ -3,7 +3,7 @@ set -xe # Intel® software for general purpose GPU capabilities. -# Refer to https://dgpu-docs.intel.com/releases/stable_647_21_20230714.html +# Refer to https://dgpu-docs.intel.com/releases/LTS_803.29_20240131.html # Intel® oneAPI Base Toolkit (version 2024.0.0) has been updated to include functional and security updates. # Refer to https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html @@ -21,7 +21,7 @@ function install_ubuntu() { | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null # Add the signed entry to APT sources and configure the APT client to use the Intel repository - echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/production/2328 unified" \ + echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" \ | tee /etc/apt/sources.list.d/intel-gpu-jammy.list echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" \ | tee /etc/apt/sources.list.d/oneAPI.list
2.41.0
e2e56b5abacc979338dc27b1fc16e6f21f22c35
Mon, 22 Apr 2024 16:55:18 +0000
[PATCH 0461/1000] s390x: update using vectorization builtins (#124396)
With gcc >= 12 on s390x, store builtins are accidentally optimized out due to bad type aliasing. Ensure that the proper corresponding types are used, and if the types mismatch, first store the data into an array of the correct type and then memcpy it to the destination pointer. See also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=114676 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124396 Approved by: https://github.com/malfet
diff --git a/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h b/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h index 93594bc7da..cec4cd3281 100644 --- a/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h +++ b/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h @@ -393,40 +393,84 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> { C10_ALWAYS_INLINE Vectorized(T s) : _vec0{vec_splats((ElementType)s)}, _vec1{vec_splats((ElementType)s)} {} - static Vectorized<value_type> C10_ALWAYS_INLINE - loadu(const void* ptr, int count = size()) { - if (count == size()) { + template <typename U, typename DUMMY = void> + struct LoaduHelper { + static Vectorized<T> C10_ALWAYS_INLINE + loadu(const U* ptr, int count = size()) { + __at_align__ ElementType tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(ElementType)); + return { - vec_xl(offset0, reinterpret_cast<const ElementType*>(ptr)), - vec_xl(offset16, reinterpret_cast<const ElementType*>(ptr))}; + vec_xl(offset0, &(tmp_values[0])), + vec_xl(offset16, &(tmp_values[0]))}; } + }; + + template <typename DUMMY> + struct LoaduHelper<ElementType, DUMMY> { + static Vectorized<T> C10_ALWAYS_INLINE + loadu(const ElementType* ptr, int count = size()) { + if (count == size()) { + return { + vec_xl(offset0, ptr), + vec_xl(offset16, ptr)}; + } - __at_align__ ElementType tmp_values[size()] = {}; - std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(ElementType)); + __at_align__ ElementType tmp_values[size()] = {}; + std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(ElementType)); - return { - vec_xl(offset0, reinterpret_cast<const ElementType*>(tmp_values)), - vec_xl(offset16, reinterpret_cast<const ElementType*>(tmp_values))}; + return { + vec_xl(offset0, &(tmp_values[0])), + vec_xl(offset16, &(tmp_values[0]))}; + } + }; + + template <typename U> + static Vectorized<T> C10_ALWAYS_INLINE + loadu(const U* ptr, int count = size()) { + return LoaduHelper<U>::loadu(ptr, count); } - static Vectorized<value_type> C10_ALWAYS_INLINE - loadu_one_fourth(const void* ptr) { + template <typename U> + static Vectorized<T> C10_ALWAYS_INLINE + loadu_one_fourth(const U* ptr) { // load only first 8 bytes // only intended to be used with uint8_t return loadu(ptr, 8 / sizeof(ElementType)); } - void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { - if (count == size()) { - vec_xst(_vec0, offset0, reinterpret_cast<ElementType*>(ptr)); - vec_xst(_vec1, offset16, reinterpret_cast<ElementType*>(ptr)); - } else if (count > 0) { - __at_align__ ElementType tmp_values[size()]; - vec_xst(_vec0, offset0, reinterpret_cast<ElementType*>(tmp_values)); - vec_xst(_vec1, offset16, reinterpret_cast<ElementType*>(tmp_values)); - std::memcpy( - ptr, tmp_values, std::min(count, size()) * sizeof(ElementType)); + template <typename U, typename DUMMY = void> + struct StoreHelper { + static void C10_ALWAYS_INLINE store(const Vectorized<T> &vec, U* ptr, int count = size()) { + if (count > 0) { + __at_align__ ElementType tmp_values[size()]; + vec_xst(vec._vec0, offset0, &(tmp_values[0])); + vec_xst(vec._vec1, offset16, &(tmp_values[0])); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(ElementType)); + } } + }; + + template <typename DUMMY> + struct StoreHelper<ElementType, DUMMY> { + static void C10_ALWAYS_INLINE store(const Vectorized<T> &vec, ElementType* ptr, int count = size()) { + if (count == size()) { + vec_xst(vec._vec0, offset0, ptr); + vec_xst(vec._vec1, offset16, ptr); + } else if (count > 
0) { + __at_align__ ElementType tmp_values[size()]; + vec_xst(vec._vec0, offset0, &(tmp_values[0])); + vec_xst(vec._vec1, offset16, &(tmp_values[0])); + std::memcpy( + ptr, tmp_values, std::min(count, size()) * sizeof(ElementType)); + } + } + }; + + template <typename U> + void C10_ALWAYS_INLINE store(U* ptr, int count = size()) const { + return StoreHelper<U>::store(*this, ptr, count); } C10_ALWAYS_INLINE const vtype& vec0() const { @@ -1710,12 +1754,14 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> { return _vec; } + template <typename U> static Vectorized<T> C10_ALWAYS_INLINE - loadu(const void* ptr, int count = size()) { + loadu(const U* ptr, int count = size()) { return Vectorized<T>{vinner_type::loadu(ptr, count)}; } - void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + template <typename U> + void C10_ALWAYS_INLINE store(U* ptr, int count = size()) const { _vec.store(ptr, count); } @@ -2174,12 +2220,14 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_complex<T>()>> { return _vec.data(); } + template <typename U> static Vectorized<T> C10_ALWAYS_INLINE - loadu(const void* ptr, int count = size()) { + loadu(const U* ptr, int count = size()) { return Vectorized<T>{vinner_type::loadu(ptr, 2 * count)}; } - void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { + template <typename U> + void C10_ALWAYS_INLINE store(U* ptr, int count = size()) const { return _vec.store(ptr, 2 * count); }
2.41.0
7c35334c12f7f62e9b829f2235ac845da5c3782
Mon, 22 Apr 2024 16:57:07 +0000
[PATCH 0462/1000] Fix build on s390x (#123250)
Rename s390x-specific zvector conversion functions that share a name with generic ones. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123250 Approved by: https://github.com/malfet
diff --git a/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h b/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h index cec4cd3281..9b53745b03 100644 --- a/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h +++ b/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h @@ -1505,19 +1505,19 @@ inline ZSimdVect<int> vec_flt_int(const ZSimdVect<float> x) { #define vec_flt_int vec_signed #endif -Vectorized<float> convert_to_float(const Vectorized<int32_t>& x) { +Vectorized<float> zvec_convert_to_float(const Vectorized<int32_t>& x) { return {vec_int_flt(x.vec0()), vec_int_flt(x.vec1())}; } -Vectorized<int32_t> convert_to_int(const Vectorized<float>& x) { +Vectorized<int32_t> zvec_convert_to_int(const Vectorized<float>& x) { return {vec_flt_int(x.vec0()), vec_flt_int(x.vec1())}; } -Vectorized<double> convert_to_float(const Vectorized<int64_t>& x) { +Vectorized<double> zvec_convert_to_float(const Vectorized<int64_t>& x) { return {vec_double(x.vec0()), vec_double(x.vec1())}; } -Vectorized<int64_t> convert_to_int(const Vectorized<double>& x) { +Vectorized<int64_t> zvec_convert_to_int(const Vectorized<double>& x) { return {vec_signed(x.vec0()), vec_signed(x.vec1())}; } @@ -1575,13 +1575,13 @@ Vectorized<int64_t> C10_ALWAYS_INLINE fmadd( template <> Vectorized<int64_t> C10_ALWAYS_INLINE convert_to_int_of_same_size<double>(const Vectorized<double>& src) { - return convert_to_int(src); + return zvec_convert_to_int(src); } template <> Vectorized<int32_t> C10_ALWAYS_INLINE convert_to_int_of_same_size<float>(const Vectorized<float>& src) { - return convert_to_int(src); + return zvec_convert_to_int(src); } template <> @@ -1593,7 +1593,7 @@ inline void convert(const int32_t* src, float* dst, int64_t n) { const int32_t* src_a = src + i; float* dst_a = dst + i; auto input_vec = Vectorized<int32_t>::loadu(src_a); - auto output_vec = convert_to_float(input_vec); + auto output_vec = zvec_convert_to_float(input_vec); output_vec.store(dst_a); } @@ -1610,7 +1610,7 @@ inline void convert(const int64_t* src, double* dst, int64_t n) { const int64_t* src_a = src + i; double* dst_a = dst + i; auto input_vec = Vectorized<int64_t>::loadu(src_a); - auto output_vec = convert_to_float(input_vec); + auto output_vec = zvec_convert_to_float(input_vec); output_vec.store(dst_a); } for (; i < n; i++) { @@ -1789,7 +1789,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> { Vectorized<float> scale, Vectorized<float> zero_point, Vectorized<float> scale_zp_premul) const { - auto float_val = convert_to_float(_vec); + auto float_val = zvec_convert_to_float(_vec); return {fmadd(scale, float_val, scale_zp_premul)}; } @@ -1799,7 +1799,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> { float_vec_return_type dequantize( Vectorized<float> scale, Vectorized<float> zero_point) const { - auto float_val = convert_to_float(_vec); + auto float_val = zvec_convert_to_float(_vec); return {(float_val - zero_point) * scale}; } @@ -1814,7 +1814,7 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> { Vectorized<float> vecf = rhs[0]; vecf = vecf * Vectorized<float>(inverse_scale); vecf = vecf.rint() + Vectorized<float>((float)(zero_point)); - auto veci = convert_to_int(vecf); + auto veci = zvec_convert_to_int(vecf); return Vectorized<T>{veci}; } @@ -1827,10 +1827,10 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> { float multiplier, int32_t zero_point) { Vectorized<T> vi = inp[0]; - auto vecf = convert_to_float(vi.vec()); + auto vecf = 
zvec_convert_to_float(vi.vec()); vecf = vecf * Vectorized<float>(multiplier); vecf = vecf.rint(); - auto veci = convert_to_int(vecf) + Vectorized<int>(zero_point); + auto veci = zvec_convert_to_int(vecf) + Vectorized<int>(zero_point); return Vectorized<T>{veci}; } @@ -1865,11 +1865,11 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> { auto ret32_0 = unpack(ret16.first); auto ret32_1 = unpack(ret16.second); - auto vecf_0 = convert_to_float(ret32_0.first); - auto vecf_1 = convert_to_float(ret32_0.second); + auto vecf_0 = zvec_convert_to_float(ret32_0.first); + auto vecf_1 = zvec_convert_to_float(ret32_0.second); - auto vecf_2 = convert_to_float(ret32_1.first); - auto vecf_3 = convert_to_float(ret32_1.second); + auto vecf_2 = zvec_convert_to_float(ret32_1.first); + auto vecf_3 = zvec_convert_to_float(ret32_1.second); return { fmadd(scale, vecf_0, scale_zp_premul), fmadd(scale, vecf_1, scale_zp_premul), @@ -1888,11 +1888,11 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> { auto ret32_0 = unpack(ret16.first); auto ret32_1 = unpack(ret16.second); - auto vecf_0 = convert_to_float(ret32_0.first); - auto vecf_1 = convert_to_float(ret32_0.second); + auto vecf_0 = zvec_convert_to_float(ret32_0.first); + auto vecf_1 = zvec_convert_to_float(ret32_0.second); - auto vecf_2 = convert_to_float(ret32_1.first); - auto vecf_3 = convert_to_float(ret32_1.second); + auto vecf_2 = zvec_convert_to_float(ret32_1.first); + auto vecf_3 = zvec_convert_to_float(ret32_1.second); return { (vecf_0 - zero_point) * scale, @@ -1927,10 +1927,10 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> { vecf4 = vecf4.rint() + vec_zero_point; vecf6 = vecf6.rint() + vec_zero_point; - auto veci0 = convert_to_int(vecf0); - auto veci2 = convert_to_int(vecf2); - auto veci4 = convert_to_int(vecf4); - auto veci6 = convert_to_int(vecf6); + auto veci0 = zvec_convert_to_int(vecf0); + auto veci2 = zvec_convert_to_int(vecf2); + auto veci4 = zvec_convert_to_int(vecf4); + auto veci6 = zvec_convert_to_int(vecf6); auto vecshi0 = pack(veci0, veci2); auto vecshi2 = pack(veci4, veci6); @@ -1954,11 +1954,11 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> { Vectorized<c10::qint32> vi2 = inp[2]; Vectorized<c10::qint32> vi3 = inp[3]; - auto vecf0 = convert_to_float(vi0.vec()); - auto vecf2 = convert_to_float(vi1.vec()); + auto vecf0 = zvec_convert_to_float(vi0.vec()); + auto vecf2 = zvec_convert_to_float(vi1.vec()); - auto vecf4 = convert_to_float(vi2.vec()); - auto vecf6 = convert_to_float(vi3.vec()); + auto vecf4 = zvec_convert_to_float(vi2.vec()); + auto vecf6 = zvec_convert_to_float(vi3.vec()); vecf0 = vecf0 * vec_multiplier; vecf2 = vecf2 * vec_multiplier; @@ -1971,10 +1971,10 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> { vecf4 = vecf4.rint(); vecf6 = vecf6.rint(); - auto veci0 = convert_to_int(vecf0); - auto veci2 = convert_to_int(vecf2); - auto veci4 = convert_to_int(vecf4); - auto veci6 = convert_to_int(vecf6); + auto veci0 = zvec_convert_to_int(vecf0); + auto veci2 = zvec_convert_to_int(vecf2); + auto veci4 = zvec_convert_to_int(vecf4); + auto veci6 = zvec_convert_to_int(vecf6); veci0 = veci0 + vec_zero_point; veci2 = veci2 + vec_zero_point; @@ -2845,7 +2845,7 @@ inline convert_int8_to_float(const Vectorized<T> &src) { // Only handle first 64 bits auto vec_int = src.to_vec_float_helper(); - return convert_to_float(vec_int); + return zvec_convert_to_float(vec_int); } template <typename T> @@ -2854,7 +2854,7 @@ 
inline convert_float_to_int8(const Vectorized<float> &src) { constexpr auto min_val = std::numeric_limits<T>::min(); constexpr auto max_val = std::numeric_limits<T>::max(); - auto vec_int = clamp(convert_to_int(src), Vectorized<int32_t>(min_val), Vectorized<int32_t>(max_val)); + auto vec_int = clamp(zvec_convert_to_int(src), Vectorized<int32_t>(min_val), Vectorized<int32_t>(max_val)); return vec_int.to_vec_uint8_helper(); }
2.41.0
2f8bfae9cf8d3e3729ad8137d72a2c088c6a39c
Fri, 19 Apr 2024 19:48:22 +0000
[PATCH 0463/1000] Make torch._inductor.dependencies.Dep a proper class (#124407)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124407 Approved by: https://github.com/peterbell10
diff --git a/torch/_inductor/dependencies.py b/torch/_inductor/dependencies.py index 2d89636346..46c9a21cf1 100644 --- a/torch/_inductor/dependencies.py +++ b/torch/_inductor/dependencies.py @@ -1,3 +1,4 @@ +import abc import collections import dataclasses import itertools @@ -25,12 +26,37 @@ from .virtualized import OpsHandler, ReductionType, V log = logging.getLogger(__name__) is_indirect = re.compile(r"indirect|tmp").search -Dep = Union["MemoryDep", "StarDep", "WeakDep"] -class MemoryDep(typing.NamedTuple): +class Dep(abc.ABC): name: str - index: sympy.Expr # type: ignore[assignment] + index: sympy.Expr + + @abc.abstractmethod + def rename(self, renames: Dict[str, str]) -> "Dep": + pass + + @abc.abstractmethod + def get_numel(self) -> sympy.Expr: + pass + + @abc.abstractmethod + def numbytes_hint(self): + pass + + @abc.abstractmethod + def has_unbacked_symbols(self) -> bool: + pass + + @abc.abstractmethod + def is_contiguous(self) -> bool: + pass + + +@dataclasses.dataclass(frozen=True) +class MemoryDep(Dep): + name: str + index: sympy.Expr var_names: Tuple[sympy.Symbol, ...] size: Tuple[sympy.Expr, ...] @@ -109,10 +135,11 @@ class MemoryDep(typing.NamedTuple): return any(is_indirect(v.name) for v in self.index.free_symbols) # type: ignore[attr-defined] -class StarDep(typing.NamedTuple): - # depends on the entire buffer +@dataclasses.dataclass(frozen=True) +class StarDep(Dep): name: str + # depends on the entire buffer @property def index(self): raise NotImplementedError("StarDep does not have an index") @@ -149,7 +176,8 @@ class StarDep(typing.NamedTuple): # # It is weak because if it turns out A's read is never used, we can still # eliminate it -class WeakDep(typing.NamedTuple): +@dataclasses.dataclass(frozen=True) +class WeakDep(Dep): name: str @property @@ -174,7 +202,8 @@ class WeakDep(typing.NamedTuple): return False -class IndexExprDep(typing.NamedTuple): +@dataclasses.dataclass(frozen=True) +class IndexExprDep: index: sympy.Expr # type: ignore[assignment] var_names: Tuple[sympy.Symbol, ...] size: Tuple[sympy.Expr, ...]
2.41.0
77e7b7c54772b9a92bd3ee48c5162e448c0c320
Fri, 19 Apr 2024 13:37:58 -0700
[PATCH 0464/1000] Make some kernel static asserts clearer (#124519)
Users get int/int64_t and double/float confused a lot. Test Plan: tested locally. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124519 Approved by: https://github.com/Skylion007
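A hedged illustration of the mistake these messages now explain (my own example, not part of the patch; the op name and namespace are made up): custom-op kernels must spell scalars as double/int64_t in the C++ signature even though the schema string uses float/int.

#include <torch/library.h>

// Schema spells the scalar as `float`, so the C++ signature must take `double`.
at::Tensor my_scale(const at::Tensor& self, double factor) {
  return self * factor;
}

// A signature such as `at::Tensor my_scale(const at::Tensor&, float)` would now
// fail to compile with the clarified message: use `double` in the C++ function
// signature and `float` in the schema string. The int vs int64_t case is analogous.

TORCH_LIBRARY(myops, m) {
  m.def("my_scale(Tensor self, float factor) -> Tensor");
  m.impl("my_scale", my_scale);
}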
diff --git a/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h b/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h index 5308499edd..ccd94ff1de 100644 --- a/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h +++ b/aten/src/ATen/core/boxing/impl/make_boxed_from_unboxed_functor.h @@ -183,7 +183,7 @@ namespace impl { struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> { // There is no reason to support float when we have double. Keep the API lean. static_assert(guts::false_t<T>::value, - "You tried to register a kernel with an unsupported input type: float. Please use double instead."); + "You tried to register a kernel with an unsupported input type: float. Please use double instead; you should use `double` in the C++ function signature and `float` in the schema string."); }; template<class T, bool AllowDeprecatedTypes> struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> { @@ -198,7 +198,7 @@ namespace impl { template<class T, bool AllowDeprecatedTypes> struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> { static_assert(guts::false_t<T>::value, - "You tried to register a kernel with an unsupported integral input type. Please use int64_t instead."); + "You tried to register a kernel with an unsupported integral input type. Please use int64_t instead; you should use `int64_t` in the C++ function signature and `int` in the schema string."); }; template<class T, bool AllowDeprecatedTypes> struct assert_is_valid_input_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const c10::SymInt&, T>::value>> { @@ -283,7 +283,7 @@ namespace impl { struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<float, T>::value>> { // There is no reason to support float when we have double. Keep the API lean. static_assert(guts::false_t<T>::value, - "You tried to register a kernel with an unsupported output type: float. Please use double instead."); + "You tried to register a kernel with an unsupported output type: float. Please use double instead; you should use `double` in the C++ function signature and `float` in the schema string."); }; template<class T, bool AllowDeprecatedTypes> struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_same<const char*, T>::value>> { @@ -298,7 +298,7 @@ namespace impl { template<class T, bool AllowDeprecatedTypes> struct assert_is_valid_output_type<T, AllowDeprecatedTypes, std::enable_if_t<std::is_integral<T>::value && !guts::typelist::contains<supported_primitive_arg_types, T>::value>> { static_assert(guts::false_t<T>::value, - "You tried to register a kernel with an unsupported integral output type. Please use int64_t instead."); + "You tried to register a kernel with an unsupported integral output type. Please use int64_t instead; you should use `int64_t` in the C++ function signature and `int` in the schema string."); }; // ivalue_to_arg
2.41.0
f44ef93ab74dca80f79280b40d3b60d053c6014
Mon, 22 Apr 2024 18:28:05 +0000
[PATCH 0467/1000] Revert "[inductor] Refactor runtime files into torch._inductor.runtime (part 5) (#124560)"
This reverts commit 3ac30bc32ad300d70391ec552e5738d6ed66f9a5. Reverted https://github.com/pytorch/pytorch/pull/124560 on behalf of https://github.com/jeanschmidt due to There are internal breakages, already discussed with author and he'll FF ([comment](https://github.com/pytorch/pytorch/pull/124552#issuecomment-2070548223))
diff --git a/torch/_inductor/runtime/coordinate_descent_tuner.py b/torch/_inductor/runtime/coordinate_descent_tuner.py index f280765aec..83f4973a1f 100644 --- a/torch/_inductor/runtime/coordinate_descent_tuner.py +++ b/torch/_inductor/runtime/coordinate_descent_tuner.py @@ -3,7 +3,7 @@ import itertools import logging from typing import Callable, Optional -from .runtime_utils import red_text, triton_config_to_hashable +from torch._inductor.runtime.runtime_utils import red_text, triton_config_to_hashable try: import triton diff --git a/torch/_inductor/runtime/runtime_utils.py b/torch/_inductor/runtime/runtime_utils.py index c0fdf65ec9..948ad0e5cf 100644 --- a/torch/_inductor/runtime/runtime_utils.py +++ b/torch/_inductor/runtime/runtime_utils.py @@ -140,24 +140,3 @@ def red_text(msg): def blue_text(msg): return _color_text(msg, "blue") - - -def get_first_attr(obj, *attrs): - """ - Return the first available attribute or throw an exception if none is present. - """ - for attr in attrs: - if hasattr(obj, attr): - return getattr(obj, attr) - - raise AssertionError(f"{obj} does not has any of the attributes: {attrs}") - - -try: - dynamo_timed = torch._dynamo.utils.dynamo_timed -except AttributeError: # Compile workers only have a mock version of torch - - def dynamo_timed(original_function=None, phase_name=None): - if original_function: - return original_function - return dynamo_timed diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index f65cd6eaa5..1b0467553b 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -16,7 +16,9 @@ from typing import Any, Callable, Dict, List, Optional, Set, Tuple import torch +import torch.autograd.profiler as autograd_profiler from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device +from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config from .coordinate_descent_tuner import CoordescTuner @@ -33,8 +35,6 @@ from .runtime_utils import ( conditional_product, create_bandwidth_info_str, do_bench, - dynamo_timed, - get_first_attr, get_max_y_grid, get_num_bytes, next_power_of_2, @@ -61,13 +61,6 @@ else: OutOfResources = object ASTSource = None -try: - autograd_profiler = torch.autograd.profiler -except AttributeError: # Compile workers only have a mock version of torch - - class autograd_profiler: # type: ignore[no-redef] - _is_profiler_enabled = False - log = logging.getLogger(__name__)
2.41.0
3d6c2fe9b473c3bb57bb3ac2b9b60488104c397
Mon, 22 Apr 2024 18:28:05 +0000
[PATCH 0468/1000] Revert "[inductor] Refactor runtime files into torch._inductor.runtime (part 4) (#124559)"
This reverts commit 9ea2a0951005c4bcb2491556a8548319c6cccfdb. Reverted https://github.com/pytorch/pytorch/pull/124559 on behalf of https://github.com/jeanschmidt due to There are internal breakages, already discussed with author and he'll FF ([comment](https://github.com/pytorch/pytorch/pull/124552#issuecomment-2070548223))
diff --git a/test/inductor/test_coordinate_descent_tuner.py b/test/inductor/test_coordinate_descent_tuner.py index 8f57cab4d3..5b9f35fa9c 100644 --- a/test/inductor/test_coordinate_descent_tuner.py +++ b/test/inductor/test_coordinate_descent_tuner.py @@ -18,7 +18,7 @@ except ImportError: raise unittest.SkipTest("requires triton") # noqa: TRY200 from torch._inductor import config -from torch._inductor.runtime.coordinate_descent_tuner import CoordescTuner +from torch._inductor.coordinate_descent_tuner import CoordescTuner config.benchmark_kernel = True config.coordinate_descent_tuning = True diff --git a/torch/_inductor/runtime/coordinate_descent_tuner.py b/torch/_inductor/coordinate_descent_tuner.py similarity index 98% rename from torch/_inductor/runtime/coordinate_descent_tuner.py rename to torch/_inductor/coordinate_descent_tuner.py index 83f4973a1f..2511800bc1 100644 --- a/torch/_inductor/runtime/coordinate_descent_tuner.py +++ b/torch/_inductor/coordinate_descent_tuner.py @@ -3,14 +3,15 @@ import itertools import logging from typing import Callable, Optional -from torch._inductor.runtime.runtime_utils import red_text, triton_config_to_hashable +from torch.utils._triton import has_triton +from .runtime.runtime_utils import red_text, triton_config_to_hashable -try: +if has_triton(): import triton -except ImportError: +else: triton = None -from torch._inductor import config as inductor_config +from . import config as inductor_config log = logging.getLogger(__name__) diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index 1b0467553b..89be8f36d4 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -21,7 +21,7 @@ from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config -from .coordinate_descent_tuner import CoordescTuner +from torch._inductor.coordinate_descent_tuner import CoordescTuner from .hints import ( _NUM_THREADS_PER_WARP, AutotuneHint, @@ -29,6 +29,7 @@ from .hints import ( ReductionHint, TileHint, ) + from .runtime_utils import ( cache_dir, ceildiv,
2.41.0
b90af0bf51f35f17622cd332347bf56b831f2e8
Mon, 22 Apr 2024 18:28:05 +0000
[PATCH 0469/1000] Revert "[inductor] Refactor runtime files into torch._inductor.runtime (part 3) (#124557)"
This reverts commit fcf28b0ad59b1912d5783688b0f25f18b46efeb3. Reverted https://github.com/pytorch/pytorch/pull/124557 on behalf of https://github.com/jeanschmidt due to There are internal breakages, already discussed with author and he'll FF ([comment](https://github.com/pytorch/pytorch/pull/124552#issuecomment-2070548223))
diff --git a/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py b/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py index 3738f041fe..3eebb8ca66 100644 --- a/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py +++ b/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py @@ -1,6 +1,6 @@ import torch from torch._inductor import ir -from torch._inductor.runtime.runtime_utils import do_bench +from torch._inductor.utils import do_bench def to_channels_last(x): diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py index d3b0c42d7c..0103c43c65 100644 --- a/test/inductor/test_aot_inductor.py +++ b/test/inductor/test_aot_inductor.py @@ -14,8 +14,8 @@ from torch._dynamo.testing import rand_strided, same from torch._dynamo.utils import counters from torch._inductor import config from torch._inductor.exc import CppWrapperCodeGenError -from torch._inductor.runtime.runtime_utils import cache_dir from torch._inductor.test_case import TestCase +from torch._inductor.utils import cache_dir from torch.export import Dim, export from torch.testing import FileCheck diff --git a/test/inductor/test_codecache.py b/test/inductor/test_codecache.py index 96ed0d7022..55a2233f15 100644 --- a/test/inductor/test_codecache.py +++ b/test/inductor/test_codecache.py @@ -19,9 +19,8 @@ from torch._inductor.codecache import ( TensorMetadata, TensorMetadataAndValues, ) -from torch._inductor.runtime.runtime_utils import cache_dir from torch._inductor.test_case import run_tests, TestCase -from torch._inductor.utils import fresh_inductor_cache +from torch._inductor.utils import cache_dir, fresh_inductor_cache from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( diff --git a/test/inductor/test_inductor_utils.py b/test/inductor/test_inductor_utils.py index f86dd9219e..0c11ac0511 100644 --- a/test/inductor/test_inductor_utils.py +++ b/test/inductor/test_inductor_utils.py @@ -4,11 +4,11 @@ import functools import logging import torch -from torch._inductor.runtime.runtime_utils import do_bench from torch._inductor.test_case import run_tests, TestCase -from torch._inductor.utils import do_bench_using_profiling +from torch._inductor.utils import do_bench, do_bench_using_profiling + log = logging.getLogger(__name__) diff --git a/test/inductor/test_padding.py b/test/inductor/test_padding.py index d9bf81e663..2270c33291 100644 --- a/test/inductor/test_padding.py +++ b/test/inductor/test_padding.py @@ -12,8 +12,7 @@ from torch._dynamo.testing import rand_strided, reduce_to_scalar_loss from torch._dynamo.utils import maybe_cprofile from torch._inductor import config, ir, metrics from torch._inductor.fx_passes import pad_mm as pad_mm_pass -from torch._inductor.runtime.runtime_utils import do_bench -from torch._inductor.utils import run_and_get_code +from torch._inductor.utils import do_bench, run_and_get_code from torch.testing._internal.inductor_utils import HAS_CUDA DO_PERF_TEST = os.environ.get("DO_PERF_TEST") == "1" diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py index f45f33ffde..aef3d18f35 100644 --- a/torch/_inductor/autotune_process.py +++ b/torch/_inductor/autotune_process.py @@ -35,7 +35,7 @@ if TYPE_CHECKING: from torch._inductor.select_algorithm import TritonTemplateCaller from . 
import config -from .runtime.runtime_utils import do_bench +from .utils import do_bench from .virtualized import V CUDA_VISIBLE_DEVICES = "CUDA_VISIBLE_DEVICES" diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index a5474b0616..3111901a1c 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -59,8 +59,7 @@ from torch._dynamo.device_interface import ( from torch._dynamo.utils import counters, dynamo_timed from torch._inductor import config, exc, metrics from torch._inductor.codegen.cuda import cuda_env -from torch._inductor.runtime.runtime_utils import cache_dir -from torch._inductor.utils import clear_on_fresh_inductor_cache, is_linux +from torch._inductor.utils import cache_dir, clear_on_fresh_inductor_cache, is_linux from torch._subclasses.fake_tensor import ( extract_tensor_metadata, FakeTensor, diff --git a/torch/_inductor/codegen/cuda/cutlass_utils.py b/torch/_inductor/codegen/cuda/cutlass_utils.py index 40daf6da1c..134ebb93fe 100644 --- a/torch/_inductor/codegen/cuda/cutlass_utils.py +++ b/torch/_inductor/codegen/cuda/cutlass_utils.py @@ -8,10 +8,10 @@ from typing import Any, List, Optional import sympy import torch + +from ...codecache import cache_dir from ...config import cuda as inductor_cuda_config from ...ir import Layout - -from ...runtime.runtime_utils import cache_dir from .cuda_env import get_cuda_arch, get_cuda_version log = logging.getLogger(__name__) diff --git a/torch/_inductor/codegen/multi_kernel.py b/torch/_inductor/codegen/multi_kernel.py index e4fc396c64..e03ca8eca9 100644 --- a/torch/_inductor/codegen/multi_kernel.py +++ b/torch/_inductor/codegen/multi_kernel.py @@ -6,8 +6,7 @@ from torch._inductor.metrics import get_metric_table, is_metric_table_enabled from .. import config from ..codecache import PyCodeCache, TritonFuture -from ..runtime.runtime_utils import do_bench -from ..utils import cache_on_self +from ..utils import cache_on_self, do_bench from ..virtualized import V from .common import TensorArg diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index c03e6c6954..67d69efe7a 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -47,26 +47,24 @@ from ..dependencies import Dep, MemoryDep, StarDep, WeakDep from ..ir import IRNode, TritonTemplateBuffer from ..optimize_indexing import indexing_dtype_strength_reduction from ..runtime.hints import ReductionHint -from ..runtime.runtime_utils import ( - do_bench, - get_max_y_grid, - green_text, - next_power_of_2, - yellow_text, -) from ..scheduler import BaseSchedulerNode, BaseScheduling, WhyNoFuse from ..utils import ( cache_on_self, + do_bench, get_dtype_size, get_fused_kernel_name, get_kernel_metadata, + get_max_y_grid, + green_text, is_welford_reduction, + next_power_of_2, Placeholder, sympy_dot, sympy_index_symbol, sympy_product, sympy_subs, unique, + yellow_text, ) from ..virtualized import _ops as ops, OpsHandler, ReductionType, StoreMode, V from ..wrapper_benchmark import get_kernel_category_by_source_code diff --git a/torch/_inductor/coordinate_descent_tuner.py b/torch/_inductor/coordinate_descent_tuner.py index 2511800bc1..baf293d9f5 100644 --- a/torch/_inductor/coordinate_descent_tuner.py +++ b/torch/_inductor/coordinate_descent_tuner.py @@ -4,7 +4,7 @@ import logging from typing import Callable, Optional from torch.utils._triton import has_triton -from .runtime.runtime_utils import red_text, triton_config_to_hashable +from .utils import red_text, triton_config_to_hashable if 
has_triton(): import triton diff --git a/torch/_inductor/fx_passes/pad_mm.py b/torch/_inductor/fx_passes/pad_mm.py index ea4d45e389..40948dc461 100644 --- a/torch/_inductor/fx_passes/pad_mm.py +++ b/torch/_inductor/fx_passes/pad_mm.py @@ -2,7 +2,6 @@ import functools from typing import List, Optional, Union import torch -import torch._inductor.runtime.runtime_utils from torch import Tensor from torch._inductor import utils from torch._subclasses.fake_tensor import FakeTensor @@ -242,7 +241,7 @@ def should_pad_bench( return False do_bench = functools.partial( - torch._inductor.runtime.runtime_utils.do_bench, + utils.do_bench, warmup=5, ) diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py index 59ac4ee395..dbd61f790c 100644 --- a/torch/_inductor/ir.py +++ b/torch/_inductor/ir.py @@ -61,7 +61,6 @@ from .dependencies import ( ) from .ops_handler import OpCounterCSE from .runtime.hints import ReductionHint -from .runtime.runtime_utils import do_bench from .utils import ( argsort, cache_on_self, @@ -69,6 +68,7 @@ from .utils import ( convert_shape_to_inductor, convert_shape_to_symint, developer_warning, + do_bench, get_kernel_metadata, is_dynamic, is_gpu, diff --git a/torch/_inductor/kernel/mm_common.py b/torch/_inductor/kernel/mm_common.py index e04f87c523..12a280cb91 100644 --- a/torch/_inductor/kernel/mm_common.py +++ b/torch/_inductor/kernel/mm_common.py @@ -9,8 +9,7 @@ from torch._inductor.select_algorithm import realize_inputs from torch._inductor.virtualized import V from .. import config as inductor_config -from ..runtime.runtime_utils import next_power_of_2 -from ..utils import ceildiv as cdiv +from ..utils import ceildiv as cdiv, next_power_of_2 log = logging.getLogger(__name__) diff --git a/torch/_inductor/runtime/runtime_utils.py b/torch/_inductor/runtime/runtime_utils.py deleted file mode 100644 index 948ad0e5cf..0000000000 --- a/torch/_inductor/runtime/runtime_utils.py +++ /dev/null @@ -1,142 +0,0 @@ -from __future__ import annotations - -import functools -import getpass -import inspect -import operator -import os -import re -import tempfile - -import torch - - -def conditional_product(*args): - return functools.reduce(operator.mul, [x for x in args if x]) - - -def ceildiv(numer: int, denom: int) -> int: - return -(numer // -denom) - - -def next_power_of_2(n: int) -> int: - """Return the smallest power of 2 greater than or equal to n""" - n -= 1 - n |= n >> 1 - n |= n >> 2 - n |= n >> 4 - n |= n >> 8 - n |= n >> 16 - n |= n >> 32 - n += 1 - return n - - -def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int: - """ - Return the total number of bytes the arguments of tensor type takes. - - For in/out args, tensor sizes are counted twice: once for reading and - once for writing. - - The first num_in_out_args arguments are in out tensors. - """ - return sum( - arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args)) - for i, arg in enumerate(args) - if isinstance(arg, torch.Tensor) - ) - - -def triton_config_to_hashable(cfg): - """ - Convert triton config to a tuple that can uniquely identify it. We can use - the return value as a dictionary key. 
- """ - items = sorted(cfg.kwargs.items()) - items.append(("num_warps", cfg.num_warps)) - items.append(("num_stages", cfg.num_stages)) - return tuple(items) - - -def create_bandwidth_info_str(ms, num_gb, gb_per_s, prefix="", suffix="", color=True): - info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}" - slow = ms > 0.012 and gb_per_s < 650 - return red_text(info_str) if color and slow else info_str - - -def get_max_y_grid(): - return 65535 - - -def do_bench(*args, **kwargs): - @functools.lru_cache(None) - def load_triton(): - try: - # NB: Lazily load triton, as importing triton is slow - # see https://github.com/openai/triton/issues/1599 - from triton.testing import do_bench as triton_do_bench - except ImportError as exc: - raise NotImplementedError("requires Triton") from exc - - # triton PR https://github.com/openai/triton/pull/1513 change the - # quantile fields name from 'percentiles' to 'quantiles' - # and change the default value from (0.5, 0.2, 0.8) to None. - # This may break inductor since a caller expects a tuple may get a item. - # - # Add a wrapper to maintain the same behavior for inductor. - # Maybe we should have own implementation of this function? - return triton_do_bench, ( - "quantiles" - if inspect.signature(triton_do_bench).parameters.get("quantiles") - is not None - else "percentiles" - ) - - triton_do_bench, quantile_field_name = load_triton() - - if quantile_field_name not in kwargs: - kwargs[quantile_field_name] = (0.5, 0.2, 0.8) - return triton_do_bench(*args, **kwargs)[0] - - -def cache_dir() -> str: - cache_dir = os.environ.get("TORCHINDUCTOR_CACHE_DIR") - if cache_dir is None: - sanitized_username = re.sub(r'[\\/:*?"<>|]', "_", getpass.getuser()) - os.environ["TORCHINDUCTOR_CACHE_DIR"] = cache_dir = os.path.join( - tempfile.gettempdir(), - "torchinductor_" + sanitized_username, - ) - os.makedirs(cache_dir, exist_ok=True) - return cache_dir - - -HAS_COLORAMA = True -try: - import colorama -except ImportError: - HAS_COLORAMA = False - - -def _color_text(msg, color): - if not HAS_COLORAMA: - return msg - - return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET - - -def green_text(msg): - return _color_text(msg, "green") - - -def yellow_text(msg): - return _color_text(msg, "yellow") - - -def red_text(msg): - return _color_text(msg, "red") - - -def blue_text(msg): - return _color_text(msg, "blue") diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index 89be8f36d4..cb29da2f59 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -21,17 +21,9 @@ from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config +from torch._inductor.codecache import cache_dir, CudaKernelParamCache from torch._inductor.coordinate_descent_tuner import CoordescTuner -from .hints import ( - _NUM_THREADS_PER_WARP, - AutotuneHint, - HeuristicType, - ReductionHint, - TileHint, -) - -from .runtime_utils import ( - cache_dir, +from torch._inductor.utils import ( ceildiv, conditional_product, create_bandwidth_info_str, @@ -41,13 +33,20 @@ from .runtime_utils import ( next_power_of_2, triton_config_to_hashable, ) +from torch.utils._triton import has_triton_package +from .hints import ( + _NUM_THREADS_PER_WARP, + AutotuneHint, + HeuristicType, + ReductionHint, + TileHint, +) -try: - import triton -except ImportError: - triton = 
None -if triton is not None: +log = logging.getLogger(__name__) + +if has_triton_package(): + import triton from triton import Config from triton.runtime.autotuner import OutOfResources from triton.runtime.jit import KernelInterface @@ -58,14 +57,12 @@ if triton is not None: ASTSource = None else: Config = object + triton = None KernelInterface = object OutOfResources = object ASTSource = None -log = logging.getLogger(__name__) - - def autotune_hints_to_configs( hints: Set[AutotuneHint], size_hints, block_size: int ) -> List[Config]: @@ -684,8 +681,6 @@ class CachingAutotuner(KernelInterface): "meta": launcher.config.kwargs, } - from torch._inductor.codecache import CudaKernelParamCache - if torch.version.hip is None: CudaKernelParamCache.set(key, params, launcher.bin.asm["cubin"]) else: diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py index 2783e3edfb..32f734ba8b 100644 --- a/torch/_inductor/scheduler.py +++ b/torch/_inductor/scheduler.py @@ -35,7 +35,6 @@ from .codegen.common import get_scheduling_for_device, Kernel from .comm_analysis import estimate_nccl_collective_runtime from .dependencies import Dep, MemoryDep, StarDep, WeakDep from .ir import ComputedBuffer, MultiOutput, MultiOutputLayout -from .runtime.runtime_utils import green_text, red_text from .sizevars import SimplifyIndexing from .utils import ( cache_on_self, @@ -45,9 +44,11 @@ from .utils import ( get_device_tflops, get_dtype_size, get_gpu_dram_gbps, + green_text, is_collective, is_gpu, is_wait, + red_text, sympy_product, ) from .virtualized import V diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index 2485de547b..36801e3c28 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -35,8 +35,14 @@ from .codegen.triton import ( from .codegen.triton_utils import config_of, signature_to_meta from .exc import CUDACompileError from .ir import ChoiceCaller, PrimitiveInfoType -from .runtime.runtime_utils import do_bench -from .utils import get_dtype_size, Placeholder, sympy_dot, sympy_product, unique +from .utils import ( + do_bench, + get_dtype_size, + Placeholder, + sympy_dot, + sympy_product, + unique, +) from .virtualized import V log = logging.getLogger(__name__) diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index a7be602f87..daddaaf04d 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -5,6 +5,7 @@ import contextlib import dataclasses import enum import functools +import getpass import inspect import io import itertools @@ -13,6 +14,7 @@ import math import operator import os import platform +import re import shutil import sys import tempfile @@ -49,7 +51,6 @@ from torch.autograd.profiler_util import EventList from torch.fx.passes.shape_prop import ShapeProp from torch.utils._sympy.functions import CeilDiv, CleanDiv, FloorDiv, ModularIndexing from . 
import config -from .runtime.runtime_utils import ceildiv as runtime_ceildiv log = logging.getLogger(__name__) @@ -139,6 +140,37 @@ def do_bench_using_profiling(fn: Callable[[], Any], warmup=25, rep=100) -> float return res +def do_bench(*args, **kwargs): + @functools.lru_cache(None) + def load_triton(): + try: + # NB: Lazily load triton, as importing triton is slow + # see https://github.com/openai/triton/issues/1599 + from triton.testing import do_bench as triton_do_bench + except ImportError as exc: + raise NotImplementedError("requires Triton") from exc + + # triton PR https://github.com/openai/triton/pull/1513 change the + # quantile fields name from 'percentiles' to 'quantiles' + # and change the default value from (0.5, 0.2, 0.8) to None. + # This may break inductor since a caller expects a tuple may get a item. + # + # Add a wrapper to maintain the same behavior for inductor. + # Maybe we should have own implementation of this function? + return triton_do_bench, ( + "quantiles" + if inspect.signature(triton_do_bench).parameters.get("quantiles") + is not None + else "percentiles" + ) + + triton_do_bench, quantile_field_name = load_triton() + + if quantile_field_name not in kwargs: + kwargs[quantile_field_name] = (0.5, 0.2, 0.8) + return triton_do_bench(*args, **kwargs)[0] + + @functools.lru_cache(None) def has_torchvision_roi_align() -> bool: try: @@ -151,6 +183,10 @@ def has_torchvision_roi_align() -> bool: return False +def conditional_product(*args): + return functools.reduce(operator.mul, [x for x in args if x]) + + def decode_device(device: Union[Optional[torch.device], str]) -> torch.device: if device is None: return torch.tensor(0.0).device # default device @@ -186,7 +222,20 @@ def ceildiv( assert isinstance(numer, int) and isinstance( denom, int ), f"{numer}: {type(numer)}, {denom}: {type(denom)}" - return runtime_ceildiv(numer, denom) + return -(numer // -denom) + + +def next_power_of_2(n: int) -> int: + """Return the smallest power of 2 greater than or equal to n""" + n -= 1 + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n |= n >> 32 + n += 1 + return n def _type_of(key): @@ -654,6 +703,20 @@ def clear_on_fresh_inductor_cache(obj: Any): return obj +@clear_on_fresh_inductor_cache +@functools.lru_cache(None) +def cache_dir() -> str: + cache_dir = os.environ.get("TORCHINDUCTOR_CACHE_DIR") + if cache_dir is None: + sanitized_username = re.sub(r'[\\/:*?"<>|]', "_", getpass.getuser()) + cache_dir = os.path.join( + tempfile.gettempdir(), + "torchinductor_" + sanitized_username, + ) + os.makedirs(cache_dir, exist_ok=True) + return cache_dir + + @contextlib.contextmanager def fresh_inductor_cache(cache_entries=None): """ @@ -1078,6 +1141,28 @@ def developer_warning(msg): log.info(msg) +def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int: + """ + Return the total number of bytes the arguments of tensor type takes. + + For in/out args, tensor sizes are counted twice: once for reading and + once for writing. + + The first num_in_out_args arguments are in out tensors. 
+ """ + return sum( + arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args)) + for i, arg in enumerate(args) + if isinstance(arg, torch.Tensor) + ) + + +def create_bandwidth_info_str(ms, num_gb, gb_per_s, prefix="", suffix="", color=True): + info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}" + slow = ms > 0.012 and gb_per_s < 650 + return red_text(info_str) if color and slow else info_str + + def get_benchmark_name(): """ An experimental API used only when config.benchmark_kernel is true. @@ -1144,6 +1229,17 @@ def maybe_profile(should_profile, *args, **kwargs): yield +def triton_config_to_hashable(cfg): + """ + Convert triton config to a tuple that can uniquely identify it. We can use + the return value as a dictionary key. + """ + items = sorted(cfg.kwargs.items()) + items.append(("num_warps", cfg.num_warps)) + items.append(("num_stages", cfg.num_stages)) + return tuple(items) + + def parallel_num_threads(): threads = config.cpp.threads if threads < 1: @@ -1151,6 +1247,36 @@ def parallel_num_threads(): return threads +HAS_COLORAMA = True +try: + import colorama +except ImportError: + HAS_COLORAMA = False + + +def _color_text(msg, color): + if not HAS_COLORAMA: + return msg + + return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET + + +def green_text(msg): + return _color_text(msg, "green") + + +def yellow_text(msg): + return _color_text(msg, "yellow") + + +def red_text(msg): + return _color_text(msg, "red") + + +def blue_text(msg): + return _color_text(msg, "blue") + + @functools.lru_cache(None) def get_device_tflops(dtype): from triton.testing import get_max_simd_tflops, get_max_tensorcore_tflops @@ -1194,6 +1320,10 @@ def reduction_num_outputs(reduction_type): return 3 if is_welford_reduction(reduction_type) else 1 +def get_max_y_grid(): + return 65535 + + def is_linux() -> bool: return platform.system() == "Linux" diff --git a/torch/_inductor/wrapper_benchmark.py b/torch/_inductor/wrapper_benchmark.py index 31b81bba4a..81a07fcf8d 100644 --- a/torch/_inductor/wrapper_benchmark.py +++ b/torch/_inductor/wrapper_benchmark.py @@ -4,7 +4,7 @@ from collections import defaultdict import torch from torch.autograd import DeviceType -from .runtime.runtime_utils import create_bandwidth_info_str, do_bench, get_num_bytes +from .utils import create_bandwidth_info_str, do_bench, get_num_bytes _kernel_category_choices = [ "foreach",
2.41.0
6eea7c6a5c5a3f937f4e6fdd9f5e94da78de982
Mon, 22 Apr 2024 18:28:05 +0000
[PATCH 0471/1000] Revert "[inductor] Refactor runtime files into torch._inductor.runtime (part 1) (#124552)"
This reverts commit a7035cc11aa3aefe1a45a9ba6d0cb4d2a6f2e7c1. Reverted https://github.com/pytorch/pytorch/pull/124552 on behalf of https://github.com/jeanschmidt due to There are internal breakages, already discussed with author and he'll FF ([comment](https://github.com/pytorch/pytorch/pull/124552#issuecomment-2070548223))
diff --git a/test/inductor/test_cuda_repro.py b/test/inductor/test_cuda_repro.py index 952a2eacaf..c1d57d91d7 100644 --- a/test/inductor/test_cuda_repro.py +++ b/test/inductor/test_cuda_repro.py @@ -381,7 +381,7 @@ class CudaReproTests(TestCase): https://github.com/pytorch/torchdynamo/issues/1670 """ from torch._C import _cuda_getCurrentRawStream as get_cuda_stream - from torch._inductor.runtime.triton_heuristics import ( + from torch._inductor.triton_heuristics import ( CachingAutotuner, grid, HeuristicType, diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py index c63b1c6494..3000be0f88 100644 --- a/test/inductor/test_torchinductor.py +++ b/test/inductor/test_torchinductor.py @@ -9643,7 +9643,7 @@ if HAS_GPU and RUN_GPU and not TEST_WITH_ASAN: copy_tests(CommonTemplate, GPUTests, GPU_TYPE) class TritonCodeGenTests(TestCase): - from torch._inductor.runtime.triton_heuristics import CachingAutotuner + from torch._inductor.triton_heuristics import CachingAutotuner class NoOpCompilerBackend: def __init__(self): @@ -9695,7 +9695,7 @@ if HAS_GPU and RUN_GPU and not TEST_WITH_ASAN: for val in mod.__dict__.values(): if isinstance( - val, torch._inductor.runtime.triton_heuristics.CachingAutotuner + val, torch._inductor.triton_heuristics.CachingAutotuner ): kernels.append(val) diff --git a/test/inductor/test_triton_heuristics.py b/test/inductor/test_triton_heuristics.py index ab54164edd..1841454ab9 100644 --- a/test/inductor/test_triton_heuristics.py +++ b/test/inductor/test_triton_heuristics.py @@ -16,8 +16,8 @@ except ImportError: raise unittest.SkipTest("requires triton") # noqa: TRY200 from torch._inductor import config -from torch._inductor.runtime.triton_heuristics import triton_config from torch._inductor.test_case import run_tests, TestCase +from torch._inductor.triton_heuristics import triton_config class TestTritonHeuristics(TestCase): diff --git a/test/test_public_bindings.py b/test/test_public_bindings.py index 96ffb9d03e..65aa339aff 100644 --- a/test/test_public_bindings.py +++ b/test/test_public_bindings.py @@ -333,7 +333,7 @@ class TestPublicBindings(TestCase): "torch.utils.tensorboard._caffe2_graph", "torch._inductor.codegen.cuda.cuda_template", "torch._inductor.codegen.cuda.gemm_template", - "torch._inductor.runtime.triton_helpers", + "torch._inductor.triton_helpers", "torch.ao.pruning._experimental.data_sparsifier.lightning.callbacks.data_sparsity", "torch.backends._coreml.preprocess", "torch.contrib._tensorboard_vis", diff --git a/torch/_inductor/codegen/cpp_wrapper_cuda.py b/torch/_inductor/codegen/cpp_wrapper_cuda.py index 39d37348d7..69838dccad 100644 --- a/torch/_inductor/codegen/cpp_wrapper_cuda.py +++ b/torch/_inductor/codegen/cpp_wrapper_cuda.py @@ -6,10 +6,10 @@ from typing import Any, List, Optional, TYPE_CHECKING import sympy from torch._inductor.codecache import get_cpp_wrapper_cubin_path_name -from torch._inductor.runtime.triton_heuristics import grid as default_grid from .. 
import config from ..codecache import CudaKernelParamCache +from ..triton_heuristics import grid as default_grid from ..virtualized import V from .aoti_hipify_utils import maybe_hipify_code_wrapper from .codegen_device_driver import cuda_kernel_driver, cuda_kernel_header diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index 007e412e42..cf51b70c16 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -34,7 +34,6 @@ import torch.utils._pytree as pytree from torch._dynamo.utils import preserve_rng_state from torch._inductor.metrics import is_metric_table_enabled, log_kernel_metadata -from torch._inductor.runtime.triton_heuristics import AutotuneHint from torch._prims_common import is_integer_dtype from torch.utils._sympy.functions import FloorDiv, ModularIndexing from torch.utils._sympy.value_ranges import ValueRanges @@ -47,6 +46,7 @@ from ..dependencies import Dep, MemoryDep, StarDep, WeakDep from ..ir import IRNode, ReductionHint, TritonTemplateBuffer from ..optimize_indexing import indexing_dtype_strength_reduction from ..scheduler import BaseSchedulerNode, BaseScheduling, WhyNoFuse +from ..triton_heuristics import AutotuneHint from ..utils import ( cache_on_self, do_bench, @@ -120,14 +120,10 @@ def gen_common_triton_imports(): imports.splice( """ - from torch._inductor.runtime import ( - triton_helpers, - triton_heuristics, - libdevice, - tl_math, - AutotuneHint, - ) + from torch._inductor import triton_helpers, triton_heuristics from torch._inductor.ir import ReductionHint, TileHint + from torch._inductor.triton_helpers import libdevice, math as tl_math + from torch._inductor.triton_heuristics import AutotuneHint from torch._inductor.utils import instance_descriptor """ ) @@ -2656,7 +2652,7 @@ class TritonKernel(Kernel): from torch._dynamo.testing import rand_strided {} import torch - from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid + from torch._inductor.triton_heuristics import grid, split_scan_grid """.format( V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") ) diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py index 17c2f58a76..6f28ea7ea8 100644 --- a/torch/_inductor/codegen/wrapper.py +++ b/torch/_inductor/codegen/wrapper.py @@ -34,7 +34,6 @@ from torch.utils._sympy.singleton_int import SingletonInt from .. import codecache, config, ir from ..ir import ReinterpretView -from ..runtime import triton_heuristics from ..utils import ( cache_on_self, get_benchmark_name, @@ -522,11 +521,10 @@ class WrapperCodeGen(CodeGen): """ import triton import triton.language as tl - from {} import grid, split_scan_grid, start_graph, end_graph + from torch._inductor.triton_heuristics import grid, split_scan_grid, start_graph, end_graph {} """.format( - triton_heuristics.__name__, - V.graph.device_ops.import_get_raw_stream_as("get_raw_stream"), + V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") ) ) @@ -1268,9 +1266,9 @@ class WrapperCodeGen(CodeGen): def generate_reset_kernel_saved_flags(self): self.wrapper_call.splice( - f""" + """ for kernel in globals().values(): - if isinstance(kernel, {triton_heuristics.__name__}.CachingAutotuner): + if isinstance(kernel, torch._inductor.triton_heuristics.CachingAutotuner): kernel.cuda_kernel_saved = False """ ) @@ -1287,9 +1285,9 @@ class WrapperCodeGen(CodeGen): subsequent AOTInductor code generation and compilation. 
""" self.wrapper_call.splice( - f""" + """ for kernel in globals().values(): - if isinstance(kernel, {triton_heuristics.__name__}.CachingAutotuner): + if isinstance(kernel, torch._inductor.triton_heuristics.CachingAutotuner): if not kernel.cuda_kernel_saved: if len(kernel.launchers) == 0: kernel.precompile() diff --git a/torch/_inductor/runtime/__init__.py b/torch/_inductor/runtime/__init__.py deleted file mode 100644 index 04f35cf3b4..0000000000 --- a/torch/_inductor/runtime/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from . import triton_helpers, triton_heuristics -from .triton_helpers import libdevice, math as tl_math -from .triton_heuristics import AutotuneHint - - -__all__ = [ - "triton_heuristics", - "triton_helpers", - "libdevice", - "tl_math", - "AutotuneHint", -] diff --git a/torch/_inductor/runtime/triton_helpers.py b/torch/_inductor/triton_helpers.py similarity index 96% rename from torch/_inductor/runtime/triton_helpers.py rename to torch/_inductor/triton_helpers.py index 71b746bdf4..61db5b1bc3 100644 --- a/torch/_inductor/runtime/triton_helpers.py +++ b/torch/_inductor/triton_helpers.py @@ -1,18 +1,5 @@ -try: - import triton - import triton.language as tl -except ImportError: - - class triton: # type: ignore[no-redef] - @staticmethod - def jit(x): - return x - - class tl: # type: ignore[no-redef] - constexpr = None # type: ignore[var-annotated] - math = None # type: ignore[var-annotated] - extra = None # type: ignore[var-annotated] - +import triton +import triton.language as tl # In the latest triton, math functions were shuffled around into different modules: # https://github.com/openai/triton/pull/3172 diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/triton_heuristics.py similarity index 99% rename from torch/_inductor/runtime/triton_heuristics.py rename to torch/_inductor/triton_heuristics.py index 85860fd05c..cf68fb020c 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/triton_heuristics.py @@ -20,13 +20,14 @@ import torch import torch.autograd.profiler as autograd_profiler from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device from torch._dynamo.utils import dynamo_timed, get_first_attr +from torch.utils._triton import has_triton_package -from torch._inductor import config -from torch._inductor.codecache import cache_dir, CudaKernelParamCache -from torch._inductor.coordinate_descent_tuner import CoordescTuner +from . import config +from .codecache import cache_dir, CudaKernelParamCache +from .coordinate_descent_tuner import CoordescTuner -from torch._inductor.ir import ReductionHint, TileHint -from torch._inductor.utils import ( +from .ir import ReductionHint, TileHint +from .utils import ( ceildiv, conditional_product, create_bandwidth_info_str, @@ -36,7 +37,6 @@ from torch._inductor.utils import ( next_power_of_2, triton_config_to_hashable, ) -from torch.utils._triton import has_triton_package log = logging.getLogger(__name__) @@ -614,7 +614,7 @@ class CachingAutotuner(KernelInterface): return do_bench(kernel_call, rep=40, fast_flush=True) def clone_args(self, *args, **kwargs) -> Tuple[List[Any], Dict[str, Any]]: - from ..compile_fx import clone_preserve_strides + from .compile_fx import clone_preserve_strides # clone inplace buffers to avoid autotune contaminating them if # the kernel does in-place stores. 
avoid cloning other buffers because diff --git a/torch/_inductor/wrapper_benchmark.py b/torch/_inductor/wrapper_benchmark.py index 81a07fcf8d..c0205659ef 100644 --- a/torch/_inductor/wrapper_benchmark.py +++ b/torch/_inductor/wrapper_benchmark.py @@ -49,7 +49,7 @@ def get_kernel_category(kernel_mod): def get_triton_kernel(mod): - from torch._inductor.runtime.triton_heuristics import CachingAutotuner + from torch._inductor.triton_heuristics import CachingAutotuner cand_list = [ v
2.41.0
80585fd2b08ebcdf367782c8de1d85b327a4084
Sun, 21 Apr 2024 11:09:44 -0700
[PATCH 0472/1000] [inductor] Refactor runtime files into torch._inductor.runtime (part 1) (#124552)
I am planning to make the compile_worker process not import torch so it can start up much faster. This stack is prep for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124552 Approved by: https://github.com/yanboliang
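One concrete piece of that prep, visible in the diff below: the relocated triton_helpers guards its triton import so the runtime package stays importable in processes without a working triton. A minimal sketch of that guard pattern (simplified from the patch):

# Simplified from the patch: stub out triton so runtime helpers stay importable
# in lightweight worker processes that do not have a real triton install.
try:
    import triton
    import triton.language as tl
except ImportError:

    class triton:  # type: ignore[no-redef]
        @staticmethod
        def jit(fn):
            # No-op decorator when triton is unavailable.
            return fn

    class tl:  # type: ignore[no-redef]
        constexpr = None
        math = None
        extra = None


@triton.jit
def add_one(x):
    # A real triton install wraps this as a JIT kernel; the stub leaves it as a
    # plain Python function, so importing this module never requires triton.
    return x + 1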
diff --git a/test/inductor/test_cuda_repro.py b/test/inductor/test_cuda_repro.py index c1d57d91d7..952a2eacaf 100644 --- a/test/inductor/test_cuda_repro.py +++ b/test/inductor/test_cuda_repro.py @@ -381,7 +381,7 @@ class CudaReproTests(TestCase): https://github.com/pytorch/torchdynamo/issues/1670 """ from torch._C import _cuda_getCurrentRawStream as get_cuda_stream - from torch._inductor.triton_heuristics import ( + from torch._inductor.runtime.triton_heuristics import ( CachingAutotuner, grid, HeuristicType, diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py index 3000be0f88..c63b1c6494 100644 --- a/test/inductor/test_torchinductor.py +++ b/test/inductor/test_torchinductor.py @@ -9643,7 +9643,7 @@ if HAS_GPU and RUN_GPU and not TEST_WITH_ASAN: copy_tests(CommonTemplate, GPUTests, GPU_TYPE) class TritonCodeGenTests(TestCase): - from torch._inductor.triton_heuristics import CachingAutotuner + from torch._inductor.runtime.triton_heuristics import CachingAutotuner class NoOpCompilerBackend: def __init__(self): @@ -9695,7 +9695,7 @@ if HAS_GPU and RUN_GPU and not TEST_WITH_ASAN: for val in mod.__dict__.values(): if isinstance( - val, torch._inductor.triton_heuristics.CachingAutotuner + val, torch._inductor.runtime.triton_heuristics.CachingAutotuner ): kernels.append(val) diff --git a/test/inductor/test_triton_heuristics.py b/test/inductor/test_triton_heuristics.py index 1841454ab9..ab54164edd 100644 --- a/test/inductor/test_triton_heuristics.py +++ b/test/inductor/test_triton_heuristics.py @@ -16,8 +16,8 @@ except ImportError: raise unittest.SkipTest("requires triton") # noqa: TRY200 from torch._inductor import config +from torch._inductor.runtime.triton_heuristics import triton_config from torch._inductor.test_case import run_tests, TestCase -from torch._inductor.triton_heuristics import triton_config class TestTritonHeuristics(TestCase): diff --git a/test/test_public_bindings.py b/test/test_public_bindings.py index 65aa339aff..96ffb9d03e 100644 --- a/test/test_public_bindings.py +++ b/test/test_public_bindings.py @@ -333,7 +333,7 @@ class TestPublicBindings(TestCase): "torch.utils.tensorboard._caffe2_graph", "torch._inductor.codegen.cuda.cuda_template", "torch._inductor.codegen.cuda.gemm_template", - "torch._inductor.triton_helpers", + "torch._inductor.runtime.triton_helpers", "torch.ao.pruning._experimental.data_sparsifier.lightning.callbacks.data_sparsity", "torch.backends._coreml.preprocess", "torch.contrib._tensorboard_vis", diff --git a/torch/_inductor/codegen/cpp_wrapper_cuda.py b/torch/_inductor/codegen/cpp_wrapper_cuda.py index 69838dccad..39d37348d7 100644 --- a/torch/_inductor/codegen/cpp_wrapper_cuda.py +++ b/torch/_inductor/codegen/cpp_wrapper_cuda.py @@ -6,10 +6,10 @@ from typing import Any, List, Optional, TYPE_CHECKING import sympy from torch._inductor.codecache import get_cpp_wrapper_cubin_path_name +from torch._inductor.runtime.triton_heuristics import grid as default_grid from .. 
import config from ..codecache import CudaKernelParamCache -from ..triton_heuristics import grid as default_grid from ..virtualized import V from .aoti_hipify_utils import maybe_hipify_code_wrapper from .codegen_device_driver import cuda_kernel_driver, cuda_kernel_header diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index cf51b70c16..007e412e42 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -34,6 +34,7 @@ import torch.utils._pytree as pytree from torch._dynamo.utils import preserve_rng_state from torch._inductor.metrics import is_metric_table_enabled, log_kernel_metadata +from torch._inductor.runtime.triton_heuristics import AutotuneHint from torch._prims_common import is_integer_dtype from torch.utils._sympy.functions import FloorDiv, ModularIndexing from torch.utils._sympy.value_ranges import ValueRanges @@ -46,7 +47,6 @@ from ..dependencies import Dep, MemoryDep, StarDep, WeakDep from ..ir import IRNode, ReductionHint, TritonTemplateBuffer from ..optimize_indexing import indexing_dtype_strength_reduction from ..scheduler import BaseSchedulerNode, BaseScheduling, WhyNoFuse -from ..triton_heuristics import AutotuneHint from ..utils import ( cache_on_self, do_bench, @@ -120,10 +120,14 @@ def gen_common_triton_imports(): imports.splice( """ - from torch._inductor import triton_helpers, triton_heuristics + from torch._inductor.runtime import ( + triton_helpers, + triton_heuristics, + libdevice, + tl_math, + AutotuneHint, + ) from torch._inductor.ir import ReductionHint, TileHint - from torch._inductor.triton_helpers import libdevice, math as tl_math - from torch._inductor.triton_heuristics import AutotuneHint from torch._inductor.utils import instance_descriptor """ ) @@ -2652,7 +2656,7 @@ class TritonKernel(Kernel): from torch._dynamo.testing import rand_strided {} import torch - from torch._inductor.triton_heuristics import grid, split_scan_grid + from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid """.format( V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") ) diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py index 6f28ea7ea8..17c2f58a76 100644 --- a/torch/_inductor/codegen/wrapper.py +++ b/torch/_inductor/codegen/wrapper.py @@ -34,6 +34,7 @@ from torch.utils._sympy.singleton_int import SingletonInt from .. import codecache, config, ir from ..ir import ReinterpretView +from ..runtime import triton_heuristics from ..utils import ( cache_on_self, get_benchmark_name, @@ -521,10 +522,11 @@ class WrapperCodeGen(CodeGen): """ import triton import triton.language as tl - from torch._inductor.triton_heuristics import grid, split_scan_grid, start_graph, end_graph + from {} import grid, split_scan_grid, start_graph, end_graph {} """.format( - V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") + triton_heuristics.__name__, + V.graph.device_ops.import_get_raw_stream_as("get_raw_stream"), ) ) @@ -1266,9 +1268,9 @@ class WrapperCodeGen(CodeGen): def generate_reset_kernel_saved_flags(self): self.wrapper_call.splice( - """ + f""" for kernel in globals().values(): - if isinstance(kernel, torch._inductor.triton_heuristics.CachingAutotuner): + if isinstance(kernel, {triton_heuristics.__name__}.CachingAutotuner): kernel.cuda_kernel_saved = False """ ) @@ -1285,9 +1287,9 @@ class WrapperCodeGen(CodeGen): subsequent AOTInductor code generation and compilation. 
""" self.wrapper_call.splice( - """ + f""" for kernel in globals().values(): - if isinstance(kernel, torch._inductor.triton_heuristics.CachingAutotuner): + if isinstance(kernel, {triton_heuristics.__name__}.CachingAutotuner): if not kernel.cuda_kernel_saved: if len(kernel.launchers) == 0: kernel.precompile() diff --git a/torch/_inductor/runtime/__init__.py b/torch/_inductor/runtime/__init__.py new file mode 100644 index 0000000000..04f35cf3b4 --- /dev/null +++ b/torch/_inductor/runtime/__init__.py @@ -0,0 +1,12 @@ +from . import triton_helpers, triton_heuristics +from .triton_helpers import libdevice, math as tl_math +from .triton_heuristics import AutotuneHint + + +__all__ = [ + "triton_heuristics", + "triton_helpers", + "libdevice", + "tl_math", + "AutotuneHint", +] diff --git a/torch/_inductor/triton_helpers.py b/torch/_inductor/runtime/triton_helpers.py similarity index 96% rename from torch/_inductor/triton_helpers.py rename to torch/_inductor/runtime/triton_helpers.py index 61db5b1bc3..71b746bdf4 100644 --- a/torch/_inductor/triton_helpers.py +++ b/torch/_inductor/runtime/triton_helpers.py @@ -1,5 +1,18 @@ -import triton -import triton.language as tl +try: + import triton + import triton.language as tl +except ImportError: + + class triton: # type: ignore[no-redef] + @staticmethod + def jit(x): + return x + + class tl: # type: ignore[no-redef] + constexpr = None # type: ignore[var-annotated] + math = None # type: ignore[var-annotated] + extra = None # type: ignore[var-annotated] + # In the latest triton, math functions were shuffled around into different modules: # https://github.com/openai/triton/pull/3172 diff --git a/torch/_inductor/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py similarity index 99% rename from torch/_inductor/triton_heuristics.py rename to torch/_inductor/runtime/triton_heuristics.py index cf68fb020c..85860fd05c 100644 --- a/torch/_inductor/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -20,14 +20,13 @@ import torch import torch.autograd.profiler as autograd_profiler from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device from torch._dynamo.utils import dynamo_timed, get_first_attr -from torch.utils._triton import has_triton_package -from . import config -from .codecache import cache_dir, CudaKernelParamCache -from .coordinate_descent_tuner import CoordescTuner +from torch._inductor import config +from torch._inductor.codecache import cache_dir, CudaKernelParamCache +from torch._inductor.coordinate_descent_tuner import CoordescTuner -from .ir import ReductionHint, TileHint -from .utils import ( +from torch._inductor.ir import ReductionHint, TileHint +from torch._inductor.utils import ( ceildiv, conditional_product, create_bandwidth_info_str, @@ -37,6 +36,7 @@ from .utils import ( next_power_of_2, triton_config_to_hashable, ) +from torch.utils._triton import has_triton_package log = logging.getLogger(__name__) @@ -614,7 +614,7 @@ class CachingAutotuner(KernelInterface): return do_bench(kernel_call, rep=40, fast_flush=True) def clone_args(self, *args, **kwargs) -> Tuple[List[Any], Dict[str, Any]]: - from .compile_fx import clone_preserve_strides + from ..compile_fx import clone_preserve_strides # clone inplace buffers to avoid autotune contaminating them if # the kernel does in-place stores. 
avoid cloning other buffers because diff --git a/torch/_inductor/wrapper_benchmark.py b/torch/_inductor/wrapper_benchmark.py index c0205659ef..81a07fcf8d 100644 --- a/torch/_inductor/wrapper_benchmark.py +++ b/torch/_inductor/wrapper_benchmark.py @@ -49,7 +49,7 @@ def get_kernel_category(kernel_mod): def get_triton_kernel(mod): - from torch._inductor.triton_heuristics import CachingAutotuner + from torch._inductor.runtime.triton_heuristics import CachingAutotuner cand_list = [ v
2.41.0
b8815bc312877cb288960a7a9d22a0d389c8f3f
Sun, 21 Apr 2024 11:09:44 -0700
[PATCH 0473/1000] [inductor] Refactor runtime files into torch._inductor.runtime (part 2) (#124553)
I am planning to make the compile_worker process not import torch so it can start up much faster. This stack is prep for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124553 Approved by: https://github.com/yanboliang ghstack dependencies: #124552
diff --git a/test/inductor/test_cuda_repro.py b/test/inductor/test_cuda_repro.py index 952a2eacaf..684f3cef8f 100644 --- a/test/inductor/test_cuda_repro.py +++ b/test/inductor/test_cuda_repro.py @@ -381,12 +381,8 @@ class CudaReproTests(TestCase): https://github.com/pytorch/torchdynamo/issues/1670 """ from torch._C import _cuda_getCurrentRawStream as get_cuda_stream - from torch._inductor.runtime.triton_heuristics import ( - CachingAutotuner, - grid, - HeuristicType, - ) - from torch._inductor.utils import instance_descriptor + from torch._inductor.runtime.hints import HeuristicType, instance_descriptor + from torch._inductor.runtime.triton_heuristics import CachingAutotuner, grid def autotune(configs, meta): def decorator(fn): diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index 007e412e42..67d69efe7a 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -34,7 +34,7 @@ import torch.utils._pytree as pytree from torch._dynamo.utils import preserve_rng_state from torch._inductor.metrics import is_metric_table_enabled, log_kernel_metadata -from torch._inductor.runtime.triton_heuristics import AutotuneHint +from torch._inductor.runtime.hints import AutotuneHint from torch._prims_common import is_integer_dtype from torch.utils._sympy.functions import FloorDiv, ModularIndexing from torch.utils._sympy.value_ranges import ValueRanges @@ -44,8 +44,9 @@ from ..._dynamo.utils import counters from .. import config, ir, scheduler from ..codecache import code_hash, get_path, PyCodeCache from ..dependencies import Dep, MemoryDep, StarDep, WeakDep -from ..ir import IRNode, ReductionHint, TritonTemplateBuffer +from ..ir import IRNode, TritonTemplateBuffer from ..optimize_indexing import indexing_dtype_strength_reduction +from ..runtime.hints import ReductionHint from ..scheduler import BaseSchedulerNode, BaseScheduling, WhyNoFuse from ..utils import ( cache_on_self, @@ -120,15 +121,9 @@ def gen_common_triton_imports(): imports.splice( """ - from torch._inductor.runtime import ( - triton_helpers, - triton_heuristics, - libdevice, - tl_math, - AutotuneHint, - ) - from torch._inductor.ir import ReductionHint, TileHint - from torch._inductor.utils import instance_descriptor + from torch._inductor.runtime import triton_helpers, triton_heuristics + from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math + from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor """ ) return imports.getvalue() diff --git a/torch/_inductor/codegen/triton_split_scan.py b/torch/_inductor/codegen/triton_split_scan.py index fba8717328..c6b851dc42 100644 --- a/torch/_inductor/codegen/triton_split_scan.py +++ b/torch/_inductor/codegen/triton_split_scan.py @@ -2,7 +2,8 @@ import functools from typing import Optional, Set -from torch._inductor import config, ir +import torch._inductor.runtime.hints +from torch._inductor import config from torch._inductor.codegen.triton import ( IterationRangesRoot, @@ -36,7 +37,7 @@ class TritonSplitScanKernel(TritonKernel): *groups, index_dtype: str, mutations: Optional[Set[str]] = None, - reduction_hint=ir.ReductionHint.DEFAULT, + reduction_hint=torch._inductor.runtime.hints.ReductionHint.DEFAULT, min_elem_per_thread=0, ): super().__init__( diff --git a/torch/_inductor/codegen/triton_utils.py b/torch/_inductor/codegen/triton_utils.py index c8a7d92e3c..630f55ee94 100644 --- a/torch/_inductor/codegen/triton_utils.py +++ b/torch/_inductor/codegen/triton_utils.py @@ -5,7 
+5,8 @@ import sympy import torch from .. import config -from ..utils import _type_of, instance_descriptor +from ..runtime.hints import instance_descriptor +from ..utils import _type_of from ..virtualized import V from .common import KernelArgType, SizeArg, TensorArg, WorkspaceArg diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py index 7b3f71240f..dbd61f790c 100644 --- a/torch/_inductor/ir.py +++ b/torch/_inductor/ir.py @@ -8,7 +8,6 @@ import re import textwrap import traceback from contextlib import nullcontext -from enum import Enum from functools import partial from typing import ( Any, @@ -61,6 +60,7 @@ from .dependencies import ( var_builder, ) from .ops_handler import OpCounterCSE +from .runtime.hints import ReductionHint from .utils import ( argsort, cache_on_self, @@ -533,18 +533,6 @@ class Scatter(Pointwise): ) -class ReductionHint(Enum): - INNER = 0 - OUTER = 1 - OUTER_TINY = 2 - DEFAULT = 3 - - -class TileHint(Enum): - SQUARE = 0 - DEFAULT = 1 - - REDUCTION_COMBINE_FN = { "any": ops_wrapper("logical_or"), "max": ops_wrapper("maximum"), diff --git a/torch/_inductor/runtime/__init__.py b/torch/_inductor/runtime/__init__.py index 04f35cf3b4..e69de29bb2 100644 --- a/torch/_inductor/runtime/__init__.py +++ b/torch/_inductor/runtime/__init__.py @@ -1,12 +0,0 @@ -from . import triton_helpers, triton_heuristics -from .triton_helpers import libdevice, math as tl_math -from .triton_heuristics import AutotuneHint - - -__all__ = [ - "triton_heuristics", - "triton_helpers", - "libdevice", - "tl_math", - "AutotuneHint", -] diff --git a/torch/_inductor/runtime/hints.py b/torch/_inductor/runtime/hints.py new file mode 100644 index 0000000000..082e7fcc89 --- /dev/null +++ b/torch/_inductor/runtime/hints.py @@ -0,0 +1,82 @@ +import collections +from dataclasses import fields +from enum import auto, Enum + + +class ReductionHint(Enum): + INNER = 0 + OUTER = 1 + OUTER_TINY = 2 + DEFAULT = 3 + + +class TileHint(Enum): + SQUARE = 0 + DEFAULT = 1 + + +# Attempt to import AttrsDescriptor from Triton +try: + from triton.compiler.compiler import AttrsDescriptor + + attrs_descriptor_available = True + # Determine if 'ids_of_folded_args' is a valid field for AttrsDescriptor + attr_desc_fields = {f.name for f in fields(AttrsDescriptor)} + ids_of_folded_args_available = "ids_of_folded_args" in attr_desc_fields + divisible_by_8_available = "divisible_by_8" in attr_desc_fields +except ImportError: + attrs_descriptor_available = False + +# Define `instance_descriptor` function with clear conditional handling +if attrs_descriptor_available: + + def instance_descriptor( + divisible_by_16=None, + equal_to_1=None, + ids_of_folded_args=None, + divisible_by_8=None, + ): + # Prepare the arguments for AttrsDescriptor + kwargs = { + "divisible_by_16": divisible_by_16, + "equal_to_1": equal_to_1, + } + + # Conditionally add 'ids_of_folded_args' if it's available in AttrsDescriptor + if ids_of_folded_args_available: + kwargs["ids_of_folded_args"] = ids_of_folded_args + if divisible_by_8_available: + kwargs["divisible_by_8"] = divisible_by_8 + + # Instantiate AttrsDescriptor with the prepared arguments + return AttrsDescriptor(**kwargs) + +else: + # Define a namedtuple as a fallback when AttrsDescriptor is not available + instance_descriptor = collections.namedtuple( # type: ignore[no-redef] + "instance_descriptor", + ["divisible_by_16", "equal_to_1", "ids_of_folded_args", "divisible_by_8"], + defaults=[tuple(), tuple(), tuple(), tuple()], + ) + + +_NUM_THREADS_PER_WARP = 32 + + +class HeuristicType(Enum): + 
PERSISTENT_REDUCTION = auto() + POINTWISE = auto() + REDUCTION = auto() + SPLIT_SCAN = auto() + TEMPLATE = auto() + USER_AUTOTUNE = auto() + + +class AutotuneHint(Enum): + ELEMENTS_PER_WARP_32 = 0 + + # Triton codegen tries to codegen set of AutotuneHints. + # Enum.__repr__ looks like "<AutotuneHint.ELEMENTS_PER_WARP_32: 0>"" + # which isn't valid python. + # Enum.__str__ will just return "AutotuneHint.ELEMENTS_PER_WARP_32". + __repr__ = Enum.__str__ diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index 85860fd05c..cb29da2f59 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -12,7 +12,6 @@ import os.path import re import threading import time -from enum import auto, Enum from typing import Any, Callable, Dict, List, Optional, Set, Tuple import torch @@ -24,8 +23,6 @@ from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config from torch._inductor.codecache import cache_dir, CudaKernelParamCache from torch._inductor.coordinate_descent_tuner import CoordescTuner - -from torch._inductor.ir import ReductionHint, TileHint from torch._inductor.utils import ( ceildiv, conditional_product, @@ -37,6 +34,13 @@ from torch._inductor.utils import ( triton_config_to_hashable, ) from torch.utils._triton import has_triton_package +from .hints import ( + _NUM_THREADS_PER_WARP, + AutotuneHint, + HeuristicType, + ReductionHint, + TileHint, +) log = logging.getLogger(__name__) @@ -59,28 +63,6 @@ else: ASTSource = None -_NUM_THREADS_PER_WARP = 32 - - -class HeuristicType(Enum): - PERSISTENT_REDUCTION = auto() - POINTWISE = auto() - REDUCTION = auto() - SPLIT_SCAN = auto() - TEMPLATE = auto() - USER_AUTOTUNE = auto() - - -class AutotuneHint(Enum): - ELEMENTS_PER_WARP_32 = 0 - - # Triton codegen tries to codegen set of AutotuneHints. - # Enum.__repr__ looks like "<AutotuneHint.ELEMENTS_PER_WARP_32: 0>"" - # which isn't valid python. - # Enum.__str__ will just return "AutotuneHint.ELEMENTS_PER_WARP_32". 
- __repr__ = Enum.__str__ - - def autotune_hints_to_configs( hints: Set[AutotuneHint], size_hints, block_size: int ) -> List[Config]: diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index a2a80e1e5e..daddaaf04d 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -21,7 +21,6 @@ import tempfile import textwrap import time import unittest -from dataclasses import fields from datetime import datetime from io import StringIO from typing import ( @@ -689,51 +688,6 @@ def output_node(gm: torch.fx.GraphModule): return last_node -# Attempt to import AttrsDescriptor from Triton -try: - from triton.compiler.compiler import AttrsDescriptor - - attrs_descriptor_available = True - # Determine if 'ids_of_folded_args' is a valid field for AttrsDescriptor - attr_desc_fields = {f.name for f in fields(AttrsDescriptor)} - ids_of_folded_args_available = "ids_of_folded_args" in attr_desc_fields - divisible_by_8_available = "divisible_by_8" in attr_desc_fields -except ImportError: - attrs_descriptor_available = False - -# Define `instance_descriptor` function with clear conditional handling -if attrs_descriptor_available: - - def instance_descriptor( - divisible_by_16=None, - equal_to_1=None, - ids_of_folded_args=None, - divisible_by_8=None, - ): - # Prepare the arguments for AttrsDescriptor - kwargs = { - "divisible_by_16": divisible_by_16, - "equal_to_1": equal_to_1, - } - - # Conditionally add 'ids_of_folded_args' if it's available in AttrsDescriptor - if ids_of_folded_args_available: - kwargs["ids_of_folded_args"] = ids_of_folded_args - if divisible_by_8_available: - kwargs["divisible_by_8"] = divisible_by_8 - - # Instantiate AttrsDescriptor with the prepared arguments - return AttrsDescriptor(**kwargs) - -else: - # Define a namedtuple as a fallback when AttrsDescriptor is not available - instance_descriptor = collections.namedtuple( # type: ignore[no-redef] - "instance_descriptor", - ["divisible_by_16", "equal_to_1", "ids_of_folded_args", "divisible_by_8"], - defaults=[tuple(), tuple(), tuple(), tuple()], - ) - - _registered_caches: List[Any] = []
2.41.0
fd8870e6b9c598bc65567549ebdae6143526a53
Sun, 21 Apr 2024 11:09:45 -0700
[PATCH 0474/1000] [inductor] Refactor runtime files into torch._inductor.runtime (part 3) (#124557)
I am planning to make the compile_worker process not import torch so it can start up much faster. This stack is prep for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124557 Approved by: https://github.com/yanboliang ghstack dependencies: #124552, #124553
diff --git a/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py b/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py index 3eebb8ca66..3738f041fe 100644 --- a/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py +++ b/benchmarks/dynamo/microbenchmarks/tensor_layout_mini_benchmark.py @@ -1,6 +1,6 @@ import torch from torch._inductor import ir -from torch._inductor.utils import do_bench +from torch._inductor.runtime.runtime_utils import do_bench def to_channels_last(x): diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py index 0103c43c65..d3b0c42d7c 100644 --- a/test/inductor/test_aot_inductor.py +++ b/test/inductor/test_aot_inductor.py @@ -14,8 +14,8 @@ from torch._dynamo.testing import rand_strided, same from torch._dynamo.utils import counters from torch._inductor import config from torch._inductor.exc import CppWrapperCodeGenError +from torch._inductor.runtime.runtime_utils import cache_dir from torch._inductor.test_case import TestCase -from torch._inductor.utils import cache_dir from torch.export import Dim, export from torch.testing import FileCheck diff --git a/test/inductor/test_codecache.py b/test/inductor/test_codecache.py index 55a2233f15..96ed0d7022 100644 --- a/test/inductor/test_codecache.py +++ b/test/inductor/test_codecache.py @@ -19,8 +19,9 @@ from torch._inductor.codecache import ( TensorMetadata, TensorMetadataAndValues, ) +from torch._inductor.runtime.runtime_utils import cache_dir from torch._inductor.test_case import run_tests, TestCase -from torch._inductor.utils import cache_dir, fresh_inductor_cache +from torch._inductor.utils import fresh_inductor_cache from torch.testing._internal.common_cuda import SM80OrLater from torch.testing._internal.common_device_type import largeTensorTest from torch.testing._internal.common_utils import ( diff --git a/test/inductor/test_inductor_utils.py b/test/inductor/test_inductor_utils.py index 0c11ac0511..f86dd9219e 100644 --- a/test/inductor/test_inductor_utils.py +++ b/test/inductor/test_inductor_utils.py @@ -4,11 +4,11 @@ import functools import logging import torch +from torch._inductor.runtime.runtime_utils import do_bench from torch._inductor.test_case import run_tests, TestCase -from torch._inductor.utils import do_bench, do_bench_using_profiling - +from torch._inductor.utils import do_bench_using_profiling log = logging.getLogger(__name__) diff --git a/test/inductor/test_padding.py b/test/inductor/test_padding.py index 2270c33291..d9bf81e663 100644 --- a/test/inductor/test_padding.py +++ b/test/inductor/test_padding.py @@ -12,7 +12,8 @@ from torch._dynamo.testing import rand_strided, reduce_to_scalar_loss from torch._dynamo.utils import maybe_cprofile from torch._inductor import config, ir, metrics from torch._inductor.fx_passes import pad_mm as pad_mm_pass -from torch._inductor.utils import do_bench, run_and_get_code +from torch._inductor.runtime.runtime_utils import do_bench +from torch._inductor.utils import run_and_get_code from torch.testing._internal.inductor_utils import HAS_CUDA DO_PERF_TEST = os.environ.get("DO_PERF_TEST") == "1" diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py index aef3d18f35..f45f33ffde 100644 --- a/torch/_inductor/autotune_process.py +++ b/torch/_inductor/autotune_process.py @@ -35,7 +35,7 @@ if TYPE_CHECKING: from torch._inductor.select_algorithm import TritonTemplateCaller from . 
import config -from .utils import do_bench +from .runtime.runtime_utils import do_bench from .virtualized import V CUDA_VISIBLE_DEVICES = "CUDA_VISIBLE_DEVICES" diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index 3111901a1c..a5474b0616 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -59,7 +59,8 @@ from torch._dynamo.device_interface import ( from torch._dynamo.utils import counters, dynamo_timed from torch._inductor import config, exc, metrics from torch._inductor.codegen.cuda import cuda_env -from torch._inductor.utils import cache_dir, clear_on_fresh_inductor_cache, is_linux +from torch._inductor.runtime.runtime_utils import cache_dir +from torch._inductor.utils import clear_on_fresh_inductor_cache, is_linux from torch._subclasses.fake_tensor import ( extract_tensor_metadata, FakeTensor, diff --git a/torch/_inductor/codegen/cuda/cutlass_utils.py b/torch/_inductor/codegen/cuda/cutlass_utils.py index 134ebb93fe..40daf6da1c 100644 --- a/torch/_inductor/codegen/cuda/cutlass_utils.py +++ b/torch/_inductor/codegen/cuda/cutlass_utils.py @@ -8,10 +8,10 @@ from typing import Any, List, Optional import sympy import torch - -from ...codecache import cache_dir from ...config import cuda as inductor_cuda_config from ...ir import Layout + +from ...runtime.runtime_utils import cache_dir from .cuda_env import get_cuda_arch, get_cuda_version log = logging.getLogger(__name__) diff --git a/torch/_inductor/codegen/multi_kernel.py b/torch/_inductor/codegen/multi_kernel.py index e03ca8eca9..e4fc396c64 100644 --- a/torch/_inductor/codegen/multi_kernel.py +++ b/torch/_inductor/codegen/multi_kernel.py @@ -6,7 +6,8 @@ from torch._inductor.metrics import get_metric_table, is_metric_table_enabled from .. import config from ..codecache import PyCodeCache, TritonFuture -from ..utils import cache_on_self, do_bench +from ..runtime.runtime_utils import do_bench +from ..utils import cache_on_self from ..virtualized import V from .common import TensorArg diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index 67d69efe7a..c03e6c6954 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -47,24 +47,26 @@ from ..dependencies import Dep, MemoryDep, StarDep, WeakDep from ..ir import IRNode, TritonTemplateBuffer from ..optimize_indexing import indexing_dtype_strength_reduction from ..runtime.hints import ReductionHint +from ..runtime.runtime_utils import ( + do_bench, + get_max_y_grid, + green_text, + next_power_of_2, + yellow_text, +) from ..scheduler import BaseSchedulerNode, BaseScheduling, WhyNoFuse from ..utils import ( cache_on_self, - do_bench, get_dtype_size, get_fused_kernel_name, get_kernel_metadata, - get_max_y_grid, - green_text, is_welford_reduction, - next_power_of_2, Placeholder, sympy_dot, sympy_index_symbol, sympy_product, sympy_subs, unique, - yellow_text, ) from ..virtualized import _ops as ops, OpsHandler, ReductionType, StoreMode, V from ..wrapper_benchmark import get_kernel_category_by_source_code diff --git a/torch/_inductor/coordinate_descent_tuner.py b/torch/_inductor/coordinate_descent_tuner.py index baf293d9f5..2511800bc1 100644 --- a/torch/_inductor/coordinate_descent_tuner.py +++ b/torch/_inductor/coordinate_descent_tuner.py @@ -4,7 +4,7 @@ import logging from typing import Callable, Optional from torch.utils._triton import has_triton -from .utils import red_text, triton_config_to_hashable +from .runtime.runtime_utils import red_text, triton_config_to_hashable if 
has_triton(): import triton diff --git a/torch/_inductor/fx_passes/pad_mm.py b/torch/_inductor/fx_passes/pad_mm.py index 40948dc461..ea4d45e389 100644 --- a/torch/_inductor/fx_passes/pad_mm.py +++ b/torch/_inductor/fx_passes/pad_mm.py @@ -2,6 +2,7 @@ import functools from typing import List, Optional, Union import torch +import torch._inductor.runtime.runtime_utils from torch import Tensor from torch._inductor import utils from torch._subclasses.fake_tensor import FakeTensor @@ -241,7 +242,7 @@ def should_pad_bench( return False do_bench = functools.partial( - utils.do_bench, + torch._inductor.runtime.runtime_utils.do_bench, warmup=5, ) diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py index dbd61f790c..59ac4ee395 100644 --- a/torch/_inductor/ir.py +++ b/torch/_inductor/ir.py @@ -61,6 +61,7 @@ from .dependencies import ( ) from .ops_handler import OpCounterCSE from .runtime.hints import ReductionHint +from .runtime.runtime_utils import do_bench from .utils import ( argsort, cache_on_self, @@ -68,7 +69,6 @@ from .utils import ( convert_shape_to_inductor, convert_shape_to_symint, developer_warning, - do_bench, get_kernel_metadata, is_dynamic, is_gpu, diff --git a/torch/_inductor/kernel/mm_common.py b/torch/_inductor/kernel/mm_common.py index 12a280cb91..e04f87c523 100644 --- a/torch/_inductor/kernel/mm_common.py +++ b/torch/_inductor/kernel/mm_common.py @@ -9,7 +9,8 @@ from torch._inductor.select_algorithm import realize_inputs from torch._inductor.virtualized import V from .. import config as inductor_config -from ..utils import ceildiv as cdiv, next_power_of_2 +from ..runtime.runtime_utils import next_power_of_2 +from ..utils import ceildiv as cdiv log = logging.getLogger(__name__) diff --git a/torch/_inductor/runtime/runtime_utils.py b/torch/_inductor/runtime/runtime_utils.py new file mode 100644 index 0000000000..948ad0e5cf --- /dev/null +++ b/torch/_inductor/runtime/runtime_utils.py @@ -0,0 +1,142 @@ +from __future__ import annotations + +import functools +import getpass +import inspect +import operator +import os +import re +import tempfile + +import torch + + +def conditional_product(*args): + return functools.reduce(operator.mul, [x for x in args if x]) + + +def ceildiv(numer: int, denom: int) -> int: + return -(numer // -denom) + + +def next_power_of_2(n: int) -> int: + """Return the smallest power of 2 greater than or equal to n""" + n -= 1 + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n |= n >> 32 + n += 1 + return n + + +def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int: + """ + Return the total number of bytes the arguments of tensor type takes. + + For in/out args, tensor sizes are counted twice: once for reading and + once for writing. + + The first num_in_out_args arguments are in out tensors. + """ + return sum( + arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args)) + for i, arg in enumerate(args) + if isinstance(arg, torch.Tensor) + ) + + +def triton_config_to_hashable(cfg): + """ + Convert triton config to a tuple that can uniquely identify it. We can use + the return value as a dictionary key. 
+ """ + items = sorted(cfg.kwargs.items()) + items.append(("num_warps", cfg.num_warps)) + items.append(("num_stages", cfg.num_stages)) + return tuple(items) + + +def create_bandwidth_info_str(ms, num_gb, gb_per_s, prefix="", suffix="", color=True): + info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}" + slow = ms > 0.012 and gb_per_s < 650 + return red_text(info_str) if color and slow else info_str + + +def get_max_y_grid(): + return 65535 + + +def do_bench(*args, **kwargs): + @functools.lru_cache(None) + def load_triton(): + try: + # NB: Lazily load triton, as importing triton is slow + # see https://github.com/openai/triton/issues/1599 + from triton.testing import do_bench as triton_do_bench + except ImportError as exc: + raise NotImplementedError("requires Triton") from exc + + # triton PR https://github.com/openai/triton/pull/1513 change the + # quantile fields name from 'percentiles' to 'quantiles' + # and change the default value from (0.5, 0.2, 0.8) to None. + # This may break inductor since a caller expects a tuple may get a item. + # + # Add a wrapper to maintain the same behavior for inductor. + # Maybe we should have own implementation of this function? + return triton_do_bench, ( + "quantiles" + if inspect.signature(triton_do_bench).parameters.get("quantiles") + is not None + else "percentiles" + ) + + triton_do_bench, quantile_field_name = load_triton() + + if quantile_field_name not in kwargs: + kwargs[quantile_field_name] = (0.5, 0.2, 0.8) + return triton_do_bench(*args, **kwargs)[0] + + +def cache_dir() -> str: + cache_dir = os.environ.get("TORCHINDUCTOR_CACHE_DIR") + if cache_dir is None: + sanitized_username = re.sub(r'[\\/:*?"<>|]', "_", getpass.getuser()) + os.environ["TORCHINDUCTOR_CACHE_DIR"] = cache_dir = os.path.join( + tempfile.gettempdir(), + "torchinductor_" + sanitized_username, + ) + os.makedirs(cache_dir, exist_ok=True) + return cache_dir + + +HAS_COLORAMA = True +try: + import colorama +except ImportError: + HAS_COLORAMA = False + + +def _color_text(msg, color): + if not HAS_COLORAMA: + return msg + + return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET + + +def green_text(msg): + return _color_text(msg, "green") + + +def yellow_text(msg): + return _color_text(msg, "yellow") + + +def red_text(msg): + return _color_text(msg, "red") + + +def blue_text(msg): + return _color_text(msg, "blue") diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index cb29da2f59..89be8f36d4 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -21,9 +21,17 @@ from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config -from torch._inductor.codecache import cache_dir, CudaKernelParamCache from torch._inductor.coordinate_descent_tuner import CoordescTuner -from torch._inductor.utils import ( +from .hints import ( + _NUM_THREADS_PER_WARP, + AutotuneHint, + HeuristicType, + ReductionHint, + TileHint, +) + +from .runtime_utils import ( + cache_dir, ceildiv, conditional_product, create_bandwidth_info_str, @@ -33,20 +41,13 @@ from torch._inductor.utils import ( next_power_of_2, triton_config_to_hashable, ) -from torch.utils._triton import has_triton_package -from .hints import ( - _NUM_THREADS_PER_WARP, - AutotuneHint, - HeuristicType, - ReductionHint, - TileHint, -) - -log = logging.getLogger(__name__) - -if 
has_triton_package(): +try: import triton +except ImportError: + triton = None + +if triton is not None: from triton import Config from triton.runtime.autotuner import OutOfResources from triton.runtime.jit import KernelInterface @@ -57,12 +58,14 @@ if has_triton_package(): ASTSource = None else: Config = object - triton = None KernelInterface = object OutOfResources = object ASTSource = None +log = logging.getLogger(__name__) + + def autotune_hints_to_configs( hints: Set[AutotuneHint], size_hints, block_size: int ) -> List[Config]: @@ -681,6 +684,8 @@ class CachingAutotuner(KernelInterface): "meta": launcher.config.kwargs, } + from torch._inductor.codecache import CudaKernelParamCache + if torch.version.hip is None: CudaKernelParamCache.set(key, params, launcher.bin.asm["cubin"]) else: diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py index 32f734ba8b..2783e3edfb 100644 --- a/torch/_inductor/scheduler.py +++ b/torch/_inductor/scheduler.py @@ -35,6 +35,7 @@ from .codegen.common import get_scheduling_for_device, Kernel from .comm_analysis import estimate_nccl_collective_runtime from .dependencies import Dep, MemoryDep, StarDep, WeakDep from .ir import ComputedBuffer, MultiOutput, MultiOutputLayout +from .runtime.runtime_utils import green_text, red_text from .sizevars import SimplifyIndexing from .utils import ( cache_on_self, @@ -44,11 +45,9 @@ from .utils import ( get_device_tflops, get_dtype_size, get_gpu_dram_gbps, - green_text, is_collective, is_gpu, is_wait, - red_text, sympy_product, ) from .virtualized import V diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index 36801e3c28..2485de547b 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -35,14 +35,8 @@ from .codegen.triton import ( from .codegen.triton_utils import config_of, signature_to_meta from .exc import CUDACompileError from .ir import ChoiceCaller, PrimitiveInfoType -from .utils import ( - do_bench, - get_dtype_size, - Placeholder, - sympy_dot, - sympy_product, - unique, -) +from .runtime.runtime_utils import do_bench +from .utils import get_dtype_size, Placeholder, sympy_dot, sympy_product, unique from .virtualized import V log = logging.getLogger(__name__) diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index daddaaf04d..a7be602f87 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -5,7 +5,6 @@ import contextlib import dataclasses import enum import functools -import getpass import inspect import io import itertools @@ -14,7 +13,6 @@ import math import operator import os import platform -import re import shutil import sys import tempfile @@ -51,6 +49,7 @@ from torch.autograd.profiler_util import EventList from torch.fx.passes.shape_prop import ShapeProp from torch.utils._sympy.functions import CeilDiv, CleanDiv, FloorDiv, ModularIndexing from . 
import config +from .runtime.runtime_utils import ceildiv as runtime_ceildiv log = logging.getLogger(__name__) @@ -140,37 +139,6 @@ def do_bench_using_profiling(fn: Callable[[], Any], warmup=25, rep=100) -> float return res -def do_bench(*args, **kwargs): - @functools.lru_cache(None) - def load_triton(): - try: - # NB: Lazily load triton, as importing triton is slow - # see https://github.com/openai/triton/issues/1599 - from triton.testing import do_bench as triton_do_bench - except ImportError as exc: - raise NotImplementedError("requires Triton") from exc - - # triton PR https://github.com/openai/triton/pull/1513 change the - # quantile fields name from 'percentiles' to 'quantiles' - # and change the default value from (0.5, 0.2, 0.8) to None. - # This may break inductor since a caller expects a tuple may get a item. - # - # Add a wrapper to maintain the same behavior for inductor. - # Maybe we should have own implementation of this function? - return triton_do_bench, ( - "quantiles" - if inspect.signature(triton_do_bench).parameters.get("quantiles") - is not None - else "percentiles" - ) - - triton_do_bench, quantile_field_name = load_triton() - - if quantile_field_name not in kwargs: - kwargs[quantile_field_name] = (0.5, 0.2, 0.8) - return triton_do_bench(*args, **kwargs)[0] - - @functools.lru_cache(None) def has_torchvision_roi_align() -> bool: try: @@ -183,10 +151,6 @@ def has_torchvision_roi_align() -> bool: return False -def conditional_product(*args): - return functools.reduce(operator.mul, [x for x in args if x]) - - def decode_device(device: Union[Optional[torch.device], str]) -> torch.device: if device is None: return torch.tensor(0.0).device # default device @@ -222,20 +186,7 @@ def ceildiv( assert isinstance(numer, int) and isinstance( denom, int ), f"{numer}: {type(numer)}, {denom}: {type(denom)}" - return -(numer // -denom) - - -def next_power_of_2(n: int) -> int: - """Return the smallest power of 2 greater than or equal to n""" - n -= 1 - n |= n >> 1 - n |= n >> 2 - n |= n >> 4 - n |= n >> 8 - n |= n >> 16 - n |= n >> 32 - n += 1 - return n + return runtime_ceildiv(numer, denom) def _type_of(key): @@ -703,20 +654,6 @@ def clear_on_fresh_inductor_cache(obj: Any): return obj -@clear_on_fresh_inductor_cache -@functools.lru_cache(None) -def cache_dir() -> str: - cache_dir = os.environ.get("TORCHINDUCTOR_CACHE_DIR") - if cache_dir is None: - sanitized_username = re.sub(r'[\\/:*?"<>|]', "_", getpass.getuser()) - cache_dir = os.path.join( - tempfile.gettempdir(), - "torchinductor_" + sanitized_username, - ) - os.makedirs(cache_dir, exist_ok=True) - return cache_dir - - @contextlib.contextmanager def fresh_inductor_cache(cache_entries=None): """ @@ -1141,28 +1078,6 @@ def developer_warning(msg): log.info(msg) -def get_num_bytes(*args: torch.Tensor, num_in_out_args: int = 0) -> int: - """ - Return the total number of bytes the arguments of tensor type takes. - - For in/out args, tensor sizes are counted twice: once for reading and - once for writing. - - The first num_in_out_args arguments are in out tensors. 
- """ - return sum( - arg.numel() * arg.element_size() * (1 + int(i < num_in_out_args)) - for i, arg in enumerate(args) - if isinstance(arg, torch.Tensor) - ) - - -def create_bandwidth_info_str(ms, num_gb, gb_per_s, prefix="", suffix="", color=True): - info_str = f"{prefix}{ms:.3f}ms \t{num_gb:.3f} GB \t {gb_per_s:7.2f}GB/s{suffix}" - slow = ms > 0.012 and gb_per_s < 650 - return red_text(info_str) if color and slow else info_str - - def get_benchmark_name(): """ An experimental API used only when config.benchmark_kernel is true. @@ -1229,17 +1144,6 @@ def maybe_profile(should_profile, *args, **kwargs): yield -def triton_config_to_hashable(cfg): - """ - Convert triton config to a tuple that can uniquely identify it. We can use - the return value as a dictionary key. - """ - items = sorted(cfg.kwargs.items()) - items.append(("num_warps", cfg.num_warps)) - items.append(("num_stages", cfg.num_stages)) - return tuple(items) - - def parallel_num_threads(): threads = config.cpp.threads if threads < 1: @@ -1247,36 +1151,6 @@ def parallel_num_threads(): return threads -HAS_COLORAMA = True -try: - import colorama -except ImportError: - HAS_COLORAMA = False - - -def _color_text(msg, color): - if not HAS_COLORAMA: - return msg - - return getattr(colorama.Fore, color.upper()) + msg + colorama.Fore.RESET - - -def green_text(msg): - return _color_text(msg, "green") - - -def yellow_text(msg): - return _color_text(msg, "yellow") - - -def red_text(msg): - return _color_text(msg, "red") - - -def blue_text(msg): - return _color_text(msg, "blue") - - @functools.lru_cache(None) def get_device_tflops(dtype): from triton.testing import get_max_simd_tflops, get_max_tensorcore_tflops @@ -1320,10 +1194,6 @@ def reduction_num_outputs(reduction_type): return 3 if is_welford_reduction(reduction_type) else 1 -def get_max_y_grid(): - return 65535 - - def is_linux() -> bool: return platform.system() == "Linux" diff --git a/torch/_inductor/wrapper_benchmark.py b/torch/_inductor/wrapper_benchmark.py index 81a07fcf8d..31b81bba4a 100644 --- a/torch/_inductor/wrapper_benchmark.py +++ b/torch/_inductor/wrapper_benchmark.py @@ -4,7 +4,7 @@ from collections import defaultdict import torch from torch.autograd import DeviceType -from .utils import create_bandwidth_info_str, do_bench, get_num_bytes +from .runtime.runtime_utils import create_bandwidth_info_str, do_bench, get_num_bytes _kernel_category_choices = [ "foreach",
2.41.0
cc0e60e303d2bea1217f6ea6fc515a02cd9a673
Sun, 21 Apr 2024 11:09:45 -0700
[PATCH 0475/1000] [inductor] Refactor runtime files into torch._inductor.runtime (part 4) (#124559)
I am planning to make the compile_worker process not import torch so it can start up much faster. This stack is prep for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124559 Approved by: https://github.com/yanboliang ghstack dependencies: #124552, #124553, #124557
diff --git a/test/inductor/test_coordinate_descent_tuner.py b/test/inductor/test_coordinate_descent_tuner.py index 5b9f35fa9c..8f57cab4d3 100644 --- a/test/inductor/test_coordinate_descent_tuner.py +++ b/test/inductor/test_coordinate_descent_tuner.py @@ -18,7 +18,7 @@ except ImportError: raise unittest.SkipTest("requires triton") # noqa: TRY200 from torch._inductor import config -from torch._inductor.coordinate_descent_tuner import CoordescTuner +from torch._inductor.runtime.coordinate_descent_tuner import CoordescTuner config.benchmark_kernel = True config.coordinate_descent_tuning = True diff --git a/torch/_inductor/coordinate_descent_tuner.py b/torch/_inductor/runtime/coordinate_descent_tuner.py similarity index 98% rename from torch/_inductor/coordinate_descent_tuner.py rename to torch/_inductor/runtime/coordinate_descent_tuner.py index 2511800bc1..83f4973a1f 100644 --- a/torch/_inductor/coordinate_descent_tuner.py +++ b/torch/_inductor/runtime/coordinate_descent_tuner.py @@ -3,15 +3,14 @@ import itertools import logging from typing import Callable, Optional -from torch.utils._triton import has_triton -from .runtime.runtime_utils import red_text, triton_config_to_hashable +from torch._inductor.runtime.runtime_utils import red_text, triton_config_to_hashable -if has_triton(): +try: import triton -else: +except ImportError: triton = None -from . import config as inductor_config +from torch._inductor import config as inductor_config log = logging.getLogger(__name__) diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index 89be8f36d4..1b0467553b 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -21,7 +21,7 @@ from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config -from torch._inductor.coordinate_descent_tuner import CoordescTuner +from .coordinate_descent_tuner import CoordescTuner from .hints import ( _NUM_THREADS_PER_WARP, AutotuneHint, @@ -29,7 +29,6 @@ from .hints import ( ReductionHint, TileHint, ) - from .runtime_utils import ( cache_dir, ceildiv,
2.41.0
620a45542cf106d65a9b2b4c54810b147ae6be7
Sun, 21 Apr 2024 11:09:46 -0700
[PATCH 0476/1000] [inductor] Refactor runtime files into torch._inductor.runtime (part 5) (#124560)
I am planning to make the compile_worker process not import torch so it can start up much faster. This stack is prep for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124560 Approved by: https://github.com/yanboliang ghstack dependencies: #124552, #124553, #124557, #124559
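The diff that follows stubs out `torch.autograd.profiler` and `dynamo_timed` behind try/except so the runtime modules still import when a compile worker only has a mock torch. An illustrative sketch of that guard-and-fallback pattern (the triton example and helper names here are assumptions for illustration, not the exact PyTorch code):

    # Illustrative guard-and-fallback pattern: the module stays importable even
    # when an optional dependency (here, triton) is missing, e.g. in a compile
    # worker or a CPU-only environment.
    try:
        from triton.testing import do_bench as triton_do_bench
    except ImportError:
        triton_do_bench = None


    def timed(fn=None, phase_name=None):
        """No-op timing decorator used when the real profiler is unavailable."""
        if fn is not None:
            return fn
        return timed  # supports both @timed and @timed(phase_name=...)


    def bench(kernel_fn):
        # Fail at call time with a clear error, not at import time.
        if triton_do_bench is None:
            raise NotImplementedError("benchmarking requires Triton")
        return triton_do_bench(kernel_fn)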
diff --git a/torch/_inductor/runtime/coordinate_descent_tuner.py b/torch/_inductor/runtime/coordinate_descent_tuner.py index 83f4973a1f..f280765aec 100644 --- a/torch/_inductor/runtime/coordinate_descent_tuner.py +++ b/torch/_inductor/runtime/coordinate_descent_tuner.py @@ -3,7 +3,7 @@ import itertools import logging from typing import Callable, Optional -from torch._inductor.runtime.runtime_utils import red_text, triton_config_to_hashable +from .runtime_utils import red_text, triton_config_to_hashable try: import triton diff --git a/torch/_inductor/runtime/runtime_utils.py b/torch/_inductor/runtime/runtime_utils.py index 948ad0e5cf..c0fdf65ec9 100644 --- a/torch/_inductor/runtime/runtime_utils.py +++ b/torch/_inductor/runtime/runtime_utils.py @@ -140,3 +140,24 @@ def red_text(msg): def blue_text(msg): return _color_text(msg, "blue") + + +def get_first_attr(obj, *attrs): + """ + Return the first available attribute or throw an exception if none is present. + """ + for attr in attrs: + if hasattr(obj, attr): + return getattr(obj, attr) + + raise AssertionError(f"{obj} does not has any of the attributes: {attrs}") + + +try: + dynamo_timed = torch._dynamo.utils.dynamo_timed +except AttributeError: # Compile workers only have a mock version of torch + + def dynamo_timed(original_function=None, phase_name=None): + if original_function: + return original_function + return dynamo_timed diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index 1b0467553b..f65cd6eaa5 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -16,9 +16,7 @@ from typing import Any, Callable, Dict, List, Optional, Set, Tuple import torch -import torch.autograd.profiler as autograd_profiler from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device -from torch._dynamo.utils import dynamo_timed, get_first_attr from torch._inductor import config from .coordinate_descent_tuner import CoordescTuner @@ -35,6 +33,8 @@ from .runtime_utils import ( conditional_product, create_bandwidth_info_str, do_bench, + dynamo_timed, + get_first_attr, get_max_y_grid, get_num_bytes, next_power_of_2, @@ -61,6 +61,13 @@ else: OutOfResources = object ASTSource = None +try: + autograd_profiler = torch.autograd.profiler +except AttributeError: # Compile workers only have a mock version of torch + + class autograd_profiler: # type: ignore[no-redef] + _is_profiler_enabled = False + log = logging.getLogger(__name__)
2.41.0
b9fe91f5cc90a0b110ebb681f4074a7657124dd
Sun, 21 Apr 2024 11:09:47 -0700
[PATCH 0477/1000] [inductor] Remove config check for 3D tiling (#124569)
This makes the check per-kernel (based on whether 3D tiling is actually used), rather than relying on the global config option. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124569 Approved by: https://github.com/yanboliang ghstack dependencies: #124552, #124553, #124557, #124559, #124560
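The new `grid_fn` in the diff below folds an oversized y grid into the z dimension whenever the kernel itself has no z axis, instead of consulting the removed `triton.max_tiles` config. A standalone, illustrative sketch of just that arithmetic (helper names are not the real PyTorch ones; MAX_Y_GRID matches `get_max_y_grid()`, i.e. 65535, the CUDA gridDim.y limit):

    MAX_Y_GRID = 65535


    def ceildiv(numer, denom):
        return -(numer // -denom)


    def fold_y_into_z(y_grid, has_zdim):
        if has_zdim:
            # y must already fit; the real code asserts y_grid <= MAX_Y_GRID
            return y_grid, None
        div = ceildiv(y_grid, MAX_Y_GRID)
        return y_grid // div, div  # (reduced y grid, synthetic z grid)


    print(fold_y_into_z(200_000, has_zdim=False))  # -> (50000, 4)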
diff --git a/test/inductor/test_triton_heuristics.py b/test/inductor/test_triton_heuristics.py index ab54164edd..2094797f9f 100644 --- a/test/inductor/test_triton_heuristics.py +++ b/test/inductor/test_triton_heuristics.py @@ -78,11 +78,6 @@ class TestTritonHeuristics(TestCase): def test_artificial_grid_cpp_wrapper(self): self._test_artificial_zgrid() - @config.patch("triton.max_tiles", 3) - def test_artificial_grid_max_tiles(self): - with self.assertRaisesRegex(Exception, "Generated y grid"): - self._test_artificial_zgrid() - if __name__ == "__main__": if IS_LINUX and HAS_GPU: diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index c03e6c6954..fb1f460628 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -1017,6 +1017,7 @@ class IterationRangesRoot(IterationRanges): is_loop: bool, tensor_dim: Optional[int], grid_dim: Optional[int], + has_zdim: bool, ): if pid_cache is None: pid_cache = {} @@ -1044,6 +1045,7 @@ class IterationRangesRoot(IterationRanges): self.tensor_dim = tensor_dim # Index of corresponding dimension in the triton grid self.grid_dim = grid_dim + self.has_zdim = has_zdim def __repr__(self): return f"IterationRangesRoot({self.name!r}, {self.numel}, ...)" @@ -1135,7 +1137,7 @@ class IterationRangesRoot(IterationRanges): # z grid is only exercised when max_tiles == 3 (off by default). if ( self.grid_dim == 1 - and config.triton.max_tiles <= 2 + and not self.has_zdim and not (isinstance(self.numel, int) and self.numel <= get_max_y_grid()) ): key = f"{key} * (tl.program_id({self.grid_dim + 1}) + 1)" @@ -1415,6 +1417,7 @@ class TritonKernel(Kernel): is_loop=is_reduction and not self.persistent_reduction, tensor_dim=tensor_dim, grid_dim=grid_dim, + has_zdim="z" in active_prefixes, ) ) for tree in self.range_trees: diff --git a/torch/_inductor/codegen/triton_split_scan.py b/torch/_inductor/codegen/triton_split_scan.py index c6b851dc42..8df904946e 100644 --- a/torch/_inductor/codegen/triton_split_scan.py +++ b/torch/_inductor/codegen/triton_split_scan.py @@ -73,6 +73,7 @@ class TritonSplitScanKernel(TritonKernel): is_loop=False, tensor_dim=tensor_dim, grid_dim=grid_dim, + has_zdim=False, ) ) for tree in self.range_trees: diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index f65cd6eaa5..ff6388e30e 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -1647,21 +1647,19 @@ def grid(*numels): return numel return ceildiv(numel, block) - max_grid_dims = config.triton.max_tiles - def grid_fn(meta): x_grid = get_grid_dim(xnumel, meta.get("XBLOCK", 1)) y_grid = get_grid_dim(ynumel, meta.get("YBLOCK", None)) - MAX_Y_GRID = get_max_y_grid() - if znumel is None and max_grid_dims <= 2: - div = ceildiv(y_grid, MAX_Y_GRID) + max_y_grid = get_max_y_grid() + if znumel is None: + div = ceildiv(y_grid, max_y_grid) y_grid = y_grid // div z_grid = div else: z_grid = get_grid_dim(znumel, meta.get("ZBLOCK", None)) torch._check( - y_grid <= MAX_Y_GRID, + y_grid <= max_y_grid, lambda: f"Generated y grid beyond 2^16 ({y_grid}) not supported with z dimension present. File issue", )
2.41.0
093735ccde6e8d0f70189ab3ebe6b8ba96e2a76
Sun, 21 Apr 2024 11:09:47 -0700
[PATCH 0478/1000] [inductor] Use compile time config values in runtime (#124561)
This removes the usage of torch._inductor.config from `torch._inductor.runtime`, fixing two issues: 1) if configs change, the runtime should use the compile-time values; 2) in compile workers, we want the parent process's config, not the worker's. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124561 Approved by: https://github.com/yanboliang ghstack dependencies: #124552, #124553, #124557, #124559, #124560, #124569
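The core of the change is visible in `TritonKernel.inductor_meta_common()` and the new `inductor_meta.get(...)` reads in the diff below: codegen snapshots config values into the inductor_meta dict emitted with each kernel, and the runtime only consults that dict (with defaults), never the live config. A trimmed, illustrative sketch of that pattern (only a small subset of the real keys is shown):

    def build_inductor_meta(config):
        # Compile-time side: capture the config values the kernel will need.
        return {
            "autotune_pointwise": config.triton.autotune_pointwise,
            "coordinate_descent_tuning": config.coordinate_descent_tuning,
            "spill_threshold": config.triton.spill_threshold,
        }


    def disable_pointwise_autotuning(inductor_meta):
        # Runtime side: behavior depends only on the values captured when the
        # kernel was compiled (trimmed from the fuller version in the diff).
        return not inductor_meta.get("autotune_pointwise", True)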
diff --git a/test/inductor/test_coordinate_descent_tuner.py b/test/inductor/test_coordinate_descent_tuner.py index 8f57cab4d3..70618c06e9 100644 --- a/test/inductor/test_coordinate_descent_tuner.py +++ b/test/inductor/test_coordinate_descent_tuner.py @@ -5,6 +5,7 @@ import unittest from unittest import mock import torch +from torch._inductor.runtime.hints import TRITON_MAX_BLOCK from torch._inductor.test_case import run_tests, TestCase from torch.testing._internal.common_utils import IS_LINUX @@ -104,7 +105,7 @@ class TestCoordinateDescentTuner(TestCase): tuner = CoordescTuner(size_hints=size_hints) - max_block = config.triton.max_block + max_block = TRITON_MAX_BLOCK self.assertFalse(tuner.value_too_large("XBLOCK", max_block["X"])) self.assertTrue(tuner.value_too_large("XBLOCK", max_block["X"] * 2)) self.assertFalse(tuner.value_too_large("RBLOCK", max_block["R"])) diff --git a/test/inductor/test_triton_heuristics.py b/test/inductor/test_triton_heuristics.py index 2094797f9f..3a85211f81 100644 --- a/test/inductor/test_triton_heuristics.py +++ b/test/inductor/test_triton_heuristics.py @@ -16,6 +16,7 @@ except ImportError: raise unittest.SkipTest("requires triton") # noqa: TRY200 from torch._inductor import config +from torch._inductor.runtime.hints import TRITON_MAX_BLOCK from torch._inductor.runtime.triton_heuristics import triton_config from torch._inductor.test_case import run_tests, TestCase @@ -30,7 +31,7 @@ class TestTritonHeuristics(TestCase): key = f"{label}BLOCK" if key not in cfg.kwargs: continue - self.assertTrue(cfg.kwargs[key] <= config.triton.max_block[label]) + self.assertTrue(cfg.kwargs[key] <= TRITON_MAX_BLOCK[label]) def _test_artificial_zgrid(self): def forward(primals_1, primals_2, primals_5): diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py index fb1f460628..4950f5e802 100644 --- a/torch/_inductor/codegen/triton.py +++ b/torch/_inductor/codegen/triton.py @@ -46,7 +46,7 @@ from ..codecache import code_hash, get_path, PyCodeCache from ..dependencies import Dep, MemoryDep, StarDep, WeakDep from ..ir import IRNode, TritonTemplateBuffer from ..optimize_indexing import indexing_dtype_strength_reduction -from ..runtime.hints import ReductionHint +from ..runtime.hints import ReductionHint, TRITON_MAX_BLOCK from ..runtime.runtime_utils import ( do_bench, get_max_y_grid, @@ -252,7 +252,7 @@ class BlockPtrOptions: and not V.graph.sizevars.statically_known_equals(self.strides[i], 0) # type: ignore[arg-type] and not V.graph.sizevars.statically_known_multiple_of( self.shape[i], - config.triton.max_block[self.block_shape[i][0]], # type: ignore[arg-type] + TRITON_MAX_BLOCK[self.block_shape[i][0]], # type: ignore[arg-type] ) and not (V.kernel.no_x_dim and self.block_shape[i] == "XBLOCK") ): @@ -1771,9 +1771,9 @@ class TritonKernel(Kernel): continue # Masks are superfluous if numel is a multiple of BLOCK # (We use the fact that BLOCK is required by triton to be a power of 2) - if tree.prefix.upper() not in config.triton.max_block: + if tree.prefix.upper() not in TRITON_MAX_BLOCK: continue - max_block = config.triton.max_block[tree.prefix.upper()] + max_block = TRITON_MAX_BLOCK[tree.prefix.upper()] # Optional optimization: if block divides numel exactly, we will # never need to do a masked load to handle stragglers at the end. # It's faster to avoid masking at all. 
But it is sound to always @@ -2732,6 +2732,42 @@ class TritonKernel(Kernel): return "reduction" return "pointwise" + @staticmethod + def inductor_meta_common(): + inductor_meta = { + "backend_hash": torch.utils._triton.triton_hash_with_backend(), + "are_deterministic_algorithms_enabled": torch.are_deterministic_algorithms_enabled(), + "assert_indirect_indexing": config.assert_indirect_indexing, + "autotune_local_cache": config.autotune_local_cache, + "autotune_pointwise": config.triton.autotune_pointwise, + "autotune_remote_cache": config.autotune_remote_cache, + "dynamic_scale_rblock": config.dynamic_scale_rblock, + "max_autotune": config.max_autotune, + "max_autotune_pointwise": config.max_autotune_pointwise, + "min_split_scan_rblock": config.triton.min_split_scan_rblock, + "spill_threshold": config.triton.spill_threshold, + "store_cubin": config.triton.store_cubin, + } + if torch.version.hip is not None: + inductor_meta["is_hip"] = True + if config.is_fbcode(): + inductor_meta["is_fbcode"] = True + if config.profile_bandwidth: + inductor_meta["profile_bandwidth"] = config.profile_bandwidth + inductor_meta["profile_bandwidth_regex"] = config.profile_bandwidth_regex + inductor_meta["profile_bandwidth_output"] = config.profile_bandwidth_output + if config.coordinate_descent_tuning: + inductor_meta[ + "coordinate_descent_tuning" + ] = config.coordinate_descent_tuning + inductor_meta[ + "coordinate_descent_search_radius" + ] = config.coordinate_descent_search_radius + inductor_meta[ + "coordinate_descent_check_all_directions" + ] = config.coordinate_descent_check_all_directions + return inductor_meta + def codegen_kernel(self, name=None): code = IndentedBuffer() @@ -2807,8 +2843,9 @@ class TritonKernel(Kernel): "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), "mutated_arg_names": mutated_args, "no_x_dim": self.no_x_dim, - "backend_hash": torch.utils._triton.triton_hash_with_backend(), + **self.inductor_meta_common(), } + num_gb = None if config.benchmark_kernel or config.profile_bandwidth: num_gb = self.estimate_kernel_num_bytes() / 1e9 diff --git a/torch/_inductor/codegen/triton_foreach.py b/torch/_inductor/codegen/triton_foreach.py index 449af125d8..a0acdcdae0 100644 --- a/torch/_inductor/codegen/triton_foreach.py +++ b/torch/_inductor/codegen/triton_foreach.py @@ -5,8 +5,6 @@ from typing import Dict, List, Tuple from sympy import Integer -import torch - from .. 
import metrics from ..scheduler import SchedulerNode from ..utils import ceildiv, Placeholder @@ -166,7 +164,7 @@ class ForeachKernel(Kernel): triton_meta["configs"] = [config_of(signature)] inductor_meta = { "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), - "backend_hash": torch.utils._triton.triton_hash_with_backend(), + **TritonKernel.inductor_meta_common(), } return f""" @triton_heuristics.foreach( diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py index 17c2f58a76..18d4ccaf3e 100644 --- a/torch/_inductor/codegen/wrapper.py +++ b/torch/_inductor/codegen/wrapper.py @@ -1129,13 +1129,13 @@ class WrapperCodeGen(CodeGen): compile_wrapper = IndentedBuffer() compile_wrapper.writeline(f"async_compile.triton({original_name!r}, '''") - from .triton import gen_common_triton_imports + from .triton import gen_common_triton_imports, TritonKernel compile_wrapper.splice(gen_common_triton_imports()) inductor_meta = { "kernel_name": name, - "backend_hash": torch.utils._triton.triton_hash_with_backend(), + **TritonKernel.inductor_meta_common(), } configs = [ @@ -1264,7 +1264,7 @@ class WrapperCodeGen(CodeGen): self.wrapper_call.writeline("start_graph()") def generate_end_graph(self): - self.wrapper_call.writeline("end_graph()") + self.wrapper_call.writeline(f"end_graph({config.profile_bandwidth_output!r})") def generate_reset_kernel_saved_flags(self): self.wrapper_call.splice( diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py index ea9f8955b1..b5b8e16684 100644 --- a/torch/_inductor/config.py +++ b/torch/_inductor/config.py @@ -657,18 +657,6 @@ class triton: # hint to Triton when arguments are divisible by 16 divisible_by_16 = True - # theses are not enforced, but they are used by asserts in triton_heuristics.py - # NOTE: mobilevit_s in timm_models required X to be set to the higher value 2048 - - # Max RBLOCK will be large for multi-kernel since we do more aggressive - # persistent reduction. - max_block = { - "X": 2048, - "Y": 1024, - "Z": 1024, - "R": 4096 * (16 if multi_kernel else 1), - } - # Minimum RBLOCK to be used for a TritonSplitScanKernel # NOTE: This also indirectly controls the size of workspace buffer required min_split_scan_rblock = 256 diff --git a/torch/_inductor/runtime/coordinate_descent_tuner.py b/torch/_inductor/runtime/coordinate_descent_tuner.py index f280765aec..b5d10478a0 100644 --- a/torch/_inductor/runtime/coordinate_descent_tuner.py +++ b/torch/_inductor/runtime/coordinate_descent_tuner.py @@ -3,6 +3,8 @@ import itertools import logging from typing import Callable, Optional +from .hints import TRITON_MAX_BLOCK + from .runtime_utils import red_text, triton_config_to_hashable try: @@ -10,8 +12,6 @@ try: except ImportError: triton = None -from torch._inductor import config as inductor_config - log = logging.getLogger(__name__) @@ -44,32 +44,35 @@ class CoordescTuner: i.e., there are multiple local optima.. 
""" - def __init__(self, is_mm=False, name="unknown", size_hints=None): + def __init__( + self, is_mm=False, name="unknown", size_hints=None, inductor_meta=None + ): self.is_mm = is_mm # we will tune num_stages for mm self.cached_benchmark_results = {} self.name = name self.size_hints = size_hints + self.inductor_meta = inductor_meta or {} def get_xmax(self): - xmax = inductor_config.triton.max_block["X"] + xmax = TRITON_MAX_BLOCK["X"] if self.size_hints and len(self.size_hints) > 0: xmax = min(xmax, self.size_hints[0]) return xmax def get_ymax(self): - ymax = inductor_config.triton.max_block["Y"] + ymax = TRITON_MAX_BLOCK["Y"] if self.size_hints and len(self.size_hints) > 1: ymax = min(ymax, self.size_hints[1]) return ymax def get_zmax(self): - zmax = inductor_config.triton.max_block["Z"] + zmax = TRITON_MAX_BLOCK["Z"] if self.size_hints and len(self.size_hints) > 2: zmax = min(zmax, self.size_hints[2]) return zmax def get_rmax(self): - rmax = inductor_config.triton.max_block["R"] + rmax = TRITON_MAX_BLOCK["R"] if self.size_hints and len(self.size_hints) > 0: rmax = min(rmax, self.size_hints[-1]) # the last one is for reduction return rmax @@ -194,7 +197,7 @@ class CoordescTuner: candidate_values = self.get_neighbour_values( field, old_value, - radius=inductor_config.coordinate_descent_search_radius, + radius=self.inductor_meta.get("coordinate_descent_search_radius", 1), include_self=True, ) candidate_values_list.append(candidate_values) @@ -286,7 +289,9 @@ class CoordescTuner: improved = True best_config, best_timing = candidate_config, candidate_timing - if not improved and inductor_config.coordinate_descent_check_all_directions: + if not improved and self.inductor_meta.get( + "coordinate_descent_check_all_directions" + ): old_best_timing = best_timing improved, best_config, best_timing = self.check_all_tuning_directions( func, best_config, best_timing diff --git a/torch/_inductor/runtime/hints.py b/torch/_inductor/runtime/hints.py index 082e7fcc89..5b2b53ebff 100644 --- a/torch/_inductor/runtime/hints.py +++ b/torch/_inductor/runtime/hints.py @@ -3,6 +3,15 @@ from dataclasses import fields from enum import auto, Enum +# NOTE: if these fail asserts submit a PR to increase them +TRITON_MAX_BLOCK = { + "X": 2048, + "Y": 1024, + "Z": 1024, + "R": 4096 * 16, # * 16 is multi-kernel only +} + + class ReductionHint(Enum): INNER = 0 OUTER = 1 diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py index ff6388e30e..1b042d4f4a 100644 --- a/torch/_inductor/runtime/triton_heuristics.py +++ b/torch/_inductor/runtime/triton_heuristics.py @@ -17,15 +17,15 @@ from typing import Any, Callable, Dict, List, Optional, Set, Tuple import torch from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device - -from torch._inductor import config from .coordinate_descent_tuner import CoordescTuner + from .hints import ( _NUM_THREADS_PER_WARP, AutotuneHint, HeuristicType, ReductionHint, TileHint, + TRITON_MAX_BLOCK, ) from .runtime_utils import ( cache_dir, @@ -111,12 +111,12 @@ def autotune_hints_to_configs( return configs -def disable_pointwise_autotuning(): +def disable_pointwise_autotuning(inductor_meta): # Autotuning can give different benchmarking results from run to run, and # therefore we disable autotuning when use_deterministic flag is on. 
- if torch.are_deterministic_algorithms_enabled(): + if inductor_meta.get("are_deterministic_algorithms_enabled"): return True - return not config.triton.autotune_pointwise + return not inductor_meta.get("autotune_pointwise", True) class CachingAutotuner(KernelInterface): @@ -179,7 +179,10 @@ class CachingAutotuner(KernelInterface): self.size_hints = size_hints self.coordesc_tuner = CoordescTuner( - is_mm=False, name=self.fn.__name__, size_hints=size_hints + is_mm=False, + name=self.fn.__name__, + size_hints=size_hints, + inductor_meta=self.inductor_meta, ) self.filename = filename @@ -220,11 +223,11 @@ class CachingAutotuner(KernelInterface): self.triton_meta["device"] ) if ( - config.dynamic_scale_rblock + self.inductor_meta.get("dynamic_scale_rblock", True) and self.heuristic_type == HeuristicType.REDUCTION and self.size_hints is not None # Disable for AMDGPU as Triton is not ready to return n_regs for a compiled_binary. - and torch.version.hip is None + and not self.inductor_meta.get("is_hip") # Disable for Intel GPU as Triton is not ready to return n_regs for a compiled_binary. and self.device_type != "xpu" and device_prop.major >= 8 @@ -307,9 +310,9 @@ class CachingAutotuner(KernelInterface): compile_meta["constants"][self.fn.arg_names.index(k)] = v compile_meta["num_warps"] = cfg.num_warps compile_meta["num_stages"] = cfg.num_stages - compile_meta["debug"] = ( - config.assert_indirect_indexing and torch.version.hip is None - ) + compile_meta["debug"] = self.inductor_meta.get( + "assert_indirect_indexing", True + ) and not self.inductor_meta.get("is_hip", False) # Setting device_type="hip" required on ROCm to pass down to triton compile_meta["device_type"] = ( @@ -561,7 +564,7 @@ class CachingAutotuner(KernelInterface): launcher.n_regs = getattr(binary, "n_regs", None) launcher.n_spills = getattr(binary, "n_spills", None) launcher.shared = binary_shared - launcher.store_cubin = config.triton.store_cubin + launcher.store_cubin = self.inductor_meta.get("store_cubin", False) # store this global variable to avoid the high overhead of reading it when calling run if launcher.store_cubin: launcher.fn = self.fn @@ -576,7 +579,9 @@ class CachingAutotuner(KernelInterface): # control over the kernel code; (ii) there is empirical evidence that # for some (complicated) custom Triton kernels, a register-spilling # config may yield the best latency. - if not self.custom_kernel and launcher.n_spills > config.triton.spill_threshold: + if not self.custom_kernel and launcher.n_spills > self.inductor_meta.get( + "spill_threshold", 16 + ): log.debug( "Skip config %s because of register spilling: %d", launcher.config, @@ -766,10 +771,9 @@ class CachingAutotuner(KernelInterface): if len(self.launchers) > 1: self.autotune_to_one_config(*args, grid=grid, **kwargs) - if ( - not getattr(self.launchers[0].config, "found_by_coordesc", False) - and config.coordinate_descent_tuning - ): + if not getattr( + self.launchers[0].config, "found_by_coordesc", False + ) and self.inductor_meta.get("coordinate_descent_tuning", False): self.launchers = [ self.coordinate_descent_tuning( self.launchers[0], *args, grid=grid, **kwargs @@ -844,7 +848,7 @@ def start_graph(): collected_calls.clear() -def end_graph(): +def end_graph(output_file): if len(collected_calls) == 0: return overall_time = sum(call[0] for call in collected_calls) @@ -856,7 +860,6 @@ def end_graph(): ) print(summary_str) print() - output_file = config.profile_bandwidth_output if output_file is not None: # sort perf numbers in descending order, i.e. 
placing the # most runtime-heavy kernels at the top of the list @@ -939,6 +942,7 @@ def load_cached_autotuning( best_config, configs_hash: str, configs: List[Config], + inductor_meta: Dict[str, Any], ): if best_config is None: return None @@ -948,7 +952,9 @@ def load_cached_autotuning( # Remove time taken for comparison best_config.pop("time_taken_ms", None) - if config.coordinate_descent_tuning and best_config.pop("found_by_coordesc", False): + if inductor_meta.get("coordinate_descent_tuning") and best_config.pop( + "found_by_coordesc", False + ): num_warps = best_config.pop("num_warps") num_stages = best_config.pop("num_stages") triton_config = Config(best_config, num_warps=num_warps, num_stages=num_stages) @@ -968,12 +974,12 @@ def load_cached_autotuning( return matching_configs[0] -def should_use_remote_autotune_cache(): - if config.autotune_remote_cache: +def should_use_remote_autotune_cache(inductor_meta): + if inductor_meta.get("autotune_remote_cache"): return True - if not config.is_fbcode(): + if not inductor_meta.get("is_fbcode"): return False - if torch.version.hip is not None: + if inductor_meta.get("is_hip"): return False from triton.runtime.fb_memcache import MEMCACHE_VERSION @@ -1002,22 +1008,24 @@ def cached_autotune( inductor_meta = {} if inductor_meta is None else inductor_meta # on disk caching logic and/or remote caching - if filename is not None and (len(configs) > 1 or config.coordinate_descent_tuning): + if filename is not None and ( + len(configs) > 1 or inductor_meta.get("coordinate_descent_tuning") + ): configs_hash = hash_configs(configs) cache_filename = None remote_cache = None remote_cache_key = None - if config.autotune_local_cache: + if inductor_meta.get("autotune_local_cache", True): cache_filename = os.path.splitext(filename)[0] + ".best_config" - if should_use_remote_autotune_cache(): + if should_use_remote_autotune_cache(inductor_meta): backend_hash = inductor_meta.get("backend_hash", None) if backend_hash is not None: key = backend_hash + configs_hash + "autotune-best-config-v2" key = hashlib.sha256(key.encode("utf-8")).hexdigest() try: - if config.is_fbcode(): + if inductor_meta.get("is_fbcode"): remote_cache = triton.runtime.fb_memcache.FbMemcacheRemoteAutotuneCacheBackend( key ) @@ -1040,7 +1048,9 @@ def cached_autotune( elif remote_cache is not None and remote_cache_key is not None: best_config = remote_cache.get(remote_cache_key) - best_config = load_cached_autotuning(best_config, configs_hash, configs) + best_config = load_cached_autotuning( + best_config, configs_hash, configs, inductor_meta + ) if best_config: configs = [best_config] @@ -1081,12 +1091,12 @@ def cached_autotune( assert tconfig.kwargs["XBLOCK"] == 1 tconfig.kwargs.pop("XBLOCK") - if config.profile_bandwidth: + if inductor_meta.get("profile_bandwidth"): return DebugAutotuner( fn, triton_meta=triton_meta, inductor_meta=inductor_meta, - regex_filter=config.profile_bandwidth_regex, + regex_filter=inductor_meta["profile_bandwidth_regex"], configs=configs, save_cache_hook=save_cache_hook, mutated_arg_names=mutated_arg_names, @@ -1134,7 +1144,7 @@ def check_config(cfg, *, xnumel=None, ynumel=None, znumel=None): f"TritonKernel.indexing assumes numel == 1 => BLOCK == 1" f" but {label.lower()}numel=={numel} and {label}BLOCK={block} (cfg={cfg})." 
) - max_block = config.triton.max_block[label] + max_block = TRITON_MAX_BLOCK[label] max_block_str = f'config.triton.max_block["{label}"]' assert max_block % block == 0, ( f"TritonKernel.indexing assumes {label}BLOCK divides {max_block_str}" @@ -1186,13 +1196,13 @@ def triton_config( # if we are below original block size, scale up where we can; # or if the calculated grid size is larger than the limit, we bump up the corresponding dimension - while x < min(size_hints[0], config.triton.max_block["X"]) and ( + while x < min(size_hints[0], TRITON_MAX_BLOCK["X"]) and ( x * maxGridSize[0] < size_hints[0] or conditional_product(x, y, z) < target ): x *= 2 while ( y - and y < min(size_hints[1], config.triton.max_block["Y"]) + and y < min(size_hints[1], TRITON_MAX_BLOCK["Y"]) and ( y * maxGridSize[1] < size_hints[1] or conditional_product(x, y, z) < target ) @@ -1200,7 +1210,7 @@ def triton_config( y *= 2 while ( z - and z < min(size_hints[2], config.triton.max_block["Z"]) + and z < min(size_hints[2], TRITON_MAX_BLOCK["Z"]) and ( z * maxGridSize[2] < size_hints[2] or conditional_product(x, y, z) < target ) @@ -1263,9 +1273,7 @@ def triton_config_reduction(size_hints, x, r, num_stages=1, num_warps=None) -> C num_warps = conditional_product(x, r) // 128 num_warps = next_power_of_2(min(max(num_warps, 2), 8)) check_config(cfg, xnumel=size_hints[0]) - assert ( - r <= config.triton.max_block["R"] - ), f"increase config.triton.MAX_BLOCK['r'] to {r}" + assert r <= TRITON_MAX_BLOCK["R"], f"increase TRITON_MAX_BLOCK['r'] to {r}" return Config(cfg, num_warps=num_warps, num_stages=num_stages) @@ -1296,9 +1304,7 @@ def triton_config_tiled_reduction(size_hints, x, y, r, num_stages=1): cfg = {"XBLOCK": x, "YBLOCK": y, "RBLOCK": r} num_warps = next_power_of_2(min(max(conditional_product(x, y, r) // 256, 1), 8)) check_config(cfg, xnumel=size_hints[0], ynumel=size_hints[1]) - assert ( - r <= config.triton.max_block["R"] - ), f"increase config.triton.MAX_BLOCK['r'] to {r}" + assert r <= TRITON_MAX_BLOCK["R"], f"increase TRITON_MAX_BLOCK['r'] to {r}" return Config(cfg, num_warps=num_warps, num_stages=num_stages) @@ -1328,8 +1334,9 @@ def pointwise( ) if len(size_hints) == 1: - if disable_pointwise_autotuning() and not ( - config.max_autotune or config.max_autotune_pointwise + if disable_pointwise_autotuning(inductor_meta) and not ( + inductor_meta.get("max_autotune") + or inductor_meta.get("max_autotune_pointwise") ): return cached_autotune( size_hints, @@ -1357,8 +1364,11 @@ def pointwise( filename=filename, ) if len(size_hints) == 2: - if (disable_pointwise_autotuning() or tile_hint == TileHint.SQUARE) and not ( - config.max_autotune or config.max_autotune_pointwise + if ( + disable_pointwise_autotuning(inductor_meta) or tile_hint == TileHint.SQUARE + ) and not ( + inductor_meta.get("max_autotune") + or inductor_meta.get("max_autotune_pointwise") ): return cached_autotune( size_hints, @@ -1385,7 +1395,7 @@ def pointwise( heuristic_type=HeuristicType.POINTWISE, ) if len(size_hints) == 3: - if disable_pointwise_autotuning(): + if disable_pointwise_autotuning(inductor_meta): return cached_autotune( size_hints, [triton_config_with_settings(size_hints, 16, 16, 16)], @@ -1428,7 +1438,7 @@ def _reduction_configs( tiny_config = triton_config_reduction( size_hints, 2 * (256 // rnumel) if rnumel <= 256 else 1, min(rnumel, 2048) ) - if config.max_autotune or config.max_autotune_pointwise: + if inductor_meta.get("max_autotune") or inductor_meta.get("max_autotune_pointwise"): pass # skip all these cases elif reduction_hint == 
ReductionHint.INNER: return [contiguous_config] @@ -1436,7 +1446,7 @@ def _reduction_configs( return [outer_config] elif reduction_hint == ReductionHint.OUTER_TINY: return [tiny_config] - if disable_pointwise_autotuning(): + if disable_pointwise_autotuning(inductor_meta): return [triton_config_reduction(size_hints, 32, 128)] return [ contiguous_config, @@ -1515,7 +1525,7 @@ def persistent_reduction( # we don't need RBLOCK for persistent reduction c.kwargs.pop("RBLOCK") - if disable_pointwise_autotuning(): + if disable_pointwise_autotuning(inductor_meta): configs = configs[:1] return cached_autotune( @@ -1542,14 +1552,13 @@ def split_scan( size_hints = [1, *size_hints[1:]] assert triton_meta is not None - rnumel = size_hints[-1] if len(size_hints) != 2: raise NotImplementedError(f"size_hints: {size_hints}") configs = _reduction_configs(size_hints=size_hints, inductor_meta=inductor_meta) # Fixup configs to enforce the minimum RBLOCK size - min_rblock = config.triton.min_split_scan_rblock + min_rblock = inductor_meta.get("min_split_scan_rblock", 256) for cfg in configs: if cfg.kwargs["RBLOCK"] < min_rblock: cfg.kwargs["RBLOCK"] = min_rblock diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py index 2485de547b..5360c41765 100644 --- a/torch/_inductor/select_algorithm.py +++ b/torch/_inductor/select_algorithm.py @@ -162,7 +162,7 @@ class TritonTemplateKernel(TritonKernel): inductor_meta = { "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), - "backend_hash": torch.utils._triton.triton_hash_with_backend(), + **TritonKernel.inductor_meta_common(), } if config.profile_bandwidth or config.benchmark_kernel: num_gb = self.estimate_kernel_num_bytes() / 1e9
2.41.0
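The diff above replaces reads of the global `config` module with lookups in the `inductor_meta` dict that is passed along with each kernel. A standalone sketch of that lookup pattern, reusing the same function shown in the diff (defaults as shown there):

```
# Sketch of the inductor_meta lookup pattern from the diff above. Options live
# in a plain dict carried with the kernel, and .get() supplies the same
# defaults the old global config module provided.
from typing import Any, Dict


def disable_pointwise_autotuning(inductor_meta: Dict[str, Any]) -> bool:
    if inductor_meta.get("are_deterministic_algorithms_enabled"):
        return True
    return not inductor_meta.get("autotune_pointwise", True)


print(disable_pointwise_autotuning({}))                             # False
print(disable_pointwise_autotuning({"autotune_pointwise": False}))  # True
```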
bbbc754dd166edc0c83bb44288682f71aae2275
Mon, 22 Apr 2024 11:01:22 -0400
[PATCH 0479/1000] Add AOTInductor generated cpp code to TORCH_TRACE (#124617)
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124617 Approved by: https://github.com/albanD
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py index a5474b0616..8707bd8a6b 100644 --- a/torch/_inductor/codecache.py +++ b/torch/_inductor/codecache.py @@ -51,7 +51,6 @@ from typing import ( ) import torch - from torch._dynamo.device_interface import ( get_interface_for_device, get_registered_device_interfaces, @@ -61,6 +60,8 @@ from torch._inductor import config, exc, metrics from torch._inductor.codegen.cuda import cuda_env from torch._inductor.runtime.runtime_utils import cache_dir from torch._inductor.utils import clear_on_fresh_inductor_cache, is_linux + +from torch._logging import trace_structured from torch._subclasses.fake_tensor import ( extract_tensor_metadata, FakeTensor, @@ -1721,6 +1722,15 @@ class AotCodeCompiler: specified_dir=specified_output_path, ) output_code_log.info("Output code written to: %s", input_path) + trace_structured( + "graph_dump", + lambda: { + "name": "inductor_aot_code", + "type": "cpp", + "filename": input_path, + }, + payload_fn=lambda: source_code, + ) def _compile_consts_linux(consts: bytes) -> str: _, consts_path = write(
2.41.0
b37910e307c7fc1accd610829b66275d3f4e5c1
Sun, 21 Apr 2024 18:52:50 -0700
[PATCH 0481/1000] [AOTI] Fixes ScatterFallback codegen (#124580)
Summary: For https://github.com/pytorch/pytorch/issues/123184. ScatterFallback currently relies on op name matching for codegen, which makes its cpp codegen fragile. Refactor to use op_overload and fix the relevant unit test failures. Differential Revision: [D56417815](https://our.internmc.facebook.com/intern/diff/D56417815) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124580 Approved by: https://github.com/chenyang78
diff --git a/test/inductor/test_cpu_cpp_wrapper.py b/test/inductor/test_cpu_cpp_wrapper.py index bb7e6770a6..828ed8eb0e 100644 --- a/test/inductor/test_cpu_cpp_wrapper.py +++ b/test/inductor/test_cpu_cpp_wrapper.py @@ -88,8 +88,6 @@ if config.abi_compatible: "test_qlinear_cpu", "test_qlinear_dequant_promotion_cpu", "test_qlinear_relu_cpu", - "test_scatter5_cpu", - "test_scatter6_cpu", "test_tensor2_cpu", ] for test_name in xfail_list: diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py index 4c07930045..a8da96a5ea 100644 --- a/torch/_inductor/codegen/cpp_wrapper_cpu.py +++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py @@ -1278,22 +1278,44 @@ class CppWrapperCpu(WrapperCodeGen): ) def generate_scatter_fallback( - self, output, inputs, kernel, python_kernel_name, src_is_tensor, reduce, kwargs + self, + output, + inputs, + cpp_kernel_name, + python_kernel_name, + src_is_tensor, + reduce, + kwargs, ): # No stack allocation when there is a fallback op self.allow_stack_allocation = False # TODO: needs updates to use C shim v2 - # TODO: support other overload for cpp wrapper and remove the below assertions if config.abi_compatible: # call the ABI shim function instead of the ATen one - kernel = kernel.replace("at::", "aoti_torch_") - inputs_wrapped = [f"convert_arrayref_tensor_to_tensor({x})" for x in inputs] - line = f"{kernel}(convert_arrayref_tensor_to_tensor({output}), {','.join(inputs_wrapped)}" + if config.c_shim_version == "1": + cpp_kernel_name = ( + "aoti_torch_scatter_reduce_out" + if python_kernel_name.startswith("aten.scatter_reduce") + else "aoti_torch_scatter_out" + ) + else: + cpp_kernel_name = self.get_c_shim_func_name(cpp_kernel_name) + # C shim only contains out-variant instead of inplace-variant + cpp_kernel_name = cpp_kernel_name.replace("__", "_") + "_out" + inputs_wrapped = [ + f"convert_arrayref_tensor_to_tensor({x})" + if isinstance(x, str) + else str(x) + for x in inputs + ] + line = f"{cpp_kernel_name}(convert_arrayref_tensor_to_tensor({output}), {','.join(inputs_wrapped)}" else: - line = f"{kernel}({output}, {','.join(map(str, inputs))}" + line = f"{cpp_kernel_name}({','.join(map(str, inputs))}" - if python_kernel_name == "aten.scatter_": + if python_kernel_name.startswith("aten.scatter_reduce"): + line += f", {','.join(kwargs)}" + else: if src_is_tensor: if reduce: line += f", {V.graph.wrapper_code.val_to_arg_str(reduce)}" @@ -1301,9 +1323,7 @@ class CppWrapperCpu(WrapperCodeGen): assert ( reduce is None ), "Expect reduce to be None for aten.scatter_ with scalar src" - else: - line += f", {','.join(kwargs)}" - line += f"){self.ending}" + line += ");" self.writeline(line) def generate_index_put_fallback(self, kernel, x, indices, values, accumulate): diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py index 18d4ccaf3e..b679b1f174 100644 --- a/torch/_inductor/codegen/wrapper.py +++ b/torch/_inductor/codegen/wrapper.py @@ -686,15 +686,22 @@ class WrapperCodeGen(CodeGen): ) def generate_scatter_fallback( - self, output, inputs, kernel, python_kernel_name, src_is_tensor, reduce, kwargs + self, + output, + inputs, + cpp_kernel_name, + python_kernel_name, + src_is_tensor, + reduce, + kwargs, ): - line = f"{kernel}({','.join(map(str, inputs))}" - if kernel == "aten.scatter_": + line = f"{python_kernel_name}({','.join(map(str, inputs))}" + if python_kernel_name.startswith("aten.scatter_reduce"): + line += ", ".join([""] + kwargs) + else: if reduce: line += f", reduce={repr(reduce)}" - else: - line 
+= ", ".join([""] + kwargs) - line += f"){self.ending}" + line += ")" self.writeline(line) def generate_index_put_fallback(self, kernel, x, indices, values, accumulate): diff --git a/torch/_inductor/decomposition.py b/torch/_inductor/decomposition.py index 47370c2e54..c483753e2a 100644 --- a/torch/_inductor/decomposition.py +++ b/torch/_inductor/decomposition.py @@ -728,7 +728,7 @@ def index_reduce( return out / counts if true_division else out // counts if use_scatter_fallback( - "aten.scatter_reduce_", + aten.scatter_reduce_.two, reduction_type, self.dtype, src.dtype, diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py index 59ac4ee395..0131a52239 100644 --- a/torch/_inductor/ir.py +++ b/torch/_inductor/ir.py @@ -4910,7 +4910,7 @@ class ScatterFallback(ExternKernel): wrapper.generate_scatter_fallback( x, [x, self.constant_args[0], index, src], - self.get_kernel_name(), + self.cpp_kernel_name, self.python_kernel_name, self.src_is_tensor, reduce, @@ -4920,25 +4920,6 @@ class ScatterFallback(ExternKernel): def should_allocate(self): return False - def get_cpp_kernel(self): - reduce = self.kwargs["reduce"] - if self.python_kernel_name == "aten.scatter_": - if self.src_is_tensor: - kernel = ( - "at::scatter_out" if reduce is None else "at::scatter_reduce_out" - ) - else: - assert ( - reduce is None - ), "Expect reduce to be None for aten.scatter_ with scalar src" - kernel = "at::scatter_out" - else: - assert ( - reduce is not None - ), "Expect reduce to be not None for aten.scatter_reduce_" - kernel = "at::scatter_reduce_out" - return kernel - def get_mutation_names(self): return [self.inputs[0].get_name()] @@ -4948,7 +4929,6 @@ class ScatterFallback(ExternKernel): def __init__( self, op_overload, - python_kernel_name, x, dim: int, index, @@ -4957,7 +4937,6 @@ class ScatterFallback(ExternKernel): reduce: Optional[str] = None, include_self: bool = True, ): - assert python_kernel_name in {"aten.scatter_", "aten.scatter_reduce_"} self.src_is_tensor = isinstance(src, TensorBox) constant_args: Tuple[Any, ...] 
@@ -4974,11 +4953,11 @@ class ScatterFallback(ExternKernel): self.unwrap_storage(tensors), constant_args, {"reduce": reduce, "include_self": include_self}, - python_kernel_name=python_kernel_name, + python_kernel_name=str(op_overload), ordered_kwargs_for_cpp_kernel=["reduce", "include_self"], op_overload=op_overload, ) - self.cpp_kernel_name = self.get_cpp_kernel() + self.cpp_kernel_name = get_aten_cpp_kernel_name(op_overload) self.name = V.graph.register_buffer(self) mark_node_as_mutating(self, x) diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py index 7fd89ab3bf..701e75b88d 100644 --- a/torch/_inductor/lowering.py +++ b/torch/_inductor/lowering.py @@ -2963,7 +2963,7 @@ def scatter(x, dim: int, index, src, **kwargs): def scatter_fallback( - fn, + op_overload: torch._ops.OpOverload, self, dim: int, index, @@ -2974,7 +2974,7 @@ def scatter_fallback( ): src_is_tensor = isinstance(src, TensorBox) if use_scatter_fallback( - fn, + op_overload, reduce, self.get_dtype(), src.get_dtype() if src_is_tensor else type(src), @@ -2982,8 +2982,7 @@ def scatter_fallback( src_is_tensor, ): ir.ScatterFallback( - V.graph.current_node.target, - fn, + op_overload, self, dim, index, @@ -3000,18 +2999,18 @@ def scatter_fallback( def scatter_(self, dim: int, index, src, *, reduce: Optional[str] = None): assert reduce in {None, "add", "multiply"} - fallback_result = scatter_fallback( - "aten.scatter_", self, dim, index, src, reduce=reduce - ) - - if fallback_result: - return fallback_result + if reduce is None: + op_overload = getattr(aten.scatter_, V.graph.current_node.target._overloadname) # type: ignore[union-attr] + fallback_result = scatter_fallback( + op_overload, self, dim, index, src, reduce=reduce + ) + if fallback_result is not None: + return fallback_result if reduce == "add": reduce = "sum" elif reduce == "multiply": reduce = "prod" - return scatter_reduce_(self, dim, index, src, reduce) @@ -3034,8 +3033,12 @@ def scatter_reduce(x, dim: int, index, src, reduction_type, **kwargs): def scatter_reduce_(self, dim: int, index, src, reduce, *, include_self: bool = True): assert reduce in {None, "sum", "prod", "mean", "amax", "amin"} + assert ( + len(aten.scatter_reduce_.overloads()) == 1 + and "two" in aten.scatter_reduce_.overloads() + ), "aten.scatter_reduce_.two is not the unique overload of aten.scatter_reduce_" fallback_result = scatter_fallback( - "aten.scatter_reduce_", + aten.scatter_reduce_.two, self, dim, index, diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index a7be602f87..7580b216a0 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -1360,9 +1360,16 @@ def needs_fallback_due_to_atomic_add_limitations(dtype): def use_scatter_fallback( - fn, reduction_type, self_dtype, src_dtype, src_device_type, src_is_tensor + op_overload: torch._ops.OpOverload, + reduction_type, + self_dtype, + src_dtype, + src_device_type, + src_is_tensor, ): - reduce_ty = "add" if fn == "aten.scatter_" else "sum" + reduce_ty = ( + "add" if op_overload.overloadpacket == torch.ops.aten.scatter_ else "sum" + ) return ( reduction_type not in {None, reduce_ty} @@ -1372,7 +1379,7 @@ def use_scatter_fallback( and needs_fallback_due_to_atomic_add_limitations(src_dtype) ) or ( - fn == "aten.scatter_reduce_" + op_overload.overloadpacket == torch.ops.aten.scatter_reduce_ and reduction_type == "sum" and src_is_tensor and src_device_type == "cpu"
2.41.0
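For context on the op the refactor above targets: `aten.scatter_reduce_.two` is the overload behind `Tensor.scatter_reduce_`, and the lowering now keys the fallback decision on the OpOverload instead of a kernel-name string. A small eager-mode sketch of the op itself (the values are illustrative):

```
import torch

# scatter_reduce_ accumulates src into self at the given indices.
self_t = torch.zeros(4)
index = torch.tensor([0, 1, 1, 3])
src = torch.tensor([1.0, 2.0, 3.0, 4.0])
self_t.scatter_reduce_(0, index, src, reduce="sum", include_self=True)
print(self_t)                              # tensor([1., 5., 0., 4.])
print(torch.ops.aten.scatter_reduce_.two)  # the OpOverload used by the lowering
```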
785b02ba68f7ffe29c5aac086c5ddc84fc587c5
Mon, 22 Apr 2024 21:27:32 +0000
[PATCH 0482/1000] Skip workspace permission change for ROCm CI (#123816)
PR https://github.com/pytorch/pytorch/pull/122922 added chown steps to test.sh and used the trap mechanism to ensure that, even if the test script fails and exits with a non-zero code, it will call the cleanup_workspace function on EXIT. However, this doesn't work as intended when the CI job gets cancelled, e.g. when a PR pushes new commits and the older commit's CI job gets cancelled. The trap function doesn't get called as the test script immediately aborts. Any subsequent jobs scheduled on the same runner then fail in the 'Checkout PyTorch' step when they try to delete the workspace. This has been resulting in a slew of CI failures on the HUD. Example of this situation playing out on one of the ROCm runners: Cancelled job: https://github.com/pytorch/pytorch/actions/runs/8563212279/job/23469711035 ![image](https://github.com/pytorch/pytorch/assets/37884920/7192e4fe-8cff-4256-abc8-9f874a3918ff) Subsequent failed job: https://github.com/pytorch/pytorch/actions/runs/8564517036/job/23472675041 ![image](https://github.com/pytorch/pytorch/assets/37884920/24b0af66-cfe9-431f-851a-24a1ccc18e84) This PR skips the logic introduced by PR 122922 for ROCm CI. Alternative to https://github.com/pytorch/pytorch/pull/123468 and https://github.com/pytorch/pytorch/pull/123588 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123816 Approved by: https://github.com/pruthvistony, https://github.com/zxiiro, https://github.com/kit1980, https://github.com/malfet
diff --git a/.ci/pytorch/build.sh b/.ci/pytorch/build.sh index 3a51f255fe..13069482ae 100755 --- a/.ci/pytorch/build.sh +++ b/.ci/pytorch/build.sh @@ -223,19 +223,23 @@ if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]] export BUILD_STATIC_RUNTIME_BENCHMARK=ON fi -# Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96) -WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace") -cleanup_workspace() { - echo "sudo may print the following warning message that can be ignored. The chown command will still run." - echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted" - echo "For more details refer to https://github.com/sudo-project/sudo/issues/42" - sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace -} -# Disable shellcheck SC2064 as we want to parse the original owner immediately. -# shellcheck disable=SC2064 -trap_add cleanup_workspace EXIT -sudo chown -R jenkins /var/lib/jenkins/workspace -git config --global --add safe.directory /var/lib/jenkins/workspace +# Do not change workspace permissions for ROCm CI jobs +# as it can leave workspace with bad permissions for cancelled jobs +if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then + # Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96) + WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace") + cleanup_workspace() { + echo "sudo may print the following warning message that can be ignored. The chown command will still run." + echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted" + echo "For more details refer to https://github.com/sudo-project/sudo/issues/42" + sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace + } + # Disable shellcheck SC2064 as we want to parse the original owner immediately. + # shellcheck disable=SC2064 + trap_add cleanup_workspace EXIT + sudo chown -R jenkins /var/lib/jenkins/workspace + git config --global --add safe.directory /var/lib/jenkins/workspace +fi if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then set -e diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh index 23eaf8a2dd..b13e41681a 100755 --- a/.ci/pytorch/test.sh +++ b/.ci/pytorch/test.sh @@ -9,19 +9,23 @@ set -ex # shellcheck source=./common.sh source "$(dirname "${BASH_SOURCE[0]}")/common.sh" -# Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96) -WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace") -cleanup_workspace() { - echo "sudo may print the following warning message that can be ignored. The chown command will still run." - echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted" - echo "For more details refer to https://github.com/sudo-project/sudo/issues/42" - sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace -} -# Disable shellcheck SC2064 as we want to parse the original owner immediately. 
-# shellcheck disable=SC2064 -trap_add cleanup_workspace EXIT -sudo chown -R jenkins /var/lib/jenkins/workspace -git config --global --add safe.directory /var/lib/jenkins/workspace +# Do not change workspace permissions for ROCm CI jobs +# as it can leave workspace with bad permissions for cancelled jobs +if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then + # Workaround for dind-rootless userid mapping (https://github.com/pytorch/ci-infra/issues/96) + WORKSPACE_ORIGINAL_OWNER_ID=$(stat -c '%u' "/var/lib/jenkins/workspace") + cleanup_workspace() { + echo "sudo may print the following warning message that can be ignored. The chown command will still run." + echo " sudo: setrlimit(RLIMIT_STACK): Operation not permitted" + echo "For more details refer to https://github.com/sudo-project/sudo/issues/42" + sudo chown -R "$WORKSPACE_ORIGINAL_OWNER_ID" /var/lib/jenkins/workspace + } + # Disable shellcheck SC2064 as we want to parse the original owner immediately. + # shellcheck disable=SC2064 + trap_add cleanup_workspace EXIT + sudo chown -R jenkins /var/lib/jenkins/workspace + git config --global --add safe.directory /var/lib/jenkins/workspace +fi echo "Environment variables:" env
2.41.0
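The failure mode described above, an EXIT trap that never fires when the job is hard-cancelled, has a direct analogue in Python, sketched below; the SIGKILL stands in for the runner cancelling the job:

```
import atexit
import os
import signal

# Cleanup registered "on exit" only runs on orderly shutdown. A hard kill,
# like a cancelled CI job, skips it entirely -- "cleanup" is never printed.
atexit.register(lambda: print("cleanup"))
os.kill(os.getpid(), signal.SIGKILL)
```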
5addd56585a8a77efd78b7c5e17c0a3afc56c76
Sun, 21 Apr 2024 22:20:22 -0700
[PATCH 0483/1000] [tp] add kwargs support to prepare_module_input (#124114)
As titled, this PR adds kwargs support to the PrepareModuleInput style: some modules take only kwarg inputs and no positional args, so we should support this. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124114 Approved by: https://github.com/XilunWu
diff --git a/test/distributed/tensor/parallel/test_tp_style.py b/test/distributed/tensor/parallel/test_tp_style.py index ab4f1ab8a7..520f04ec59 100644 --- a/test/distributed/tensor/parallel/test_tp_style.py +++ b/test/distributed/tensor/parallel/test_tp_style.py @@ -245,6 +245,65 @@ class TensorParallelStyleTest(DTensorTestBase): ) self.assertEqual(output.shape, (self.world_size * 2, 8 // self.world_size)) + @with_comms + def test_prepare_module_kwargs_input(self): + mesh = init_device_mesh(self.device_type, (self.world_size,)) + + class TestKwargModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(8, 8) + + def forward(self, x, *, y, z=2): + return self.linear(x) + y + z + + test_mod = TestKwargModule().to(self.device_type) + prepare_inps_simple = PrepareModuleInput( + input_kwarg_layouts={"y": Shard(0)}, + desired_input_kwarg_layouts={"y": Replicate()}, + ) + parallelize_module( + test_mod.linear, mesh, ColwiseParallel(use_local_output=False) + ) + parallelize_module(test_mod, mesh, prepare_inps_simple) + + comm_mode = CommDebugMode() + with comm_mode: + output = test_mod( + torch.randn(1 * self.world_size, 8, device=self.device_type), + y=torch.ones(1, 8, device=self.device_type), + ) + + self.assertEqual(comm_mode.get_total_counts(), 1) + self.assertEqual(output.shape, (1 * self.world_size, 8)) + + class TestKwargOnlyModule(torch.nn.Module): + def __init__(self): + super().__init__() + self.linear = torch.nn.Linear(8, 8) + + def forward(self, *, x, y=2, z=None): + return self.linear(x) + y + z + + test_kwonly_mod = TestKwargOnlyModule().to(self.device_type) + prepare_inps_simple = PrepareModuleInput( + input_kwarg_layouts={"x": Shard(0), "z": Shard(0)}, + desired_input_kwarg_layouts={"x": Replicate(), "z": Replicate()}, + ) + parallelize_module( + test_kwonly_mod.linear, mesh, ColwiseParallel(use_local_output=False) + ) + parallelize_module(test_kwonly_mod, mesh, prepare_inps_simple) + + with comm_mode: + output = test_kwonly_mod( + x=torch.randn(1, 8, device=self.device_type), + z=torch.ones(1, 8, device=self.device_type), + ) + + self.assertEqual(comm_mode.get_total_counts(), 2) + self.assertEqual(output.shape, (1 * self.world_size, 8)) + @with_comms def test_prepare_module_output(self): mesh = init_device_mesh(self.device_type, (self.world_size,)) diff --git a/torch/distributed/tensor/parallel/style.py b/torch/distributed/tensor/parallel/style.py index fefca0d185..2720f9dca7 100644 --- a/torch/distributed/tensor/parallel/style.py +++ b/torch/distributed/tensor/parallel/style.py @@ -1,6 +1,6 @@ # Copyright (c) Meta Platforms, Inc. and affiliates from abc import ABC, abstractmethod -from typing import Optional, Union, Tuple +from typing import Optional, Union, Tuple, Dict from functools import partial import torch @@ -335,13 +335,19 @@ class PrepareModuleInput(ParallelStyle): ``input_layouts``, and perform layout redistribution according to the ``desired_input_layouts``. Keyword Args: - input_layouts (Union[Placement, Tuple[Placement]]): + input_layouts (Union[Placement, Tuple[Optional[Placement]]]): The DTensor layouts of input tensors for the nn.Module, this is used to convert the input tensors to DTensors. If some inputs are not torch.Tensor or no need to convert to DTensors, ``None`` need to be specified - as a placeholder. - desired_input_layouts (Union[Placement, Tuple[Placement]]): + as a placeholder. default: None. 
+ desired_input_layouts (Union[Placement, Tuple[Optional[Placement]]]): The desired DTensor layout of input tensors for the nn.Module, this is used to ensure the inputs of the nn.Module - have the desired DTensor layouts. This argument needs to have the same length with ``input_layouts``. + have the desired DTensor layouts. This argument needs to have the same length with ``input_layouts``. default: None. + input_kwarg_layouts (Dict[str, Placement]): + The DTensor layouts of input kwargs for the nn.Module, this is used to convert the input kwarg tensors to DTensors. + default: None + desired_input_kwarg_layouts: (Dict[str, Placement]): + The desired DTensor layout of input kwargs for the nn.Module, this is used to ensure the inputs of the nn.Module + have the desired DTensor layouts. default: None. use_local_output (bool, optional): Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module inputs, default: False. Returns: @@ -372,24 +378,37 @@ class PrepareModuleInput(ParallelStyle): def __init__( self, *, - input_layouts: Union[Placement, Tuple[Placement]], - desired_input_layouts: Union[Placement, Tuple[Placement]], + input_layouts: Optional[Union[Placement, Tuple[Optional[Placement]]]] = None, + desired_input_layouts: Optional[Union[Placement, Tuple[Optional[Placement]]]] = None, + input_kwarg_layouts: Optional[Dict[str, Placement]] = None, + desired_input_kwarg_layouts: Optional[Dict[str, Placement]] = None, use_local_output: bool = False ): self.input_layouts = (input_layouts,) if isinstance(input_layouts, Placement) else input_layouts self.desired_input_layouts = \ (desired_input_layouts,) if isinstance(desired_input_layouts, Placement) else desired_input_layouts self.use_local_output = use_local_output - assert len(self.input_layouts) == len(self.desired_input_layouts), \ - "input_layouts and desired_input_layouts should have same length!" + if self.input_layouts is not None: + assert self.desired_input_layouts is not None, "desired module inputs should not be None!" + assert len(self.input_layouts) == len(self.desired_input_layouts), \ + "input_layouts and desired_input_layouts should have same length!" + self.with_kwargs = input_kwarg_layouts is not None + self.input_kwarg_layouts = input_kwarg_layouts or {} + self.desired_input_kwarg_layouts = desired_input_kwarg_layouts or {} + if self.with_kwargs: + assert len(self.input_kwarg_layouts) == len(self.desired_input_kwarg_layouts), \ + "input_kwarg_layouts and desired_input_kwarg_layouts should have same length!" def _prepare_input_fn(self, inputs, device_mesh): + if self.input_layouts is None: + return inputs prepared_inputs = [] if not isinstance(inputs, tuple): inputs = (inputs,) if len(inputs) != len(self.input_layouts): raise ValueError("module inputs and input_layouts should have same length!") + assert self.desired_input_layouts is not None, "desired module inputs should not be None!" 
for inp, input_layout, desired_layout in zip(inputs, self.input_layouts, self.desired_input_layouts): if input_layout is not None: if isinstance(inp, DTensor): @@ -398,15 +417,44 @@ class PrepareModuleInput(ParallelStyle): dt_inp = inp else: dt_inp = DTensor.from_local(inp, device_mesh, (input_layout,), run_check=False) - if input_layout != desired_layout: + + if desired_layout is not None and input_layout != desired_layout: dt_inp = dt_inp.redistribute(placements=(desired_layout,)) prepared_inputs.append(dt_inp.to_local() if self.use_local_output else dt_inp) else: prepared_inputs.append(inp) return tuple(prepared_inputs) + def _prepare_input_kwarg_fn(self, inputs, kwarg_inputs, device_mesh): + prepared_arg_inputs = self._prepare_input_fn(inputs, device_mesh) + prepared_kwarg_inputs = {} + for kwarg_key in kwarg_inputs.keys(): + kwarg_val = kwarg_inputs[kwarg_key] + input_layout = None + if kwarg_key in self.input_kwarg_layouts: + input_layout = self.input_kwarg_layouts[kwarg_key] + assert isinstance(kwarg_val, torch.Tensor), f"input of key {kwarg_key} to the module should be a Tensor!" + kwarg_val = DTensor.from_local(kwarg_val, device_mesh, (input_layout,), run_check=False) + + if kwarg_key in self.desired_input_kwarg_layouts: + desired_layout = self.desired_input_kwarg_layouts[kwarg_key] + if desired_layout != input_layout: + kwarg_val = kwarg_val.redistribute(placements=(desired_layout,)) + + prepared_kwarg_inputs[kwarg_key] = kwarg_val.to_local() if self.use_local_output else kwarg_val + else: + prepared_kwarg_inputs[kwarg_key] = kwarg_val + + return (prepared_arg_inputs, prepared_kwarg_inputs) + def _apply(self, module: nn.Module, device_mesh: DeviceMesh) -> nn.Module: - module.register_forward_pre_hook(lambda _, inputs: self._prepare_input_fn(inputs, device_mesh)) # type: ignore[misc, call-arg] + if self.with_kwargs: + module.register_forward_pre_hook( + lambda _, inputs, kwargs: self._prepare_input_kwarg_fn(inputs, kwargs, device_mesh), + with_kwargs=True + ) # type: ignore[misc] + else: + module.register_forward_pre_hook(lambda _, inputs: self._prepare_input_fn(inputs, device_mesh)) # type: ignore[misc, call-arg] return module
2.41.0
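The kwarg handling above is built on `nn.Module` forward pre-hooks registered with `with_kwargs=True`, which receive and may rewrite the keyword arguments. A standalone sketch of that hook mechanism (doubling `y` is just for illustration; a real style would convert or redistribute DTensor kwargs here):

```
import torch
import torch.nn as nn

def pre_hook(module, args, kwargs):
    # Rewrite the kwargs before forward runs; return the new (args, kwargs).
    return args, {**kwargs, "y": kwargs["y"] * 2}

class M(nn.Module):
    def forward(self, x, *, y):
        return x + y

m = M()
m.register_forward_pre_hook(pre_hook, with_kwargs=True)
print(m(torch.tensor(1.0), y=torch.tensor(2.0)))  # tensor(5.)
```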
01499ecc6c52195379cc9baf4a40df30799259f
Mon, 22 Apr 2024 08:03:21 -0700
[PATCH 0484/1000] [sym_shapes][perf] Cache ShapeEnv constrain_symbol_range calls (#124610)
Differential Revision: [D56422688](https://our.internmc.facebook.com/intern/diff/D56422688) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124610 Approved by: https://github.com/ezyang, https://github.com/lezcano
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py index 1604118fd9..ed19b7bc8e 100644 --- a/torch/fx/experimental/symbolic_shapes.py +++ b/torch/fx/experimental/symbolic_shapes.py @@ -512,13 +512,8 @@ def guard_scalar(a): raise AssertionError(f"unrecognized scalar {a}") -@record_shapeenv_event() def _constrain_symbol_range(shape_env, s: sympy.Symbol, compiler_min: int, compiler_max: int): - upd_vr = ValueRanges(compiler_min, compiler_max) - old_vr = shape_env.var_to_range.get(s, ValueRanges.unknown()) - shape_env._update_var_to_range(s, upd_vr) - if (new_vr := shape_env.var_to_range[s]) != old_vr: - log.info("_constrain_symbol_range %s [%s, %s]", s, new_vr.lower, new_vr.upper) + shape_env.constrain_symbol_range(s, compiler_min, compiler_max) def _advise_is_size(a): @@ -585,8 +580,7 @@ def _constrain_range_for_size(a, min: Optional[int] = None, max: Optional[int] = "received min={min} and max={max}" ) - _constrain_symbol_range( - a.node.shape_env, + a.node.shape_env.constrain_symbol_range( a.node.expr, compiler_min=min, compiler_max=max, @@ -4543,6 +4537,16 @@ class ShapeEnv: # Clears the cache, since this update can change the result. self._maybe_evaluate_static.cache_clear() + @lru_cache(maxsize=None) + @record_shapeenv_event() + def constrain_symbol_range(self, s: sympy.Symbol, compiler_min: int, compiler_max: int): + upd_vr = ValueRanges(compiler_min, compiler_max) + old_vr = self.var_to_range.get(s, ValueRanges.unknown()) + self._update_var_to_range(s, upd_vr) + if (new_vr := self.var_to_range[s]) != old_vr: + log.info("constrain_symbol_range %s [%s, %s]", s, new_vr.lower, new_vr.upper) + + def _is_int(expr): return isinstance(expr, SymInt) and expr.node.expr.is_number
2.41.0
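The change above is a straightforward memoization: identical `constrain_symbol_range` calls become cache hits instead of repeated range updates. A minimal sketch of the same `functools.lru_cache`-on-a-method pattern, using a stand-in class rather than the real ShapeEnv:

```
import functools

class FakeEnv:
    def __init__(self):
        self.updates = 0

    @functools.lru_cache(maxsize=None)
    def constrain_symbol_range(self, symbol, lo, hi):
        self.updates += 1  # the expensive range update would happen here

env = FakeEnv()
for _ in range(1000):
    env.constrain_symbol_range("s0", 0, 128)
print(env.updates)  # 1 -- the other 999 identical calls are cache hits
```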
a3c00c26697046546b8e5cdf01dfec46e05ae89
Mon, 22 Apr 2024 21:50:43 +0000
[PATCH 0485/1000] [test_profiler.py] Disable tqdm monitor thread and torch.compile with compile_threads=1 (#124409)
Summary: if tqdm is not shut down properly, it will leave the monitor thread alive. This causes an issue in the multithreading test because we check all events in that test by their TIDs. The events that correspond to these lingering threads all have a TID of (uint64_t)(-1), which is invalid. The workaround is turning off the monitoring thread when tqdm is loaded. Since these are unit tests, it is safe to turn off the monitor thread. Test Plan: buck test mode/dev-sand caffe2/test:profiler Differential Revision: D56310301 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124409 Approved by: https://github.com/aaronenyeshi
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py index b9f01e0fbd..14de69f530 100644 --- a/test/profiler/test_profiler.py +++ b/test/profiler/test_profiler.py @@ -1,4 +1,18 @@ # Owner(s): ["oncall: profiler"] + +# if tqdm is not shutdown properly, it will leave the monitor thread alive. +# This causes an issue in the multithreading test because we check all events +# in that test with their tids. The events that correspond to these lingering +# threads all have TID of (uint64_t)(-1) which is invalid. +# The work around is turnning off monitoring thread when tqdm is loaded. +# Since these are unit tests, it is safe to turn off monitor thread. +try: + import tqdm + + tqdm.tqdm.monitor_interval = 0 +except ImportError: + None + import collections import gc import json @@ -526,16 +540,17 @@ class TestExecutionTrace(TestCase): return x # Create a temp file to save execution trace data. - fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False) + fp = tempfile.NamedTemporaryFile("w+t", suffix="_et.json", delete=False) fp.close() - test_module = torch.compile(ConvAndRelu()) + with torch._inductor.config.patch(compile_threads=1): + test_module = torch.compile(ConvAndRelu()) - x = torch.rand(128, 4096) - et = ExecutionTraceObserver().register_callback(fp.name) - et.start() - test_module.forward(x) - et.stop() + x = torch.rand(128, 4096) + et = ExecutionTraceObserver().register_callback(fp.name) + et.start() + test_module.forward(x) + et.stop() assert fp.name == et.get_output_file_path() et.unregister_callback()
2.41.0
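The essence of the workaround is that tqdm's monitor thread is controlled by a class-level interval and is never spawned if that interval is zeroed before any progress bar is created; a sketch of the import-time guard used in the test file above:

```
# Zero tqdm's monitor interval before any bar is created so the background
# monitor thread is never started; safe for unit tests.
try:
    import tqdm
    tqdm.tqdm.monitor_interval = 0
except ImportError:
    pass  # tqdm is optional; nothing to disable

# Any bars created afterwards run without the monitor thread.
```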
01edc7e590c967fe2b416011a1a0aaa6790e60b
Mon, 22 Apr 2024 10:43:48 -0700
[PATCH 0486/1000] [inductor, test] remove cast for test_tmp_not_defined_issue2_cpu (#114910)
Does this verify that https://github.com/pytorch/pytorch/issues/94017 is fixed? Pull Request resolved: https://github.com/pytorch/pytorch/pull/114910 Approved by: https://github.com/angelayi
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py index c63b1c6494..a74e67593a 100644 --- a/test/inductor/test_torchinductor.py +++ b/test/inductor/test_torchinductor.py @@ -7586,8 +7586,7 @@ class CommonTemplate: sum_default_7 = torch.ops.aten.sum.default(mul_tensor_24) return (new_zeros_default_4, sum_default_7) - # TODO: Remove once https://github.com/pytorch/pytorch/issues/94017 is resolved - dtype = torch.float64 if self.device == "cpu" else torch.float32 + dtype = torch.float32 args = [ ((1, 88, 40, 40), (140800, 1600, 40, 1), dtype), ((), (), dtype),
2.41.0
7a35d5a292d94e3f0bc7adecc3b9e696cf1bbd3
Mon, 22 Apr 2024 21:57:39 +0000
[PATCH 0487/1000] Use new function to log one cluster per line (#124628)
Summary: For the motivation behind the overall stack of diffs, see the D56218385 summary. This particular diff makes cpp_dumper take a custom printer function that logs callstacks one group at a time, so it no longer runs into the 30K-character limit of `LOG(INFO)`. Test Plan: ``` [romanmal@46150.od /data/sandcastle/boxes/fbsource/fbcode (520a7b7b5)]$ buck2 test //caffe2/torch/csrc/distributed/c10d/... File changed: fbcode//common/base/ThreadStackTrace.cpp File changed: fbsource//xplat/caffe2/torch/csrc/distributed/c10d/fb/TraceUtils.cpp File changed: fbcode//caffe2/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp 4 additional file change events Buck UI: https://www.internalfb.com/buck2/d8ceae86-7d6f-4779-ad0c-8e37eddcff98 Network: Up: 0B Down: 0B Jobs completed: 2. Time elapsed: 1.5s. Tests finished: Pass 0. Fail 0. Fatal 0. Skip 0. Build failure 0 NO TESTS RAN [romanmal@46150.od /data/sandcastle/boxes/fbsource/fbcode (520a7b7b5)]$ ``` Tested to print the stack trace: P1220109730 Differential Revision: D56218360 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124628 Approved by: https://github.com/wconstab
diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp index bf21fc0dc6..4845006b56 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp @@ -352,8 +352,11 @@ std::string dump_nccl_trace() { } #endif -c10::optional<std::function<std::string()>>& get_cpp_trace_dumper() { - static c10::optional<std::function<std::string()>> dumper(c10::nullopt); +c10::optional<std::function<void(std::function<void(const std::string&)>)>>& +get_cpp_trace_dumper() { + static c10::optional< + std::function<void(std::function<void(const std::string&)>)>> + dumper(c10::nullopt); return dumper; } @@ -1330,7 +1333,8 @@ void ProcessGroupNCCL::heartbeatMonitor() { auto& cpp_dumper = get_cpp_trace_dumper(); if (cpp_dumper.has_value()) { - LOG(INFO) << "Dumping c++ stacktraces: " << cpp_dumper.value()(); + LOG(INFO) << "Dumping c++ stacktraces:"; + cpp_dumper.value()([](const std::string& line) { LOG(INFO) << line; }); } // Store debug info to storage if no other thread does it. (By default to diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp index a2b819f2f9..30bcee1992 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp @@ -1090,10 +1090,11 @@ class TORCH_API ProcessGroupNCCL : public Backend { TORCH_API std::string dump_nccl_trace(); // Gets a mutable reference to a global optional function. Heartbeat Monitor -// will query this function and if available, call it to dump traces. Inside -// fbcode, we store a function here that uses an internal tool for process -// tracing -TORCH_API c10::optional<std::function<std::string()>>& get_cpp_trace_dumper(); +// will use this function to dump traces, if available. Inside fbcode, we store +// a function here that uses an internal tool for process tracing +TORCH_API c10::optional< + std::function<void(std::function<void(const std::string&)>)>>& +get_cpp_trace_dumper(); // Similar to get_cpp_trace_dumper, this stores a function defined in // torch-python layer that lets us check whether the GIL can be acquired,
2.41.0
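The interface change above swaps "return one big string" for "call a per-line sink", which is what keeps each logged group under the logger's length cap. A language-agnostic sketch of that callback shape, shown in Python (the real dumper is C++):

```
from typing import Callable, List

def dump_stack_groups(groups: List[str], emit: Callable[[str], None]) -> None:
    # Each group becomes its own log line instead of being joined into one
    # huge string that LOG(INFO) would truncate.
    for group in groups:
        emit(group)

dump_stack_groups(
    ["group 1: main -> ncclCommWatchdog", "group 2: worker -> allreduce"],
    print,  # stands in for the LOG(INFO)-per-line lambda in the C++ code
)
```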
02d7e9a6ecc0d8e162f6d4ff8d067d7ba5bf4eb
Mon, 22 Apr 2024 22:38:39 +0000
[PATCH 0488/1000] [Binary Build] Increase timeout for Linux nightly binary builds (#124668)
Related issue: https://github.com/pytorch/pytorch/issues/124667. Please note, this is a mitigation PR; we will follow up with an investigation and a proper fix. Similar to: https://github.com/pytorch/pytorch/commit/94d6463255211bdd397e4eab6ad0cddedc922fea Pull Request resolved: https://github.com/pytorch/pytorch/pull/124668 Approved by: https://github.com/huydhn
diff --git a/.github/workflows/_binary-build-linux.yml b/.github/workflows/_binary-build-linux.yml index 3dee4ba92e..34400149e5 100644 --- a/.github/workflows/_binary-build-linux.yml +++ b/.github/workflows/_binary-build-linux.yml @@ -78,7 +78,7 @@ on: jobs: build: runs-on: ${{ inputs.runs_on }} - timeout-minutes: 180 + timeout-minutes: 210 env: PYTORCH_ROOT: ${{ inputs.PYTORCH_ROOT }} BUILDER_ROOT: ${{ inputs.BUILDER_ROOT }}
2.41.0
2a34eeb9957fa5be443c28ba0435994a0244ab0
Mon, 22 Apr 2024 23:32:50 +0000
[PATCH 0489/1000] Dynamo x autograd.Function supports non-{Tensor, symnode, constant} inputs (#124360)
Fixes #118395 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124360 Approved by: https://github.com/zou3519
diff --git a/test/dynamo/test_autograd_function.py b/test/dynamo/test_autograd_function.py index 88859c894e..492936d0a9 100644 --- a/test/dynamo/test_autograd_function.py +++ b/test/dynamo/test_autograd_function.py @@ -1,8 +1,10 @@ # Owner(s): ["module: dynamo"] - +# flake8: noqa: B950 import copy import math +from dataclasses import dataclass + import torch import torch._dynamo.test_case @@ -467,32 +469,182 @@ class AutogradFunctionTests(torch._dynamo.test_case.TestCase): self.assertEqual(res, MyMM.apply(a, a)) self.assertEqual(cnt.frame_count, 1) - def test_graph_break_if_lifted_free_variable(self): - torch._dynamo.utils.counters.clear() - cnt = torch._dynamo.testing.CompileCounter() - delta = torch.randn(3) + def test_user_defined_object_as_input(self): + cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") + + @dataclass + class Weird: + x: int + b: torch.Tensor + c: torch.Tensor class Foo(torch.autograd.Function): @staticmethod - def forward(ctx, x): - return x.clone(), (x + delta).clone() + def forward(ctx, x: torch.Tensor, weird: Weird, z: torch.Tensor): + ctx.save_for_backward(weird.b, weird.c) + return weird.b * weird.c * x.clone() @staticmethod - def backward(ctx, grad1, grad2): - return grad1 + grad2 + def backward(ctx, grad): + b, c = ctx.saved_tensors + return grad * b * c, None, grad * 2 - @torch.compile(backend=cnt) - def f(x): - return Foo.apply(x) + @torch.compile(backend=cnt, fullgraph=True) + def f(x, weird, z): + return Foo.apply(x, weird, z) - x = torch.randn(3, requires_grad=True) - result = f(x) + x = torch.tensor(2.0, requires_grad=True) + weird = Weird(1.2, torch.tensor(2.5, requires_grad=True), torch.tensor(3.5)) + z = torch.tensor(3.0, requires_grad=True) - self.assertEqual(result, Foo.apply(x)) - self.assertEqual(cnt.frame_count, 1) - self.assertEqual( - list(torch._dynamo.utils.counters["graph_break"].values()), [1] + result = f(x, weird, z) + result.sum().backward() + + self.assertEqual(result, Foo.apply(x, weird, z)) + self.assertEqual(x.grad, 2.5 * 3.5) + self.assertEqual(z.grad, 2.0) + self.assertEqual(weird.b.grad, None) + + # check Dynamo captured graph is correct! 
+ actual_graph = torch._dynamo.testing.normalize_gm( + cnt.graphs[0].print_readable(print_output=False) + ) + self.assertExpectedInline( + actual_graph, + """\ +class GraphModule(torch.nn.Module): + def forward(self, L_x_ : torch.Tensor, L_z_ : torch.Tensor, L_weird_b : torch.Tensor, L_weird_c : torch.Tensor): + l_x_ = L_x_ + l_z_ = L_z_ + l_weird_b = L_weird_b + l_weird_c = L_weird_c + + function_ctx = torch.autograd.function.FunctionCtx() + fwd_body_0 = self.fwd_body_0 + bwd_body_0 = self.bwd_body_0 + autograd_function_apply = torch._functorch.autograd_function.autograd_function_apply(fwd_body_0, bwd_body_0, l_x_, l_z_, l_weird_b, l_weird_c, args_tensor_mask = [True, False, True]); fwd_body_0 = bwd_body_0 = l_x_ = l_z_ = l_weird_b = l_weird_c = None + return (autograd_function_apply,) + + class GraphModule(torch.nn.Module): + def forward(self, function_ctx, l_x_, l_z_, l_weird_b, l_weird_c): + mul = l_weird_b * l_weird_c + clone = l_x_.clone(); l_x_ = None + mul_1 = mul * clone; mul = clone = None + return (mul_1, [l_weird_b, l_weird_c]) + + class GraphModule(torch.nn.Module): + def forward(self, function_ctx, mul_1, l_weird_b, l_weird_c): + _set_grad_enabled = torch._C._set_grad_enabled(False) + + mul = mul_1 * l_weird_b; l_weird_b = None + mul_2 = mul * l_weird_c; mul = l_weird_c = None + mul_3 = mul_1 * 2; mul_1 = None + + _set_grad_enabled_1 = torch._C._set_grad_enabled(True) + return (mul_2, mul_3) +""", + ) + + def test_tensor_list_as_input(self): + class Foo(torch.autograd.Function): + @staticmethod + def forward(ctx, x, tl): + ctx.save_for_backward(tl[0], tl[1]) + return x.clone() * (tl[0] + tl[1]) + + @staticmethod + def backward(ctx, grad): + tl0, tl1 = ctx.saved_tensors + return grad * (tl0 + tl1), None + + @torch.compile(backend="aot_eager", fullgraph=True) + def f(x, tl): + return Foo.apply(x, tl) + + x = torch.tensor(2.0, requires_grad=True) + tl = [ + torch.tensor(3.0, requires_grad=True), + torch.tensor(4.0, requires_grad=True), + ] + + result = f(x, tl) + result.sum().backward() + + self.assertEqual(result, Foo.apply(x, tl)) + self.assertEqual(x.grad, 7.0) + self.assertEqual(tl[0].grad, None) + self.assertEqual(tl[1].grad, None) + + def test_multiple_different_non_tensor_inputs(self): + @dataclass + class Weird: + x: int + b: torch.Tensor + c: torch.Tensor + + class Foo(torch.autograd.Function): + @staticmethod + def forward(ctx, x, weird, z, tl): + ctx.save_for_backward(weird.b, weird.c, tl[0], tl[1]) + return x.clone() * weird.b * weird.c * tl[0] + + @staticmethod + def backward(ctx, grad): + b, c, tl0, _ = ctx.saved_tensors + return grad * b * c * tl0, None, grad * 2, None + + @torch.compile(backend="aot_eager", fullgraph=True) + def f(x, weird, z, tl): + return Foo.apply(x, weird, z, tl) + + x = torch.tensor(2.0, requires_grad=True) + weird = Weird( + 1.2, + torch.tensor(2.5, requires_grad=True), + torch.tensor(3.5, requires_grad=True), ) + z = torch.tensor(3.0, requires_grad=True) + tl = [ + torch.tensor(0.5, requires_grad=True), + torch.tensor(0.6, requires_grad=True), + ] + + result = f(x, weird, z, tl) + result.sum().backward() + + self.assertEqual(result, Foo.apply(x, weird, z, tl)) + self.assertEqual(x.grad, 2.5 * 3.5 * 0.5) + self.assertEqual(z.grad, 2.0) + self.assertEqual(weird.b.grad, None) + self.assertEqual(weird.c.grad, None) + self.assertEqual(tl[0].grad, None) + self.assertEqual(tl[1].grad, None) + + def test_backward_returns_none_for_tensor_input(self): + class Foo(torch.autograd.Function): + @staticmethod + def forward(ctx, x, y): + 
ctx.save_for_backward(y) + return x.clone() * y + + @staticmethod + def backward(ctx, grad): + (y,) = ctx.saved_tensors + return grad * y, None + + @torch.compile(backend="aot_eager", fullgraph=True) + def f(x, y): + return Foo.apply(x, y) + + x = torch.tensor(2.0, requires_grad=True) + y = torch.tensor(3.0, requires_grad=True) + + result = f(x, y) + result.sum().backward() + + self.assertEqual(result, Foo.apply(x, y)) + self.assertEqual(x.grad, 3.0) + self.assertEqual(y.grad, None) def test_function_with_bound_free_variable(self): class LowerBound(torch.autograd.Function): diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py index 754aef6d34..995c8754f3 100644 --- a/test/dynamo/test_repros.py +++ b/test/dynamo/test_repros.py @@ -235,6 +235,7 @@ class _ReversibleFunction(torch.autograd.Function): all_hidden_states.append(hidden_states) attn_output = layer(attn_output) + all_buckets = all_buckets + (attn_output,) # Add last layer if output_hidden_states is True: @@ -256,45 +257,8 @@ class _ReversibleFunction(torch.autograd.Function): grad_hidden_states, 2, dim=-1 ) - # retrieve params from ctx for backward - attn_output, hidden_states = ctx.saved_tensors - - # create tuple - output = ReformerBackwardOutput( - attn_output=attn_output, - hidden_states=hidden_states, - grad_attn_output=grad_attn_output, - grad_hidden_states=grad_hidden_states, - ) - # free memory - del grad_attn_output, grad_hidden_states, attn_output, hidden_states - - layers = ctx.layers - all_buckets = ctx.all_buckets - head_mask = ctx.head_mask - attention_mask = ctx.attention_mask - - for idx, layer in enumerate(layers[::-1]): - # pop last buckets from stack - buckets = all_buckets[-1] - all_buckets = all_buckets[:-1] - - # backprop - output = layer.backward_pass( - next_attn_output=output.attn_output, - hidden_states=output.hidden_states, - grad_attn_output=output.grad_attn_output, - grad_hidden_states=output.grad_hidden_states, - head_mask=head_mask[len(layers) - idx - 1], - attention_mask=attention_mask, - buckets=buckets, - ) - - assert all_buckets == (), "buckets have to be empty after backpropagation" - grad_hidden_states = torch.cat( - [output.grad_attn_output, output.grad_hidden_states], dim=-1 - ) + del grad_attn_output # num of return vars has to match num of forward() args # return gradient for hidden_states arg and None for other args @@ -1160,11 +1124,11 @@ class ReproTests(torch._dynamo.test_case.TestCase): cnt = self._reformer(nopython=False) # cant inline torch.autograd.Function means graph break if torch._dynamo.config.assume_static_by_default: - self.assertExpectedInline(cnt.frame_count, """3""") - self.assertExpectedInline(cnt.op_count, """10""") + self.assertExpectedInline(cnt.frame_count, """1""") + self.assertExpectedInline(cnt.op_count, """5""") else: - self.assertExpectedInline(cnt.frame_count, """3""") - self.assertExpectedInline(cnt.op_count, """10""") + self.assertExpectedInline(cnt.frame_count, """1""") + self.assertExpectedInline(cnt.op_count, """5""") @disable_translation_validation_if_dynamic_shapes def test_longformer_chunk(self): diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py index b750501f1f..e0b0233e05 100644 --- a/torch/_dynamo/variables/higher_order_ops.py +++ b/torch/_dynamo/variables/higher_order_ops.py @@ -162,6 +162,25 @@ def validate_args_and_maybe_create_graph_inputs( if set_subgraph_inputs == "automatic": args.append(a) continue + elif set_subgraph_inputs == "semi_automatic": + if isinstance(a, 
AutogradFunctionContextVariable): + tracer.create_graph_input(a.as_proxy().node.name) + elif a.maybe_fx_node() is not None: + node = a.maybe_fx_node() + new_proxy = tracer.create_graph_input(node.name) + example_value = ( + node.meta["example_value"] + if "example_value" in node.meta + else None + ) + a = wrap_fx_proxy_cls( + target_cls=type(a), + tx=tx, + proxy=new_proxy, + example_value=example_value, + ) + args.append(a) + continue if a.is_python_constant(): # This arg is not used in the body of the higher order op. @@ -334,6 +353,7 @@ def speculate_subgraph( assert set_subgraph_inputs in { "automatic", + "semi_automatic", "flatten_manual", "manual", }, "Please use one of the supported set_subgraph_inputs options." @@ -1523,14 +1543,11 @@ class AutogradFunctionApplyVariable(VariableTracker): fwd_args, kwargs, "autograd.Function", - set_subgraph_inputs="manual", + set_subgraph_inputs="semi_automatic", restore_side_effects=False, tracer=fwd_tracer, ) - if fwd_freevars: - unimplemented("NYI") - if ctx.mutable_local in tx.output.side_effects.store_attr_mutations: if ( "_materialize_non_diff_grads" @@ -1602,12 +1619,23 @@ class AutogradFunctionApplyVariable(VariableTracker): fwd_graph.erase_node(node) break - new_fwd_graph_outputs = (fwd_out.as_proxy(), list(bwd_freevars.keys())) + # Because we lift the bwd_freevars as inputs of the bwd_graph, + # we have to manually add the bwd_freevars as output of fwd_graph. + # However, the bwd_freevars got from speculate_subgraph use the Proxies in the bwd_graph, + # we need to convert them to Proxies in the fwd_graph and then generate new fwd_graph output. + fwd_proxy_of_bwd_freevars = [] + for k in bwd_freevars.keys(): + if k in fwd_freevars: + fwd_proxy_of_bwd_freevars.append(fwd_freevars[k]) + else: + fwd_proxy_of_bwd_freevars.append(k) + + new_fwd_graph_outputs = (fwd_out.as_proxy(), fwd_proxy_of_bwd_freevars) new_fwd_graph_outputs = pytree.tree_map(lambda x: x.node, new_fwd_graph_outputs) fwd_graph.output(new_fwd_graph_outputs) + fwd_graph.lint() # Store fwd_body - fwd_nn_modules = tx.output.tracing_context.module_context.copy_graphstate() fwd_name = add_subgraph( tx, @@ -1617,6 +1645,62 @@ class AutogradFunctionApplyVariable(VariableTracker): fwd_node = make_attr(tx, fwd_name) + # The type of original args can be arbitrary, but we only support basic type in FX graph. + # So the speculated subgraph input includes original tensor args and the lifted freevars. + # We need to filter out the original tensor args and concat them with the lifted freevars + # to generate the proxy args for the FX call_function node. + filtered_args = [] + # A boolean list to mark if the type of corresponding argument is tensor. + # This is used to determine if a FX node's argument should be an argument of + # ApplyTemplate.forward and if we should skip the output from ApplyTemplate.backward + # at torch._functorch.autograd_function.AutogradFunctionApply. + args_tensor_mask = [False] * len(args) + for i, arg in enumerate(args): + if isinstance(arg, (variables.TensorVariable, variables.SymNodeVariable)): + filtered_args.append(arg) + args_tensor_mask[i] = True + + # Rewrite the output of bwd_graph to remove the grad output for the non-Tensor args. + new_bwd_graph_outputs = None + for node in bwd_graph.find_nodes(op="output"): + bwd_graph.erase_node(node) + break + + # The same as the above fwd proxies, we need to use the bwd proxies in the bwd_graph + # if some of the output is from fwd_freevars. 
+ bwd_out_proxy = bwd_out.as_proxy() + bwd_proxy_of_fwd_freevars = [] + if isinstance(bwd_out_proxy, (tuple, list)): + for k in bwd_out_proxy: + if k in bwd_freevars: + bwd_proxy_of_fwd_freevars.append(bwd_freevars[k]) + else: + bwd_proxy_of_fwd_freevars.append(k) + else: + if bwd_out_proxy in bwd_freevars: + bwd_proxy_of_fwd_freevars = bwd_freevars[bwd_out_proxy] + else: + bwd_proxy_of_fwd_freevars = bwd_out_proxy + + # Remove bwd output for non-Tensor args. + output_proxy = bwd_proxy_of_fwd_freevars + if isinstance(output_proxy, (tuple, list)): + new_bwd_graph_outputs = () + for x, mask in zip(output_proxy, args_tensor_mask): + if mask: + new_bwd_graph_outputs = new_bwd_graph_outputs + (x,) + else: + assert x is None, f"Grad of non-Tensor arg {x} is not None." + else: + new_bwd_graph_outputs = output_proxy + + # Update the bwd graph output. + new_bwd_graph_outputs = pytree.tree_map( + lambda x: None if x is None else x.node, new_bwd_graph_outputs + ) + bwd_graph.output(new_bwd_graph_outputs) + bwd_graph.lint() + # Store bwd_body bwd_nn_modules = tx.output.tracing_context.module_context.copy_graphstate() bwd_name = add_subgraph( @@ -1629,7 +1713,11 @@ class AutogradFunctionApplyVariable(VariableTracker): tx.output.side_effects = prev_side_effects - p_args = (fwd_node, bwd_node, *(arg.as_proxy() for arg in args)) + p_args = ( + fwd_node, + bwd_node, + *([arg.as_proxy() for arg in filtered_args] + list(fwd_freevars.keys())), + ) example_value = pytree.tree_map_only( torch.fx.Proxy, lambda a: a.node.meta["example_value"], @@ -1645,7 +1733,7 @@ class AutogradFunctionApplyVariable(VariableTracker): "call_function", autograd_function_apply, args=p_args, - kwargs={}, + kwargs={"args_tensor_mask": args_tensor_mask}, ), example_value=example_value, ) diff --git a/torch/_functorch/autograd_function.py b/torch/_functorch/autograd_function.py index 2798e177e6..5017a25022 100644 --- a/torch/_functorch/autograd_function.py +++ b/torch/_functorch/autograd_function.py @@ -686,21 +686,27 @@ class AutogradFunctionApply(HigherOrderOperator): def __init__(self): super().__init__("autograd_function_apply") - def __call__(self, fwd, bwd, *fwd_args): + def __call__(self, fwd, bwd, *fwd_args, **fwd_kwargs): saved_values = None + args_tensor_mask = fwd_kwargs["args_tensor_mask"] + length_of_tensor_args = sum(args_tensor_mask) + # Filter out the original tensor args from fwd_args, + # lifted freevars should not be args of ApplyTemplate.apply + # since we don't need to calculate the gradients of them. + new_fwd_args = fwd_args[:length_of_tensor_args] class ApplyTemplate(torch.autograd.Function): @staticmethod def forward(ctx, *args): nonlocal saved_values - output, saved_values = fwd(None, *args) + output, saved_values = fwd(None, *fwd_args) return output @staticmethod def backward(ctx, *grad): return bwd(None, *grad, *saved_values) - return ApplyTemplate.apply(*fwd_args) + return ApplyTemplate.apply(*new_fwd_args) autograd_function_apply = AutogradFunctionApply()
2.41.0
bf3f90781263ee45e2e79cf7f80102ffa7f1b14
Mon, 22 Apr 2024 23:43:11 +0000
[PATCH 0490/1000] [MPS] Fix large copy (#124635)
By slicing `copyFromBuffer:sourceOffset:toBuffer:destinationOffset:size:` into 2Gb chunks. Also add a regression test, but limit it to machines with 12Gb of RAM or more and macOS 14+, since on macOS 13 an attempt to allocate a 4Gb tensor fails with: ``` /AppleInternal/Library/BuildRoots/c651a45f-806e-11ed-a221-7ef33c48bc85/Library/Caches/com.apple.xbs/Sources/MetalPerformanceShaders/MPSCore/Types/MPSNDArray.mm:724: failed assertion `[MPSNDArray initWithDevice:descriptor:] Error: total bytes of NDArray > 2**32' ``` Fixes https://github.com/pytorch/pytorch/issues/124335 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124635 Approved by: https://github.com/kulinseth
diff --git a/aten/src/ATen/mps/MPSStream.mm b/aten/src/ATen/mps/MPSStream.mm index 2ac8b0cc64..03caa4cf76 100644 --- a/aten/src/ATen/mps/MPSStream.mm +++ b/aten/src/ATen/mps/MPSStream.mm @@ -173,11 +173,22 @@ void MPSStream::copy(id<MTLBuffer> srcBuffer, endKernelCoalescing(); id<MTLBlitCommandEncoder> blitEncoder = [commandBuffer() blitCommandEncoder]; - [blitEncoder copyFromBuffer:srcBuffer - sourceOffset:(NSUInteger)srcOffset - toBuffer:dstBuffer - destinationOffset:(NSUInteger)dstOffset - size:(NSUInteger)length]; + // For some reason copyFromBuffer for 4Gb fails without returning an error + // See https://github.com/pytorch/pytorch/issues/124335 + // Workaround by batching copy commands into 2Gb chunks + constexpr size_t max_copy_size = 0x80000000; // 2GB + size_t bytes_copied = 0; + size_t bytes_remains = length; + while (bytes_remains > 0) { + NSUInteger bytes_to_copy = std::min(max_copy_size, bytes_remains); + [blitEncoder copyFromBuffer:srcBuffer + sourceOffset:(NSUInteger)srcOffset + bytes_copied + toBuffer:dstBuffer + destinationOffset:(NSUInteger)dstOffset + bytes_copied + size:bytes_to_copy]; + bytes_copied += bytes_to_copy; + bytes_remains -= bytes_to_copy; + } [blitEncoder endEncoding]; // profilerId has a value only if copy profiling is enabled diff --git a/test/test_mps.py b/test/test_mps.py index 48a6e5715b..bfac420775 100644 --- a/test/test_mps.py +++ b/test/test_mps.py @@ -1089,6 +1089,7 @@ if not torch.backends.mps.is_available(): NNTestCase = NoTest # noqa: F811 product_version = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) +total_memory = int(subprocess.check_output(["sysctl", "-n", "hw.memsize"])) # Determine whether to enable MPS memory leak check (uses same code as CUDA). TEST_MPS_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_MPS_MEM_LEAK_CHECK', '0') == '1' @@ -7013,6 +7014,16 @@ class TestMPS(TestCaseMPS): # Test bfloat16 mm compare_mm(1024, 1, 32769, torch.bfloat16) + @unittest.skipIf(total_memory < 12_000_000_000, "Needs at least 12Gb RAM to run the test") + @unittest.skipIf(product_version < 14.0, "Can't allocate 4Gb tensor on MacOS 13") + def test_copy_large(self): + """ Test that copy of 4Gb+ tensors works """ + x = torch.ones((2**30 + 11,), dtype=torch.float32) + y = x.to(device="mps") + self.assertTrue(torch.all(y == torch.tensor(1.0, device="mps"))) + del y + del x + # Test flip def test_flip(self): def helper(shape, dims):
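As a sanity check on the chunking above, here is a small Python model (not the Metal code itself, and the helper name is ours) of the offsets and sizes the blit encoder now issues: a copy of arbitrary length is split into at most 2Gb pieces, with each chunk's offset advanced by the bytes already copied.

```python
def chunked_copy_plan(length, max_copy_size=0x80000000):
    # Returns (offset, size) pairs mirroring the blit-encoder loop:
    # every chunk is at most 2GB and offsets advance by bytes_copied.
    plan = []
    bytes_copied = 0
    bytes_remains = length
    while bytes_remains > 0:
        bytes_to_copy = min(max_copy_size, bytes_remains)
        plan.append((bytes_copied, bytes_to_copy))
        bytes_copied += bytes_to_copy
        bytes_remains -= bytes_to_copy
    return plan

# A 4GB + 44 byte copy (the regression test above uses 2**30 + 11 floats).
print(chunked_copy_plan((2**30 + 11) * 4))
# [(0, 2147483648), (2147483648, 2147483648), (4294967296, 44)]
```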
2.41.0
c21161488b9219233c2401effc816d689bcc8a0
Tue, 23 Apr 2024 00:24:55 +0000
[PATCH 0492/1000] Add meta function for `torch.histc` (#124548)
Registers a meta function for the `aten.histc.default` and `aten.histc.out` ops to support `torch.compile(dynamic=True)`. Fixes #124512. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124548 Approved by: https://github.com/lezcano, https://github.com/peterbell10
diff --git a/test/test_meta.py b/test/test_meta.py index deb421adee..af1a5fb6ad 100644 --- a/test/test_meta.py +++ b/test/test_meta.py @@ -664,7 +664,6 @@ meta_function_expected_failures = { torch.bincount : {i32, i64, u8, i16, i8}, torch.functional.unique : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64}, torch.functional.unique_consecutive : {f64, i32, i64, u8, i16, f16, bf16, b8, i8, f32, u16, u32, u64}, - torch.histc : {f64, f16, bf16, f32}, torch.histogram : {f64, f32}, torch.histogramdd : {f64, f32}, torch.kthvalue : {f64, i32, i64, u8, i16, f16, bf16, i8, f32}, @@ -741,7 +740,6 @@ meta_function_device_expected_failures['cuda'] = { torch.functional.unique: {f16}, # aten::_unique2, aten::unique_dim torch.functional.unique_consecutive: {f16}, # aten::unique_consecutive torch.geqrf: {f32, f64}, # aten::geqrf - torch.histc: {i16, i32, i64, i8}, # aten::histc, aten::histc.out torch.kthvalue: {f16}, # aten::kthvalue.values } @@ -838,8 +836,6 @@ meta_dispatch_expected_failures = { aten._unique2.default : {i8, f64, i64, f16, bf16, f32, i32, b8, i16, u8, u16, u32, u64}, aten.bincount.default : {i64, i8, i32, i16, u8}, aten.equal.default : {c64, f16, i8, f64, c128, i64, bf16, f32, i32, b8, i16, u8}, - aten.histc.default : {bf16, f32, f64}, - aten.histc.out : {bf16, f32, f64}, aten.histogram.bin_ct : {f32, f64}, aten.histogram.bins_tensor : {f32, f64}, aten.kthvalue.default : {i8, f64, i64, f16, bf16, f32, i32, i16, u8}, @@ -883,8 +879,6 @@ meta_dispatch_device_expected_failures['cpu'] = { aten._batch_norm_with_update.default: {bf16, f16}, aten.native_layer_norm.default: {bf16, f16}, - aten.histc.default: {f16}, - aten.histc.out: {f16}, } meta_dispatch_device_expected_failures['cuda'] = { @@ -893,8 +887,6 @@ meta_dispatch_device_expected_failures['cuda'] = { aten._use_cudnn_ctc_loss.Tensor: {f32, f64}, # aten::_use_cudnn_ctc_loss.Tensor aten.cudnn_grid_sampler.default: {f16, f32, f64}, # aten::cudnn_grid_sampler aten.geqrf.default: {f32, f64}, # aten::geqrf - aten.histc.default: {i16, i32, i64, i8}, # aten::histc - aten.histc.out: {i16, i32, i64, i8}, # aten::histc.out aten.kthvalue.default: {f16}, # aten::kthvalue.values aten.linalg_eigvalsh.out: {f32, f64}, # aten::linalg_eigvalsh.out aten.log_sigmoid_forward.default: {bf16, f16, f64, f32}, diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py index 6becba90c5..fd2cc6d304 100644 --- a/test/test_proxy_tensor.py +++ b/test/test_proxy_tensor.py @@ -1897,7 +1897,6 @@ fake_tensor_failures = { symbolic_tensor_failures = { xfail('combinations', ''), xfail('geqrf', ''), # aten.geqrf.default - couldn't find symbolic meta function/decomposition - xfail('histc', ''), # Could not run 'aten::histc' with arguments from the 'Meta' backend. This could be because... xfail('histogram', ''), # Could not run 'aten::histogram.bin_ct' with arguments from the 'Meta' backend. This c... 
xfail('histogramdd', ''), # aten._histogramdd_bin_edges.default - couldn't find symbolic meta function/decomposition xfail('kthvalue', ''), # aten.kthvalue.default - couldn't find symbolic meta function/decomposition diff --git a/torch/_meta_registrations.py b/torch/_meta_registrations.py index c1fe54a1a6..6245f908db 100644 --- a/torch/_meta_registrations.py +++ b/torch/_meta_registrations.py @@ -21,6 +21,7 @@ from torch._prims_common import ( ELEMENTWISE_TYPE_PROMOTION_KIND, IntLike, make_contiguous_strides_for, + Number, TensorLike, ) @@ -6083,6 +6084,32 @@ def meta_bucketize(self, boundaries, *, out_int32=False, right=False): ).contiguous() +@register_meta([aten.histc]) +@out_wrapper() +def meta_histc(input, bins=100, min=0, max=0): + fn_name = "histc()" + if device_hint(input) == "cpu": + torch._check( + input.is_floating_point(), + lambda: f"\"histogram_cpu\" not implemented for '{input.dtype}'", + ) + torch._check( + isinstance(bins, IntLike), + lambda: f"{fn_name}: argument 'bins' must be int, not {type(bins)}", + ) + torch._check(bins > 0, lambda: f"{fn_name}: bins must be > 0, but got {bins}") + torch._check( + isinstance(min, Number), + lambda: f"{fn_name}: argument 'min' must be Number, not {type(min)}", + ) + torch._check( + isinstance(max, Number), + lambda: f"{fn_name}: argument 'max' must be Number, not {type(max)}", + ) + torch._check(max >= min, lambda: "{fn_name}: max must be larger than min") + return torch.empty(bins, device=input.device, dtype=input.dtype) + + @register_meta( [aten._upsample_bilinear2d_aa.default, aten._upsample_bicubic2d_aa.default] )
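In user terms, the new meta kernel means `torch.histc` can be shape-propagated without real data and no longer blocks `torch.compile(dynamic=True)`; a small usage sketch (shapes and bin counts below are arbitrary):

```python
import torch

# Shape/dtype inference only: the meta kernel allocates a `bins`-element
# output of the input's dtype without touching any data.
meta_out = torch.histc(torch.empty(128, device="meta"), bins=10, min=0.0, max=1.0)
print(meta_out.shape, meta_out.device)  # torch.Size([10]) meta

@torch.compile(dynamic=True)
def counts(x):
    return torch.histc(x, bins=100, min=-3.0, max=3.0)

print(counts(torch.randn(1_000)).shape)  # torch.Size([100])
```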
2.41.0
fcdea8cd684808330c000cdffd8fa00d076ab6c
Mon, 22 Apr 2024 14:57:40 -0400
[PATCH 0493/1000] Do not import transformers when importing torch._dynamo (#124634)
Fixes https://github.com/pytorch/pytorch/issues/123954 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124634 Approved by: https://github.com/thiagocrepaldi, https://github.com/Chillee ghstack dependencies: #124343
diff --git a/torch/onnx/_internal/fx/patcher.py b/torch/onnx/_internal/fx/patcher.py index e2262bba4f..ee919eae00 100644 --- a/torch/onnx/_internal/fx/patcher.py +++ b/torch/onnx/_internal/fx/patcher.py @@ -1,19 +1,24 @@ import copy +import functools import io from typing import List, Union import torch + # TODO: Remove after https://github.com/huggingface/safetensors/pull/318 -try: - # safetensors is not an exporter requirement, but needed for some huggingface models - import safetensors # type: ignore[import] # noqa: F401 - import transformers # type: ignore[import] - from safetensors import torch as safetensors_torch # noqa: F401 +@functools.lru_cache(None) +def has_safetensors_and_transformers(): + try: + # safetensors is not an exporter requirement, but needed for some huggingface models + import safetensors # type: ignore[import] # noqa: F401 + import transformers # type: ignore[import] # noqa: F401 + + from safetensors import torch as safetensors_torch # noqa: F401 - has_safetensors_and_transformers = True -except ImportError: - has_safetensors_and_transformers = False + return True + except ImportError: + return False class ONNXTorchPatcher: @@ -61,7 +66,9 @@ class ONNXTorchPatcher: # Wrapper or modified version of torch functions. self.torch_load_wrapper = torch_load_wrapper - if has_safetensors_and_transformers: + if has_safetensors_and_transformers(): + import safetensors + import transformers def safetensors_load_file_wrapper(filename, device="cpu"): # Record path for later serialization into ONNX proto @@ -109,7 +116,10 @@ class ONNXTorchPatcher: desired_wrapped_methods.append((torch.Tensor, "__getitem__")) torch.fx._symbolic_trace._wrapped_methods_to_patch = desired_wrapped_methods - if has_safetensors_and_transformers: + if has_safetensors_and_transformers(): + import safetensors + import transformers + safetensors.torch.load_file = self.safetensors_torch_load_file_wrapper transformers.modeling_utils.safe_load_file = ( self.safetensors_torch_load_file_wrapper @@ -120,7 +130,10 @@ class ONNXTorchPatcher: torch.fx._symbolic_trace._wrapped_methods_to_patch = ( self.torch_fx__symbolic_trace__wrapped_methods_to_patch ) - if has_safetensors_and_transformers: + if has_safetensors_and_transformers(): + import safetensors + import transformers + safetensors.torch.load_file = self.safetensors_torch_load_file transformers.modeling_utils.safe_load_file = ( self.transformers_modeling_utils_safe_load_file
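The general pattern used here, probing for an optional dependency lazily and caching the result, is sketched below with a hypothetical package name (`some_optional_package` is not a real dependency):

```python
import functools

@functools.lru_cache(None)
def has_optional_dependency() -> bool:
    # Attempt the import only when the feature is actually used, and
    # cache the boolean so the import machinery runs at most once.
    try:
        import some_optional_package  # hypothetical optional dependency  # noqa: F401

        return True
    except ImportError:
        return False

def feature_needing_it():
    if not has_optional_dependency():
        raise RuntimeError("this feature requires some_optional_package")
    import some_optional_package

    return some_optional_package
```

Compared with a module-level `try`/`except ImportError`, nothing is imported until the helper is first called, which is what keeps `import torch._dynamo` from eagerly loading transformers via the ONNX patcher.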
2.41.0
d440ac734b9a459f7d994b3811f0c973516e0a1
Wed, 17 Apr 2024 13:27:32 +0800
[PATCH 0494/1000] Add Matmul recipe into x86_inductor_quantizer (#122776)
**Summary** Add `matmul` to the quantization recipes, noting that it is not a general recipe but one tailored to meet accuracy criteria for specific models. The `matmul` recipe is disabled by default. **Test Plan** ``` python -m pytest quantization/pt2e/test_x86inductor_quantizer.py -k test_attention_block ``` Differential Revision: [D56288468](https://our.internmc.facebook.com/intern/diff/D56288468) Pull Request resolved: https://github.com/pytorch/pytorch/pull/122776 Approved by: https://github.com/jgong5, https://github.com/jerryzh168
diff --git a/test/quantization/pt2e/test_x86inductor_quantizer.py b/test/quantization/pt2e/test_x86inductor_quantizer.py index cb7385ad27..daee638172 100644 --- a/test/quantization/pt2e/test_x86inductor_quantizer.py +++ b/test/quantization/pt2e/test_x86inductor_quantizer.py @@ -359,21 +359,45 @@ class TestHelperModules: return tmp + self.bn2(self.conv2(tmp)) class SelfAttnLikeModule(torch.nn.Module): - def __init__(self, input_dim) -> None: + def __init__( + self, + input_dim, + transpose_for_score=False, + num_attention_heads=None, + attention_head_size=None, + ) -> None: super().__init__() self.input_dim = input_dim self.q_proj = nn.Linear(input_dim, input_dim, bias=False) self.k_proj = nn.Linear(input_dim, input_dim, bias=False) self.v_proj = nn.Linear(input_dim, input_dim, bias=False) self.softmax = nn.Softmax(dim=-1) + self.transpose_for_score = transpose_for_score + if self.transpose_for_score: + assert num_attention_heads is not None + assert attention_head_size is not None + self.num_attention_heads = num_attention_heads + self.attention_head_size = attention_head_size + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + ( + self.num_attention_heads, + self.attention_head_size, + ) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) def forward(self, x): q = self.q_proj(x) k = self.k_proj(x) v = self.v_proj(x) - scores = torch.bmm(q, k.transpose(1, 2)) / (self.input_dim**0.5) + if self.transpose_for_score: + q = self.transpose_for_scores(q) + k = self.transpose_for_scores(k) + v = self.transpose_for_scores(v) + scores = torch.matmul(q, k.transpose(-1, -2)) / (self.input_dim**0.5) attention = self.softmax(scores) - weighted = torch.bmm(attention, v) + weighted = torch.matmul(attention, v) return weighted @@ -1402,7 +1426,6 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase): torch.ops.quantized_decomposed.choose_qparams.tensor, torch.ops.quantized_decomposed.quantize_per_tensor.tensor, torch.ops.quantized_decomposed.dequantize_per_tensor.tensor, - torch.ops.quantized_decomposed.dequantize_per_channel.default, torch.ops.aten.linear.default, ] self._test_quantizer( @@ -1438,7 +1461,6 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase): torch.ops.quantized_decomposed.choose_qparams.tensor, torch.ops.quantized_decomposed.quantize_per_tensor.tensor, torch.ops.quantized_decomposed.dequantize_per_tensor.tensor, - torch.ops.quantized_decomposed.dequantize_per_channel.default, torch.ops.aten.linear.default, ] self._test_quantizer( @@ -1551,3 +1573,72 @@ class TestQuantizePT2EX86Inductor(X86InductorQuantTestCase): node_occurrence, node_list, ) + + @skipIfNoX86 + def test_attention_block(self): + """ + Test pattern of Attention like Block with X86InductorQuantizer. 
+ """ + for annotate_matmul in [False, True]: + with override_quantized_engine("x86"), torch.no_grad(): + m = TestHelperModules.SelfAttnLikeModule( + input_dim=64 * 16, + transpose_for_score=True, + num_attention_heads=16, + attention_head_size=64, + ).eval() + example_inputs = (torch.randn(2, 384, 1024),) + + m(*example_inputs) + + quantizer = X86InductorQuantizer().set_global( + xiq.get_default_x86_inductor_quantization_config() + ) + + if annotate_matmul: + quantizer.set_function_type_qconfig( + torch.matmul, quantizer.get_global_quantization_config() + ) + + node_occurrence = { + torch.ops.quantized_decomposed.quantize_per_tensor.default: 5 + if annotate_matmul + else 1, + torch.ops.quantized_decomposed.dequantize_per_tensor.default: 7 + if annotate_matmul + else 3, + # quantize_per_channel for weights are const propagated + torch.ops.quantized_decomposed.quantize_per_channel.default: 0, + torch.ops.quantized_decomposed.dequantize_per_channel.default: 3, + } + if annotate_matmul: + node_list = [ + torch.ops.quantized_decomposed.quantize_per_tensor.default, + torch.ops.quantized_decomposed.dequantize_per_tensor.default, + torch.ops.aten.linear.default, + torch.ops.aten.view.default, + torch.ops.aten.permute.default, + torch.ops.quantized_decomposed.quantize_per_tensor.default, + torch.ops.quantized_decomposed.dequantize_per_tensor.default, + torch.ops.aten.matmul.default, + torch.ops.aten.div.Tensor, + torch.ops.aten.softmax.int, + ] + else: + node_list = [ + torch.ops.quantized_decomposed.quantize_per_tensor.default, + torch.ops.quantized_decomposed.dequantize_per_tensor.default, + torch.ops.aten.linear.default, + torch.ops.aten.view.default, + torch.ops.aten.permute.default, + torch.ops.aten.matmul.default, + torch.ops.aten.div.Tensor, + torch.ops.aten.softmax.int, + ] + self._test_quantizer( + m, + example_inputs, + quantizer, + node_occurrence, + node_list, + ) diff --git a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py index 269b0128c6..37220d17af 100644 --- a/torch/ao/quantization/quantizer/x86_inductor_quantizer.py +++ b/torch/ao/quantization/quantizer/x86_inductor_quantizer.py @@ -82,7 +82,9 @@ default_quantizable_ops = propagation_quantizable_ops | { # A superset of default_quantizable_ops includes operators support the int8 data type # but not enabled by default recipe of X86InductorQuantizer. -quantizable_ops = default_quantizable_ops +quantizable_ops = default_quantizable_ops | { + torch.ops.aten.matmul.default, +} QUANT_ANNOTATION_KEY = "quantization_annotation" @@ -110,6 +112,12 @@ def _map_module_function_to_aten_operator_type(): ], torch.ops.aten.flatten.using_ints, ), + ( + [ + torch.matmul, + ], + torch.ops.aten.matmul.default, + ), ) for map_item in map_list: module_function_to_aten_operator.update(dict.fromkeys(map_item[0], map_item[1])) # type: ignore[call-overload] @@ -310,6 +318,14 @@ class X86InductorQuantizer(Quantizer): self.global_config = quantization_config return self + def get_global_quantization_config(self): + if not isinstance(self.global_config, QuantizationConfig): + warnings.warn( + "The global_config for X86InductorQuantizer is currently invalid. \ + Please ensure that you use set_global to establish the global quantization configuration." + ) + return self.global_config + def set_function_type_qconfig( self, function_type: Callable, @@ -499,6 +515,7 @@ class X86InductorQuantizer(Quantizer): # Step1: Recipe of fusion patterns like conv/linear. 
self._annotate_conv2d_fusion_pattern(model) self._annotate_linear_fusion_pattern(model) + self._annotate_matmul(model) # Step2: Recipe to propagate annotation for patterns beside conv/linear. # Go through all the nodes from start to end. @@ -752,6 +769,24 @@ class X86InductorQuantizer(Quantizer): self._annotate_linear_unary(model, config) self._annotate_linear(model, config) + def _annotate_matmul(self, model: torch.fx.GraphModule): + if config := self._get_aten_operator_qconfig(torch.ops.aten.matmul.default): + for node in model.graph.nodes: + if node.target == torch.ops.aten.matmul.default and not _is_annotated( + [node] + ): + input_qspec_map = {} + matmul_node = node + for input_node in matmul_node.args: + input_qspec_map[input_node] = get_input_act_qspec(config) + matmul_node.meta[ + QUANT_ANNOTATION_KEY + ] = _X86InductorQuantizationAnnotation( + input_qspec_map=input_qspec_map, + _annotated=True, + _is_output_of_quantized_pattern=True, + ) + def _annotate_conv2d_binary_unary( self, gm: torch.fx.GraphModule, quantization_config: QuantizationConfig ) -> None:
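Based on the test above, this is roughly how a user opts in to the (default-off) `matmul` recipe; the resulting quantizer is then used with the usual `prepare_pt2e`/`convert_pt2e` flow exactly as in `_test_quantizer`:

```python
import torch
import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer

quantizer = X86InductorQuantizer().set_global(
    xiq.get_default_x86_inductor_quantization_config()
)
# matmul is in `quantizable_ops` but not in the default recipe, so it is
# only annotated when explicitly mapped to a quantization config.
quantizer.set_function_type_qconfig(
    torch.matmul, quantizer.get_global_quantization_config()
)
```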
2.41.0
ffecb5affd60a4359241aec7469a1627961fe63
Thu, 11 Apr 2024 00:31:01 -0700
[PATCH 0495/1000] [Inductor] Enable VecMask store (#123710)
**Summary** Enable vectorization of stores with `bool` dtype. **Test Plan** ``` python -u -m pytest -s -v inductor/test_cpu_repro.py -k test_decomposed_fake_quant_per_channel ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/123710 Approved by: https://github.com/jgong5, https://github.com/lezcano ghstack dependencies: #123512
diff --git a/aten/src/ATen/cpu/vec/vec_mask.h b/aten/src/ATen/cpu/vec/vec_mask.h index 75964fa675..90f0f98962 100644 --- a/aten/src/ATen/cpu/vec/vec_mask.h +++ b/aten/src/ATen/cpu/vec/vec_mask.h @@ -134,6 +134,24 @@ class VecMask { return VectorizedN<T, N>(VectorizedN<T, N>::loadu(mask)); } + static VecMask<T, N> blendv( + const VecMask<T, N>& c, + const VecMask<T, N>& b, + const VecMask<T, N>& a) { + VectorizedN<T, N> result = VectorizedN<T, N>::blendv( + VectorizedN<T, N>(c), + VectorizedN<T, N>(b), + VectorizedN<T, N>(a)); + return result; + } + + void store(bool* b, int count = size()) { + constexpr int L = (VectorizedN<T, N>::size() + Vectorized<bool>::size() - 1)/ Vectorized<bool>::size(); + auto res = this->to<bool, L>(); + res.store(b, count); + return; + } + template <typename U, int L, std::enable_if_t<L >= 2, int> = 0> inline VectorizedN<U, L> to() const { return VecMaskTo<U, L, T, N>::apply(*this); diff --git a/test/inductor/test_cpu_repro.py b/test/inductor/test_cpu_repro.py index 7acd238c96..fea142e6f6 100644 --- a/test/inductor/test_cpu_repro.py +++ b/test/inductor/test_cpu_repro.py @@ -1851,6 +1851,8 @@ class CPUReproTests(TestCase): self.assertEqual(input_grad_aten_eager, input_grad) self.assertEqual(input_grad_decomp_eager, input_grad) self.assertEqual(input_grad[1, 2, 3, 4], torch.tensor(0.0)) + # For forward and backward kernel + check_metrics_vec_kernel_count(2) @patch("torch.cuda.is_available", lambda: False) def test_scatter_using_atomic_add(self): diff --git a/torch/_inductor/codegen/cpp.py b/torch/_inductor/codegen/cpp.py index e4b30f0ba1..1cd2903229 100644 --- a/torch/_inductor/codegen/cpp.py +++ b/torch/_inductor/codegen/cpp.py @@ -1539,7 +1539,14 @@ class CppVecOverrides(CppOverrides): @staticmethod def where(a, b, c): assert isinstance(V.kernel, CppVecKernel) - return f"decltype({b})::blendv({c}, {b}, {V.kernel._get_mask_cast(a, b.dtype)})" + if b.dtype == torch.bool: + assert c.dtype == torch.bool + blendv_a = f"{V.kernel._get_mask_cast(a, torch.float)}" + blendv_b = f"{V.kernel._get_mask_cast(b, torch.float)}" + blendv_c = f"{V.kernel._get_mask_cast(c, torch.float)}" + return f"decltype({b})::blendv({blendv_c}, {blendv_b}, {blendv_a})" + else: + return f"decltype({b})::blendv({c}, {b}, {V.kernel._get_mask_cast(a, b.dtype)})" @staticmethod def sign(x): @@ -2869,7 +2876,7 @@ class CppVecKernelChecker(CppVecKernel): self.exit_stack = contextlib.ExitStack() # Cache all the load result - self.load_supported_dtypes: List[torch.dtype] = [ + self.supported_dtypes: List[torch.dtype] = [ torch.float, torch.bfloat16, torch.float16, @@ -2879,15 +2886,6 @@ class CppVecKernelChecker(CppVecKernel): torch.int32, torch.int64, ] - self.store_supported_dtypes: List[torch.dtype] = [ - torch.float, - torch.bfloat16, - torch.float16, - torch.uint8, - torch.int8, - torch.int32, - torch.int64, - ] def disable_vec(self, msg=None): if schedule_log.isEnabledFor(logging.DEBUG): @@ -2907,7 +2905,7 @@ class CppVecKernelChecker(CppVecKernel): self.disable_vec("not a loop") return var - if load_dtype not in self.load_supported_dtypes and ( + if load_dtype not in self.supported_dtypes and ( index.has(self.itervars[self.tiling_idx]) or free_symbol_startswith(index, "tmp") ): @@ -2928,7 +2926,7 @@ class CppVecKernelChecker(CppVecKernel): assert opt_ctx opt_ctx.dtype = store_dtype - if store_dtype not in self.store_supported_dtypes: + if store_dtype not in self.supported_dtypes: self.disable_vec(f"{store_dtype} not supported by store") return self.simd_vec @@ -3032,18 +3030,7 @@ class 
CppVecKernelChecker(CppVecKernel): ): opt_ctx.dtype = torch.float32 - supported_dtypes = [ - torch.float, - torch.bfloat16, - torch.float16, - torch.bool, - torch.uint8, - torch.int8, - torch.int32, - torch.int64, - ] - - if opt_ctx.dtype not in supported_dtypes: + if opt_ctx.dtype not in self.supported_dtypes: self.disable_vec(f"constant dtype: {opt_ctx.dtype}") return val @@ -3116,16 +3103,7 @@ class CppVecKernelChecker(CppVecKernel): @staticmethod def to_dtype(x, dtype, src_dtype=None): - if dtype not in [ - torch.float, - torch.bfloat16, - torch.float16, - torch.bool, - torch.uint8, - torch.int8, - torch.int32, - torch.int64, - ]: + if dtype not in self.supported_dtypes: self.disable_vec(f"to_dtype: {dtype}") return x
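For context, workloads like the ones below can now take the vectorized path on CPU: a kernel whose output is a stored `bool` mask, and a `torch.where` whose value operands are themselves `bool` (the `blendv`-on-bool case handled above). Whether a particular kernel actually vectorizes still depends on the checker, so treat this as an illustrative shape of workload rather than a guarantee:

```python
import torch

@torch.compile
def in_range_mask(x, lo, hi):
    # The result is a bool tensor the CPU backend must store.
    return (x >= lo) & (x <= hi)

@torch.compile
def pick_mask(cond, a, b):
    # torch.where with bool value operands exercises blendv on bool.
    return torch.where(cond, a, b)

x = torch.randn(1024)
print(in_range_mask(x, -1.0, 1.0).dtype)  # torch.bool

cond = torch.rand(1024) > 0.5
a, b = torch.rand(1024) > 0.3, torch.rand(1024) > 0.7
print(pick_mask(cond, a, b).dtype)  # torch.bool
```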
2.41.0
f5778476adefd6bb733edbda3ce0eb73156d547
Tue, 23 Apr 2024 00:33:20 +0000
[PATCH 0496/1000] rename ort to maia (#123265)
Fixes #123264 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123265 Approved by: https://github.com/albanD
diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h index 5c5036caa9..32b22855f9 100644 --- a/aten/src/ATen/Context.h +++ b/aten/src/ATen/Context.h @@ -12,9 +12,9 @@ #include <ATen/detail/CUDAHooksInterface.h> #include <ATen/detail/HIPHooksInterface.h> #include <ATen/detail/IPUHooksInterface.h> +#include <ATen/detail/MAIAHooksInterface.h> #include <ATen/detail/MPSHooksInterface.h> #include <ATen/detail/MTIAHooksInterface.h> -#include <ATen/detail/ORTHooksInterface.h> #include <ATen/detail/PrivateUse1HooksInterface.h> #include <ATen/detail/XPUHooksInterface.h> #include <c10/core/QEngine.h> @@ -142,8 +142,8 @@ class TORCH_API Context { static bool hasLazy() { return c10::impl::hasDeviceGuardImpl(c10::DeviceType::Lazy); } - static bool hasORT() { - return c10::impl::hasDeviceGuardImpl(c10::DeviceType::ORT); + static bool hasMAIA() { + return c10::impl::hasDeviceGuardImpl(c10::DeviceType::MAIA); } // defined in header so that getNonVariableType has ability to inline // call_once check. getNonVariableType is called fairly frequently @@ -455,8 +455,8 @@ static inline bool hasMPS() { return globalContext().hasMPS(); } -static inline bool hasORT() { - return globalContext().hasORT(); +static inline bool hasMAIA() { + return globalContext().hasMAIA(); } static inline bool hasXPU() { diff --git a/aten/src/ATen/TensorIterator.cpp b/aten/src/ATen/TensorIterator.cpp index 868fdd83cc..0afac10d44 100644 --- a/aten/src/ATen/TensorIterator.cpp +++ b/aten/src/ATen/TensorIterator.cpp @@ -1530,13 +1530,13 @@ void TensorIteratorBase::build(TensorIteratorConfig& config) { // XLA and lazy tensors don't have storage, so they don't have an underlying data pointer. // Nothing beyond this point is important for meta functions, so it's fine to exit early here. - // Extend the condition to ORT tesnors as ORT tensors also don't have storage. + // Extend the condition to MAIA tesnors as MAIA tensors also don't have storage. if (privateuse1_without_storage || common_device_.type() == DeviceType::MTIA || common_device_.type() == DeviceType::XLA || common_device_.type() == DeviceType::IPU || common_device_.type() == DeviceType::Lazy || - common_device_.type() == DeviceType::ORT || + common_device_.type() == DeviceType::MAIA || common_device_.type() == DeviceType::HPU) return; for (auto& op : operands_) { diff --git a/aten/src/ATen/Version.cpp b/aten/src/ATen/Version.cpp index eb71fe315d..cf33d89e08 100644 --- a/aten/src/ATen/Version.cpp +++ b/aten/src/ATen/Version.cpp @@ -190,8 +190,8 @@ std::string show_config() { ss << detail::getCUDAHooks().showConfig(); } - if (hasORT()) { - ss << detail::getORTHooks().showConfig(); + if (hasMAIA()) { + ss << detail::getMAIAHooks().showConfig(); } if (hasXPU()) { diff --git a/aten/src/ATen/core/TensorBase.h b/aten/src/ATen/core/TensorBase.h index a94b28b86f..e03c6bdf2b 100644 --- a/aten/src/ATen/core/TensorBase.h +++ b/aten/src/ATen/core/TensorBase.h @@ -507,10 +507,10 @@ class TORCH_API TensorBase { return impl_->is_mps(); } - /// Returns if a `Tensor` is ort tensor. - bool is_ort() const { + /// Returns if a `Tensor` is maia tensor. + bool is_maia() const { // NB: this is not a native function to avoid dispatching overhead. - return impl_->is_ort(); + return impl_->is_maia(); } /// Returns if a `Tensor` is vulkan tensor. 
diff --git a/aten/src/ATen/core/dispatch/OperatorEntry.cpp b/aten/src/ATen/core/dispatch/OperatorEntry.cpp index 627109c516..5f4538f2c9 100644 --- a/aten/src/ATen/core/dispatch/OperatorEntry.cpp +++ b/aten/src/ATen/core/dispatch/OperatorEntry.cpp @@ -421,7 +421,7 @@ void OperatorEntry::updateDispatchTable_(const c10::Dispatcher& dispatcher, Disp // In theory, we should only have to check if the given runtime key has "dense" functionality, // e.g. DispatchKey::CPU (which is composed of DispatchKey::Dense and BackendComponent::CPUBit). // However, there are some backends that should be included in this set that don't have the dense key set. - // E.g. DispatchKey::Meta, DispatchKey::ORT. + // E.g. DispatchKey::Meta, DispatchKey::MAIA. if (c10::isBackendDispatchKey(dispatch_key)) { DispatchKey autograd_key = getAutogradKeyFromBackend(toBackendComponent(dispatch_key)); updateDispatchTableEntry_(dispatcher, autograd_key); diff --git a/aten/src/ATen/core/op_registration/README.md b/aten/src/ATen/core/op_registration/README.md index 5605e962a6..61b41b48c4 100644 --- a/aten/src/ATen/core/op_registration/README.md +++ b/aten/src/ATen/core/op_registration/README.md @@ -13,13 +13,13 @@ There’s four main use cases * You’re writing a new operator that isn’t supposed to be part of the public PyTorch API. * You’re writing a new operator but don’t want to change the core pytorch code base, say you’re developing a shared library with operators. * You’re writing a C++ extension for PyTorch or you’re using inline c++ in your .py model files. -* You’re writing a backend library like XLA or ORT that adds new kernels to all operators defined in `native_functions.yaml`. +* You’re writing a backend library like XLA or MAIA that adds new kernels to all operators defined in `native_functions.yaml`. For these use cases, the custom operator API is the better solution. ### What is the price for using the custom operator API instead of `native_functions.yaml`? -If you’re just using the custom operator API to add new kernels for existing operators (e.g. the XLA/ORT example above), then you’re fine and don’t pay any price. If, however, you define a new operator purely using the custom op API, i.e. your operator never shows up in `native_functions.yaml`, then you need to be aware of a few caveats. +If you’re just using the custom operator API to add new kernels for existing operators (e.g. the XLA/MAIA example above), then you’re fine and don’t pay any price. If, however, you define a new operator purely using the custom op API, i.e. your operator never shows up in `native_functions.yaml`, then you need to be aware of a few caveats. * It will not get a C++ API generated. There will not be `Tensor::your_op()` methods or `at::your_op()` functions to call your operator. * The API for calling the operator from Python looks a little bit different. It needs to be called through `torch.ops.your_op()` instead of `torch._C`. 
diff --git a/aten/src/ATen/detail/MAIAHooksInterface.cpp b/aten/src/ATen/detail/MAIAHooksInterface.cpp new file mode 100644 index 0000000000..e82ad8f677 --- /dev/null +++ b/aten/src/ATen/detail/MAIAHooksInterface.cpp @@ -0,0 +1,29 @@ +#include <ATen/detail/MAIAHooksInterface.h> + +#include <c10/util/CallOnce.h> +#include <c10/util/Registry.h> + +#include <cstddef> +#include <memory> + +namespace at { +namespace detail { + +// See getCUDAHooks for some more commentary +const MAIAHooksInterface& getMAIAHooks() { + static std::unique_ptr<MAIAHooksInterface> maia_hooks; + static c10::once_flag once; + c10::call_once(once, [] { + maia_hooks = MAIAHooksRegistry()->Create("MAIAHooks", {}); + if (!maia_hooks) { + maia_hooks = std::make_unique<MAIAHooksInterface>(); + } + }); + return *maia_hooks; +} +} // namespace detail + +// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) +C10_DEFINE_REGISTRY(MAIAHooksRegistry, MAIAHooksInterface, MAIAHooksArgs) + +} // namespace at diff --git a/aten/src/ATen/detail/MAIAHooksInterface.h b/aten/src/ATen/detail/MAIAHooksInterface.h new file mode 100644 index 0000000000..ad4ef146ec --- /dev/null +++ b/aten/src/ATen/detail/MAIAHooksInterface.h @@ -0,0 +1,31 @@ +#pragma once + +#include <c10/util/Exception.h> +#include <c10/util/Registry.h> + +// NB: Class must live in `at` due to limitations of Registry.h. +namespace at { + +struct TORCH_API MAIAHooksInterface { + // This should never actually be implemented, but it is used to + // squelch -Werror=non-virtual-dtor + virtual ~MAIAHooksInterface() = default; + + virtual std::string showConfig() const { + TORCH_CHECK(false, "Cannot query detailed MAIA version information."); + } +}; + +// NB: dummy argument to suppress "ISO C++11 requires at least one argument +// for the "..." in a variadic macro" +struct TORCH_API MAIAHooksArgs {}; + +TORCH_DECLARE_REGISTRY(MAIAHooksRegistry, MAIAHooksInterface, MAIAHooksArgs); +#define REGISTER_MAIA_HOOKS(clsname) \ + C10_REGISTER_CLASS(MAIAHooksRegistry, clsname, clsname) + +namespace detail { +TORCH_API const MAIAHooksInterface& getMAIAHooks(); +} // namespace detail + +} // namespace at diff --git a/aten/src/ATen/detail/ORTHooksInterface.cpp b/aten/src/ATen/detail/ORTHooksInterface.cpp deleted file mode 100644 index bbb69809e8..0000000000 --- a/aten/src/ATen/detail/ORTHooksInterface.cpp +++ /dev/null @@ -1,29 +0,0 @@ -#include <ATen/detail/ORTHooksInterface.h> - -#include <c10/util/CallOnce.h> -#include <c10/util/Registry.h> - -#include <cstddef> -#include <memory> - -namespace at { -namespace detail { - -// See getCUDAHooks for some more commentary -const ORTHooksInterface& getORTHooks() { - static std::unique_ptr<ORTHooksInterface> ort_hooks; - static c10::once_flag once; - c10::call_once(once, [] { - ort_hooks = ORTHooksRegistry()->Create("ORTHooks", {}); - if (!ort_hooks) { - ort_hooks = std::make_unique<ORTHooksInterface>(); - } - }); - return *ort_hooks; -} -} // namespace detail - -// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables) -C10_DEFINE_REGISTRY(ORTHooksRegistry, ORTHooksInterface, ORTHooksArgs) - -} // namespace at diff --git a/aten/src/ATen/detail/ORTHooksInterface.h b/aten/src/ATen/detail/ORTHooksInterface.h deleted file mode 100644 index f49969ec66..0000000000 --- a/aten/src/ATen/detail/ORTHooksInterface.h +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -#include <c10/util/Exception.h> -#include <c10/util/Registry.h> - -constexpr const char* ORT_HELP = - " You need to 'import torch_ort' to use the 'ort' device in PyTorch. 
" - "The 'torch_ort' module is provided by the ONNX Runtime itself " - "(https://onnxruntime.ai)."; - -// NB: Class must live in `at` due to limitations of Registry.h. -namespace at { - -struct TORCH_API ORTHooksInterface { - // This should never actually be implemented, but it is used to - // squelch -Werror=non-virtual-dtor - virtual ~ORTHooksInterface() = default; - - virtual std::string showConfig() const { - TORCH_CHECK(false, "Cannot query detailed ORT version information.", ORT_HELP); - } -}; - -// NB: dummy argument to suppress "ISO C++11 requires at least one argument -// for the "..." in a variadic macro" -struct TORCH_API ORTHooksArgs {}; - -TORCH_DECLARE_REGISTRY(ORTHooksRegistry, ORTHooksInterface, ORTHooksArgs); -#define REGISTER_ORT_HOOKS(clsname) \ - C10_REGISTER_CLASS(ORTHooksRegistry, clsname, clsname) - -namespace detail { -TORCH_API const ORTHooksInterface& getORTHooks(); -} // namespace detail - -} // namespace at diff --git a/aten/src/ATen/test/extension_backend_test.cpp b/aten/src/ATen/test/extension_backend_test.cpp index f2ce15e99e..4be68b1d0a 100644 --- a/aten/src/ATen/test/extension_backend_test.cpp +++ b/aten/src/ATen/test/extension_backend_test.cpp @@ -6,8 +6,8 @@ #include <torch/csrc/jit/runtime/operator.h> -// NB. These tests use the ORT dispatch key to test backend dispatching -// machinery, but these tests are not specific to ORT at all. The ORT +// NB. These tests use the MAIA dispatch key to test backend dispatching +// machinery, but these tests are not specific to MAIA at all. The MAIA // backend is fully out-of-tree, so it's safe to use this key for // in-tree tests. @@ -22,16 +22,16 @@ Tensor empty_override(SymIntArrayRef size, c10::optional<ScalarType> dtype, c10: Storage( Storage::use_byte_size_t(), 0, - at::DataPtr(nullptr, Device(DeviceType::ORT, 1)), + at::DataPtr(nullptr, Device(DeviceType::MAIA, 1)), nullptr, false), - DispatchKey::ORT, + DispatchKey::MAIA, caffe2::TypeMeta::Make<float>()); return Tensor(std::move(tensor_impl)); } Tensor add_override(const Tensor & a, const Tensor & b , const Scalar& c) { - auto out = empty({5, 5}, at::kORT); // Don't return self as-is + auto out = empty({5, 5}, at::kMAIA); // Don't return self as-is test_int = 2; return out; } @@ -47,28 +47,28 @@ Tensor empty_strided_override( return empty_override(fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, c10::nullopt); } -TORCH_LIBRARY_IMPL(aten, ORT, m) { +TORCH_LIBRARY_IMPL(aten, MAIA, m) { m.impl("aten::empty.memory_format", empty_override); m.impl("aten::empty_strided", empty_strided_override); m.impl("aten::add.Tensor", add_override); } TEST(BackendExtensionTest, TestRegisterOp) { - Tensor a = empty({5, 5}, at::kORT); - ASSERT_EQ(a.device().type(), at::kORT); + Tensor a = empty({5, 5}, at::kMAIA); + ASSERT_EQ(a.device().type(), at::kMAIA); ASSERT_EQ(a.device().index(), 1); ASSERT_EQ(a.dtype(), caffe2::TypeMeta::Make<float>()); ASSERT_EQ(test_int, 1); - Tensor b = empty_like(a, at::kORT); - ASSERT_EQ(b.device().type(), at::kORT); + Tensor b = empty_like(a, at::kMAIA); + ASSERT_EQ(b.device().type(), at::kMAIA); ASSERT_EQ(b.device().index(), 1); ASSERT_EQ(b.dtype(), caffe2::TypeMeta::Make<float>()); add(a, b); ASSERT_EQ(test_int, 2); - // Ensure that non-ORT operator still works + // Ensure that non-MAIA operator still works Tensor d = empty({5, 5}, at::kCPU); ASSERT_EQ(d.device().type(), at::kCPU); } diff --git a/build_variables.bzl b/build_variables.bzl index ec0c31369e..a8b173ac3f 100644 --- a/build_variables.bzl +++ b/build_variables.bzl @@ -947,7 
+947,7 @@ aten_cpu_non_globed_sources = [ "aten/src/ATen/detail/CUDAHooksInterface.cpp", "aten/src/ATen/detail/HIPHooksInterface.cpp", "aten/src/ATen/detail/MPSHooksInterface.cpp", - "aten/src/ATen/detail/ORTHooksInterface.cpp", + "aten/src/ATen/detail/MAIAHooksInterface.cpp", "aten/src/ATen/detail/PrivateUse1HooksInterface.cpp", "aten/src/ATen/detail/XPUHooksInterface.cpp", "aten/src/ATen/detail/MTIAHooksInterface.cpp", @@ -964,7 +964,7 @@ aten_cpu_non_globed_headers = [ "aten/src/ATen/detail/CUDAHooksInterface.h", "aten/src/ATen/detail/MPSHooksInterface.h", "aten/src/ATen/detail/HIPHooksInterface.h", - "aten/src/ATen/detail/ORTHooksInterface.h", + "aten/src/ATen/detail/MAIAHooksInterface.h", "aten/src/ATen/detail/PrivateUse1HooksInterface.h", "aten/src/ATen/detail/XPUHooksInterface.h", "aten/src/ATen/detail/MTIAHooksInterface.h", diff --git a/c10/core/Backend.h b/c10/core/Backend.h index d298f0d697..1cf1782fa5 100644 --- a/c10/core/Backend.h +++ b/c10/core/Backend.h @@ -46,7 +46,7 @@ enum class Backend { SparseCsrVE, SparseCsrXPU, SparseCsrPrivateUse1, - ORT, + MAIA, XLA, Vulkan, Metal, @@ -76,8 +76,8 @@ static inline Backend dispatchKeyToBackend(DispatchKey t) { return Backend::VE; } else if (t == DispatchKey::FPGA) { return Backend::FPGA; - } else if (t == DispatchKey::ORT) { - return Backend::ORT; + } else if (t == DispatchKey::MAIA) { + return Backend::MAIA; } else if (t == DispatchKey::XLA || t == DispatchKey::AutogradXLA) { return Backend::XLA; } else if (t == DispatchKey::Lazy || t == DispatchKey::AutogradLazy) { @@ -154,8 +154,8 @@ static inline DispatchKey backendToDispatchKey(Backend b) { return DispatchKey::VE; case Backend::FPGA: return DispatchKey::FPGA; - case Backend::ORT: - return DispatchKey::ORT; + case Backend::MAIA: + return DispatchKey::MAIA; case Backend::XLA: return DispatchKey::XLA; case Backend::Lazy: @@ -236,8 +236,8 @@ static inline DeviceType backendToDeviceType(Backend b) { return DeviceType::VE; case Backend::FPGA: return DeviceType::FPGA; - case Backend::ORT: - return DeviceType::ORT; + case Backend::MAIA: + return DeviceType::MAIA; case Backend::XLA: return DeviceType::XLA; case Backend::Lazy: @@ -298,8 +298,8 @@ static inline const char* toString(Backend b) { return "XPU"; case Backend::IPU: return "IPU"; - case Backend::ORT: - return "ORT"; + case Backend::MAIA: + return "MAIA"; case Backend::XLA: return "XLA"; case Backend::Lazy: diff --git a/c10/core/Device.cpp b/c10/core/Device.cpp index 7cc97d1a33..1b19114663 100644 --- a/c10/core/Device.cpp +++ b/c10/core/Device.cpp @@ -26,7 +26,7 @@ DeviceType parse_type(const std::string& device_string) { {"hip", DeviceType::HIP}, {"ve", DeviceType::VE}, {"fpga", DeviceType::FPGA}, - {"ort", DeviceType::ORT}, + {"maia", DeviceType::MAIA}, {"xla", DeviceType::XLA}, {"lazy", DeviceType::Lazy}, {"vulkan", DeviceType::Vulkan}, diff --git a/c10/core/Device.h b/c10/core/Device.h index c58c03c9b9..cbe9129852 100644 --- a/c10/core/Device.h +++ b/c10/core/Device.h @@ -142,9 +142,9 @@ struct C10_API Device final { return type_ == DeviceType::Metal; } - /// Return true if the device is of ORT type. - bool is_ort() const noexcept { - return type_ == DeviceType::ORT; + /// Return true if the device is of MAIA type. + bool is_maia() const noexcept { + return type_ == DeviceType::MAIA; } /// Return true if the device is of META type. 
diff --git a/c10/core/DeviceType.cpp b/c10/core/DeviceType.cpp index 0b44e1d862..3cd70f42e2 100644 --- a/c10/core/DeviceType.cpp +++ b/c10/core/DeviceType.cpp @@ -27,8 +27,8 @@ std::string DeviceTypeName(DeviceType d, bool lower_case) { return lower_case ? "ve" : "VE"; case DeviceType::FPGA: return lower_case ? "fpga" : "FPGA"; - case DeviceType::ORT: - return lower_case ? "ort" : "ORT"; + case DeviceType::MAIA: + return lower_case ? "maia" : "MAIA"; case DeviceType::XLA: return lower_case ? "xla" : "XLA"; case DeviceType::Lazy: @@ -83,7 +83,7 @@ bool isValidDeviceType(DeviceType d) { case DeviceType::HIP: case DeviceType::VE: case DeviceType::FPGA: - case DeviceType::ORT: + case DeviceType::MAIA: case DeviceType::XLA: case DeviceType::Lazy: case DeviceType::MPS: diff --git a/c10/core/DeviceType.h b/c10/core/DeviceType.h index 701ea3f3bd..911c863363 100644 --- a/c10/core/DeviceType.h +++ b/c10/core/DeviceType.h @@ -42,7 +42,7 @@ enum class DeviceType : int8_t { IDEEP = 5, // IDEEP. HIP = 6, // AMD HIP FPGA = 7, // FPGA - ORT = 8, // ONNX Runtime / Microsoft + MAIA = 8, // ONNX Runtime / Microsoft XLA = 9, // XLA / TPU Vulkan = 10, // Vulkan Metal = 11, // Metal @@ -66,7 +66,7 @@ constexpr DeviceType kCPU = DeviceType::CPU; constexpr DeviceType kCUDA = DeviceType::CUDA; constexpr DeviceType kHIP = DeviceType::HIP; constexpr DeviceType kFPGA = DeviceType::FPGA; -constexpr DeviceType kORT = DeviceType::ORT; +constexpr DeviceType kMAIA = DeviceType::MAIA; constexpr DeviceType kXLA = DeviceType::XLA; constexpr DeviceType kMPS = DeviceType::MPS; constexpr DeviceType kMeta = DeviceType::Meta; diff --git a/c10/core/DispatchKey.cpp b/c10/core/DispatchKey.cpp index 62f1ac03e5..0388234efd 100644 --- a/c10/core/DispatchKey.cpp +++ b/c10/core/DispatchKey.cpp @@ -66,8 +66,8 @@ const char* toString(DispatchKey t) { return "Dense"; case DispatchKey::FPGA: return "FPGA"; - case DispatchKey::ORT: - return "ORT"; + case DispatchKey::MAIA: + return "MAIA"; case DispatchKey::Vulkan: return "Vulkan"; case DispatchKey::Metal: @@ -263,7 +263,7 @@ c10::DispatchKey parseDispatchKey(const std::string& k) { {"Undefined", c10::DispatchKey::Undefined}, {"Dense", c10::DispatchKey::Dense}, {"FPGA", c10::DispatchKey::FPGA}, - {"ORT", c10::DispatchKey::ORT}, + {"MAIA", c10::DispatchKey::MAIA}, {"MPS", c10::DispatchKey::MPS}, {"Vulkan", c10::DispatchKey::Vulkan}, {"Metal", c10::DispatchKey::Metal}, diff --git a/c10/core/DispatchKey.h b/c10/core/DispatchKey.h index 0219db40ed..71277ebfd8 100644 --- a/c10/core/DispatchKey.h +++ b/c10/core/DispatchKey.h @@ -181,13 +181,11 @@ enum class DispatchKey : uint16_t { // https://gitlab.com/pytorch-complex/vitis_kernels // TODO: put this in BackendComponents - // ONNX Runtime, lives out of tree at https://github.com/pytorch/ort and - // https://github.com/microsoft/onnxruntime, and is also used to test general - // backend/extension machinery in the core. 
cf: - // - test/cpp_extensions/ort_extension.cpp + // MAIA backend lives out of tree + // - test/cpp_extensions/maia_extension.cpp // - test/test_torch.py // - aten/src/ATen/test/extension_backend_test.cpp - ORT, + MAIA, Vulkan, // TODO: put this in BackendComponents Metal, // TODO: put this in BackendComponents diff --git a/c10/core/DispatchKeySet.h b/c10/core/DispatchKeySet.h index db2e94fd8c..f7461ea73a 100644 --- a/c10/core/DispatchKeySet.h +++ b/c10/core/DispatchKeySet.h @@ -702,7 +702,7 @@ constexpr DispatchKeySet autogradother_backends = // Technically, HIP will now redispatch to its own custom AutogradHIP // slot in the runtime table. {DispatchKey::FPGA, - DispatchKey::ORT, + DispatchKey::MAIA, DispatchKey::Vulkan, DispatchKey::Metal, DispatchKey::CustomRNGKeyId, diff --git a/c10/core/TensorImpl.h b/c10/core/TensorImpl.h index 40a65cb107..95e7a0e3b6 100644 --- a/c10/core/TensorImpl.h +++ b/c10/core/TensorImpl.h @@ -1204,11 +1204,11 @@ struct C10_API TensorImpl : public c10::intrusive_ptr_target { return device_opt_.has_value() && device_opt_->type() == kMPS; } - bool is_ort() const { + bool is_maia() const { if (C10_UNLIKELY(device_policy_)) { - return device_custom().is_ort(); + return device_custom().is_maia(); } - return device_opt_.has_value() && device_opt_->type() == kORT; + return device_opt_.has_value() && device_opt_->type() == kMAIA; } bool is_nested() const { diff --git a/c10/core/TensorOptions.h b/c10/core/TensorOptions.h index 2d9e4a2433..765f474702 100644 --- a/c10/core/TensorOptions.h +++ b/c10/core/TensorOptions.h @@ -653,8 +653,8 @@ inline DispatchKey computeDispatchKey( #undef DO_CASE case c10::DeviceType::FPGA: return DispatchKey::FPGA; - case c10::DeviceType::ORT: - return DispatchKey::ORT; + case c10::DeviceType::MAIA: + return DispatchKey::MAIA; case c10::DeviceType::Vulkan: return DispatchKey::Vulkan; case c10::DeviceType::Metal: @@ -757,8 +757,8 @@ inline c10::DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key) { case DispatchKey::Vulkan: return c10::DeviceType::Vulkan; - case DispatchKey::ORT: - return c10::DeviceType::ORT; + case DispatchKey::MAIA: + return c10::DeviceType::MAIA; default: TORCH_CHECK( false, diff --git a/caffe2/proto/caffe2.proto b/caffe2/proto/caffe2.proto index 861a6c5d43..077e7b0ed5 100644 --- a/caffe2/proto/caffe2.proto +++ b/caffe2/proto/caffe2.proto @@ -218,7 +218,7 @@ enum DeviceTypeProto { PROTO_IDEEP = 5; // IDEEP. PROTO_HIP = 6; // AMD HIP PROTO_FPGA = 7; // FPGA - PROTO_ORT = 8; // ONNX Runtime + PROTO_MAIA = 8; // MAIA PROTO_XLA = 9; // XLA / TPU PROTO_MPS = 10; // MPS // Change the following number if you add more devices in the code. 
diff --git a/caffe2/proto/caffe2_pb2.pyi b/caffe2/proto/caffe2_pb2.pyi index ed1f4249a4..43249ebf75 100644 --- a/caffe2/proto/caffe2_pb2.pyi +++ b/caffe2/proto/caffe2_pb2.pyi @@ -23,7 +23,7 @@ class _DeviceTypeProto(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapp PROTO_IDEEP = DeviceTypeProto.V(5) PROTO_HIP = DeviceTypeProto.V(6) PROTO_FPGA = DeviceTypeProto.V(7) - PROTO_ORT = DeviceTypeProto.V(8) + PROTO_MAIA = DeviceTypeProto.V(8) PROTO_XLA = DeviceTypeProto.V(9) PROTO_MPS = DeviceTypeProto.V(10) PROTO_COMPILE_TIME_MAX_DEVICE_TYPES = DeviceTypeProto.V(11) @@ -37,7 +37,7 @@ PROTO_OPENCL = DeviceTypeProto.V(4) PROTO_IDEEP = DeviceTypeProto.V(5) PROTO_HIP = DeviceTypeProto.V(6) PROTO_FPGA = DeviceTypeProto.V(7) -PROTO_ORT = DeviceTypeProto.V(8) +PROTO_MAIA = DeviceTypeProto.V(8) PROTO_XLA = DeviceTypeProto.V(9) PROTO_MPS = DeviceTypeProto.V(10) PROTO_COMPILE_TIME_MAX_DEVICE_TYPES = DeviceTypeProto.V(11) diff --git a/test/cpp_extensions/ort_extension.cpp b/test/cpp_extensions/maia_extension.cpp similarity index 78% rename from test/cpp_extensions/ort_extension.cpp rename to test/cpp_extensions/maia_extension.cpp index b646f3b149..13315810f5 100644 --- a/test/cpp_extensions/ort_extension.cpp +++ b/test/cpp_extensions/maia_extension.cpp @@ -10,10 +10,10 @@ Tensor get_tensor(caffe2::TypeMeta dtype, IntArrayRef size) { Storage( Storage::use_byte_size_t(), 0, - at::DataPtr(nullptr, Device(DeviceType::ORT, 0)), + at::DataPtr(nullptr, Device(DeviceType::MAIA, 0)), nullptr, false), - DispatchKey::ORT, + DispatchKey::MAIA, dtype); // This is a hack to workaround the shape checks in _convolution. tensor_impl->set_sizes_contiguous(size); @@ -52,7 +52,7 @@ std::tuple<Tensor,Tensor,Tensor> fake_convolution_backward( get_tensor(input.dtype(), {})); } -TORCH_LIBRARY_IMPL(aten, ORT, m) { +TORCH_LIBRARY_IMPL(aten, MAIA, m) { m.impl("empty.memory_format", empty_override); m.impl("add.out", add_out_override); m.impl("convolution_overrideable", fake_convolution); @@ -61,34 +61,34 @@ TORCH_LIBRARY_IMPL(aten, ORT, m) { // TODO: Extend this to exercise multi-device setting. In that case, // we need to add a thread local variable to track the current device. 
-struct ORTGuardImpl final : public c10::impl::DeviceGuardImplInterface { - static constexpr DeviceType static_type = DeviceType::ORT; - ORTGuardImpl() {} - ORTGuardImpl(DeviceType t) { - AT_ASSERT(t == DeviceType::ORT); +struct MAIAGuardImpl final : public c10::impl::DeviceGuardImplInterface { + static constexpr DeviceType static_type = DeviceType::MAIA; + MAIAGuardImpl() {} + MAIAGuardImpl(DeviceType t) { + AT_ASSERT(t == DeviceType::MAIA); } DeviceType type() const override { - return DeviceType::ORT; + return DeviceType::MAIA; } Device exchangeDevice(Device d) const override { - AT_ASSERT(d.type() == DeviceType::ORT); + AT_ASSERT(d.type() == DeviceType::MAIA); AT_ASSERT(d.index() == 0); return d; } Device getDevice() const override { - return Device(DeviceType::ORT, 0); + return Device(DeviceType::MAIA, 0); } void setDevice(Device d) const override { - AT_ASSERT(d.type() == DeviceType::ORT); + AT_ASSERT(d.type() == DeviceType::MAIA); AT_ASSERT(d.index() == 0); } void uncheckedSetDevice(Device d) const noexcept override { } Stream getStream(Device d) const noexcept override { - return Stream(Stream::DEFAULT, Device(DeviceType::ORT, 0)); + return Stream(Stream::DEFAULT, Device(DeviceType::MAIA, 0)); } Stream exchangeStream(Stream s) const noexcept override { - return Stream(Stream::DEFAULT, Device(DeviceType::ORT, 0)); + return Stream(Stream::DEFAULT, Device(DeviceType::MAIA, 0)); } DeviceIndex deviceCount() const noexcept override { return 1; @@ -99,23 +99,23 @@ struct ORTGuardImpl final : public c10::impl::DeviceGuardImplInterface { const Stream& stream, const DeviceIndex device_index, const EventFlag flag) const override { - TORCH_CHECK(false, "ORT backend doesn't support events."); + TORCH_CHECK(false, "MAIA backend doesn't support events."); } void block( void* event, const Stream& stream) const override { - TORCH_CHECK(false, "ORT backend doesn't support events."); + TORCH_CHECK(false, "MAIA backend doesn't support events."); } bool queryEvent(void* event) const override { - TORCH_CHECK(false, "ORT backend doesn't support events."); + TORCH_CHECK(false, "MAIA backend doesn't support events."); } void destroyEvent( void* event, const DeviceIndex device_index) const noexcept override { } }; -constexpr DeviceType ORTGuardImpl::static_type; -C10_REGISTER_GUARD_IMPL(ORT, ORTGuardImpl); +constexpr DeviceType MAIAGuardImpl::static_type; +C10_REGISTER_GUARD_IMPL(MAIA, MAIAGuardImpl); int get_test_int() { return test_int; diff --git a/test/cpp_extensions/setup.py b/test/cpp_extensions/setup.py index 3731dc8c91..4d4288a307 100644 --- a/test/cpp_extensions/setup.py +++ b/test/cpp_extensions/setup.py @@ -28,8 +28,8 @@ ext_modules = [ "torch_test_cpp_extension.cpp", ["extension.cpp"], extra_compile_args=CXX_FLAGS ), CppExtension( - "torch_test_cpp_extension.ort", - ["ort_extension.cpp"], + "torch_test_cpp_extension.maia", + ["maia_extension.cpp"], extra_compile_args=CXX_FLAGS, ), CppExtension( diff --git a/test/forward_backward_compatibility/check_forward_backward_compatibility.py b/test/forward_backward_compatibility/check_forward_backward_compatibility.py index 5a4aac572c..093e27154e 100644 --- a/test/forward_backward_compatibility/check_forward_backward_compatibility.py +++ b/test/forward_backward_compatibility/check_forward_backward_compatibility.py @@ -46,6 +46,7 @@ ALLOW_LIST = [ ("prim::ModuleDictIndex", datetime.date(9999, 1, 1)), ("prim::MKLDNNRelu6", datetime.date(9999, 1, 1)), ("prim::MKLDNNRelu6_", datetime.date(9999, 1, 1)), + ("prim::is_ort", datetime.date(9999, 1, 1)), 
("prim::Concat", datetime.date(9999, 1, 1)), ("aten::_NestedTensor_GeneralizedBMM", datetime.date(9999, 1, 1)), # Internal, profiler-specific ops diff --git a/test/test_cpp_extensions_aot.py b/test/test_cpp_extensions_aot.py index 1d5df82a12..3e5ce5cfce 100644 --- a/test/test_cpp_extensions_aot.py +++ b/test/test_cpp_extensions_aot.py @@ -26,11 +26,11 @@ except ImportError as e: try: if HAS_PYTEST: cpp_extension = pytest.importorskip("torch_test_cpp_extension.cpp") - ort_extension = pytest.importorskip("torch_test_cpp_extension.ort") + maia_extension = pytest.importorskip("torch_test_cpp_extension.maia") rng_extension = pytest.importorskip("torch_test_cpp_extension.rng") else: import torch_test_cpp_extension.cpp as cpp_extension - import torch_test_cpp_extension.ort as ort_extension + import torch_test_cpp_extension.maia as maia_extension import torch_test_cpp_extension.rng as rng_extension except ImportError as e: raise RuntimeError( @@ -255,46 +255,46 @@ class TestPybindTypeCasters(common.TestCase): @torch.testing._internal.common_utils.markDynamoStrictTest -class TestORTTensor(common.TestCase): +class TestMAIATensor(common.TestCase): def test_unregistered(self): a = torch.arange(0, 10, device="cpu") with self.assertRaisesRegex(RuntimeError, "Could not run"): - b = torch.arange(0, 10, device="ort") + b = torch.arange(0, 10, device="maia") - @skipIfTorchDynamo("dynamo cannot model ort device") + @skipIfTorchDynamo("dynamo cannot model maia device") def test_zeros(self): a = torch.empty(5, 5, device="cpu") self.assertEqual(a.device, torch.device("cpu")) - b = torch.empty(5, 5, device="ort") - self.assertEqual(b.device, torch.device("ort", 0)) - self.assertEqual(ort_extension.get_test_int(), 0) + b = torch.empty(5, 5, device="maia") + self.assertEqual(b.device, torch.device("maia", 0)) + self.assertEqual(maia_extension.get_test_int(), 0) self.assertEqual(torch.get_default_dtype(), b.dtype) - c = torch.empty((5, 5), dtype=torch.int64, device="ort") - self.assertEqual(ort_extension.get_test_int(), 0) + c = torch.empty((5, 5), dtype=torch.int64, device="maia") + self.assertEqual(maia_extension.get_test_int(), 0) self.assertEqual(torch.int64, c.dtype) def test_add(self): - a = torch.empty(5, 5, device="ort", requires_grad=True) - self.assertEqual(ort_extension.get_test_int(), 0) + a = torch.empty(5, 5, device="maia", requires_grad=True) + self.assertEqual(maia_extension.get_test_int(), 0) - b = torch.empty(5, 5, device="ort") - self.assertEqual(ort_extension.get_test_int(), 0) + b = torch.empty(5, 5, device="maia") + self.assertEqual(maia_extension.get_test_int(), 0) c = a + b - self.assertEqual(ort_extension.get_test_int(), 1) + self.assertEqual(maia_extension.get_test_int(), 1) def test_conv_backend_override(self): # To simplify tests, we use 4d input here to avoid doing view4d( which # needs more overrides) in _convolution. 
- input = torch.empty(2, 4, 10, 2, device="ort", requires_grad=True) - weight = torch.empty(6, 4, 2, 2, device="ort", requires_grad=True) - bias = torch.empty(6, device="ort") + input = torch.empty(2, 4, 10, 2, device="maia", requires_grad=True) + weight = torch.empty(6, 4, 2, 2, device="maia", requires_grad=True) + bias = torch.empty(6, device="maia") # Make sure forward is overriden out = torch.nn.functional.conv2d(input, weight, bias, 2, 0, 1, 1) - self.assertEqual(ort_extension.get_test_int(), 2) + self.assertEqual(maia_extension.get_test_int(), 2) self.assertEqual(out.shape[0], input.shape[0]) self.assertEqual(out.shape[1], weight.shape[0]) @@ -302,7 +302,7 @@ class TestORTTensor(common.TestCase): # Double backward is dispatched to _convolution_double_backward. # It is not tested here as it involves more computation/overrides. grad = torch.autograd.grad(out, input, out, create_graph=True) - self.assertEqual(ort_extension.get_test_int(), 3) + self.assertEqual(maia_extension.get_test_int(), 3) self.assertEqual(grad[0].shape, input.shape) diff --git a/tools/pyi/gen_pyi.py b/tools/pyi/gen_pyi.py index 369f1504bf..f0b9044c6f 100644 --- a/tools/pyi/gen_pyi.py +++ b/tools/pyi/gen_pyi.py @@ -1160,7 +1160,7 @@ def gen_pyi( "is_meta": ["is_meta: _bool"], "is_mps": ["is_mps: _bool"], "is_mtia": ["is_mtia: _bool"], - "is_ort": ["is_ort: _bool"], + "is_maia": ["is_maia: _bool"], "is_mkldnn": ["is_mkldnn: _bool"], "is_vulkan": ["is_vulkan: _bool"], "is_ipu": ["is_ipu: _bool"], diff --git a/torch/_C/_autograd.pyi b/torch/_C/_autograd.pyi index 2c50a28bfb..34eb451be0 100644 --- a/torch/_C/_autograd.pyi +++ b/torch/_C/_autograd.pyi @@ -22,7 +22,7 @@ class DeviceType(Enum): IDEEP = ... HIP = ... FPGA = ... - ORT = ... + MAIA = ... XLA = ... MPS = ... HPU = ... diff --git a/torch/_tensor.py b/torch/_tensor.py index 0ce59ca924..4ae1ff943c 100644 --- a/torch/_tensor.py +++ b/torch/_tensor.py @@ -101,7 +101,7 @@ class Tensor(torch._C.TensorBase): if ( self.is_sparse or self.device.type - in ["lazy", "xla", "mtia", "mps", "ort", "meta", "ipu"] + in ["lazy", "xla", "mtia", "mps", "maia", "meta", "ipu"] or ( not torch._C._has_storage(self) and self.device.type == torch._C._get_privateuse1_backend_name() @@ -249,7 +249,7 @@ class Tensor(torch._C.TensorBase): # See Note [Don't serialize hooks] torch.utils.hooks.warn_if_has_hooks(self) backward_hooks: Dict[Any, Any] = OrderedDict() - # Note: Numpy array is chosen to be the rebuild component for XLA, MTIA, ORT Tensors. + # Note: Numpy array is chosen to be the rebuild component for XLA, MTIA, MAIA Tensors. # We considered a few options: # 1. CPU tensor can't be used here. # Otherwise in torch.load CPU storage is reconstructed with randomly @@ -259,7 +259,7 @@ class Tensor(torch._C.TensorBase): # 2. Python list is not a good fit due to performance reason. # `tolist()` converts every single element in the tensor into python objects # and serialize them one by one. 
- if self.device.type in ["xla", "mtia", "ort"] or ( + if self.device.type in ["xla", "mtia", "maia"] or ( not torch._C._has_storage(self) and self.device.type == torch._C._get_privateuse1_backend_name() ): diff --git a/torch/csrc/Storage.cpp b/torch/csrc/Storage.cpp index c22e6f5d1b..a3f8263303 100644 --- a/torch/csrc/Storage.cpp +++ b/torch/csrc/Storage.cpp @@ -355,7 +355,7 @@ static PyObject* THPStorage_pynew( } else if (device.type() == at::DeviceType::PrivateUse1) { at::globalContext().lazyInitPrivateUse1(); allocator = c10::GetAllocator(device.type()); - } else if (device.type() == at::DeviceType::ORT) { + } else if (device.type() == at::DeviceType::MAIA) { allocator = c10::GetAllocator(device.type()); } else { // NOLINTEND(bugprone-branch-clone) diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp index 2bea7c4cda..8edf23cd2e 100644 --- a/torch/csrc/autograd/init.cpp +++ b/torch/csrc/autograd/init.cpp @@ -162,7 +162,7 @@ PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused) { .value("IDEEP", c10::DeviceType::IDEEP) .value("HIP", c10::DeviceType::HIP) .value("FPGA", c10::DeviceType::FPGA) - .value("ORT", c10::DeviceType::ORT) + .value("MAIA", c10::DeviceType::MAIA) .value("XLA", c10::DeviceType::XLA) .value("Vulkan", c10::DeviceType::Vulkan) .value("Metal", c10::DeviceType::Metal) diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp index ea55bb55dd..3705ac5e42 100644 --- a/torch/csrc/autograd/python_variable.cpp +++ b/torch/csrc/autograd/python_variable.cpp @@ -1451,13 +1451,13 @@ PyObject* THPVariable_is_mps(THPVariable* self, void* unused) { END_HANDLE_TH_ERRORS } -PyObject* THPVariable_is_ort(THPVariable* self, void* unused) { +PyObject* THPVariable_is_maia(THPVariable* self, void* unused) { HANDLE_TH_ERRORS if (check_has_torch_function((PyObject*)self)) { - return handle_torch_function_getter(self, "is_ort"); + return handle_torch_function_getter(self, "is_maia"); } auto& self_ = THPVariable_Unpack(self); - return torch::autograd::utils::wrap(self_.is_ort()); + return torch::autograd::utils::wrap(self_.is_maia()); END_HANDLE_TH_ERRORS } @@ -1674,7 +1674,7 @@ static struct PyGetSetDef THPVariable_properties[] = { nullptr}, {"is_mkldnn", (getter)THPVariable_is_mkldnn, nullptr, nullptr, nullptr}, {"is_mps", (getter)THPVariable_is_mps, nullptr, nullptr, nullptr}, - {"is_ort", (getter)THPVariable_is_ort, nullptr, nullptr, nullptr}, + {"is_maia", (getter)THPVariable_is_maia, nullptr, nullptr, nullptr}, {"is_vulkan", (getter)THPVariable_is_vulkan, nullptr, nullptr, nullptr}, {"is_complex", (getter)THPVariable_is_complex, nullptr, nullptr, nullptr}, {"is_quantized", diff --git a/torch/csrc/jit/frontend/sugared_value.cpp b/torch/csrc/jit/frontend/sugared_value.cpp index e9f090cfbb..80b5d27fba 100644 --- a/torch/csrc/jit/frontend/sugared_value.cpp +++ b/torch/csrc/jit/frontend/sugared_value.cpp @@ -145,7 +145,7 @@ std::shared_ptr<SugaredValue> SimpleValue::attr( {"H", "prim"}, {"mT", "aten"}, {"mH", "aten"}, - {"is_ort", "prim"}, + {"is_maia", "prim"}, {"itemsize", "prim"}, {"nbytes", "prim"}, {"ndim", "prim"}, diff --git a/torch/csrc/jit/runtime/register_prim_ops.cpp b/torch/csrc/jit/runtime/register_prim_ops.cpp index 4d8a0cd89d..cec9c70bc7 100644 --- a/torch/csrc/jit/runtime/register_prim_ops.cpp +++ b/torch/csrc/jit/runtime/register_prim_ops.cpp @@ -2431,11 +2431,11 @@ static const std::vector<OperatorGeneratorArgs> opGenArgs1{ }, aliasAnalysisFromSchema()), OperatorGeneratorArgs( - 
TORCH_SELECTIVE_SCHEMA("prim::is_ort(Tensor a) -> bool"), + TORCH_SELECTIVE_SCHEMA("prim::is_maia(Tensor a) -> bool"), [](Stack& stack) { at::Tensor a; pop(stack, a); - push(stack, a.is_ort()); + push(stack, a.is_maia()); }, aliasAnalysisFromSchema()), OperatorGeneratorArgs( diff --git a/torch/library.h b/torch/library.h index fcac0e8094..c38179a6ee 100644 --- a/torch/library.h +++ b/torch/library.h @@ -370,8 +370,8 @@ inline CppFunction dispatch(c10::DeviceType type, Func&& raw_f) { return c10::DispatchKey::Meta; case c10::DeviceType::HIP: return c10::DispatchKey::HIP; - case c10::DeviceType::ORT: - return c10::DispatchKey::ORT; + case c10::DeviceType::MAIA: + return c10::DispatchKey::MAIA; case c10::DeviceType::HPU: return c10::DispatchKey::HPU; case c10::DeviceType::MTIA: diff --git a/torch/overrides.py b/torch/overrides.py index 6a5d3e891d..9f99ee0c54 100644 --- a/torch/overrides.py +++ b/torch/overrides.py @@ -1283,7 +1283,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]: Tensor.is_mps.__get__: lambda self: -1, Tensor.is_mtia.__get__: lambda self: -1, Tensor.is_nested.__get__: lambda self: -1, - Tensor.is_ort.__get__: lambda self: -1, + Tensor.is_maia.__get__: lambda self: -1, Tensor.is_mkldnn.__get__: lambda self: -1, Tensor.is_quantized.__get__: lambda self: -1, Tensor.is_sparse.__get__: lambda self: -1, diff --git a/torchgen/model.py b/torchgen/model.py index 7b0dd8cc1f..2706f234c5 100644 --- a/torchgen/model.py +++ b/torchgen/model.py @@ -79,7 +79,7 @@ class DispatchKey(Enum): CatchAll = Undefined FPGA = auto() - ORT = auto() + MAIA = auto() Vulkan = auto() Metal = auto() MKLDNN = auto()
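For orientation, a minimal sketch of the user-visible surface this rename touches. It is not part of the patch; the `device="maia"` lines are commented out because they assume the `torch_test_cpp_extension.maia` test extension exercised above is built and loaded.

```python
import torch

t = torch.empty(2, 2)
# The former Tensor.is_ort property is exposed as Tensor.is_maia after this change.
assert not t.is_maia

# Allocating on the backend itself needs a registered MAIA implementation,
# e.g. the torch_test_cpp_extension.maia test extension from this diff:
# m = torch.empty(5, 5, device="maia")
# assert m.device == torch.device("maia", 0)
```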
2.41.0
a61c9cb299b6dfebc57dc9d8821c34321d568ab
Tue, 23 Apr 2024 00:43:50 +0000
[PATCH 0497/1000] [Distributed] [5/N] Fix clang-tidy warnings in torch/csrc/distributed/c10d (#124043)
This PR continues to fix some clang-tidy warnings in distributed/c10d code, following https://github.com/pytorch/pytorch/pull/124032. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124043 Approved by: https://github.com/ezyang
diff --git a/torch/csrc/distributed/c10d/Backend.hpp b/torch/csrc/distributed/c10d/Backend.hpp index 3931a06b9a..c9e8aec439 100644 --- a/torch/csrc/distributed/c10d/Backend.hpp +++ b/torch/csrc/distributed/c10d/Backend.hpp @@ -1,10 +1,6 @@ #pragma once -#include <condition_variable> #include <memory> -#include <mutex> -#include <stdexcept> -#include <unordered_map> #include <utility> #include <vector> diff --git a/torch/csrc/distributed/c10d/Functional.cpp b/torch/csrc/distributed/c10d/Functional.cpp index 5728774f74..d633429bb3 100644 --- a/torch/csrc/distributed/c10d/Functional.cpp +++ b/torch/csrc/distributed/c10d/Functional.cpp @@ -15,9 +15,9 @@ class WorkRegistry { void register_work( const at::Tensor& tensor, const c10::intrusive_ptr<c10d::Work>& work) { - const auto storage = tensor.storage().getWeakStorageImpl(); + auto storage = tensor.storage().getWeakStorageImpl(); std::unique_lock lock(lock_); - auto [it, inserted] = registry_.emplace(storage, work); + auto [it, inserted] = registry_.try_emplace(std::move(storage), work); TORCH_CHECK( inserted || it->second != work, "The tensor storage is already associated with another work."); diff --git a/torch/csrc/distributed/c10d/HashStore.hpp b/torch/csrc/distributed/c10d/HashStore.hpp index b691de302a..1453c0a728 100644 --- a/torch/csrc/distributed/c10d/HashStore.hpp +++ b/torch/csrc/distributed/c10d/HashStore.hpp @@ -1,7 +1,5 @@ #pragma once -#include <sys/types.h> - #include <condition_variable> #include <mutex> #include <unordered_map> diff --git a/torch/csrc/distributed/c10d/Ops.cpp b/torch/csrc/distributed/c10d/Ops.cpp index 736f231350..32e0afc6a8 100644 --- a/torch/csrc/distributed/c10d/Ops.cpp +++ b/torch/csrc/distributed/c10d/Ops.cpp @@ -465,7 +465,7 @@ allreduce_sparse_cuda_( ->allreduce_sparse( tensor_vec, AllreduceOptions{ - *reduce_op.get(), + *reduce_op, std::chrono::milliseconds(timeout), sparse_indices}); diff --git a/torch/csrc/distributed/c10d/ParamCommsUtils.cpp b/torch/csrc/distributed/c10d/ParamCommsUtils.cpp index 7568a76a0b..fe12092ee9 100644 --- a/torch/csrc/distributed/c10d/ParamCommsUtils.cpp +++ b/torch/csrc/distributed/c10d/ParamCommsUtils.cpp @@ -22,7 +22,7 @@ ParamCommsDebugInfo::ParamCommsDebugInfo( : pgName_(pgName), rank_(rank), worldSize_(worldSize), - collectiveName_(collName), + collectiveName_(std::move(collName)), inMessageNelems_(inNelems), outMessageNelems_(outNelems), dType_(dType), diff --git a/torch/csrc/distributed/c10d/PrefixStore.hpp b/torch/csrc/distributed/c10d/PrefixStore.hpp index b74d182b13..19098f0c38 100644 --- a/torch/csrc/distributed/c10d/PrefixStore.hpp +++ b/torch/csrc/distributed/c10d/PrefixStore.hpp @@ -1,7 +1,6 @@ #pragma once #include <torch/csrc/distributed/c10d/Store.hpp> -#include <memory> namespace c10d { diff --git a/torch/csrc/distributed/c10d/ProcessGroup.hpp b/torch/csrc/distributed/c10d/ProcessGroup.hpp index f8dff7ec12..dcb6d15547 100644 --- a/torch/csrc/distributed/c10d/ProcessGroup.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroup.hpp @@ -1,10 +1,7 @@ #pragma once #include <torch/csrc/distributed/c10d/Backend.hpp> -#include <condition_variable> #include <memory> -#include <mutex> -#include <stdexcept> #include <unordered_map> #include <utility> #include <vector> diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp index b0620f9667..3bfbbeaa2b 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp @@ -6,7 +6,6 @@ #include <deque> #include <mutex> 
#include <thread> -#include <unordered_map> #include <vector> #include <gloo/algorithm.h> diff --git a/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp b/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp index 939f120268..90031f4a93 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp @@ -3,7 +3,6 @@ #ifdef USE_C10D_MPI #include <iostream> -#include <limits> #include <map> #include <c10/core/DeviceGuard.h> @@ -107,7 +106,8 @@ c10::intrusive_ptr<c10::ivalue::Future> ProcessGroupMPI::WorkMPI::getFuture() { return future_; } -void ProcessGroupMPI::WorkMPI::finishWorkMPIError(std::exception_ptr eptr) { +void ProcessGroupMPI::WorkMPI::finishWorkMPIError( + const std::exception_ptr& eptr) { future_->setError(eptr); finish(eptr); } @@ -132,7 +132,7 @@ ProcessGroupMPI::AsyncWork::~AsyncWork() { if (request_ != MPI_REQUEST_NULL) { std::cerr << "Attempted destruction of AsyncWork before work has completed, " - << "terminating the program." << std::endl; + << "terminating the program." << '\n'; std::terminate(); } } @@ -210,7 +210,7 @@ std::vector<at::Tensor> ProcessGroupMPI::AsyncWork::result() { } void ProcessGroupMPI::AsyncWork::populateException() { - std::array<char, MPI_MAX_ERROR_STRING> buf; + std::array<char, MPI_MAX_ERROR_STRING> buf{}; int len = buf.size(); MPI_CHECK(MPI_Error_string(status_.MPI_ERROR, buf.data(), &len)); exception_ = @@ -267,8 +267,8 @@ c10::intrusive_ptr<ProcessGroupMPI> ProcessGroupMPI::createProcessGroupMPI( // If no ranks are specified, assume we're creating the root group if (!ranks.empty()) { - MPI_Group worldGroup; - MPI_Group ranksGroup; + MPI_Group worldGroup = nullptr; + MPI_Group ranksGroup = nullptr; MPI_CHECK(MPI_Comm_group(MPI_COMM_WORLD, &worldGroup)); MPI_CHECK( MPI_Group_incl(worldGroup, ranks.size(), ranks.data(), &ranksGroup)); @@ -383,7 +383,7 @@ c10::intrusive_ptr<Work> ProcessGroupMPI::enqueue( auto work = c10::make_intrusive<WorkMPI>(entry->dst, profilingTitle, inputTensors); std::unique_lock<std::mutex> lock(pgMutex_); - queue_.push_back(std::make_tuple(std::move(entry), work)); + queue_.emplace_back(std::move(entry), work); lock.unlock(); queueProduceCV_.notify_one(); return work; @@ -539,7 +539,7 @@ c10::intrusive_ptr<Work> ProcessGroupMPI::gather( checkSingleTensor(inputTensors); if (rank_ != opts.rootRank) { - if (outputTensors.size() > 0) { + if (!outputTensors.empty()) { TORCH_CHECK( false, "Gather: number of output tensors should be 0 " @@ -615,7 +615,7 @@ c10::intrusive_ptr<Work> ProcessGroupMPI::scatter( checkSingleTensor(outputTensors); if (rank_ != opts.rootRank) { - if (inputTensors.size() > 0) { + if (!inputTensors.empty()) { TORCH_CHECK( false, "Scatter: number of input tensors should be 0 " @@ -670,7 +670,7 @@ c10::intrusive_ptr<Work> ProcessGroupMPI::scatter( return enqueue( std::move(entry), "mpi:scatter", - inputTensors.size() > 0 + !inputTensors.empty() ? c10::optional<std::vector<at::Tensor>>(inputTensors[0]) : c10::nullopt); } else { @@ -679,7 +679,7 @@ c10::intrusive_ptr<Work> ProcessGroupMPI::scatter( return enqueue( std::move(entry), "mpi:scatter", - inputTensors.size() > 0 + !inputTensors.empty() ? 
c10::optional<std::vector<at::Tensor>>(inputTensors[0]) : c10::nullopt); } @@ -701,7 +701,7 @@ c10::intrusive_ptr<Work> ProcessGroupMPI::alltoall_base( checkSingleTensorHelper(inputTensor); checkSingleTensorHelper(outputTensor); - if (outputSplitSizes.size() == 0 && inputSplitSizes.size() == 0) { + if (outputSplitSizes.empty() && inputSplitSizes.empty()) { // We can use alltoall TORCH_CHECK( outputTensor.numel() == inputTensor.numel() && diff --git a/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp b/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp index e92f195c36..dd586dda70 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupMPI.hpp @@ -102,7 +102,7 @@ class TORCH_API ProcessGroupMPI : public Backend { private: void finishWorkMPI(); - void finishWorkMPIError(std::exception_ptr eptr); + void finishWorkMPIError(const std::exception_ptr& eptr); std::vector<at::Tensor> outputTensors_; c10::intrusive_ptr<at::ivalue::Future> future_; @@ -137,7 +137,7 @@ class TORCH_API ProcessGroupMPI : public Backend { private: const std::vector<at::Tensor> outputTensors_; MPI_Request request_; - MPI_Status status_; + MPI_Status status_{}; }; // Constructor will spawn up the worker thread loop diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp index 4845006b56..573779ae39 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp @@ -1,13 +1,11 @@ -#include <torch/csrc/distributed/c10d/NCCLUtils.hpp> -#include <torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp> -#include <fstream> -#include <mutex> -#include <sstream> #ifdef USE_C10D_NCCL #include <exception> +#include <fstream> #include <map> +#include <mutex> +#include <sstream> #include <stdexcept> #include <tuple> #include <unordered_set> @@ -25,8 +23,10 @@ #include <c10/util/Optional.h> #include <c10/util/irange.h> #include <torch/csrc/cuda/nccl.h> +#include <torch/csrc/distributed/c10d/NCCLUtils.hpp> #include <torch/csrc/distributed/c10d/ParamCommsUtils.hpp> #include <torch/csrc/distributed/c10d/PrefixStore.hpp> +#include <torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp> #include <torch/csrc/distributed/c10d/TraceUtils.h> #include <torch/csrc/distributed/c10d/Utils.hpp> #include <torch/csrc/distributed/c10d/logger.hpp> diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp index 30bcee1992..fac9b6f382 100644 --- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp +++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.hpp @@ -1,5 +1,7 @@ #pragma once +#ifdef USE_C10D_NCCL + #if defined(__linux__) #include <fcntl.h> #include <sys/stat.h> @@ -7,8 +9,6 @@ #include <unistd.h> #endif -#ifdef USE_C10D_NCCL - #include <atomic> #include <chrono> #include <future> diff --git a/torch/csrc/distributed/c10d/Store.hpp b/torch/csrc/distributed/c10d/Store.hpp index 3c0ae960ff..525440e767 100644 --- a/torch/csrc/distributed/c10d/Store.hpp +++ b/torch/csrc/distributed/c10d/Store.hpp @@ -2,7 +2,6 @@ #include <chrono> #include <cstdint> -#include <stdexcept> #include <string> #include <vector> diff --git a/torch/csrc/distributed/c10d/TCPStore.cpp b/torch/csrc/distributed/c10d/TCPStore.cpp index fe18dd5355..a95f0ebdb1 100644 --- a/torch/csrc/distributed/c10d/TCPStore.cpp +++ b/torch/csrc/distributed/c10d/TCPStore.cpp @@ -5,13 +5,9 @@ #include <torch/csrc/distributed/c10d/logging.h> #include <fcntl.h> -#include <algorithm> -#include <array> #include 
<chrono> #include <fstream> #include <random> -#include <streambuf> -#include <system_error> #include <thread> #include <unordered_map> #include <utility> diff --git a/torch/csrc/distributed/c10d/TCPStoreBackend.hpp b/torch/csrc/distributed/c10d/TCPStoreBackend.hpp index 84aac61d02..10cff9552d 100644 --- a/torch/csrc/distributed/c10d/TCPStoreBackend.hpp +++ b/torch/csrc/distributed/c10d/TCPStoreBackend.hpp @@ -1,8 +1,6 @@ #pragma once -#include <chrono> #include <thread> -#include <vector> #include <torch/csrc/distributed/c10d/TCPStore.hpp> #include <torch/csrc/distributed/c10d/socket.h> diff --git a/torch/csrc/distributed/c10d/Types.hpp b/torch/csrc/distributed/c10d/Types.hpp index 423b959803..fab819798e 100644 --- a/torch/csrc/distributed/c10d/Types.hpp +++ b/torch/csrc/distributed/c10d/Types.hpp @@ -67,9 +67,11 @@ struct TORCH_API ReduceOp : torch::CustomClassHolder { // The heap resource supplement_, if it exists, is managed by a // c10::intrusive_ptr, so constructors and operator= can be simple ReduceOp(const ReduceOp& other) = default; - ReduceOp& operator=(const ReduceOp& other) = default; + ReduceOp(ReduceOp&& other) = default; + ReduceOp& operator=(ReduceOp&& other) = default; + operator RedOpType() const { return op_; } diff --git a/torch/csrc/distributed/c10d/Utils.hpp b/torch/csrc/distributed/c10d/Utils.hpp index 673f16f0aa..3e9bdf8d6f 100644 --- a/torch/csrc/distributed/c10d/Utils.hpp +++ b/torch/csrc/distributed/c10d/Utils.hpp @@ -21,14 +21,10 @@ typedef SSIZE_T ssize_t; #include <sys/types.h> -#include <chrono> #include <cstdint> #include <cstdlib> #include <functional> -#include <limits> #include <string> -#include <system_error> -#include <tuple> #include <vector> namespace c10d { diff --git a/torch/csrc/distributed/c10d/Work.hpp b/torch/csrc/distributed/c10d/Work.hpp index f6c9c9be5e..d106183231 100644 --- a/torch/csrc/distributed/c10d/Work.hpp +++ b/torch/csrc/distributed/c10d/Work.hpp @@ -1,7 +1,8 @@ #pragma once #include <ATen/ATen.h> -#include <stdexcept> +#include <chrono> +#include <mutex> #include <vector> constexpr auto kNoTimeout = std::chrono::milliseconds(0); diff --git a/torch/csrc/distributed/c10d/logger.hpp b/torch/csrc/distributed/c10d/logger.hpp index 2ab7be9d03..d2949a4f67 100644 --- a/torch/csrc/distributed/c10d/logger.hpp +++ b/torch/csrc/distributed/c10d/logger.hpp @@ -1,7 +1,6 @@ #include <c10/util/Logging.h> #include <torch/csrc/distributed/c10d/reducer.hpp> -#include <mutex> #include <utility> namespace c10d { diff --git a/torch/csrc/distributed/c10d/logging.cpp b/torch/csrc/distributed/c10d/logging.cpp index 8ded400535..5d05b5a3a5 100644 --- a/torch/csrc/distributed/c10d/logging.cpp +++ b/torch/csrc/distributed/c10d/logging.cpp @@ -8,8 +8,7 @@ #include <torch/csrc/distributed/c10d/debug.h> -namespace c10d { -namespace detail { +namespace c10d::detail { bool isLogLevelEnabled(LogLevel level) noexcept { // c10 logger does not support debug and trace levels. In order to map higher @@ -35,5 +34,4 @@ bool isLogLevelEnabled(LogLevel level) noexcept { return false; } -} // namespace detail -} // namespace c10d +} // namespace c10d::detail diff --git a/torch/csrc/distributed/c10d/sequence_num.hpp b/torch/csrc/distributed/c10d/sequence_num.hpp index 50c800e8d7..8c80642f42 100644 --- a/torch/csrc/distributed/c10d/sequence_num.hpp +++ b/torch/csrc/distributed/c10d/sequence_num.hpp @@ -3,6 +3,7 @@ #include <c10/macros/Macros.h> #include <c10/util/Optional.h> #include <c10/util/irange.h> +#include <mutex> #include <vector> namespace c10d {
2.41.0
75ec25f55ecc88d9dfd36ae556ecf9f8016dabd
Tue, 23 Apr 2024 01:35:07 +0000
[PATCH 0498/1000] Add missing aten::sort.any op for assistant LM models (#123982)
Differential Revision: D56084098 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123982 Approved by: https://github.com/JacobSzwejbka
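As a rough illustration (mine, not from the patch), this is the kind of scripted code these operators back: sorting a list of tuples, which, if the registration is read correctly, has no specialized `listSort<T>` kernel and so resolves to the generic `.any` overloads being registered here.

```python
from typing import List, Tuple

import torch


@torch.jit.script
def rank(pairs: List[Tuple[int, str]]) -> List[Tuple[int, str]]:
    # sorted() on a list of tuples falls through to aten::sorted.any;
    # pairs.sort() would hit the in-place aten::sort.any variant.
    return sorted(pairs)


print(rank([(3, "c"), (1, "a"), (2, "b")]))  # [(1, 'a'), (2, 'b'), (3, 'c')]
```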
diff --git a/torch/csrc/jit/runtime/register_prim_ops.cpp b/torch/csrc/jit/runtime/register_prim_ops.cpp index cec9c70bc7..30ca033c51 100644 --- a/torch/csrc/jit/runtime/register_prim_ops.cpp +++ b/torch/csrc/jit/runtime/register_prim_ops.cpp @@ -81,6 +81,110 @@ c10::List<std::string> splitNoneSeparator(const std::string& string) { return splits; } +bool isSortableTupleType( + const TupleTypePtr& tuple_type, + std::stringstream& why_not) { + for (const TypePtr& ele_type : tuple_type->containedTypes()) { + switch (ele_type->kind()) { + case TypeKind::IntType: + case TypeKind::BoolType: + case TypeKind::FloatType: + case TypeKind::StringType: + case TypeKind::TensorType: + continue; + case TypeKind::TupleType: + if (!isSortableTupleType(ele_type->expect<TupleType>(), why_not)) { + return false; + } + continue; + case TypeKind::ClassType: + if (!c10::checkObjectSortSchema( + ele_type->expect<ClassType>(), why_not)) { + return false; + } + continue; + default: + why_not << "Contained elements in " << *tuple_type + << " are not sortable. Only Int, Bool, Float, String, Tensor, " + << "a User Defined Class with __lt__ method defined or Tuples " + << "of aforementionted types can be sorted."; + return false; + } + } + + return true; +} + +bool isSortableListOfObjectsOrTuples( + c10::List<IValue>& ivalues, + std::stringstream& why_not) { + if (ivalues.empty()) { + return true; + } + + auto type = ivalues.get(0).type(); + // We assume lists have homogenous types, use first element to determine + // best sorting methods. If in the future we need to support heterogenous + // types inside list, then sorting needs to have runtime sortable checks. + const size_t n = ivalues.size(); + for (const auto i : c10::irange(n)) { + const IValue& v = ivalues.get(i); + auto curr_type = v.type(); + if (*curr_type != *type) { + why_not << "Only values of same type can be compared. " + << "Found " << type->repr_str() << " and " + << curr_type->repr_str(); + return false; + } + } + + if (auto tuple_type = type->cast<TupleType>()) { + return isSortableTupleType(tuple_type, why_not); + } + + if (auto class_type = type->cast<ClassType>()) { + return c10::checkObjectSortSchema(class_type, why_not) != nullptr; + } + + // Basic types like tensors/ints/floats/bools/strs are not checked in this + // method because they should have been schema matched to specialized + // aten::sort kernels using listSort<T>. + why_not << "Only list of Tensors, ints, floats, bools, strs, " + << "a User Defined Class that defines the __lt__ compare method " + << "or Tuples of aforementioned types can be sorted, got list of " + << type->repr_str() << "\n"; + return false; +} + +template <bool has_reverse_arg, bool copy_return_list> +void sort_op(Stack& stack) { + bool reverse = has_reverse_arg ? 
pop(stack).toBool() : false; + auto g_list = pop(stack).toList(); + + if (copy_return_list) { + g_list = g_list.copy(); + } + + if (!g_list.empty()) { + std::stringstream error_str; + if (!isSortableListOfObjectsOrTuples(g_list, error_str)) { + throw std::runtime_error(error_str.str()); + } + + c10::IValueComparator comparator; + if (reverse) { + comparator = c10::getGreaterThanComparator(g_list.get(0)); + } else { + comparator = c10::getLessThanComparator(g_list.get(0)); + } + std::sort(g_list.begin(), g_list.end(), comparator); + } + + if (copy_return_list) { + push(stack, g_list); + } +} + template <typename T, typename U> auto powWrapper(T a, U b) { TORCH_CHECK( @@ -2878,6 +2982,15 @@ static const std::vector<OperatorGeneratorArgs> opGenArgs2{ TORCH_SELECTIVE_SCHEMA("aten::ne.str_list(str[] a, str[] b) -> bool"), listNe<std::string>, aliasAnalysisFromSchema()), + OperatorGeneratorArgs( + TORCH_SELECTIVE_SCHEMA("aten::sorted.any(t[](a) self) -> (t[])"), + sort_op</*has_reverse_arg*/ false, /*copy_return_list*/ true>, + aliasAnalysisFromSchema()), + OperatorGeneratorArgs( + TORCH_SELECTIVE_SCHEMA( + "aten::sort.any(t[](a!) self, bool reverse=False) -> ()"), + sort_op</*has_reverse_arg*/ true, /*copy_return_list*/ false>, + aliasAnalysisFromSchema()), #define DEFINE_CONVERT_BASE_OP(op_name, prefix, char_op) \ OperatorGeneratorArgs( \ diff --git a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp index ac2dd62e64..d48a981666 100644 --- a/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp +++ b/torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp @@ -384,122 +384,6 @@ C10_UNUSED void hashValue(Stack& stack) { push(stack, value.hash()); } -bool isSortableTupleType( - const TupleTypePtr& tuple_type, - std::stringstream& why_not) { - for (const TypePtr& ele_type : tuple_type->containedTypes()) { - switch (ele_type->kind()) { - case TypeKind::IntType: - case TypeKind::BoolType: - case TypeKind::FloatType: - case TypeKind::StringType: - case TypeKind::TensorType: - continue; - case TypeKind::TupleType: - if (!isSortableTupleType(ele_type->expect<TupleType>(), why_not)) { - return false; - } - continue; - case TypeKind::ClassType: - if (!c10::checkObjectSortSchema( - ele_type->expect<ClassType>(), why_not)) { - return false; - } - continue; - default: - why_not << "Contained elements in " << *tuple_type - << " are not sortable. Only Int, Bool, Float, String, Tensor, " - << "a User Defined Class with __lt__ method defined or Tuples " - << "of aforementionted types can be sorted."; - return false; - } - } - - return true; -} - -bool isSortableListOfObjectsOrTuples( - c10::List<IValue>& ivalues, - std::stringstream& why_not) { - if (ivalues.empty()) { - return true; - } - - auto type = ivalues.get(0).type(); - // We assume lists have homogenous types, use first element to determine - // best sorting methods. If in the future we need to support heterogenous - // types inside list, then sorting needs to have runtime sortable checks. - const size_t n = ivalues.size(); - for (const auto i : c10::irange(n)) { - const IValue& v = ivalues.get(i); - auto curr_type = v.type(); - if (*curr_type != *type) { - why_not << "Only values of same type can be compared. 
" - << "Found " << type->repr_str() << " and " - << curr_type->repr_str(); - return false; - } - } - - if (auto tuple_type = type->cast<TupleType>()) { - return isSortableTupleType(tuple_type, why_not); - } - - if (auto class_type = type->cast<ClassType>()) { - return c10::checkObjectSortSchema(class_type, why_not) != nullptr; - } - - // Basic types like tensors/ints/floats/bools/strs are not checked in this - // method because they should have been schema matched to specialized - // aten::sort kernels using listSort<T>. - why_not << "Only list of Tensors, ints, floats, bools, strs, " - << "a User Defined Class that defines the __lt__ compare method " - << "or Tuples of aforementioned types can be sorted, got list of " - << type->repr_str() << "\n"; - return false; -} - -template <bool has_reverse_arg, bool copy_return_list> -void sort_op(Stack& stack) { - bool reverse = has_reverse_arg ? pop(stack).toBool() : false; - auto g_list = pop(stack).toList(); - - if (copy_return_list) { - g_list = g_list.copy(); - } - - if (!g_list.empty()) { - std::stringstream error_str; - if (!isSortableListOfObjectsOrTuples(g_list, error_str)) { - throw std::runtime_error(error_str.str()); - } - - c10::IValueComparator comparator; - if (reverse) { - comparator = c10::getGreaterThanComparator(g_list.get(0)); - } else { - comparator = c10::getLessThanComparator(g_list.get(0)); - } - std::sort(g_list.begin(), g_list.end(), comparator); - } - - if (copy_return_list) { - push(stack, g_list); - } -} - -// NB: this must be registered after the other aten::sort operators -RegisterOperators regSort({ - Operator( - "aten::sorted.any(t[](a) self) -> (t[])", - sort_op</*has_reverse_arg*/ false, /*copy_return_list*/ true>, - aliasAnalysisFromSchema()), - Operator( - "aten::sort.any(t[](a!) self, bool reverse=False) -> ()", - sort_op</*has_reverse_arg*/ true, /*copy_return_list*/ false>, - aliasAnalysisFromSchema()), -}); - // reference: _output_size in torch/nn/functional.py // size can be none, int or intlist // scale_factors can be none, float, or floatlist
2.41.0
e095be4b66945c4dfdfc99eb2d6b4bd836e22d8
Mon, 22 Apr 2024 14:19:03 -0700
[PATCH 0499/1000] Fix test_max_autotune_remote_caching (#124655)
D55206000 broke this test. It is not clear why it did not run in CI, but here's the fix. Differential Revision: [D56439213](https://our.internmc.facebook.com/intern/diff/D56439213/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124655 Approved by: https://github.com/aorenste
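For context, a hedged sketch (simplified, not the test's exact class) of the backend shape the test has to fake after that change: `get` now takes a single filename and signals a miss with `None`, rather than the old batched `get(filenames) -> dict`.

```python
import json


class FakeRemoteCacheBackend:
    """Simplified stand-in for the test's fake cache; constructor arguments omitted."""

    def __init__(self) -> None:
        self._store = {}  # filename -> serialized JSON payload

    def get(self, filename):
        # Per-key lookup; a miss returns None instead of an empty mapping.
        if filename not in self._store:
            return None
        return json.loads(self._store[filename])

    def put(self, filename, data):
        self._store[filename] = json.dumps(data)
```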
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py index bbcff4f87f..f74fa4ed1d 100644 --- a/test/inductor/test_max_autotune.py +++ b/test/inductor/test_max_autotune.py @@ -250,13 +250,13 @@ class TestMaxAutotune(TestCase): def __init__(self, key, is_autotune=False): pass - def get(self, filenames): + def get(self, filename): nonlocal cache nonlocal num_get - ret = { - file: json.loads(cache[file]) for file in filenames if file in cache - } - num_get += len(ret) + if filename not in cache: + return None + ret = json.loads(cache[filename]) + num_get += 1 return ret def put(self, filename, data):
2.41.0
60db767ef56a0512c5e94051c5058613fcaf1df
Mon, 22 Apr 2024 14:14:22 -0400
[PATCH 0500/1000] Don't clean up fresh inductor cache on error (#124620)
Useful for local debugging. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124620 Approved by: https://github.com/oulgen, https://github.com/desertfire, https://github.com/jansel
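The same keep-the-scratch-dir-on-failure idea, pulled out as a standalone sketch (my own helper, not an inductor API):

```python
import shutil
import tempfile
from contextlib import contextmanager


@contextmanager
def scratch_dir_kept_on_error(prefix="scratch-"):
    # Behaves like tempfile.TemporaryDirectory, except the directory survives
    # an exception so its contents can be inspected while debugging locally.
    path = tempfile.mkdtemp(prefix=prefix)
    try:
        yield path
        shutil.rmtree(path)
    except Exception:
        print(f"on error, temporary dir kept at {path}")
        raise
```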
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py index 7580b216a0..3cf4faa24b 100644 --- a/torch/_inductor/utils.py +++ b/torch/_inductor/utils.py @@ -665,7 +665,8 @@ def fresh_inductor_cache(cache_entries=None): for obj in _registered_caches: obj.cache_clear() - with tempfile.TemporaryDirectory() as inductor_cache_dir: + inductor_cache_dir = tempfile.mkdtemp() + try: with mock.patch.dict( os.environ, {"TORCHINDUCTOR_CACHE_DIR": inductor_cache_dir} ): @@ -683,6 +684,10 @@ def fresh_inductor_cache(cache_entries=None): if ".lock" not in f } ) + shutil.rmtree(inductor_cache_dir) + except Exception: + log.warning("on error, temporary cache dir kept at %s", inductor_cache_dir) + raise def argsort(seq) -> List[int]:
2.41.0
e5d689cf9e0dcd980a1e7e0daff3f8a4a4ec1ec
Tue, 23 Apr 2024 03:22:20 +0000
[PATCH 0502/1000] [EZ] Update pillow to 10.3.0 (#124614)
Older versions are subject to [CVE-2024-28219](https://nvd.nist.gov/vuln/detail/CVE-2024-28219), although it's not super important from a CI PoV. Modernize `torch/utils/tensorboard/summary.py` to use Pillow-9+ APIs (is this file even used for anything anymore?) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124614 Approved by: https://github.com/Skylion007, https://github.com/ZainRizvi
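The two Pillow API moves in this patch, shown in isolation (assuming Pillow 10.x, as pinned here):

```python
from PIL import Image, ImageFont

font = ImageFont.load_default()
# font.getsize() is gone in Pillow 10; getbbox() returns (left, top, right, bottom).
left, top, right, bottom = font.getbbox("display string")
text_width, text_height = right - left, bottom - top

img = Image.new("RGB", (100, 100))
# Image.ANTIALIAS was removed in Pillow 10; the Resampling enum is the replacement.
small = img.resize((50, 50), Image.Resampling.LANCZOS)
```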
diff --git a/.ci/docker/requirements-ci.txt b/.ci/docker/requirements-ci.txt index a636d70e32..fdd2fe25c9 100644 --- a/.ci/docker/requirements-ci.txt +++ b/.ci/docker/requirements-ci.txt @@ -147,9 +147,9 @@ optree==0.11.0 #test_pointwise_ops.py, test_dtensor_ops.py, test_torchinductor.py, test_fx.py, #test_fake_tensor.py, test_mps.py -pillow==10.2.0 +pillow==10.3.0 #Description: Python Imaging Library fork -#Pinned versions: 10.2.0 +#Pinned versions: 10.3.0 #test that import: protobuf==3.20.2 diff --git a/torch/utils/tensorboard/summary.py b/torch/utils/tensorboard/summary.py index 8211f6e0c8..4d94c3e615 100644 --- a/torch/utils/tensorboard/summary.py +++ b/torch/utils/tensorboard/summary.py @@ -142,7 +142,8 @@ def _draw_single_box( if display_str: text_bottom = bottom # Reverse list and print from bottom to top. - text_width, text_height = font.getsize(display_str) + _left, _top, _right, _bottom = font.getbbox(display_str) + text_width, text_height = _right - _left, _bottom - _top margin = np.ceil(0.05 * text_height) draw.rectangle( [ @@ -620,10 +621,7 @@ def make_image(tensor, rescale=1, rois=None, labels=None): image = Image.fromarray(tensor) if rois is not None: image = draw_boxes(image, rois, labels=labels) - try: - ANTIALIAS = Image.Resampling.LANCZOS - except AttributeError: - ANTIALIAS = Image.ANTIALIAS + ANTIALIAS = Image.Resampling.LANCZOS image = image.resize((scaled_width, scaled_height), ANTIALIAS) import io
2.41.0