| commitId (string, 40 chars) | datetime (string, 30-31 chars) | subject (string, 37-266 chars) | comment (string, 109-15.2k chars) | diff (string, 238-914k chars) | gitVersion (string, 9 classes) |
|---|---|---|---|---|---|
21bf5e4e48430247789cc3fa715c818b263e968
|
Thu, 25 Apr 2024 04:42:18 +0000
|
[PATCH 0628/1000] [foreach] Use same `dtypes` when `dtypesIfCUDA` is `None` (#124813)
|
This is in order to avoid accidentally testing the CUDA path with fewer dtypes. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124813 Approved by: https://github.com/janeyx99
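
A minimal sketch of the dtype fallback this patch introduces: when no CUDA-specific dtype list is given, the CUDA list now defaults to `dtypes` instead of a smaller hard-coded default, so the CUDA path is not tested with fewer dtypes. The `ForeachOpInfoSketch` class below is a hypothetical stand-in, not the actual `ForeachFuncInfo` implementation.

```python
import torch
from typing import Optional, Sequence


class ForeachOpInfoSketch:
    """Hypothetical stand-in illustrating the dtypesIfCUDA fallback."""

    def __init__(
        self,
        name: str,
        dtypes: Sequence[torch.dtype],
        dtypesIfCUDA: Optional[Sequence[torch.dtype]] = None,
    ):
        self.name = name
        self.dtypes = tuple(dtypes)
        # Fall back to the CPU dtype list rather than a smaller CUDA-only default.
        self.dtypesIfCUDA = (
            tuple(dtypesIfCUDA) if dtypesIfCUDA is not None else self.dtypes
        )


op = ForeachOpInfoSketch("zero_", dtypes=(torch.float32, torch.complex64, torch.bool))
assert op.dtypesIfCUDA == op.dtypes  # CUDA now inherits the full dtype set
```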
|
diff --git a/aten/src/ATen/native/cuda/ForeachUnaryOp.cu b/aten/src/ATen/native/cuda/ForeachUnaryOp.cu
index ff809d108d..d7a118e6a9 100644
--- a/aten/src/ATen/native/cuda/ForeachUnaryOp.cu
+++ b/aten/src/ATen/native/cuda/ForeachUnaryOp.cu
@@ -388,9 +388,10 @@ void foreach_tensor_zero_cuda_(TensorList tensors) {
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(tensors.vec());
- AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
+ AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
ScalarType::Half,
ScalarType::BFloat16,
+ ScalarType::Bool,
tensors[0].scalar_type(),
"foreach_zero_cuda_",
[&]() {
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 634ff4fe57..46148424e5 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -9217,6 +9217,8 @@ class foreach_inputs_sample_func:
# unary
if opinfo.ref in (torch.abs, torch.neg):
return False
+ if opinfo.ref_inplace in (torch.Tensor.zero_,):
+ return False
return dtype in integral_types_and(torch.bool)
if self.arity < 2 or rightmost_arg_type == ForeachRightmostArgType.Tensor:
return None
@@ -9475,43 +9477,53 @@ foreach_unary_op_db: List[OpInfo] = [
'exp',
foreach_inputs_sample_func(1, False, False),
backward_requires_result=True,
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'acos',
foreach_inputs_sample_func(1, False, False),
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'asin',
foreach_inputs_sample_func(1, False, False),
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'atan',
foreach_inputs_sample_func(1, False, False),
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'cos',
foreach_inputs_sample_func(1, False, False),
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'cosh',
foreach_inputs_sample_func(1, False, False),
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'log',
foreach_inputs_sample_func(1, False, False),
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'log10',
foreach_inputs_sample_func(1, False, False),
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'log2',
foreach_inputs_sample_func(1, False, False),
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'tan',
foreach_inputs_sample_func(1, False, False),
backward_requires_result=True,
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
decorators=(
# due to https://github.com/pytorch/pytorch/pull/102427 enabling jiterator for complex
DecorateInfo(
@@ -9530,6 +9542,7 @@ foreach_unary_op_db: List[OpInfo] = [
'tanh',
foreach_inputs_sample_func(1, False, False),
backward_requires_result=True,
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
decorators=(
DecorateInfo(
toleranceOverride(
@@ -9544,10 +9557,12 @@ foreach_unary_op_db: List[OpInfo] = [
ForeachFuncInfo(
'sin',
foreach_inputs_sample_func(1, False, False),
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'sinh',
foreach_inputs_sample_func(1, False, False),
+ dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
),
ForeachFuncInfo(
'neg',
diff --git a/torch/testing/_internal/opinfo/core.py b/torch/testing/_internal/opinfo/core.py
index eee3df27f8..87a57f7678 100644
--- a/torch/testing/_internal/opinfo/core.py
+++ b/torch/testing/_internal/opinfo/core.py
@@ -2714,7 +2714,7 @@ class ForeachFuncInfo(OpInfo):
sample_inputs_func,
*,
dtypes=floating_and_complex_types(),
- dtypesIfCUDA=floating_and_complex_types_and(torch.half),
+ dtypesIfCUDA=None,
dtypesIfROCM=None,
supports_alpha_param=False,
supports_autograd=True,
|
2.41.0
|
a70e7f58cf043c4b20c3395561c1e8b975f0d54
|
Wed, 24 Apr 2024 15:33:21 -0700
|
[PATCH 0630/1000] [Nested Tensor] Add unit test that covers the internal use cases (#124880)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124880 Approved by: https://github.com/jbschlosser
|
diff --git a/test/test_nestedtensor.py b/test/test_nestedtensor.py
index 6b7f7aa7ca..3df4b63801 100644
--- a/test/test_nestedtensor.py
+++ b/test/test_nestedtensor.py
@@ -185,8 +185,7 @@ def layout_name(layout):
return layout.__repr__().split(".")[-1]
-# Internally-defined conversion functions are lifted to here for maximum test realism.
-# TODO: Remove these when ViewNestedFromBuffer, etc. are deprecated.
+# Helper function for test_dummy_mha_with_nt
@torch.fx.wrap
def convert_dense_to_nested_tensor(values):
offsets = torch.arange(
@@ -198,6 +197,8 @@ def convert_dense_to_nested_tensor(values):
)
return nt
+
+# Helper function for test_dummy_mha_with_nt
@torch.fx.wrap
def convert_jagged_to_nested_tensor(
values: torch.Tensor, offsets: torch.Tensor, max_length: int
@@ -207,6 +208,7 @@ def convert_jagged_to_nested_tensor(
return nt
+# Helper function for test_dummy_mha_with_nt
@torch.fx.wrap
def convert_nt_to_jagged(nt):
return buffer_from_jagged(nt)
@@ -4264,6 +4266,79 @@ class TestNestedTensorSubclass(TestCase):
output.sum().backward()
self.assertEqual(values.grad, torch.ones_like(values))
+ # Internally-defined NT use cases are lifted to here for maximum test realism.
+ # TODO: Remove these when ViewNestedFromBuffer, etc. are deprecated.
+ @skipCUDAIfRocm # not needed
+ @skipIfTorchDynamo("compiles internally")
+ @unittest.skipIf(IS_WINDOWS, reason="Windows not yet supported for torch.compile")
+ @skipCUDAIf(not SM70OrLater, "GPU capability is < SM70")
+ def test_dummy_mha_with_nt(self, device):
+ bs = 3
+ d1 = 2
+ d2 = 4
+ d3 = 6
+ n_heads = 2
+ d_head = d3 // n_heads
+ max_length_1 = 10
+ max_length_2 = 20
+ torch.manual_seed(0)
+
+ class mha(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ torch.manual_seed(0)
+ self.linear = torch.nn.Linear(d2, d3, device=device)
+
+ def forward(self, query, value, offsets):
+
+ value = self.linear(value)
+ key = convert_jagged_to_nested_tensor(value, offsets, max_length_1)
+ value = convert_jagged_to_nested_tensor(value, offsets, max_length_2)
+ query = convert_dense_to_nested_tensor(query)
+ q = query.view(bs, -1, n_heads, d_head).transpose(1, 2)
+ k = key.view(bs, -1, n_heads, d_head).transpose(1, 2)
+ v = value.view(bs, -1, n_heads, d_head).transpose(1, 2)
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
+ q,
+ k,
+ v,
+ attn_mask=None,
+ dropout_p=0.0,
+ is_causal=False,
+ )
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = convert_nt_to_jagged(attn_output)
+ return attn_output, key._max_seqlen, value._max_seqlen
+
+ query = torch.rand(bs, d1, d3, device=device)
+ value = torch.rand(6, d2, requires_grad=True, device=device)
+ offsets = torch.tensor([0, 2, 3, 6], device=device)
+
+ m = mha()
+ symbolic_traced: torch.fx.GraphModule = torch.fx.symbolic_trace(m)
+ m = torch.compile(symbolic_traced)
+ attn_output, cached_key_max_seqlen, cached_value_max_seqlen = m(
+ query, value, offsets
+ )
+ loss = attn_output.sum()
+ # Check that NT can be fx traced and torch.compile, and backward works
+ loss.backward()
+
+ # Check that value.requires_grad is not lost after tracing and compiling
+ value_grad = value.grad # save for comparison later
+ self.assertIsNotNone(value_grad)
+ # check that max_seqlen is cached properly
+ self.assertEqual(cached_key_max_seqlen, max_length_1)
+ self.assertEqual(cached_value_max_seqlen, max_length_2)
+
+ # check if the output is numerically equivalent with the eager mode
+ m_eager = mha()
+ value.grad = None
+ attn_output_eager, _, _ = m_eager(query, value, offsets)
+ attn_output_eager.sum().backward()
+ self.assertTrue(torch.allclose(attn_output_eager, attn_output))
+ self.assertTrue(torch.allclose(value_grad, value.grad))
+
instantiate_parametrized_tests(TestNestedTensor)
instantiate_device_type_tests(TestNestedTensorDeviceType, globals())
|
2.41.0
|
4af62b000a9256591d91e17ad7002b0d99a05cc
|
Thu, 25 Apr 2024 06:29:15 +0000
|
[PATCH 0632/1000] Updated test_graph_grad_scaling to use new OptimizerInfo infrastructure (#123581)
|
This PR targets the issue mentioned in #123451 and solves the specific task of updating `test_graph_grad_scaling` in `test/test_cuda.py` to use the new OptimizerInfo infrastructure. `test_graph_grad_scaling` is moved to a new `TestCase` class called `TestCudaOptims` in order to use `instantiate_device_type_tests`. The test content remains the same. `@onlyCUDA` is applied to the new test; the original use of the wrapper function is also changed to a `@parametrize` decorator for better style. If this migration proves successful, the original test item under `TestCuda` can be deleted; for now it is left untouched to avoid any unexpected issues.

The local linter passed:

```
$ lintrunner test/test_cuda.py
ok No lint issues.
```

Local tests passed:

```
> python .\test\test_cuda.py -k test_graph_grad_scaling
Ran 7 tests in 0.458s
OK (skipped = 3)
```

Co-authored-by: Jane (Yuan) Xu <31798555+janeyx99@users.noreply.github.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123581 Approved by: https://github.com/janeyx99
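
A condensed sketch of the migration pattern; the names `optim_db`, `optims`, `onlyCUDA`, and `instantiate_device_type_tests` all come from the diff below, but the test body here is deliberately trimmed, so treat it as illustrative rather than a drop-in replacement.

```python
import torch
from torch.testing._internal.common_utils import TestCase, run_tests, parametrize
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests,
    onlyCUDA,
)
from torch.testing._internal.common_optimizers import optim_db, optims


class TestCudaOptims(TestCase):
    # instantiate_device_type_tests() below generates a per-device variant of this
    # class; @optims supplies one optim_info per matching OptimizerInfo entry.
    @onlyCUDA
    @parametrize("foreach, fused", [(False, False), (True, False), (False, True)])
    @optims(
        [o for o in optim_db if "foreach" in o.supported_impls and "fused" in o.supported_impls],
        dtypes=[torch.float32],
    )
    def test_graph_grad_scaling(self, device, dtype, optim_info, foreach, fused):
        # optim_info.optim_cls replaces the previously hard-coded torch.optim.SGD.
        weight = torch.ones((100,), device=device, requires_grad=True)
        opt = optim_info.optim_cls([weight], lr=0.1, foreach=foreach, fused=fused)
        self.assertIsNotNone(opt)


instantiate_device_type_tests(TestCudaOptims, globals())

if __name__ == "__main__":
    run_tests()
```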
|
diff --git a/test/test_cuda.py b/test/test_cuda.py
index 1a83e5fd01..88147eab8b 100644
--- a/test/test_cuda.py
+++ b/test/test_cuda.py
@@ -31,6 +31,9 @@ from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run
serialTest
from torch.testing._internal.common_cuda import TEST_CUDNN, TEST_MULTIGPU, \
_create_scaling_case, _get_torch_cuda_version
+from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA
+from torch.testing._internal.common_optimizers import (
+ optim_db, optims)
from torch.testing._internal.autocast_test_lists import AutocastTestLists
from torch.utils.viz._cycles import observe_tensor_cycles
@@ -2638,59 +2641,6 @@ exit(2)
y = model(x)
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
- def test_graph_grad_scaling(self):
- for foreach, fused in ((False, False), (True, False), (False, True)):
- self._test_graph_grad_scaling(foreach, fused)
-
- def _test_graph_grad_scaling(self, foreach, fused):
- torch.cuda.empty_cache()
-
- scaler = torch.cuda.amp.GradScaler(init_scale=4.)
- g = torch.cuda.CUDAGraph()
- s = torch.cuda.Stream()
-
- weight = torch.ones((100,), device="cuda", requires_grad=True)
- opt = torch.optim.SGD([weight], lr=0.1, foreach=foreach, fused=fused)
- static_input = torch.ones_like(weight)
- static_grad = torch.ones_like(weight)
-
- # warmup
- s = torch.cuda.Stream()
- s.wait_stream(torch.cuda.current_stream())
- with torch.cuda.stream(s):
- loss = (weight.half() * static_input).sum()
- scaler.scale(loss).backward()
- torch.cuda.current_stream().wait_stream(s)
-
- opt.zero_grad(set_to_none=True)
-
- # capture
- with torch.cuda.stream(s):
- g.capture_begin()
- loss = (weight.half() * static_input).sum()
- scaler.scale(loss).backward()
- g.capture_end()
-
- input_vals = [5, 20000, 5, 40000]
- # If the scale gets updated properly, these are the scale, growth tracker,
- # and grad values we expect.
- expected_scales = [4, 2, 2, 1]
- expected_growth_trackers = [1, 0, 1, 0]
- expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
-
- for data, scale, growth_tracker, grad_val in zip(input_vals,
- expected_scales,
- expected_growth_trackers,
- expected_grad_vals):
- static_input.fill_(data)
- g.replay()
- self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
- scaler.step(opt)
- scaler.update()
- self.assertEqual(scaler._scale, scale)
- self.assertEqual(scaler._growth_tracker, growth_tracker)
-
@unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
@parametrize(
"with_amp,cache_enabled,allow_unused_input",
@@ -4156,9 +4106,68 @@ class TestBlockStateAbsorption(TestCase):
cwd=os.path.dirname(os.path.realpath(__file__))).strip().decode('ascii')
self.assertEqual(rc, "False", "Triton was imported when importing torch!")
+class TestCudaOptims(TestCase):
+ # These tests will be instantiate with instantiate_device_type_tests
+ # to apply the new OptimizerInfo structure.
+
+ @onlyCUDA
+ @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @parametrize("foreach, fused", [(False, False), (True, False), (False, True)])
+ @optims(
+ [optim for optim in optim_db if "foreach" in optim.supported_impls and "fused" in optim.supported_impls],
+ dtypes=[torch.float32]
+ )
+ def test_graph_grad_scaling(self, device, dtype, optim_info, foreach, fused):
+ torch.cuda.empty_cache()
+
+ scaler = torch.cuda.amp.GradScaler(init_scale=4.)
+ g = torch.cuda.CUDAGraph()
+ s = torch.cuda.Stream()
+
+ weight = torch.ones((100,), device="cuda", requires_grad=True)
+ opt = optim_info.optim_cls([weight], lr=0.1, foreach=foreach, fused=fused)
+ static_input = torch.ones_like(weight)
+ static_grad = torch.ones_like(weight)
+
+ # warmup
+ s = torch.cuda.Stream()
+ s.wait_stream(torch.cuda.current_stream())
+ with torch.cuda.stream(s):
+ loss = (weight.half() * static_input).sum()
+ scaler.scale(loss).backward()
+ torch.cuda.current_stream().wait_stream(s)
+
+ opt.zero_grad(set_to_none=True)
+
+ # capture
+ with torch.cuda.stream(s):
+ g.capture_begin()
+ loss = (weight.half() * static_input).sum()
+ scaler.scale(loss).backward()
+ g.capture_end()
+
+ input_vals = [5, 20000, 5, 40000]
+ # If the scale gets updated properly, these are the scale, growth tracker,
+ # and grad values we expect.
+ expected_scales = [4, 2, 2, 1]
+ expected_growth_trackers = [1, 0, 1, 0]
+ expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
+
+ for data, scale, growth_tracker, grad_val in zip(input_vals,
+ expected_scales,
+ expected_growth_trackers,
+ expected_grad_vals):
+ static_input.fill_(data)
+ g.replay()
+ self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
+ scaler.step(opt)
+ scaler.update()
+ self.assertEqual(scaler._scale, scale)
+ self.assertEqual(scaler._growth_tracker, growth_tracker)
instantiate_parametrized_tests(TestCuda)
instantiate_parametrized_tests(TestCudaMallocAsync)
+instantiate_device_type_tests(TestCudaOptims, globals())
if __name__ == '__main__':
run_tests()
|
2.41.0
|
9a1f1f308545e3ac1d81940a51f8dc0db3d82d4
|
Tue, 23 Apr 2024 22:52:17 -0700
|
[PATCH 0633/1000] [dynamo][inline inbuilt nn modules] Do not inline for export (#124814)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124814 Approved by: https://github.com/jansel
|
diff --git a/torch/_dynamo/mutation_guard.py b/torch/_dynamo/mutation_guard.py
index 64703ca5df..5933aa97e0 100644
--- a/torch/_dynamo/mutation_guard.py
+++ b/torch/_dynamo/mutation_guard.py
@@ -84,7 +84,7 @@ class GenerationTracker:
)
-def is_dynamic_nn_module(obj):
+def is_dynamic_nn_module(obj, is_export):
"""Check for nn.Modules() created dynamically or mutated"""
if isinstance(obj, torch.nn.Module) and "forward" in obj.__dict__:
# A monkey patched `.forward` indicates something wacky is going on
@@ -93,7 +93,11 @@ def is_dynamic_nn_module(obj):
return obj.torchdynamo_force_dynamic
if is_lazy_module(obj):
return False
- if config.inline_inbuilt_nn_modules:
+ # For export, we will have to fix
+ # 1) Input signature problem because params are lifted as inputs
+ # 2) nn module stack info changes
+ # 3) adjust failing tests
+ if config.inline_inbuilt_nn_modules and not is_export:
return True
dyn = GenerationTracker.dynamic_classes.get(type(obj)) or GenerationTracker.check(
obj
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index f996358ba0..09b05d30fb 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -741,7 +741,7 @@ class OutputGraph:
*names,
**options,
):
- if is_dynamic_nn_module(target):
+ if is_dynamic_nn_module(target, self.root_tx.export):
return variables.UnspecializedNNModuleVariable(target, **options)
options = dict(options)
diff --git a/torch/_dynamo/variables/builder.py b/torch/_dynamo/variables/builder.py
index d4dc884435..6cdac8b66d 100644
--- a/torch/_dynamo/variables/builder.py
+++ b/torch/_dynamo/variables/builder.py
@@ -992,7 +992,7 @@ class VariableBuilder:
and not config.allow_rnn
):
unimplemented("TorchDynamo purposely graph breaks on RNN, GRU, LSTMs")
- if mutation_guard.is_dynamic_nn_module(value):
+ if mutation_guard.is_dynamic_nn_module(value, self.tx.export):
# created dynamically, don't specialize on it
self.install_guards(GuardBuilder.TYPE_MATCH)
result = UnspecializedNNModuleVariable(value, source=self.source)
|
2.41.0
|
68d65dae205dff44c420a7a0b298f1a5101319f
|
Wed, 24 Apr 2024 16:11:09 -0700
|
[PATCH 0634/1000] [dynamo][cpp-guards] Differentiate dict guards wrt to guarding on key order (#124779)
|
We guard on key order 1) when a key is a non-constant object, and 2) when we actually need key order, e.g. for `.values()`, `.items()`, etc. For dicts/OrderedDicts that do not require key-order guarding, we just rely on the usual `GuardManager + DictGetItemGuardAccessor`, which is faster than going through the `list(d.keys())`-based design for OrderedDicts. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124779 Approved by: https://github.com/jansel
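
A condensed version of the new `test_dict_guard_on_keys_order` test from the diff below, showing the behavior this enables: key order is only guarded when the compiled code actually observes it (here via `.items()`), and reordering the keys then forces exactly one recompile.

```python
import torch
import torch._dynamo.testing

cnts = torch._dynamo.testing.CompileCounter()


def fn(x, d):
    # Iterating over d.items() makes the compiled code depend on key order,
    # so Dynamo installs a key-order guard for this dict.
    for key, value in d.items():
        x = x * key + value
    return x


opt_fn = torch.compile(fn, backend=cnts)
d = {2: 4, 3: 5}
opt_fn(torch.randn(4), d)
opt_fn(torch.randn(4), d)
assert cnts.frame_count == 1  # stable key order: no recompilation

d[2] = d.pop(2)               # move key 2 to the end
x = torch.randn(4)
res = opt_fn(x, d)
assert cnts.frame_count == 2  # reordered keys trip the guard and recompile
assert torch.allclose(res, fn(x, d))
```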
|
diff --git a/test/dynamo/test_guard_manager.py b/test/dynamo/test_guard_manager.py
index 88cb5e5968..4edc65c143 100644
--- a/test/dynamo/test_guard_manager.py
+++ b/test/dynamo/test_guard_manager.py
@@ -24,6 +24,8 @@ install_no_tensor_aliasing_guard = guards.install_no_tensor_aliasing_guard
x = torch.tensor(4)
weakref_x = weakref.ref(x)
+default_mgr_enum = torch._dynamo.guards.GuardManagerType.GUARD_MANAGER
+
class Pair:
def __init__(self, x, y):
@@ -238,8 +240,8 @@ num_guards_executed=0)
f_locals = Foo(a, a)
- x_guard_mgr = guard_manager.getattr_manager("x", "", a)
- y_guard_mgr = guard_manager.getattr_manager("y", "", a)
+ x_guard_mgr = guard_manager.getattr_manager("x", "", a, default_mgr_enum)
+ y_guard_mgr = guard_manager.getattr_manager("y", "", a, default_mgr_enum)
install_tensor_aliasing_guard(x_guard_mgr, y_guard_mgr, ["x is y"])
# Check structure
@@ -319,9 +321,9 @@ num_guards_executed=0)
f_locals = Foo(a, a, a)
- x_guard_mgr = guard_manager.getattr_manager("x", "", a)
- y_guard_mgr = guard_manager.getattr_manager("y", "", a)
- z_guard_mgr = guard_manager.getattr_manager("z", "", a)
+ x_guard_mgr = guard_manager.getattr_manager("x", "", a, default_mgr_enum)
+ y_guard_mgr = guard_manager.getattr_manager("y", "", a, default_mgr_enum)
+ z_guard_mgr = guard_manager.getattr_manager("z", "", a, default_mgr_enum)
install_no_tensor_aliasing_guard(
[x_guard_mgr, y_guard_mgr, z_guard_mgr],
["x", "y", "z"],
@@ -396,11 +398,11 @@ num_guards_executed=0)
foo = Foo(1, 2)
guard_manager = RootGuardManager()
guard_manager.add_type_match_guard(id_type(foo), ["type(x) == Foo"])
- guard_manager.getattr_manager("x", "x", 1).add_lambda_guard(
+ guard_manager.getattr_manager("x", "x", 1, default_mgr_enum).add_lambda_guard(
functools.partial(equals_match, expected=foo.x),
equals_match_verbose_code_parts(foo.x),
)
- guard_manager.getattr_manager("y", "y", 2).add_lambda_guard(
+ guard_manager.getattr_manager("y", "y", 2, default_mgr_enum).add_lambda_guard(
functools.partial(equals_match, expected=foo.y),
equals_match_verbose_code_parts(foo.y),
)
@@ -417,13 +419,21 @@ num_guards_executed=0)
self.assertEqual(
len(
guard_manager.getattr_manager(
- attr="x", source="x", example_value=None
+ attr="x",
+ source="x",
+ example_value=None,
+ guard_manager_enum=default_mgr_enum,
).get_leaf_guards()
),
1,
)
self.assertEqual(
- len(guard_manager.getattr_manager("y", "y", None).get_leaf_guards()), 1
+ len(
+ guard_manager.getattr_manager(
+ "y", "y", None, default_mgr_enum
+ ).get_leaf_guards()
+ ),
+ 1,
)
self.assertTrue(guard_manager.check(foo))
@@ -434,11 +444,11 @@ num_guards_executed=0)
foo = [1, 2]
guard_manager = RootGuardManager()
guard_manager.add_type_match_guard(id_type(foo), ["type(x) == Foo"])
- guard_manager.getitem_manager(0, "", 1).add_lambda_guard(
+ guard_manager.getitem_manager(0, "", 1, default_mgr_enum).add_lambda_guard(
functools.partial(equals_match, expected=foo[0]),
equals_match_verbose_code_parts(foo[0]),
)
- guard_manager.getitem_manager(1, "", 2).add_lambda_guard(
+ guard_manager.getitem_manager(1, "", 2, default_mgr_enum).add_lambda_guard(
functools.partial(equals_match, expected=foo[1]),
equals_match_verbose_code_parts(foo[1]),
)
@@ -453,10 +463,20 @@ num_guards_executed=0)
)
# Check leaf guards on child managers
self.assertEqual(
- len(guard_manager.getitem_manager(0, "", None).get_leaf_guards()), 1
+ len(
+ guard_manager.getitem_manager(
+ 0, "", None, default_mgr_enum
+ ).get_leaf_guards()
+ ),
+ 1,
)
self.assertEqual(
- len(guard_manager.getitem_manager(1, "", None).get_leaf_guards()), 1
+ len(
+ guard_manager.getitem_manager(
+ 1, "", None, default_mgr_enum
+ ).get_leaf_guards()
+ ),
+ 1,
)
self.assertTrue(guard_manager.check(foo))
@@ -471,12 +491,12 @@ num_guards_executed=0)
guards_manager = RootGuardManager()
guards_manager.add_type_match_guard(id_type(foo), ["type(x) == Foo"])
- guards_manager.dict_getitem_manager("a", "", 1).add_equals_match_guard(
- 1, ["a == 1"]
- )
- guards_manager.dict_getitem_manager("b", "", 2).add_equals_match_guard(
- 2, ["b == 2"]
- )
+ guards_manager.dict_getitem_manager(
+ "a", "", 1, default_mgr_enum
+ ).add_equals_match_guard(1, ["a == 1"])
+ guards_manager.dict_getitem_manager(
+ "b", "", 2, default_mgr_enum
+ ).add_equals_match_guard(2, ["b == 2"])
self.assertTrue(guards_manager.check(foo))
self.assertFalse(guards_manager.check({"a": 1, "b": 3}))
@@ -485,8 +505,8 @@ num_guards_executed=0)
global global_pair, Pair
guard_manager = RootGuardManager()
gpair_mgr = guard_manager.globals_dict_manager(
- globals(), "", None
- ).getitem_manager("global_pair", "", global_pair)
+ globals(), "", None, default_mgr_enum
+ ).getitem_manager("global_pair", "", global_pair, default_mgr_enum)
gpair_mgr.add_lambda_guard(
lambda x: isinstance(x, Pair)
@@ -513,10 +533,12 @@ num_guards_executed=0)
f_locals = {"foo": foo}
# len(type(foo).__mro__) == 2
- foo_mgr = guard_manager.getitem_manager("foo", "", foo)
- type_manager = foo_mgr.type_manager("", type(foo))
+ foo_mgr = guard_manager.getitem_manager("foo", "", foo, default_mgr_enum)
+ type_manager = foo_mgr.type_manager("", type(foo), default_mgr_enum)
self.assertTrue(isinstance(foo_mgr.get_accessors()[0], TypeGuardAccessor))
- mro_manager = type_manager.getattr_manager("__mro__", "", type(foo).__mro__)
+ mro_manager = type_manager.getattr_manager(
+ "__mro__", "", type(foo).__mro__, default_mgr_enum
+ )
self.assertTrue(
isinstance(type_manager.get_accessors()[0], GetAttrGuardAccessor)
)
@@ -526,11 +548,15 @@ num_guards_executed=0)
)
# type(foo).__mro__[0].a = 4
- item_manager = mro_manager.getitem_manager(1, "", type(foo).__mro__[1])
+ item_manager = mro_manager.getitem_manager(
+ 1, "", type(foo).__mro__[1], default_mgr_enum
+ )
self.assertTrue(
isinstance(mro_manager.get_accessors()[0], GetItemGuardAccessor)
)
- attr_manager = item_manager.getattr_manager("a", "", type(foo).__mro__[0].a)
+ attr_manager = item_manager.getattr_manager(
+ "a", "", type(foo).__mro__[0].a, default_mgr_enum
+ )
self.assertTrue(
isinstance(item_manager.get_accessors()[0], GetAttrGuardAccessor)
)
@@ -551,9 +577,9 @@ num_guards_executed=0)
guard_manager.add_tuple_iterator_length_guard(
5, id_type(iter(tuple())), ["len == 5"]
)
- guard_manager.tuple_iterator_getitem_manager(2, "", foo).add_equals_match_guard(
- a[3], ["x==4"]
- )
+ guard_manager.tuple_iterator_getitem_manager(
+ 2, "", foo, default_mgr_enum
+ ).add_equals_match_guard(a[3], ["x==4"])
# Check that type match works
self.assertFalse(guard_manager.check(False))
@@ -567,8 +593,12 @@ num_guards_executed=0)
def test_global_weakref(self):
guard_manager = RootGuardManager()
- globals_manager = guard_manager.globals_dict_manager(globals(), "", None)
- weakref_manager = globals_manager.global_weakref_manager("weakref_x", "", None)
+ globals_manager = guard_manager.globals_dict_manager(
+ globals(), "", None, default_mgr_enum
+ )
+ weakref_manager = globals_manager.global_weakref_manager(
+ "weakref_x", "", None, default_mgr_enum
+ )
weakref_manager.add_lambda_guard(
lambda x: isinstance(x, torch.Tensor),
@@ -586,7 +616,9 @@ num_guards_executed=0)
guard_manager = RootGuardManager()
# Check that we can use the same accessor
- foo_mgr = guard_manager.lambda_manager(lambda x: x[2], "", None)
+ foo_mgr = guard_manager.lambda_manager(
+ lambda x: x[2], "", None, default_mgr_enum
+ )
foo_mgr.add_lambda_guard(
lambda x: x == 3,
"Expected value 3",
@@ -600,7 +632,7 @@ num_guards_executed=0)
raise AssertionError("Test")
return x
- foo_mgr = guard_manager.lambda_manager(fn, "", None)
+ foo_mgr = guard_manager.lambda_manager(fn, "", None, default_mgr_enum)
self.assertFalse(guard_manager.check(None))
debug_info = guard_manager.check_verbose(None)
@@ -634,7 +666,12 @@ num_guards_executed=0)
# its a getitem_manager just for f_locals. But the child guard manager
# should be a DictGuardManager.
- dict_mgr = root.getitem_manager("d", "", f_locals["d"])
+ dict_mgr = root.getitem_manager(
+ "d",
+ "",
+ f_locals["d"],
+ torch._dynamo.guards.GuardManagerType.DICT_GUARD_MANAGER,
+ )
self.assertTrue(isinstance(dict_mgr, DictGuardManager))
self.assertTrue(root.check(f_locals))
@@ -655,20 +692,28 @@ num_guards_executed=0)
# Add key-value manager ("a" : 1)
self.assertTrue(root.check(f_locals))
- dict_mgr.get_key_manager(0, "", "a").add_equals_match_guard(
- "a", ["dict.keys()[0] == a"]
+ dict_mgr.get_key_manager(0, "", "a", default_mgr_enum).add_equals_match_guard(
+ "a",
+ ["dict.keys()[0] == a"],
)
self.assertTrue(root.check(f_locals))
- dict_mgr.get_value_manager(0, "", 1).add_equals_match_guard(1, ["d[0] == 1"])
+ dict_mgr.get_value_manager(0, "", 1, default_mgr_enum).add_equals_match_guard(
+ 1, ["d[0] == 1"]
+ )
self.assertTrue(root.check(f_locals))
# Add key-value manager (nothing : {"z" : 3})
self.assertTrue(root.check(f_locals))
- dict_mgr.get_key_manager(1, "", nothing).add_lambda_guard(
+ dict_mgr.get_key_manager(1, "", nothing, default_mgr_enum).add_lambda_guard(
lambda x: x is nothing, ["x is nothing"]
)
self.assertTrue(root.check(f_locals))
- value_mgr = dict_mgr.get_value_manager(1, "", f_locals["d"][nothing])
+ value_mgr = dict_mgr.get_value_manager(
+ 1,
+ "",
+ f_locals["d"][nothing],
+ torch._dynamo.guards.GuardManagerType.DICT_GUARD_MANAGER,
+ )
self.assertTrue(isinstance(value_mgr, DictGuardManager))
self.assertTrue(root.check(f_locals))
@@ -688,69 +733,6 @@ num_guards_executed=0)
# fails because of len check
self.assertFalse(root.check(f_locals))
- def test_dict_guard_manager2(self):
- root = RootGuardManager()
-
- f_locals = {
- "d": {"a": 1, 100: torch.randn(4)},
- }
- dict_mgr = root.getitem_manager("d", "", f_locals["d"])
- self.assertTrue(type(dict_mgr) is DictGuardManager)
- self.assertTrue(root.check(f_locals))
-
- # defaultdict
- root = RootGuardManager()
- from collections import defaultdict
-
- f_locals = {}
- f_locals["d"] = defaultdict()
- f_locals["d"]["a"] = 1
- f_locals["d"][100] = torch.randn(4)
- dict_mgr = root.getitem_manager("d", "", f_locals["d"])
- self.assertTrue(type(dict_mgr) is DictGuardManager)
- self.assertTrue(root.check(f_locals))
-
- # ordereddict
- root = RootGuardManager()
- from collections import OrderedDict
-
- f_locals = {}
- f_locals["d"] = OrderedDict()
- f_locals["d"]["a"] = 1
- f_locals["d"][100] = torch.randn(4)
- dict_mgr = root.getitem_manager("d", "", f_locals["d"])
- self.assertTrue(type(dict_mgr) is DictSubclassGuardManager)
- self.assertTrue(root.check(f_locals))
-
- # dict subclass - should be treated as a dict
- root = RootGuardManager()
-
- class MyDict(dict):
- pass
-
- f_locals = {}
- f_locals["d"] = MyDict()
- f_locals["d"]["a"] = 1
- f_locals["d"][100] = torch.randn(4)
- dict_mgr = root.getitem_manager("d", "", f_locals["d"])
- self.assertTrue(type(dict_mgr) is DictGuardManager)
- self.assertTrue(root.check(f_locals))
-
- # dict subclass - with modified keys
- root = RootGuardManager()
-
- class ReversedDict(dict):
- def keys(self):
- return [10, 100]
-
- f_locals = {}
- f_locals["d"] = ReversedDict()
- f_locals["d"][100] = torch.randn(4)
- f_locals["d"][10] = torch.randn(4)
- dict_mgr = root.getitem_manager("d", "", f_locals["d"])
- self.assertTrue(type(dict_mgr) is DictSubclassGuardManager)
- self.assertTrue(root.check(f_locals))
-
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index f2c6515242..5d0e2c2ae6 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -2753,7 +2753,10 @@ utils_device.CURRENT_DEVICE == None""".split(
def test_dict_order_keys(self):
def fn(d):
- return d["a"] - d["b"]
+ c = 0
+ for v in d.values():
+ c += v
+ return c
args1 = {}
args1["a"] = torch.rand(10)
@@ -2762,7 +2765,8 @@ utils_device.CURRENT_DEVICE == None""".split(
opt_fn = torch._dynamo.optimize(cnts)(fn)
self.assertEqual(fn(args1), opt_fn(args1))
self.assertEqual(cnts.frame_count, 1)
- self.assertEqual(cnts.op_count, 1)
+ self.assertEqual(cnts.op_count, 2)
+
# A different order of keys recompiles
args2 = {}
args2["b"] = args1["b"]
@@ -10539,6 +10543,63 @@ fn
with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True):
fn(torch.randn(4), d)
+ def test_dict_guard_on_keys_order(self):
+ d = {
+ 2: 4,
+ 3: 5,
+ }
+
+ cnts = torch._dynamo.testing.CompileCounter()
+
+ def fn(x, d):
+ for key, value in d.items():
+ x = x * key + value
+ return x
+
+ opt_fn = torch.compile(fn, backend=cnts)
+ opt_fn(torch.randn(4), d)
+ opt_fn(torch.randn(4), d)
+ # No recompilation
+ self.assertEqual(cnts.frame_count, 1)
+
+ # move 2 to the end
+ d[2] = d.pop(2)
+
+ x = torch.randn(4)
+ res = opt_fn(x, d)
+ # Check recompilation
+ self.assertEqual(cnts.frame_count, 2)
+ self.assertEqual(res, fn(x, d))
+
+ def test_dict_guard_on_keys_order2(self):
+ d = {
+ 2: 4,
+ 3: 5,
+ }
+
+ cnts = torch._dynamo.testing.CompileCounter()
+
+ def fn(x, d):
+ for key in d:
+ value = d[key]
+ x = x * key + value
+ return x
+
+ opt_fn = torch.compile(fn, backend=cnts)
+ opt_fn(torch.randn(4), d)
+ opt_fn(torch.randn(4), d)
+ # No recompilation
+ self.assertEqual(cnts.frame_count, 1)
+
+ # move 2 to the end
+ d[2] = d.pop(2)
+
+ x = torch.randn(4)
+ res = opt_fn(x, d)
+ # Check recompilation
+ self.assertEqual(cnts.frame_count, 2)
+ self.assertEqual(res, fn(x, d))
+
class TestTracer(JitTestCase):
def test_jit_save(self):
diff --git a/torch/_C/_dynamo/guards.pyi b/torch/_C/_dynamo/guards.pyi
index 5dc6cfa28c..2de2f10cd3 100644
--- a/torch/_C/_dynamo/guards.pyi
+++ b/torch/_C/_dynamo/guards.pyi
@@ -15,15 +15,27 @@ class GuardManager:
# Accessors
def globals_dict_manager(
- self, f_globals: Dict[str, Any], source, example_value
+ self,
+ f_globals: Dict[str, Any],
+ source,
+ example_value,
+ guard_manager_enum,
+ ) -> GuardManager: ...
+ def dict_getitem_manager(
+ self, key, source, example_value, guard_manager_enum
) -> GuardManager: ...
- def dict_getitem_manager(self, key, source, example_value) -> GuardManager: ...
def global_weakref_manager(
- self, global_name: str, source, example_value
+ self, global_name: str, source, example_value, guard_manager_enum
+ ) -> GuardManager: ...
+ def type_manager(
+ self, source, example_value, guard_manager_enum
+ ) -> GuardManager: ...
+ def getattr_manager(
+ self, attr: str, source, example_value, guard_manager_enum
+ ) -> GuardManager: ...
+ def lambda_manager(
+ self, python_lambda, source, example_value, guard_manager_enum
) -> GuardManager: ...
- def type_manager(self, source, example_value) -> GuardManager: ...
- def getattr_manager(self, attr: str, source, example_value) -> GuardManager: ...
- def lambda_manager(self, python_lambda, source, example_value) -> GuardManager: ...
# Leaf guards
def add_lambda_guard(self, user_lambda, verbose_code_parts: List[str]) -> None: ...
@@ -40,8 +52,12 @@ class RootGuardManager(GuardManager):
) -> None: ...
class DictGuardManager(GuardManager):
- def get_key_manager(self, index, source, example_value) -> GuardManager: ...
- def get_value_manager(self, index, source, example_value) -> GuardManager: ...
+ def get_key_manager(
+ self, index, source, example_value, guard_manager_enum
+ ) -> GuardManager: ...
+ def get_value_manager(
+ self, index, source, example_value, guard_manager_enum
+ ) -> GuardManager: ...
def install_tensor_aliasing_guard(
guard_managers: List[GuardManager],
diff --git a/torch/_dynamo/guards.py b/torch/_dynamo/guards.py
index 33caa7dd42..974e551975 100644
--- a/torch/_dynamo/guards.py
+++ b/torch/_dynamo/guards.py
@@ -354,7 +354,7 @@ def get_key_index_source(source, index):
def getitem_on_dict_manager(
- source, base_guard_manager, base_example_value, example_value
+ source, base_guard_manager, base_example_value, example_value, guard_manager_enum
):
base_source_name = source.base.name()
source_name = source.name()
@@ -374,13 +374,19 @@ def getitem_on_dict_manager(
# We have to insert a key manager guard here
# TODO - source debug string is probably wrong here.
base_guard_manager.get_key_manager(
- index=index, source=key_source, example_value=source.index
+ index=index,
+ source=key_source,
+ example_value=source.index,
+ guard_manager_enum=GuardManagerType.GUARD_MANAGER,
).add_equals_match_guard(
source.index, [f"{key_source} == {key_example_value!r}"]
)
return base_guard_manager.get_value_manager(
- index=index, source=value_source, example_value=example_value
+ index=index,
+ source=value_source,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
@@ -397,6 +403,12 @@ class GuardCodeList:
guard: Guard
+class GuardManagerType(enum.Enum):
+ GUARD_MANAGER = 1
+ DICT_GUARD_MANAGER = 2
+ DICT_SUBCLASS_GUARD_MANAGER = 3
+
+
class GuardBuilder(GuardBuilderBase):
def __init__(
self,
@@ -457,15 +469,47 @@ class GuardBuilder(GuardBuilderBase):
# limit the number of cache entries with same ID_MATCH'd object.
self.id_matched_objs: Dict[str, ReferenceType[object]] = {}
- def add_dict_keys_guard(self, value, guard):
+ def guard_on_dict_keys_and_ignore_order(self, example_value, guard):
+ dict_mgr = self.get_guard_manager(guard)
+ if isinstance(dict_mgr, DictGuardManager):
+ raise NotImplementedError(
+ "Not expecting a DictGuardManager. Seems like Dynamo incorrectly "
+ "added the dict to tx.output.guard_on_key_order"
+ )
+
+ # Iterate over the dicts and install a dict_getitem_manager.
+ dict_source = guard.originating_source.name()
+ for key in example_value.keys():
+ value = example_value[key]
+ value_source = GetItemSource(guard.originating_source, index=key)
+ guard_manager_enum = self.get_guard_manager_type(
+ value_source, example_value
+ )
+ dict_mgr.dict_getitem_manager(
+ key=key,
+ source=f"{dict_source}[{key!r}]",
+ example_value=value,
+ guard_manager_enum=guard_manager_enum,
+ )
+
+ def guard_on_dict_keys_and_order(self, value, guard):
# Add key managers for the DictGuardManager. Then add either an
# ID_MATCH or EQUALS_MATCH guard on the key.
dict_mgr = self.get_guard_manager(guard)
+ if not isinstance(dict_mgr, DictGuardManager):
+ raise NotImplementedError(
+ "Expecting a DictGuardManager. Seems like Dynamo forgot "
+ "to add the dict in tx.output.guard_on_key_order"
+ )
assert isinstance(dict_mgr, DictGuardManager)
+
for idx, key in enumerate(value.keys()):
key_source = get_key_index_source(guard.name, idx)
key_manager = dict_mgr.get_key_manager(
- index=idx, source=key_source, example_value=key
+ index=idx,
+ source=key_source,
+ example_value=key,
+ guard_manager_enum=GuardManagerType.GUARD_MANAGER,
)
if key_is_id(key):
# Install ID_MATCH guard
@@ -482,10 +526,31 @@ class GuardBuilder(GuardBuilderBase):
key, get_verbose_code_parts(f"{key_source} == {key!r}", guard)
)
+ def get_guard_manager_type(self, source, example_value):
+ guard_manager_enum = GuardManagerType.GUARD_MANAGER
+ if source.name() in self.check_fn_manager.output_graph.guard_on_key_order:
+ assert isinstance(example_value, dict)
+ # If keys method is not overriden, we can use PyDict_Next to get key
+ # orderings. Read more in guards.cpp
+ if type(example_value).keys is type({}).keys:
+ guard_manager_enum = GuardManagerType.DICT_GUARD_MANAGER
+ else:
+ guard_manager_enum = GuardManagerType.DICT_SUBCLASS_GUARD_MANAGER
+ return guard_manager_enum
+
+ def manager_guards_on_keys(self, mgr_enum):
+ return (
+ mgr_enum == GuardManagerType.DICT_GUARD_MANAGER
+ or mgr_enum == GuardManagerType.DICT_SUBCLASS_GUARD_MANAGER
+ )
+
def get_global_guard_manager(self):
assert self.guard_manager # to make mypy happy
return self.guard_manager.root.globals_dict_manager(
- f_globals=self.scope["G"], source="G", example_value=None
+ f_globals=self.scope["G"],
+ source="G",
+ example_value=self.scope["G"],
+ guard_manager_enum=GuardManagerType.GUARD_MANAGER,
)
def get_guard_manager_from_source(self, source):
@@ -497,28 +562,20 @@ class GuardBuilder(GuardBuilderBase):
if source_name != "":
example_value = self.get(source_name)
+ guard_manager_enum = self.get_guard_manager_type(source, example_value)
+
# Get base manager related information
base_source_name = None
base_example_value = None
base_guard_manager = None
+ base_guard_manager_enum = GuardManagerType.GUARD_MANAGER
if isinstance(source, ChainedSource):
base_source_name = source.base.name()
base_example_value = self.get(base_source_name)
base_guard_manager = self.get_guard_manager_from_source(source.base)
-
- # TODO(anijain2305) - We special case for sys.modules in builder.py with
- # PythonSysModulesVariable. We specialize because otherwise using a
- # ConstDictVariable tracker installs guards on all the keys, resulting
- # in a large number of guards. Even with LazyVariable trackers, we still
- # install guards on all the keys because of how HashableTracker is
- # currently implemented. Therefore to fix this issue, we will need to
- # improve key guard installation for ConstDictVariable tracker and
- # then remove specialization for sys.modules in builder.py.
- # Set example_value to None to prevent installation fo DictGuardManager.
- if example_value is sys.modules:
- example_value = None
- if base_example_value is sys.modules:
- base_example_value = None
+ base_guard_manager_enum = self.get_guard_manager_type(
+ source.base, base_example_value
+ )
# Use istype instead of isinstance to check for exact type of source.
if istype(source, LocalSource):
@@ -529,6 +586,7 @@ class GuardBuilder(GuardBuilderBase):
key=source.local_name,
source=source_name,
example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif istype(source, GlobalSource):
# Global manager accepts a dict but it is not a DictGuardManager
@@ -538,12 +596,14 @@ class GuardBuilder(GuardBuilderBase):
key=source.global_name,
source=source_name,
example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif istype(source, GlobalWeakRefSource):
return self.get_global_guard_manager().global_weakref_manager(
global_name=source.global_name,
source=source_name,
example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif istype(source, GlobalStateSource):
# Don't do anything here. We guard on global state completely in
@@ -554,7 +614,9 @@ class GuardBuilder(GuardBuilderBase):
elif istype(source, TypeSource):
assert base_guard_manager # to make mypy happy
return base_guard_manager.type_manager(
- source=source_name, example_value=example_value
+ source=source_name,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif istype(
source,
@@ -565,72 +627,87 @@ class GuardBuilder(GuardBuilderBase):
elif istype(source, GradSource):
assert base_guard_manager # to make mypy happy
return base_guard_manager.grad_manager(
- source=source_name, example_value=example_value
+ source=source_name,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif istype(source, AttrSource):
assert base_guard_manager # to make mypy happy
return base_guard_manager.getattr_manager(
- attr=source.member, source=source_name, example_value=example_value
+ attr=source.member,
+ source=source_name,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif istype(source, GetItemSource):
assert base_guard_manager # to make mypy happy
- if isinstance(base_guard_manager, DictGuardManager):
+ if isinstance(base_example_value, (dict, collections.OrderedDict)):
# TODO(anijain2305) - Consider isolating GetItemSource and
# DictGetItemSource (or maybe use ODictGetItemSource for
# dicts) so that GetItemSource is only for non dict objects.
- return getitem_on_dict_manager(
- source,
- base_guard_manager,
- base_example_value,
- example_value,
- )
-
- # TODO(anijain2305) - Ideally we should have an assert here that
- # base_example_value should not be a dict subclass. It should be
- # a dict manager and should already be handled. Infact PyTorch
- # CI is happy with that assert. But lets wait for a few weeks
- # with some more testing on real models before turning this into
- # an assertion.
- if isinstance(base_example_value, (dict, collections.OrderedDict)):
- guards_log.debug(
- "%s",
- (
- f"Using a generic GuardManager instead of DictGuardManager for {source_name}."
- " Could give a small perf improvement in guard eval with DictGuardManager.",
- ),
- )
- return base_guard_manager.dict_getitem_manager(
- key=source.index,
- source=source_name,
- example_value=example_value,
- )
+ if isinstance(base_guard_manager, DictGuardManager):
+ assert self.manager_guards_on_keys(base_guard_manager_enum)
+ return getitem_on_dict_manager(
+ source,
+ base_guard_manager,
+ base_example_value,
+ example_value,
+ guard_manager_enum,
+ )
+ else:
+ if isinstance(source.index, ConstDictKeySource):
+ raise RuntimeError(
+ "Expecting clean index here. Likely Dynamo forgot to mark"
+ " a dict as guard_on_key_order"
+ )
+ return base_guard_manager.dict_getitem_manager(
+ key=source.index,
+ source=source_name,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
+ )
elif isinstance(base_example_value, list) and not source.index_is_slice:
return base_guard_manager.list_getitem_manager(
key=source.index,
source=source_name,
example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif isinstance(base_example_value, tuple) and not source.index_is_slice:
return base_guard_manager.tuple_getitem_manager(
key=source.index,
source=source_name,
example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
index = source.index
if source.index_is_slice:
index = source.unpack_slice()
return base_guard_manager.getitem_manager(
- key=index, source=source_name, example_value=example_value
+ key=index,
+ source=source_name,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif istype(source, ODictGetItemSource):
- assert isinstance(base_guard_manager, DictGuardManager)
- return getitem_on_dict_manager(
- source,
- base_guard_manager,
- base_example_value,
- example_value,
- )
+ if isinstance(base_guard_manager, DictGuardManager):
+ assert self.manager_guards_on_keys(base_guard_manager_enum)
+ return getitem_on_dict_manager(
+ source,
+ base_guard_manager,
+ base_example_value,
+ example_value,
+ guard_manager_enum,
+ )
+ else:
+ assert base_guard_manager # to make mypy happy
+ return base_guard_manager.dict_getitem_manager(
+ key=source.index,
+ source=source_name,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
+ )
elif istype(source, DefaultsSource):
assert base_guard_manager # to make mypy happy
assert callable(base_example_value)
@@ -638,34 +715,32 @@ class GuardBuilder(GuardBuilderBase):
return base_guard_manager.func_defaults_manager(
source=base_source_name,
example_value=base_example_value.__defaults__,
+ guard_manager_enum=GuardManagerType.GUARD_MANAGER,
).getitem_manager(
key=source.idx_key,
source=source_name,
example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
else:
# kwdefauts is a dict, so use a DictGuardManager
kwdefaults = base_example_value.__kwdefaults__
assert base_source_name is not None
kw_source = base_source_name + ".__kwdefaults__"
+
+ # kwdefaults is a dict. No need to guard on dict order.
dict_mgr = base_guard_manager.func_kwdefaults_manager(
source=kw_source,
example_value=kwdefaults,
+ guard_manager_enum=GuardManagerType.GUARD_MANAGER,
)
- assert isinstance(dict_mgr, DictGuardManager)
- index = get_key_index(kwdefaults, source.idx_key)
- key_source = get_key_index_source(kw_source, index)
-
- # Add key manager and equals match guard
- dict_mgr.get_key_manager(
- index=index, source=key_source, example_value=source.idx_key
- ).add_equals_match_guard(
- source.idx_key, [f"{key_source} == {source.idx_key}"]
- )
+ assert not isinstance(dict_mgr, DictGuardManager)
- # Add value manager and return it
- return dict_mgr.get_value_manager(
- index=index, source=source_name, example_value=example_value
+ return dict_mgr.dict_getitem_manager(
+ key=source.idx_key,
+ source=source_name,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif istype(source, NumpyTensorSource):
assert base_guard_manager # to make mypy happy
@@ -673,11 +748,15 @@ class GuardBuilder(GuardBuilderBase):
python_lambda=from_numpy,
source=source_name,
example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif istype(source, TupleIteratorGetItemSource):
assert base_guard_manager # to make mypy happy
return base_guard_manager.tuple_iterator_getitem_manager(
- index=source.index, source=source_name, example_value=example_value
+ index=source.index,
+ source=source_name,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
elif isinstance(source, ConstDictKeySource):
if not isinstance(base_guard_manager, DictGuardManager):
@@ -685,7 +764,10 @@ class GuardBuilder(GuardBuilderBase):
"ConstDictKeySource can only work on DictGuardManager"
)
return base_guard_manager.get_key_manager(
- index=source.index, source=source_name, example_value=example_value
+ index=source.index,
+ source=source_name,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
else:
raise AssertionError(
@@ -786,8 +868,12 @@ class GuardBuilder(GuardBuilderBase):
# Just install a getattr manager. GetAttrGuardAccessor itself
# acts as hasattr guard.
example_value = self.get(source.name())
+ guard_manager_enum = self.get_guard_manager_type(source, example_value)
base_manager.getattr_manager(
- attr=attr, source=guard.name, example_value=example_value
+ attr=attr,
+ source=guard.name,
+ example_value=example_value,
+ guard_manager_enum=guard_manager_enum,
)
else:
base_manager.add_no_hasattr_guard(
@@ -1142,15 +1228,17 @@ class GuardBuilder(GuardBuilderBase):
self._set_guard_export_info(guard, code)
if config.enable_cpp_guard_manager:
- self.get_guard_manager(guard).add_length_check_guard(
- len(value), get_verbose_code_parts(code, guard)
- )
+ if isinstance(value, dict):
+ self.get_guard_manager(guard).add_dict_length_check_guard(
+ len(value), get_verbose_code_parts(code, guard)
+ )
+ else:
+ self.get_guard_manager(guard).add_length_check_guard(
+ len(value), get_verbose_code_parts(code, guard)
+ )
else:
self._produce_guard_code(guard, code)
- def DICT_LENGTH(self, guard):
- self.SEQUENCE_LENGTH(guard)
-
def TUPLE_ITERATOR_LEN(self, guard):
ref = self.arg_ref(guard)
value = self.get(guard.name)
@@ -1216,7 +1304,11 @@ class GuardBuilder(GuardBuilderBase):
self._set_guard_export_info(guard, code)
if config.enable_cpp_guard_manager:
- self.add_dict_keys_guard(value, guard)
+ dict_info = self.check_fn_manager.output_graph.guard_on_key_order
+ if guard.originating_source.name() in dict_info:
+ self.guard_on_dict_keys_and_order(value, guard)
+ else:
+ self.guard_on_dict_keys_and_ignore_order(value, guard)
else:
self._produce_guard_code(guard, code)
@@ -1270,7 +1362,11 @@ class GuardBuilder(GuardBuilderBase):
self._set_guard_export_info(guard, code)
if config.enable_cpp_guard_manager:
- self.add_dict_keys_guard(value, guard)
+ dict_info = self.check_fn_manager.output_graph.guard_on_key_order
+ if guard.originating_source.name() in dict_info:
+ self.guard_on_dict_keys_and_order(value, guard)
+ else:
+ self.guard_on_dict_keys_and_ignore_order(value, guard)
else:
self._produce_guard_code(guard, code)
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index 09b05d30fb..0be89d59e2 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -400,6 +400,8 @@ class OutputGraph:
self.install_builtins_dict_in_fglobals()
)
+ self.guard_on_key_order: Set[str] = set()
+
def install_builtins_dict_in_fglobals(self):
# f_globals["__builtins__"] can be a dict or a module. This is an
# implemenation detail -
diff --git a/torch/_dynamo/variables/builder.py b/torch/_dynamo/variables/builder.py
index 6cdac8b66d..3b9edf5b26 100644
--- a/torch/_dynamo/variables/builder.py
+++ b/torch/_dynamo/variables/builder.py
@@ -446,6 +446,10 @@ class VariableBuilder:
# For SUPPORTED_NODES, we guard on the dictionary version (PEP509)
# under the assumption that the values themselves don't change.
self.install_guards(GuardBuilder.DICT_VERSION)
+
+ # The keys on the SUPPORTED_NODES can be arbitrary, so save on the
+ # key order.
+ self.tx.output.guard_on_key_order.add(self.source.name())
result = {
ConstantVariable.create(k): UserDefinedObjectVariable(
v,
@@ -470,12 +474,32 @@ class VariableBuilder:
# but not completely secure job ensuring a property wasn't changed.
self.install_guards(GuardBuilder.BOOL_FALSE)
else:
- self.install_guards(GuardBuilder.DICT_LENGTH)
+ self.install_guards(GuardBuilder.SEQUENCE_LENGTH)
# Optimisation for the common case strings, ints, etc
all_const = all(ConstantVariable.is_literal(k) for k in value.keys())
if all_const:
+ # TODO(anijain2305) - Do we have to guard on all the keys? Can
+ # keys be guarded lazily, similar to values?
self.install_guards(GuardBuilder.DICT_CONST_KEYS)
+ else:
+ # Guard on the key order
+ # This is not ideal, i.e., there is no need to guard on the key
+ # order. But we guard on the key order because of the complexity
+ #
+ # 1) For non-constant objects, we can't save the key in the
+ # guard context because it can be memory heavy. We can add
+ # weakrefs but this complicates the accesses.
+ #
+ # 2) For non-constant objects, we also have to guard on the keys
+ # (like TENSOR_MATCH on tensor). We might also have guards on
+ # the attributes of the keys (like tensor.grad). To make this
+ # work in tree strucutre is complicated.
+ #
+ # So, instead we guard on the key order. While guarding on key
+ # order, we just save the indices and use it to access keys and
+ # values. Indices are cheap to save.
+ self.tx.output.guard_on_key_order.add(self.source.name())
# We need all the keys to be hashable. We do this within the
# _HashableTracker class in dicts.py
diff --git a/torch/_dynamo/variables/builtin.py b/torch/_dynamo/variables/builtin.py
index 0cc73456fe..fe2e2a10f4 100644
--- a/torch/_dynamo/variables/builtin.py
+++ b/torch/_dynamo/variables/builtin.py
@@ -1185,6 +1185,11 @@ class BuiltinVariable(VariableTracker):
obj.source.make_guard(GuardBuilder.TUPLE_ITERATOR_LEN)
)
else:
+ if getattr(obj, "source", False) and isinstance(
+ obj, ConstDictVariable
+ ):
+ tx.output.guard_on_key_order.add(obj.source.name())
+
install_guard(obj.source.make_guard(GuardBuilder.SEQUENCE_LENGTH))
return cls(
diff --git a/torch/_dynamo/variables/dicts.py b/torch/_dynamo/variables/dicts.py
index a640bba07a..f1f7df8ce7 100644
--- a/torch/_dynamo/variables/dicts.py
+++ b/torch/_dynamo/variables/dicts.py
@@ -223,13 +223,19 @@ class ConstDictVariable(VariableTracker):
return self.getitem_const(args[0])
elif name == "items":
assert not (args or kwargs)
+ if self.source:
+ tx.output.guard_on_key_order.add(self.source.name())
return TupleVariable(
[TupleVariable([k.vt, v]) for k, v in self.items.items()]
)
elif name == "keys":
+ if self.source:
+ tx.output.guard_on_key_order.add(self.source.name())
assert not (args or kwargs)
return DictKeys(self)
elif name == "values":
+ if self.source:
+ tx.output.guard_on_key_order.add(self.source.name())
assert not (args or kwargs)
return DictValues(self)
elif name == "copy":
diff --git a/torch/_dynamo/variables/user_defined.py b/torch/_dynamo/variables/user_defined.py
index 5b0f63731c..9db504cd16 100644
--- a/torch/_dynamo/variables/user_defined.py
+++ b/torch/_dynamo/variables/user_defined.py
@@ -574,6 +574,7 @@ class UserDefinedObjectVariable(UserDefinedVariable):
keys = list(self.value.keys())
assert all(map(ConstantVariable.is_literal, keys))
install_guard(self.source.make_guard(GuardBuilder.DICT_CONST_KEYS))
+ tx.output.guard_on_key_order.add(self.source.name())
return TupleVariable([ConstantVariable.create(k) for k in keys])
if (
@@ -585,6 +586,8 @@ class UserDefinedObjectVariable(UserDefinedVariable):
):
assert not kwargs
assert self.source # OrderedDict, dict subtypes must always have source
+
+ # TODO(anijain2305) - Why do we need to guard on all keys?
install_guard(self.source.make_guard(GuardBuilder.DICT_CONST_KEYS))
return ConstantVariable.create(
args[0].as_python_constant() in self.value
@@ -603,6 +606,7 @@ class UserDefinedObjectVariable(UserDefinedVariable):
[key, self.odict_getitem(tx, key)],
)
)
+ tx.output.guard_on_key_order.add(self.source.name())
return TupleVariable(items)
if method is collections.OrderedDict.__getitem__ and len(args) == 1:
diff --git a/torch/csrc/dynamo/guards.cpp b/torch/csrc/dynamo/guards.cpp
index ac8fddfbd3..892197a2d7 100644
--- a/torch/csrc/dynamo/guards.cpp
+++ b/torch/csrc/dynamo/guards.cpp
@@ -1068,6 +1068,21 @@ class LENGTH_CHECK : public LeafGuard {
Py_ssize_t _length;
};
+class DICT_LENGTH : public LeafGuard {
+ public:
+ DICT_LENGTH(py::object value, py::object verbose_code_parts)
+ : LeafGuard(std::move(verbose_code_parts)),
+ _length(py::cast<Py_ssize_t>(std::move(value))) {}
+
+ bool check_nopybind(PyObject* value) override { // borrowed ref
+ return PyDict_Check(value) && PyDict_Size(value) == _length;
+ }
+
+ private:
+ // Length of the guarded dict
+ Py_ssize_t _length;
+};
+
class NOT_NONE : public LeafGuard {
public:
NOT_NONE(py::object verbose_code_parts)
@@ -1381,7 +1396,8 @@ class DICT_VERSION : public LeafGuard {
std::unique_ptr<GuardManager> make_guard_manager(
RootGuardManager* root,
std::string source,
- py::handle example_value);
+ py::handle example_value,
+ py::handle guard_manager_enum);
/**
* Base class representing a pair of accessor and the associated guard
@@ -1405,8 +1421,13 @@ class GuardAccessor {
RootGuardManager* root,
py::object accessor_key,
std::string source,
- py::handle example_value)
- : _guard_manager(make_guard_manager(root, source, example_value)),
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : _guard_manager(make_guard_manager(
+ root,
+ source,
+ example_value,
+ guard_manager_enum)),
_accessor_key(std::move(accessor_key)),
_source(std::move(source)) {}
@@ -1518,7 +1539,8 @@ class GuardManager {
GuardManager* get_child_manager(
py::object accessor_key,
std::string source,
- py::handle example_value) {
+ py::handle example_value,
+ py::handle guard_manager_enum) {
// accessor_key type depends on the GuardAccessorT
// for example for GetAttrGuardAccessor - py::str name
@@ -1531,7 +1553,11 @@ class GuardManager {
// Construct a new guard accessor
_accessors.emplace_back(std::make_unique<GuardAccessorT>(
- _root, std::move(accessor_key), source, example_value));
+ _root,
+ std::move(accessor_key),
+ source,
+ example_value,
+ guard_manager_enum));
return _accessors.back()->get_guard_manager().get();
}
@@ -1898,12 +1924,16 @@ class DictGuardManager : public GuardManager {
GuardManager* get_key_manager(
py::object key_index,
std::string source,
- py::handle example_value) {
+ py::handle example_value,
+ py::handle guard_manager_enum) {
KeyValueManager& key_value_manager =
_get_index_manager(std::move(key_index));
if (!key_value_manager.first) {
key_value_manager.first = make_guard_manager(
- this->get_root(), std::move(source), example_value);
+ this->get_root(),
+ std::move(source),
+ example_value,
+ guard_manager_enum);
};
return key_value_manager.first.get();
}
@@ -1911,12 +1941,16 @@ class DictGuardManager : public GuardManager {
GuardManager* get_value_manager(
py::object key_index,
std::string source,
- py::handle example_value) {
+ py::handle example_value,
+ py::handle guard_manager_enum) {
KeyValueManager& key_value_manager =
_get_index_manager(std::move(key_index));
if (!key_value_manager.second) {
key_value_manager.second = make_guard_manager(
- this->get_root(), std::move(source), example_value);
+ this->get_root(),
+ std::move(source),
+ example_value,
+ guard_manager_enum);
};
return key_value_manager.second.get();
}
@@ -2284,22 +2318,19 @@ class DictSubclassGuardManager : public DictGuardManager {
}
};
-bool has_base_dict_keys_iter(py::handle& obj) {
- // Implements `type(obj).keys is type(dict()).keys`
- py::object obj_type = py::type::of(obj);
- py::object dict_type = py::type::of(py::dict());
-
- // Fetch keys for both types
- py::object obj_keys = obj_type.attr("keys");
- py::object dict_keys = dict_type.attr("keys");
-
- return obj_keys.ptr() == dict_keys.ptr();
-}
-
std::unique_ptr<GuardManager> make_guard_manager(
RootGuardManager* root,
std::string source,
- py::handle example_value) {
+ py::handle example_value,
+ py::handle guard_manager_enum) {
+ static py::object guard_manager_enum_class =
+ py::module_::import("torch._dynamo.guards").attr("GuardManagerType");
+ static py::object base_guard_manager_enum =
+ guard_manager_enum_class.attr("GUARD_MANAGER");
+ static py::object dict_guard_manager_enum =
+ guard_manager_enum_class.attr("DICT_GUARD_MANAGER");
+ static py::object dict_subclass_guard_manager_enum =
+ guard_manager_enum_class.attr("DICT_SUBCLASS_GUARD_MANAGER");
if (py::isinstance<py::dict>(example_value)) {
// The purpose of having both DictGuardManager and DictSubclassGuardManager
// is to handle the variability in how dictionaries and their subclasses
@@ -2323,12 +2354,20 @@ std::unique_ptr<GuardManager> make_guard_manager(
// Since regular dicts are more common than subclasses of dicts with
// overridden keys method, we still optimize for the common case with
// DictGuardManager by relying on PyDict_Next.
- if (has_base_dict_keys_iter(example_value)) {
+
+ if (guard_manager_enum.is(base_guard_manager_enum)) {
+ // For dicts that don't need to guard on keys, we can just rely on the
+ // base GuardManager.
+ return std::make_unique<GuardManager>(root, std::move(source));
+ } else if (guard_manager_enum.is(dict_guard_manager_enum)) {
return std::make_unique<DictGuardManager>(
root, std::move(source), example_value);
+ } else if (guard_manager_enum.is(dict_subclass_guard_manager_enum))
+ return std::make_unique<DictSubclassGuardManager>(
+ root, std::move(source), example_value);
+ else {
+ throw py::type_error("Invalid guard manager enum");
}
- return std::make_unique<DictSubclassGuardManager>(
- root, std::move(source), example_value);
}
return std::make_unique<GuardManager>(root, std::move(source));
}
@@ -2421,8 +2460,14 @@ class GetAttrGuardAccessor : public GuardAccessor {
RootGuardManager* root,
py::str name,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, name, std::move(source), example_value),
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ name,
+ std::move(source),
+ example_value,
+ guard_manager_enum),
_attr_name(name.ptr()) {}
// NB: Intentional duplication between check_nopybind and
@@ -2474,8 +2519,14 @@ class GetItemGuardAccessor : public GuardAccessor {
RootGuardManager* root,
py::object name,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, name, std::move(source), example_value),
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ name,
+ std::move(source),
+ example_value,
+ guard_manager_enum),
_attr_name(name.ptr()) {}
// NB: Intentional duplication between check_nopybind and
@@ -2525,16 +2576,22 @@ class DictGetItemGuardAccessor : public GuardAccessor {
public:
DictGetItemGuardAccessor(
RootGuardManager* root,
- py::str name,
+ py::object key,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, name, std::move(source), example_value),
- _attr_name(name.ptr()) {}
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ key,
+ std::move(source),
+ example_value,
+ guard_manager_enum),
+ _key(key.ptr()) {}
// NB: Intentional duplication between check_nopybind and
// check_verbose_nopybind.
bool check_nopybind(PyObject* obj) override { // borrowed ref
- PyObject* x = PyDict_GetItem(obj, _attr_name); // borrowed ref
+ PyObject* x = PyDict_GetItem(obj, _key); // borrowed ref
if (x == nullptr) {
PyErr_Clear();
return false;
@@ -2545,7 +2602,7 @@ class DictGetItemGuardAccessor : public GuardAccessor {
GuardDebugInfo check_verbose_nopybind(
PyObject* obj) override { // borrowed ref
- PyObject* x = PyDict_GetItem(obj, _attr_name); // borrowed ref
+ PyObject* x = PyDict_GetItem(obj, _key); // borrowed ref
if (x == nullptr) {
PyErr_Clear();
return GuardDebugInfo(
@@ -2556,12 +2613,12 @@ class DictGetItemGuardAccessor : public GuardAccessor {
}
std::string repr() const override {
- return "DictGetItemGuardAccessor(" +
- py::str(_attr_name).cast<std::string>() + ")";
+ return "DictGetItemGuardAccessor(" + py::str(_key).cast<std::string>() +
+ ")";
}
private:
- PyObject* _attr_name;
+ PyObject* _key;
};
/**
@@ -2574,8 +2631,14 @@ class ListGetItemGuardAccessor : public GuardAccessor {
RootGuardManager* root,
const py::object& index,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, index, std::move(source), example_value),
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ index,
+ std::move(source),
+ example_value,
+ guard_manager_enum),
_index(py::cast<Py_ssize_t>(index)) {}
// NB: Intentional duplication between check_nopybind and
@@ -2620,8 +2683,14 @@ class TupleGetItemGuardAccessor : public GuardAccessor {
RootGuardManager* root,
const py::object& index,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, index, std::move(source), example_value),
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ index,
+ std::move(source),
+ example_value,
+ guard_manager_enum),
_index(py::cast<Py_ssize_t>(index)) {}
// NB: Intentional duplication between check_nopybind and
@@ -2665,9 +2734,14 @@ class GradGuardAccessor : public GuardAccessor {
RootGuardManager* root,
py::str name,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, std::move(name), std::move(source), example_value) {
- }
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ std::move(name),
+ std::move(source),
+ example_value,
+ guard_manager_enum) {}
// NB: Intentional duplication between check_nopybind and
// check_verbose_nopybind.
@@ -2716,9 +2790,14 @@ class FuncDefaultsGuardAccessor : public GuardAccessor {
RootGuardManager* root,
py::object name,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, std::move(name), std::move(source), example_value) {
- }
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ std::move(name),
+ std::move(source),
+ example_value,
+ guard_manager_enum) {}
// NB: Intentional duplication between check_nopybind and
// check_verbose_nopybind.
@@ -2771,9 +2850,14 @@ class FuncKwDefaultsGuardAccessor : public GuardAccessor {
RootGuardManager* root,
py::object name,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, std::move(name), std::move(source), example_value) {
- }
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ std::move(name),
+ std::move(source),
+ example_value,
+ guard_manager_enum) {}
// NB: Intentional duplication between check_nopybind and
// check_verbose_nopybind.
@@ -2827,8 +2911,14 @@ class GlobalsGuardAccessor : public GuardAccessor {
RootGuardManager* root,
py::dict globals_dict,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, globals_dict, std::move(source), example_value),
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ globals_dict,
+ std::move(source),
+ example_value,
+ guard_manager_enum),
_globals_dict(globals_dict.ptr()) {}
// NB: Intentional duplication between check_nopybind and
@@ -2866,9 +2956,14 @@ class TypeGuardAccessor : public GuardAccessor {
RootGuardManager* root,
py::str name,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, std::move(name), std::move(source), example_value) {
- }
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ std::move(name),
+ std::move(source),
+ example_value,
+ guard_manager_enum) {}
// NB: Intentional duplication between check_nopybind and
// check_verbose_nopybind.
@@ -2897,8 +2992,14 @@ class TupleIteratorGetItemAccessor : public GuardAccessor {
RootGuardManager* root,
py::object index,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, index, std::move(source), example_value),
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ index,
+ std::move(source),
+ example_value,
+ guard_manager_enum),
_index(py::cast<Py_ssize_t>(std::move(index))) {}
// NB: Intentional duplication between check_nopybind and
@@ -2950,8 +3051,14 @@ class GlobalWeakRefGuardAccessor : public GuardAccessor {
RootGuardManager* root,
py::object global_name,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, global_name, std::move(source), example_value),
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ global_name,
+ std::move(source),
+ example_value,
+ guard_manager_enum),
_global_name(global_name.ptr()) {}
// NB: Intentional duplication between check_nopybind and
@@ -3014,8 +3121,14 @@ class PythonLambdaGuardAccessor : public GuardAccessor {
RootGuardManager* root,
py::function accessor_fn,
std::string source,
- py::handle example_value)
- : GuardAccessor(root, accessor_fn, std::move(source), example_value),
+ py::handle example_value,
+ py::handle guard_manager_enum)
+ : GuardAccessor(
+ root,
+ accessor_fn,
+ std::move(source),
+ example_value,
+ guard_manager_enum),
_accessor_fn(std::move(accessor_fn)) {}
// NB: Intentional duplication between check_nopybind and
@@ -3203,6 +3316,10 @@ PyObject* torch_c_dynamo_guards_init() {
py_m, "LENGTH_CHECK")
.def(py::init<py::object, py::list>())
.def("__call__", &LENGTH_CHECK::check);
+ py::class_<DICT_LENGTH, LeafGuard, std::shared_ptr<DICT_LENGTH>>(
+ py_m, "DICT_LENGTH")
+ .def(py::init<py::object, py::list>())
+ .def("__call__", &DICT_LENGTH::check);
py::class_<DEFAULT_DEVICE, LeafGuard, std::shared_ptr<DEFAULT_DEVICE>>(
py_m, "DEFAULT_DEVICE")
.def(py::init<py::list>())
@@ -3394,6 +3511,15 @@ PyObject* torch_c_dynamo_guards_init() {
self.add_leaf_guard(std::make_shared<LENGTH_CHECK>(
std::move(value), std::move(verbose_code_parts)));
})
+ .def(
+ "add_dict_length_check_guard",
+ [](GuardManager& self,
+ py::object value,
+ py::object verbose_code_parts) -> void {
+ SKIP_IF_GUARD_ALREADY_PRESENT("DICT_LENGTH");
+ self.add_leaf_guard(std::make_shared<DICT_LENGTH>(
+ std::move(value), std::move(verbose_code_parts)));
+ })
.def(
"add_tuple_iterator_length_guard",
[](GuardManager& self,
@@ -3493,6 +3619,7 @@ PyObject* torch_c_dynamo_guards_init() {
py::arg("key"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3502,6 +3629,7 @@ PyObject* torch_c_dynamo_guards_init() {
py::arg("key"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3511,6 +3639,7 @@ PyObject* torch_c_dynamo_guards_init() {
py::arg("key"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3520,6 +3649,7 @@ PyObject* torch_c_dynamo_guards_init() {
py::arg("key"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3527,16 +3657,19 @@ PyObject* torch_c_dynamo_guards_init() {
"func_defaults_manager",
[](GuardManager& self,
std::string source,
- py::object example_value) -> GuardManager* {
+ py::object example_value,
+ py::handle guard_manager_enum) -> GuardManager* {
// A unique key is used to save as the accessor key.
py::str unique_key("__defaults_accessor__");
return self.get_child_manager<FuncDefaultsGuardAccessor>(
std::move(unique_key),
std::move(source),
- std::move(example_value));
+ std::move(example_value),
+ guard_manager_enum);
},
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
@@ -3545,16 +3678,19 @@ PyObject* torch_c_dynamo_guards_init() {
"func_kwdefaults_manager",
[](GuardManager& self,
std::string source,
- py::object example_value) -> GuardManager* {
+ py::object example_value,
+ py::handle guard_manager_enum) -> GuardManager* {
// A unique key is used to save as the accessor key.
py::str unique_key("__kwdefaults_accessor__");
return self.get_child_manager<FuncKwDefaultsGuardAccessor>(
std::move(unique_key),
std::move(source),
- std::move(example_value));
+ std::move(example_value),
+ guard_manager_enum);
},
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3564,6 +3700,7 @@ PyObject* torch_c_dynamo_guards_init() {
py::arg("f_globals"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3571,14 +3708,19 @@ PyObject* torch_c_dynamo_guards_init() {
"type_manager",
[](GuardManager& self,
std::string source,
- py::handle example_value) -> GuardManager* {
+ py::handle example_value,
+ py::handle guard_manager_enum) -> GuardManager* {
// A unique key is used to save as the accessor key.
py::str unique_key("__type_accessor__");
return self.get_child_manager<TypeGuardAccessor>(
- std::move(unique_key), std::move(source), example_value);
+ std::move(unique_key),
+ std::move(source),
+ example_value,
+ guard_manager_enum);
},
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3588,6 +3730,7 @@ PyObject* torch_c_dynamo_guards_init() {
py::arg("index"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3597,6 +3740,7 @@ PyObject* torch_c_dynamo_guards_init() {
py::arg("global_name"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3606,6 +3750,7 @@ PyObject* torch_c_dynamo_guards_init() {
py::arg("python_lambda"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3613,14 +3758,19 @@ PyObject* torch_c_dynamo_guards_init() {
"grad_manager",
[](GuardManager& self,
std::string source,
- py::handle example_value) -> GuardManager* {
+ py::handle example_value,
+ py::handle guard_manager_enum) -> GuardManager* {
// A unique key is used to save as the accessor key.
py::str unique_key("__grad_accessor__");
return self.get_child_manager<GradGuardAccessor>(
- std::move(unique_key), std::move(source), example_value);
+ std::move(unique_key),
+ std::move(source),
+ example_value,
+ guard_manager_enum);
},
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because C++ GuardManager has the ownership of
// accessors and guard managers
@@ -3630,6 +3780,7 @@ PyObject* torch_c_dynamo_guards_init() {
py::arg("attr"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference);
// Root Guard Manager
@@ -3663,13 +3814,18 @@ PyObject* torch_c_dynamo_guards_init() {
[](DictGuardManager& self,
py::object index,
std::string source,
- py::handle example_value) -> GuardManager* {
+ py::handle example_value,
+ py::handle guard_manager_enum) -> GuardManager* {
return self.get_key_manager(
- std::move(index), std::move(source), example_value);
+ std::move(index),
+ std::move(source),
+ example_value,
+ guard_manager_enum);
},
py::arg("index"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of accessors
// and guard managers
@@ -3678,13 +3834,18 @@ PyObject* torch_c_dynamo_guards_init() {
[](DictGuardManager& self,
py::object index,
std::string source,
- py::handle example_value) -> GuardManager* {
+ py::handle example_value,
+ py::handle guard_manager_enum) -> GuardManager* {
return self.get_value_manager(
- std::move(index), std::move(source), example_value);
+ std::move(index),
+ std::move(source),
+ example_value,
+ guard_manager_enum);
},
py::arg("index"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference)
// return by reference because GuardManager has the ownership of leaf
// guards
@@ -3694,7 +3855,7 @@ PyObject* torch_c_dynamo_guards_init() {
py::return_value_policy::reference)
// Skipped leaf guards
.def("add_type_match_guard", &DictGuardManager::skip_adding_guard)
- .def("add_length_check_guard", &DictGuardManager::skip_adding_guard)
+ .def("add_dict_length_check_guard", &DictGuardManager::skip_adding_guard)
// Permitted leaf guards
.def(
"add_dict_contains_guard",
@@ -3736,17 +3897,22 @@ PyObject* torch_c_dynamo_guards_init() {
[](DictGuardManager& self,
py::object attr_name,
std::string source,
- py::handle example_value) -> GuardManager* {
+ py::handle example_value,
+ py::handle guard_manager_enum) -> GuardManager* {
if (self.is_exact_dict_type()) {
throw std::runtime_error(
"getattr_manager on a DictGuardManager is supported only for dict subclasses");
}
return self.get_child_manager<GetAttrGuardAccessor>(
- std::move(attr_name), std::move(source), example_value);
+ std::move(attr_name),
+ std::move(source),
+ example_value,
+ guard_manager_enum);
},
py::arg("attr"),
py::arg("source"),
py::arg("example_value"),
+ py::arg("guard_manager_enum"),
py::return_value_policy::reference);
// Dict Guard Manager
|
2.41.0
|
d139eedcfddb9e8097ae6bfea3a98a6657b7610
|
Thu, 18 Apr 2024 01:33:52 +0000
|
[PATCH 0635/1000] [AOTI] set alignment for aot constant (#124272)
|
On GPU the constant blob is copied to aligned memory ([RAII_cudaMalloc](https://github.com/pytorch/pytorch/blob/d0211e207c78fafac2edaf2e14954f668e898b4a/torch/csrc/inductor/aoti_runtime/model.h#L46), [64-byte alignment](https://github.com/pytorch/pytorch/blob/d0211e207c78fafac2edaf2e14954f668e898b4a/torch/csrc/inductor/aoti_runtime/model.h#L324)), while on CPU there is no such copy step for the constant blob, which can result in sub-optimal performance when we want to use the constant blob buffer directly in the computation (for example, when the constant blobs are the weight tensors for a oneDNN primitive). We set the alignment on `constant.o` directly so that there is no need to copy the data to aligned memory on CPU (note that when `--rename-section` is used, the original section name must be specified for `--set-section-alignment`). Pull Request resolved: https://github.com/pytorch/pytorch/pull/124272 Approved by: https://github.com/jgong5, https://github.com/desertfire
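As a rough illustration (not part of the patch), the objcopy invocation assembled after this change looks like the sketch below; `objcopy_command` and `consts_o` are placeholder values standing in for what codecache.py computes earlier, and the key detail is that `--set-section-alignment` refers to the original `.data` section name even though that section is being renamed to `.ldata`:

# Minimal sketch of the resulting command string (placeholder values assumed).
objcopy_command = "objcopy"
consts_o = "consts.o"
cmd = (
    f"{objcopy_command} --rename-section"
    " .data=.ldata"
    " --set-section-alignment .data=64"  # 64 matches the CPU gAlignment in c10/core/alignment.h
    f" {consts_o} {consts_o}"
)
# Expands to: objcopy --rename-section .data=.ldata --set-section-alignment .data=64 consts.o consts.o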
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 10d9168fde..86cdef08d5 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -1755,6 +1755,7 @@ class AotCodeCompiler:
cmd = (
f"{objcopy_command} --rename-section"
" .data=.ldata"
+ " --set-section-alignment .data=64" # following the gAlignment of CPU in c10/core/alignment.h
f" {consts_o} {consts_o}"
)
log.debug("aot constant rename section command: %s", cmd)
|
2.41.0
|
a6813b7b37d31f9702f3153c9427592511baa5b
|
Thu, 25 Apr 2024 09:22:37 +0000
|
[PATCH 0636/1000] Revert "[cuDNN] cuDNN SDPA (Flash Attention) Backward (#122510)"
|
This reverts commit 64af899fdfc30c0c075d90bde111cec74ad9b4bb. Reverted https://github.com/pytorch/pytorch/pull/122510 on behalf of https://github.com/jeanschmidt due to breaking AMD GPU builds ([comment](https://github.com/pytorch/pytorch/pull/122510#issuecomment-2076743868))
|
diff --git a/aten/src/ATen/native/cudnn/MHA.cpp b/aten/src/ATen/native/cudnn/MHA.cpp
index 1f6bdbf530..c3f5f63af6 100644
--- a/aten/src/ATen/native/cudnn/MHA.cpp
+++ b/aten/src/ATen/native/cudnn/MHA.cpp
@@ -29,30 +29,6 @@ void run_cudnn_SDP_fprop(
false, "PyTorch was not compiled with cuDNN Flash Attention enabled!");
}
-void run_cudnn_SDP_bprop(
- int64_t b,
- int64_t h,
- int64_t s_q,
- int64_t s_kv,
- int64_t d,
- float scaling_factor,
- bool is_causal,
- float dropout_probability,
- const Tensor& q,
- const Tensor& k,
- const Tensor& v,
- const Tensor& o,
- const Tensor& dO,
- const Tensor& softmaxstats,
- Tensor& dQ,
- Tensor& dK,
- Tensor& dV,
- const Tensor& dropoutseed,
- const Tensor& dropoutoffset) {
- TORCH_CHECK(
- false, "PyTorch was not compiled with cuDNN Flash Attention enabled!");
-}
-
} // namespace native
} // namespace at
@@ -97,22 +73,6 @@ using graph_and_tensors = std::tuple<
std::shared_ptr<fe::graph::Tensor_attributes> // Stats
>;
-using graph_and_tensors_backward = std::tuple<
- std::shared_ptr<fe::graph::Graph>,
- std::shared_ptr<fe::graph::Tensor_attributes>, // Q,
- std::shared_ptr<fe::graph::Tensor_attributes>, // K,
- std::shared_ptr<fe::graph::Tensor_attributes>, // V,
- std::shared_ptr<fe::graph::Tensor_attributes>, // Attn_scale
- std::shared_ptr<fe::graph::Tensor_attributes>, // Seed,
- std::shared_ptr<fe::graph::Tensor_attributes>, // Offset,
- std::shared_ptr<fe::graph::Tensor_attributes>, // O,
- std::shared_ptr<fe::graph::Tensor_attributes>, // dO,
- std::shared_ptr<fe::graph::Tensor_attributes>, // stats,
- std::shared_ptr<fe::graph::Tensor_attributes>, // dQ,
- std::shared_ptr<fe::graph::Tensor_attributes>, // dK,,
- std::shared_ptr<fe::graph::Tensor_attributes> // dV,
- >;
-
#define MAX_MHA_DIM 4
struct MHAParams {
@@ -218,7 +178,8 @@ struct MHACacheKeyWrapper : ParamsWrapper<MHAParams> {
template <typename T, typename KeyType>
struct MHAGraphCache {
- std::unordered_map<KeyType, T, ParamsWrapperHash<KeyType>> engine_cache;
+ std::unordered_map<KeyType, graph_and_tensors, ParamsWrapperHash<KeyType>>
+ engine_cache;
// no mutexes here as caches are now thread local for v8, can also return a
// pointer to the Execution Plan if we know it will not be invalidated by
@@ -241,8 +202,6 @@ struct MHAGraphCache {
// be thread safe across all engines see Limitations in
// https://docs.nvidia.com/deeplearning/cudnn/release-notes/index.html
thread_local MHAGraphCache<graph_and_tensors, MHACacheKeyWrapper> mhagraphcache;
-thread_local MHAGraphCache<graph_and_tensors_backward, MHACacheKeyWrapper>
- mhagraphbackwardcache;
auto build_graph_and_tensors(
int64_t b,
@@ -268,12 +227,10 @@ auto build_graph_and_tensors(
dtype = fe::DataType_t::BFLOAT16;
}
auto mha_graph = std::make_shared<fe::graph::Graph>();
- // We're baking in float accumulation and scale types
- // in theory the graph may support other types, but they
- // have not been tested
mha_graph->set_io_data_type(dtype)
.set_intermediate_data_type(fe::DataType_t::FLOAT)
.set_compute_data_type(fe::DataType_t::FLOAT);
+
auto Q = mha_graph->tensor(
fe::graph::Tensor_attributes()
.set_name("Q")
@@ -297,7 +254,7 @@ auto build_graph_and_tensors(
params.v_stride.begin(), params.v_stride.end())));
auto attn_scale =
mha_graph->tensor(fe::graph::Tensor_attributes()
- .set_name("Attn_scale")
+ .set_name("attn_scale")
.set_dim({1, 1, 1, 1})
.set_stride({1, 1, 1, 1})
.set_is_pass_by_value(true)
@@ -319,7 +276,7 @@ auto build_graph_and_tensors(
.set_data_type(fe::DataType_t::INT32));
auto scaled_dot_product_flash_attention_options =
fe::graph::SDPA_attributes()
- .set_name("CUDNN_SDPA")
+ .set_name("flash_attention")
.set_is_inference(return_softmaxstats == false)
.set_causal_mask(is_causal)
.set_attn_scale(attn_scale)
@@ -330,12 +287,12 @@ auto build_graph_and_tensors(
}
auto seq_q = mha_graph->tensor(fe::graph::Tensor_attributes()
- .set_name("Seq_q")
+ .set_name("seq_q")
.set_dim({b, 1, 1, 1})
.set_stride({1, 1, 1, 1})
.set_data_type(fe::DataType_t::INT32));
auto seq_kv = mha_graph->tensor(fe::graph::Tensor_attributes()
- .set_name("Seq_kv")
+ .set_name("seq_kv")
.set_dim({b, 1, 1, 1})
.set_stride({1, 1, 1, 1})
.set_data_type(fe::DataType_t::INT32));
@@ -367,146 +324,7 @@ auto build_graph_and_tensors(
AT_CUDNN_FRONTEND_CHECK(mha_graph->build_plans(handle));
return std::make_tuple(
- std::move(mha_graph),
- std::move(Q),
- std::move(K),
- std::move(V),
- std::move(attn_scale),
- std::move(seed),
- std::move(offset),
- std::move(O),
- std::move(Stats));
-}
-
-auto build_graph_and_tensors_backward(
- int64_t b,
- int64_t h,
- int64_t s_q,
- int64_t s_kv,
- int64_t d,
- float scaling_factor,
- bool is_causal,
- float dropout_probability,
- const Tensor& q,
- const Tensor& k,
- const Tensor& v,
- const Tensor& o,
- const Tensor& dO,
- const Tensor& softmaxstats,
- Tensor& dQ,
- Tensor& dK,
- Tensor& dV,
- const Tensor& dropoutseed,
- const Tensor& dropoutoffset,
- cudnnHandle_t& handle,
- MHAParams& params) {
- auto dtype = fe::DataType_t::HALF;
- if (q.scalar_type() == kBFloat16) {
- dtype = fe::DataType_t::BFLOAT16;
- }
- auto mha_graph = std::make_shared<fe::graph::Graph>();
- // We're baking in float accumulation and scale types
- // in theory the graph may support other types, but they
- // have not been tested
- mha_graph->set_io_data_type(dtype)
- .set_intermediate_data_type(fe::DataType_t::FLOAT)
- .set_compute_data_type(fe::DataType_t::FLOAT);
- auto Q = mha_graph->tensor(
- fe::graph::Tensor_attributes()
- .set_name("Q")
- .set_dim(std::vector<int64_t>(q.sizes().begin(), q.sizes().end()))
- .set_stride(
- std::vector<int64_t>(q.strides().begin(), q.strides().end())));
- auto K = mha_graph->tensor(
- fe::graph::Tensor_attributes()
- .set_name("K")
- .set_dim(std::vector<int64_t>(k.sizes().begin(), k.sizes().end()))
- .set_stride(
- std::vector<int64_t>(k.strides().begin(), k.strides().end())));
- auto V = mha_graph->tensor(
- fe::graph::Tensor_attributes()
- .set_name("V")
- .set_dim(std::vector<int64_t>(v.sizes().begin(), v.sizes().end()))
- .set_stride(
- std::vector<int64_t>(v.strides().begin(), v.strides().end())));
- auto attn_scale =
- mha_graph->tensor(fe::graph::Tensor_attributes()
- .set_name("Attn_scale")
- .set_dim({1, 1, 1, 1})
- .set_stride({1, 1, 1, 1})
- .set_is_pass_by_value(true)
- .set_data_type(fe::DataType_t::FLOAT));
- auto Seed = mha_graph->tensor(fe::graph::Tensor_attributes()
- .set_name("Seed")
- .set_dim({1, 1, 1, 1})
- .set_stride({1, 1, 1, 1})
- .set_data_type(fe::DataType_t::INT32));
- auto Offset = mha_graph->tensor(fe::graph::Tensor_attributes()
- .set_name("Offset")
- .set_dim({1, 1, 1, 1})
- .set_stride({1, 1, 1, 1})
- .set_data_type(fe::DataType_t::INT32));
- auto O = mha_graph->tensor(
- fe::graph::Tensor_attributes()
- .set_name("O")
- .set_dim(std::vector<int64_t>(o.sizes().begin(), o.sizes().end()))
- .set_stride(
- std::vector<int64_t>(o.strides().begin(), o.strides().end())));
- auto STATS = mha_graph->tensor(
- fe::graph::Tensor_attributes()
- .set_name("Stats")
- .set_dim(std::vector<int64_t>(
- softmaxstats.sizes().begin(), softmaxstats.sizes().end()))
- .set_stride(std::vector<int64_t>(
- softmaxstats.strides().begin(), softmaxstats.strides().end()))
- .set_data_type(fe::DataType_t::FLOAT));
- auto DO = mha_graph->tensor(
- fe::graph::Tensor_attributes()
- .set_name("DO")
- .set_dim(std::vector<int64_t>(dO.sizes().begin(), dO.sizes().end()))
- .set_stride(
- std::vector<int64_t>(dO.strides().begin(), dO.strides().end())));
- auto sdpa_backward_options = fe::graph::SDPA_backward_attributes()
- .set_name("CUDNN_SDPA_BACKWARD")
- .set_causal_mask(is_causal)
- .set_attn_scale(attn_scale);
- if (dropout_probability != 0.0f) {
- sdpa_backward_options.set_dropout(dropout_probability, Seed, Offset);
- }
- auto [DQ, DK, DV] =
- mha_graph->sdpa_backward(Q, K, V, O, DO, STATS, sdpa_backward_options);
- DQ->set_output(true)
- .set_dim(std::vector<int64_t>(dQ.sizes().begin(), dQ.sizes().end()))
- .set_stride(
- std::vector<int64_t>(dQ.strides().begin(), dQ.strides().end()));
- DK->set_output(true)
- .set_dim(std::vector<int64_t>(dK.sizes().begin(), dK.sizes().end()))
- .set_stride(
- std::vector<int64_t>(dK.strides().begin(), dK.strides().end()));
- DV->set_output(true)
- .set_dim(std::vector<int64_t>(dV.sizes().begin(), dV.sizes().end()))
- .set_stride(
- std::vector<int64_t>(dV.strides().begin(), dV.strides().end()));
- AT_CUDNN_FRONTEND_CHECK(mha_graph->validate());
- AT_CUDNN_FRONTEND_CHECK(mha_graph->build_operation_graph(handle));
- AT_CUDNN_FRONTEND_CHECK(
- mha_graph->create_execution_plans({fe::HeurMode_t::A}));
- AT_CUDNN_FRONTEND_CHECK(mha_graph->check_support(handle));
- AT_CUDNN_FRONTEND_CHECK(mha_graph->build_plans(handle));
- return std::make_tuple(
- std::move(mha_graph),
- std::move(Q),
- std::move(K),
- std::move(V),
- std::move(attn_scale),
- std::move(Seed),
- std::move(Offset),
- std::move(O),
- std::move(DO),
- std::move(STATS),
- std::move(DQ),
- std::move(DK),
- std::move(DV));
+ mha_graph, Q, K, V, attn_scale, seed, offset, O, Stats);
}
void run_cudnn_SDP_fprop(
@@ -589,92 +407,11 @@ void run_cudnn_SDP_fprop(
auto workspace_size = mha_graph->get_workspace_size();
auto workspace_ptr =
c10::cuda::CUDACachingAllocator::get()->allocate(workspace_size);
- TORCH_CHECK(
+ TORCH_INTERNAL_ASSERT(
mha_graph->execute(handle, variant_pack, workspace_ptr.get()).is_good());
mhagraphcache.update(key, graph_and_tensors_values);
}
-void run_cudnn_SDP_bprop(
- int64_t b,
- int64_t h,
- int64_t s_q,
- int64_t s_kv,
- int64_t d,
- float scaling_factor,
- bool is_causal,
- float dropout_probability,
- const Tensor& q,
- const Tensor& k,
- const Tensor& v,
- const Tensor& o,
- const Tensor& dO,
- const Tensor& softmaxstats,
- Tensor& dQ,
- Tensor& dK,
- Tensor& dV,
- const Tensor& dropoutseed,
- const Tensor& dropoutoffset) {
- cudnnHandle_t handle = getCudnnHandle();
- auto key = MHACacheKeyWrapper(
- b, h, s_q, s_kv, d, q, k, v, dropout_probability, is_causal, true);
- auto graph_and_tensors_backward_ptr = mhagraphbackwardcache.find(key);
- graph_and_tensors_backward graph_and_tensors_backward_values;
- if (graph_and_tensors_backward_ptr) {
- graph_and_tensors_backward_values = *graph_and_tensors_backward_ptr;
- } else {
- graph_and_tensors_backward_values = build_graph_and_tensors_backward(
- b,
- h,
- s_q,
- s_kv,
- d,
- scaling_factor,
- is_causal,
- dropout_probability,
- q,
- k,
- v,
- o,
- dO,
- softmaxstats,
- dQ,
- dK,
- dV,
- dropoutseed,
- dropoutoffset,
- handle,
- key.pod);
- }
- auto
- [mha_graph, Q, K, V, attn_scale, Seed, Offset, O, Do, Stats, Dq, Dk, Dv] =
- graph_and_tensors_backward_values;
- std::unordered_map<std::shared_ptr<fe::graph::Tensor_attributes>, void*>
- variant_pack = {// inputs
- {Q, q.data_ptr()},
- {K, k.data_ptr()},
- {V, v.data_ptr()},
- {O, o.data_ptr()},
- {Do, dO.data_ptr()},
- {Stats, softmaxstats.data_ptr()},
- // outputs
- {Dq, dQ.data_ptr()},
- {Dk, dK.data_ptr()},
- {Dv, dV.data_ptr()},
- // pass by value
- {attn_scale, &scaling_factor}};
- if (dropout_probability != 0.0f) {
- variant_pack[Seed] = dropoutseed.data_ptr();
- variant_pack[Offset] = dropoutoffset.data_ptr();
- }
- auto workspace_size = mha_graph->get_workspace_size();
- auto workspace_ptr =
- c10::cuda::CUDACachingAllocator::get()->allocate(workspace_size);
- TORCH_CHECK(!workspace_size || workspace_ptr.get());
- TORCH_CHECK(
- mha_graph->execute(handle, variant_pack, workspace_ptr.get()).is_good());
- mhagraphbackwardcache.update(key, graph_and_tensors_backward_values);
-}
-
} // namespace native
} // namespace at
diff --git a/aten/src/ATen/native/cudnn/MHA.h b/aten/src/ATen/native/cudnn/MHA.h
index 0406cf783d..6b3b9db862 100644
--- a/aten/src/ATen/native/cudnn/MHA.h
+++ b/aten/src/ATen/native/cudnn/MHA.h
@@ -21,27 +21,5 @@ void run_cudnn_SDP_fprop(
Tensor& o,
Tensor& dropoutseed,
Tensor& dropoutoffset);
-
-void run_cudnn_SDP_bprop(
- int64_t b,
- int64_t h,
- int64_t s_q,
- int64_t s_kv,
- int64_t d,
- float scaling_factor,
- bool is_causal,
- float dropout_probability,
- const Tensor& q,
- const Tensor& k,
- const Tensor& v,
- const Tensor& o,
- const Tensor& dO,
- const Tensor& softmaxstats,
- Tensor& dQ,
- Tensor& dK,
- Tensor& dV,
- const Tensor& dropoutseed,
- const Tensor& dropoutoffset);
-
-} // namespace native
+}
} // namespace at
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 7b48d2116f..517a65fa0e 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -14700,16 +14700,11 @@
CUDA: _scaled_dot_product_efficient_attention_backward_cuda
tags: nondeterministic_seeded
|
+- func: _scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset)
|
54157416c0d36de399212a201b5d9125c12ee81
|
Thu, 25 Apr 2024 09:26:25 +0000
|
[PATCH 0637/1000] Revert "[cudagraphs] add cudagraph_skips counter (#124804)"
|
This reverts commit fdad16b85108209bc021107f312f4b221422a012. Reverted https://github.com/pytorch/pytorch/pull/124804 on behalf of https://github.com/jeanschmidt because one PR in this stack seems to have broken the Linux pull CUDA 12 tests ([comment](https://github.com/pytorch/pytorch/pull/119729#issuecomment-2076750595))
|
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index 99fbd7b86d..d610d7dd13 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -1956,9 +1956,6 @@ def get_dynamo_stats():
"autograd_compiles": torch._dynamo.utils.counters["compiled_autograd"][
"compiles"
],
- "cudagraph_skips": torch._dynamo.utils.counters["inductor"][
- "cudagraph_skips"
- ],
}
)
diff --git a/test/inductor/test_cudagraph_trees.py b/test/inductor/test_cudagraph_trees.py
index 583643e123..5e8ee760f1 100644
--- a/test/inductor/test_cudagraph_trees.py
+++ b/test/inductor/test_cudagraph_trees.py
@@ -11,7 +11,6 @@ import torch
import torch._dynamo.config as dynamo_config
import torch.nn as nn
-from torch._dynamo.utils import counters
from torch._inductor import config
from torch._inductor.compile_fx import compile_fx_inner
from torch._inductor.cudagraph_trees import cudagraphify_impl as tree_cudagraphify_impl
@@ -256,7 +255,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check("torch.logical_xor").run(captured_output[0])
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@requires_multigpu()
@parametrize("backend", ("inductor", "cudagraphs"))
@@ -271,7 +269,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to cpu device (arg1_1). Found from"
).check("y + 2").run(captured_output[0])
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
with capture_stderr() as captured_output:
foo(
@@ -281,7 +278,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check("skipping cudagraphs due to multiple devices").run(
captured_output[0]
)
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 2)
@torch._inductor.config.patch("triton.cudagraph_skip_dynamic_graphs", True)
def test_skip_symbolic(self):
@@ -295,7 +291,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to graph with symbolic shapes inputs"
).check("x + y").run(captured_output[0])
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@@ -317,7 +312,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check(".add_(2)").run(captured_output[0])
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
# mutation on inp doesnt hit cudagraphs
self.assertEqual(len(self.get_manager().roots), 0)
@@ -406,7 +400,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
0,
exactly=True,
).run(captured_output[0])
- self.assertTrue("cudagraph_skips" not in counters["inductor"])
torch.compiler.cudagraph_mark_step_begin()
inp = torch.rand([4], device="cuda")
@@ -420,7 +413,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check("x.add_(2)").run(captured_output[0])
self.assertEqual(mut_inp, non_mut(foo(inp)))
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@@ -448,7 +440,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
1,
exactly=True,
).run(captured_output[0])
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@@ -485,7 +476,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
1,
exactly=True,
).run(captured_output[0])
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
def test_function_compiled_multiple_times(self):
def foo(x):
@@ -1636,7 +1626,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to cpu device (_local_scalar_dense)"
).run(captured_output[0])
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero(self):
@@ -1656,20 +1645,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check("skipping cudagraphs due to ['incompatible ops']").run(
captured_output[0]
)
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
-
- @torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
- def test_incompatible_cudagraph_ops_nonzero_graph_breaks(self):
- @torch.compile(mode="reduce-overhead")
- def foo(x):
- y = x.nonzero() # skip
- torch._dynamo.graph_break()
- return y.nonzero() # skip 2 times (due to recompile)
-
- foo(torch.tensor([1, 0, 2], device="cuda"))
- foo(torch.tensor([1, 0, 0], device="cuda"))
-
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 3)
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero_backend(self):
@@ -1689,7 +1664,6 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to incompatible op (nonzero)"
).run(captured_output[0])
- self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
def test_storage_access_error(self):
x = torch.rand([4], device="cuda")
diff --git a/torch/_dynamo/backends/cudagraphs.py b/torch/_dynamo/backends/cudagraphs.py
index 4cef3a68fd..742e141c71 100644
--- a/torch/_dynamo/backends/cudagraphs.py
+++ b/torch/_dynamo/backends/cudagraphs.py
@@ -15,7 +15,6 @@ from torch._inductor.cudagraph_utils import (
format_default_skip_message,
get_mutation_stack_trace,
get_placeholders,
- log_cudagraph_skip_and_bump_counter,
)
from torch._inductor.utils import (
BoxedBool,
@@ -28,6 +27,8 @@ from torch._inductor.utils import (
from torch.multiprocessing.reductions import StorageWeakRef
from .registry import register_backend
+perf_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
+
def find_input_mutations(g):
def meta_fk(meta):
@@ -131,9 +132,7 @@ def cudagraphs(dynamo_model, dynamo_inputs):
fixed = num_fw_fixed_arguments(len(dynamo_inputs), len(aot_inputs))
if skip_msg := check_for_skip(aot_model, fixed):
BoxedBool.disable(do_cudagraphs)
- log_cudagraph_skip_and_bump_counter(
- f"skipping cudagraphs due to {skip_msg}"
- )
+ perf_log.warning("skipping cudagraphs due to %s", skip_msg)
return interp
boxed_device_index.set(get_device_index(aot_model))
@@ -158,9 +157,7 @@ def cudagraphs(dynamo_model, dynamo_inputs):
fixed = count_tangents(aot_model)
if skip_msg := check_for_skip(aot_model, fixed):
- log_cudagraph_skip_and_bump_counter(
- "skipping cudagraphs due to %s", skip_msg
- )
+ perf_log.warning("skipping cudagraphs due to %s", skip_msg)
# See [Backward Generation Handling]
manager = torch._inductor.cudagraph_trees.get_manager(
diff --git a/torch/_inductor/compile_fx.py b/torch/_inductor/compile_fx.py
index c93fd8535b..c99d15a86f 100644
--- a/torch/_inductor/compile_fx.py
+++ b/torch/_inductor/compile_fx.py
@@ -29,11 +29,7 @@ from torch._dynamo.utils import (
from torch._functorch import config as functorch_config
from torch._functorch.aot_autograd import aot_export_module, make_boxed_func
from torch._inductor.codecache import code_hash, CompiledFxGraph, FxGraphCache
-from torch._inductor.cudagraph_utils import (
- BoxedDeviceIndex,
- get_placeholders,
- log_cudagraph_skip_and_bump_counter,
-)
+from torch._inductor.cudagraph_utils import BoxedDeviceIndex, get_placeholders
from torch._inductor.debug import save_args_for_compile_fx_inner
from torch._inductor.utils import BoxedBool, count_tangents
@@ -487,8 +483,9 @@ def compile_fx_inner(
# check cudagraph disabling reasons from inductor lowering
if cudagraphs and compiled_graph.disabled_cudagraphs_reason:
if "cuda" in compiled_graph.device_types:
- log_cudagraph_skip_and_bump_counter(
- f"skipping cudagraphs due to {compiled_graph.disabled_cudagraphs_reason}"
+ perf_hint_log.warning(
+ "skipping cudagraphs due to %s",
+ compiled_graph.disabled_cudagraphs_reason,
)
BoxedBool.disable(cudagraphs)
@@ -599,12 +596,10 @@ def compile_fx_inner(
# prefer better disable_cudagraphs_reason bc stack trace
# TODO: migrate all disable reasons to stack trace, refactor
if compiled_graph.disabled_cudagraphs_reason:
- log_cudagraph_skip_and_bump_counter(
- compiled_graph.disabled_cudagraphs_reason
- )
+ perf_hint_log.warning(compiled_graph.disabled_cudagraphs_reason)
else:
- log_cudagraph_skip_and_bump_counter(
- f"skipping cudagraphs due to {cudagraph_fail_reasons}"
+ perf_hint_log.warning(
+ "skipping cudagraphs due to %s", cudagraph_fail_reasons
)
# cudagraphs does its own aligning of inputs
diff --git a/torch/_inductor/cudagraph_trees.py b/torch/_inductor/cudagraph_trees.py
index f1ca0950b9..141354d43a 100644
--- a/torch/_inductor/cudagraph_trees.py
+++ b/torch/_inductor/cudagraph_trees.py
@@ -79,7 +79,6 @@ from torch._inductor.compile_fx import (
from torch._inductor.cudagraph_utils import (
check_for_mutation,
FunctionID,
- log_cudagraph_skip_and_bump_counter,
WrappedFunction,
)
from torch.multiprocessing.reductions import StorageWeakRef
@@ -112,6 +111,9 @@ log = torch._logging.getArtifactLogger(__name__, "cudagraphs")
from . import config
+perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
+
+
@dataclasses.dataclass(frozen=True)
class GraphID:
"Unique counter of a cuda graph recording"
@@ -1814,7 +1816,7 @@ class CUDAGraphTreeManager:
self, function_id: FunctionID, inputs: List[Tensor]
):
node_id = self._get_node_id()
- if maybe_mutation_str := check_for_mutation(
+ if has_mutation_str := check_for_mutation(
self.ids_to_funcs[function_id],
inputs,
self._get_cuda_graph_recorded_tensor_checker(),
@@ -1824,7 +1826,7 @@ class CUDAGraphTreeManager:
if function_id in self.warned_mutation:
return
self.warned_mutation.add(function_id)
- log_cudagraph_skip_and_bump_counter(maybe_mutation_str)
+ perf_hint_log.warning(has_mutation_str)
else:
self.non_cudagraph_managed_mutation_hint[node_id][function_id] = False
diff --git a/torch/_inductor/cudagraph_utils.py b/torch/_inductor/cudagraph_utils.py
index c87022fcb7..dd551fad03 100644
--- a/torch/_inductor/cudagraph_utils.py
+++ b/torch/_inductor/cudagraph_utils.py
@@ -2,9 +2,6 @@ import dataclasses
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
-from torch._dynamo.utils import counters
-
-perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
@dataclasses.dataclass(frozen=True)
@@ -128,11 +125,6 @@ def check_lowering_disable_cudagraph(
return check_multiple_devices_or_any_cpu_nodes(device_node_mapping)
-def log_cudagraph_skip_and_bump_counter(msg):
- perf_hint_log.warning(msg)
- counters["inductor"]["cudagraph_skips"] += 1
-
-
@dataclasses.dataclass
class BoxedDeviceIndex:
value: Optional[int]
|
2.41.0
|
a92b352eead05cb34dd0140801a5e15d1b9e3ee
|
Thu, 25 Apr 2024 09:26:25 +0000
|
[PATCH 0638/1000] Revert "[cudagraphs] add more info to skip messages (#124700)"
|
This reverts commit 0ed38c9b227f2099c77f4b34fbbe72afa176ac25. Reverted https://github.com/pytorch/pytorch/pull/124700 on behalf of https://github.com/jeanschmidt because one PR in this stack seems to have broken the Linux pull CUDA 12 tests ([comment](https://github.com/pytorch/pytorch/pull/119729#issuecomment-2076750595))
|
diff --git a/test/inductor/test_cudagraph_trees.py b/test/inductor/test_cudagraph_trees.py
index 5e8ee760f1..f80c610829 100644
--- a/test/inductor/test_cudagraph_trees.py
+++ b/test/inductor/test_cudagraph_trees.py
@@ -253,7 +253,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
opt = torch.compile(model.forward, mode="reduce-overhead")(x, y, z)
FileCheck().check(
- "skipping cudagraphs due to mutated inputs (1 instances). Found from"
+ "skipping cudagraphs due to mutation on input. Found from"
).check("torch.logical_xor").run(captured_output[0])
@requires_multigpu()
@@ -266,9 +266,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
with capture_stderr() as captured_output:
foo(torch.ones([10], device="cuda"), torch.ones([20]))
- FileCheck().check(
- "skipping cudagraphs due to cpu device (arg1_1). Found from"
- ).check("y + 2").run(captured_output[0])
+ FileCheck().check("skipping cudagraphs due to cpu device.").check(
+ "y + 2"
+ ).run(captured_output[0])
with capture_stderr() as captured_output:
foo(
@@ -309,9 +309,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
with capture_stderr() as captured_output:
foo(inp())
- FileCheck().check(
- "skipping cudagraphs due to mutated inputs (1 instances). Found from"
- ).check(".add_(2)").run(captured_output[0])
+ FileCheck().check("skipping cudagraphs due to mutation on input.").check(
+ ".add_(2)"
+ ).run(captured_output[0])
# mutation on inp doesnt hit cudagraphs
self.assertEqual(len(self.get_manager().roots), 0)
@@ -396,9 +396,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
mut_out = mut(tmp)
self.assertEqual(mut_out, non_mut(foo(inp)))
FileCheck().check_count(
- "skipping cudagraphs due to mutated inputs (1 instances). Found from",
- 0,
- exactly=True,
+ "skipping cudagraphs due to mutation on input.", 0, exactly=True
).run(captured_output[0])
torch.compiler.cudagraph_mark_step_begin()
@@ -409,9 +407,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
# now its an input from eager we should fallback to inductor without cudagraphs
with capture_stderr() as captured_output:
mut(mut_inp)
- FileCheck().check(
- "skipping cudagraphs due to mutated inputs (1 instances). Found from"
- ).check("x.add_(2)").run(captured_output[0])
+ FileCheck().check("skipping cudagraphs due to mutation on input.").check(
+ "x.add_(2)"
+ ).run(captured_output[0])
self.assertEqual(mut_inp, non_mut(foo(inp)))
@parametrize("backend", ("inductor", "cudagraphs"))
@@ -436,9 +434,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
torch.compiler.cudagraph_mark_step_begin()
fee(inp(), foo(inp()))
FileCheck().check_count(
- "skipping cudagraphs due to mutated inputs (1 instances). Found from",
- 1,
- exactly=True,
+ "skipping cudagraphs due to mutation on input.", 1, exactly=True
).run(captured_output[0])
@parametrize("backend", ("inductor", "cudagraphs"))
@@ -472,9 +468,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
mut(mut_inp) # should not warn since mut has warned
FileCheck().check_count(
- "skipping cudagraphs due to mutated inputs (1 instances). Found from",
- 1,
- exactly=True,
+ "skipping cudagraphs due to mutation on input.", 1, exactly=True
).run(captured_output[0])
def test_function_compiled_multiple_times(self):
@@ -1617,15 +1611,8 @@ if HAS_CUDA and not TEST_WITH_ASAN:
def foo(x):
return x.item()
- with capture_stderr() as captured_output:
- self.assertEqual(foo(torch.tensor(3.0, device="cuda")), 3.0)
- self.assertEqual(foo(torch.tensor(6.0, device="cuda")), 6.0)
-
- # NOTE: this test is named after incompatible ops, but is not skipping due to incompatible ops.
- # This should get fixed.
- FileCheck().check(
- "skipping cudagraphs due to cpu device (_local_scalar_dense)"
- ).run(captured_output[0])
+ self.assertEqual(foo(torch.tensor(3.0, device="cuda")), 3.0)
+ self.assertEqual(foo(torch.tensor(6.0, device="cuda")), 6.0)
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero(self):
@@ -1633,37 +1620,12 @@ if HAS_CUDA and not TEST_WITH_ASAN:
def foo(x):
return x.nonzero()
- with capture_stderr() as captured_output:
- self.assertEqual(
- foo(torch.tensor([1, 0, 2], device="cuda")),
- torch.tensor([[0], [2]]),
- )
- self.assertEqual(
- foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
- )
-
- FileCheck().check("skipping cudagraphs due to ['incompatible ops']").run(
- captured_output[0]
+ self.assertEqual(
+ foo(torch.tensor([1, 0, 2], device="cuda")), torch.tensor([[0], [2]])
+ )
+ self.assertEqual(
+ foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
)
-
- @torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
- def test_incompatible_cudagraph_ops_nonzero_backend(self):
- @torch.compile(backend="cudagraphs")
- def foo(x):
- return x.nonzero()
-
- with capture_stderr() as captured_output:
- self.assertEqual(
- foo(torch.tensor([1, 0, 2], device="cuda")),
- torch.tensor([[0], [2]]),
- )
- self.assertEqual(
- foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
- )
-
- FileCheck().check(
- "skipping cudagraphs due to incompatible op (nonzero)"
- ).run(captured_output[0])
def test_storage_access_error(self):
x = torch.rand([4], device="cuda")
diff --git a/torch/_dynamo/backends/cudagraphs.py b/torch/_dynamo/backends/cudagraphs.py
index 742e141c71..ee89b79690 100644
--- a/torch/_dynamo/backends/cudagraphs.py
+++ b/torch/_dynamo/backends/cudagraphs.py
@@ -12,14 +12,13 @@ from torch._dynamo.backends.debugging import boxed_nop
from torch._inductor.cudagraph_utils import (
BoxedDeviceIndex,
check_multiple_devices_or_any_cpu_nodes,
- format_default_skip_message,
get_mutation_stack_trace,
get_placeholders,
)
from torch._inductor.utils import (
BoxedBool,
count_tangents,
- get_first_incompatible_cudagraph_node,
+ has_incompatible_cudagraph_ops,
num_fw_fixed_arguments,
output_node,
)
@@ -100,8 +99,8 @@ def check_for_skip(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]:
):
return skip
- if node := get_first_incompatible_cudagraph_node(aot_model):
- return format_default_skip_message(f"incompatible op ({node.name})")
+ if has_incompatible_cudagraph_ops(aot_model):
+ return "skipping cudagraphs due to incompatible op"
return None
diff --git a/torch/_inductor/cudagraph_utils.py b/torch/_inductor/cudagraph_utils.py
index dd551fad03..e897096f4e 100644
--- a/torch/_inductor/cudagraph_utils.py
+++ b/torch/_inductor/cudagraph_utils.py
@@ -1,5 +1,5 @@
import dataclasses
-from typing import Any, Callable, Dict, List, Optional, Tuple
+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import torch
@@ -48,7 +48,7 @@ def format_default_skip_message(reason: str) -> str:
def get_mutation_stack_trace(
- placeholders: List[torch.fx.Node], mutation_indices: List[int]
+ placeholders: List[torch.fx.Node], mutation_indices: Iterable[int]
) -> str:
stack_trace: Optional[str] = ""
@@ -57,13 +57,11 @@ def get_mutation_stack_trace(
if stack_trace := get_mutating_use_stack_trace(placeholder):
break
- msg = format_default_skip_message(
- f"mutated inputs ({len(mutation_indices)} instances)"
- )
if stack_trace:
- return f"{msg}. Found from : \n {stack_trace}"
+ msg = f"skipping cudagraphs due to mutation on input. Found from : \n {stack_trace}"
+ return msg
- return msg
+ return format_default_skip_message("mutated inputs")
def check_for_mutation(
@@ -71,6 +69,8 @@ def check_for_mutation(
inputs: List[torch.Tensor],
is_cuda_graph_recorded_tensor: Callable[[torch.Tensor], bool],
) -> Optional[str]:
+ default_msg = format_default_skip_message("mutated inputs")
+
# doesnt work for non-trees because the warmup run would apply mutation twice
if torch._inductor.config.triton.cudagraph_trees:
# checking if mutation is only on parameters/static inputs
@@ -82,14 +82,15 @@ def check_for_mutation(
or is_cuda_graph_recorded_tensor(inputs[idx])
)
]
- else:
- mutation_indices = func.mutated_input_idxs
+ has_mutation = len(mutation_indices) != 0
+ if not has_mutation:
+ return None
- return (
- get_mutation_stack_trace(func.placeholders, mutation_indices)
- if mutation_indices
- else None
- )
+ return get_mutation_stack_trace(func.placeholders, mutation_indices)
+
+ else:
+ has_mutation = len(func.mutated_input_idxs) != 0
+ return None if not has_mutation else default_msg
def get_use_stack_trace(node) -> Optional[str]:
@@ -103,11 +104,12 @@ def check_multiple_devices_or_any_cpu_nodes(
device_node_mapping: Dict[torch.device, torch.fx.Node]
) -> Optional[str]:
if cpu_node := device_node_mapping.get(torch.device("cpu")):
- msg = f"cpu device ({cpu_node.name})"
if stack_trace := get_use_stack_trace(cpu_node):
- return format_default_skip_message(f"{msg}. Found from : \n {stack_trace}")
+ return format_default_skip_message(
+ f"cpu device. Found from : \n {stack_trace}"
+ )
- return format_default_skip_message(msg)
+ return format_default_skip_message("cpu device")
if (
len(device_node_mapping) == 1
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index 8f218fb97c..3cf4faa24b 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -590,7 +590,7 @@ def any_is_symbolic(*args: Any) -> bool:
return any(is_symbolic(a) for a in args)
-def get_first_incompatible_cudagraph_node(gm):
+def has_incompatible_cudagraph_ops(gm):
from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
forbidden_set = {
@@ -626,14 +626,10 @@ def get_first_incompatible_cudagraph_node(gm):
)
for node in gm.graph.nodes:
if str(node.target) in forbidden_set:
- return node
+ return True
if (val := node.meta.get("val")) is not None and free_unbacked_symbols(val):
- return node
- return None
-
-
-def has_incompatible_cudagraph_ops(gm):
- return get_first_incompatible_cudagraph_node(gm) is not None
+ return True
+ return False
def output_node(gm: torch.fx.GraphModule):
|
2.41.0
|
8a016157d7bbca4d17cffd1edcdfda5289b8032
|
Thu, 25 Apr 2024 09:26:25 +0000
|
[PATCH 0639/1000] Revert "[benchmark][cudagraph] Explicitly call aten.div with CUDA denominator for cudagraphs (#119729)"
|
This reverts commit c021c9b8e48b8e787b75fd69a3076beffffb8208. Reverted https://github.com/pytorch/pytorch/pull/119729 on behalf of https://github.com/jeanschmidt because one PR in this stack seems to have broken the Linux pull CUDA 12 tests ([comment](https://github.com/pytorch/pytorch/pull/119729#issuecomment-2076750595))
|
diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index 7c9c84c894..5f9dd9b84d 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -1,6 +1,5 @@
# Owner(s): ["module: inductor"]
import functools
-import io
import re
import sys
import unittest
@@ -1343,24 +1342,6 @@ TORCH_LIBRARY(test_autograd_cpp_node_data_dependent, m) {
out = compiled_fn(activations)
self.assertTrue(len(activations) == 0)
- @unittest.skipIf(not HAS_CUDA, "requires cuda")
- def test_cudagraphs_cpu_division(self):
- from torch._dynamo.testing import reduce_to_scalar_loss
-
- model = torch.nn.Linear(10, 10, dtype=torch.float16).cuda()
- inputs = torch.randn(10, 10, dtype=torch.float16).cuda()
- out = model(inputs)
- loss = reduce_to_scalar_loss(out)
- torch._inductor.config.triton.cudagraphs = True
-
- stderr_msgs = io.StringIO()
- with mock.patch("sys.stderr", stderr_msgs), compiled_autograd.enable(
- compiler_fn
- ):
- loss.backward()
-
- self.assertFalse("skipping cudagraphs" in stderr_msgs.getvalue())
-
def load_test_module(name):
testdir = Path(__file__).absolute().parent.parent
diff --git a/torch/_dynamo/testing.py b/torch/_dynamo/testing.py
index 2dd384f4d8..c115e1cc09 100644
--- a/torch/_dynamo/testing.py
+++ b/torch/_dynamo/testing.py
@@ -103,7 +103,7 @@ def reduce_to_scalar_loss(out):
"""Reduce the output of a model to get scalar loss"""
if isinstance(out, torch.Tensor):
# Mean does not work on integer tensors
- return out.sum() / torch.tensor(out.numel(), device=out.device)
+ return out.sum() / out.numel()
elif isinstance(out, (list, tuple)):
return sum(reduce_to_scalar_loss(x) for x in out) / len(out)
elif type(out).__name__ in (
|
2.41.0
|
78662a5577da0847c984b978f35ccf53d634a2d
|
Thu, 25 Apr 2024 09:29:57 +0000
|
[PATCH 0640/1000] Revert "Made FlexAttention rewrite getitem calls to use aten.index in score_mod (#124799)"
|
This reverts commit acc4cbea395c25410c26d6fd3c88c072ce24c918. Reverted https://github.com/pytorch/pytorch/pull/124799 on behalf of https://github.com/jeanschmidt due to checking if this diff introduced regressions on linux-focal-py3.11-clang10 and linux-focal-py3.8-clang10 ([comment](https://github.com/pytorch/pytorch/pull/124799#issuecomment-2076756876))
|
diff --git a/c10/cuda/CUDAMiscFunctions.cpp b/c10/cuda/CUDAMiscFunctions.cpp
index f55bba13e9..11ea775366 100644
--- a/c10/cuda/CUDAMiscFunctions.cpp
+++ b/c10/cuda/CUDAMiscFunctions.cpp
@@ -12,7 +12,7 @@ const char* get_cuda_check_suffix() noexcept {
} else {
return "\nCUDA kernel errors might be asynchronously reported at some"
" other API call, so the stacktrace below might be incorrect."
- "\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1";
+ "\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.";
}
}
std::mutex* getFreeMutex() {
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index d656322080..c2cf3b295e 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -4,7 +4,7 @@ import functools
from collections import namedtuple
from typing import Callable
-from unittest import expectedFailure, skip, skipUnless
+from unittest import skip, skipUnless
from unittest.mock import patch
import torch
@@ -125,7 +125,7 @@ class TestTemplatedSDPA(InductorTestCase):
head_offset = torch.rand(H, device="cuda", dtype=dtype)
def score_mod(score, b, h, m, n):
- return score + head_offset[h]
+ return score + index(head_offset, [h])
self.run_test(score_mod, dtype)
@@ -136,7 +136,9 @@ class TestTemplatedSDPA(InductorTestCase):
seq_idx[S // 2 :] = 1
def seq_mask_mod(score, b, h, q, kv):
- return torch.where(seq_idx[q] == seq_idx[kv], score, float("-inf"))
+ return torch.where(
+ index(seq_idx, [q]) == index(seq_idx, [kv]), score, float("-inf")
+ )
self.run_test(seq_mask_mod, dtype)
@@ -146,7 +148,7 @@ class TestTemplatedSDPA(InductorTestCase):
bias = torch.randn(S, S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + bias[q, kv]
+ return score + index(bias, [q, kv])
self.run_test(bias_mod, dtype)
@@ -156,7 +158,7 @@ class TestTemplatedSDPA(InductorTestCase):
bias = torch.randn(B, S, S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + bias[b, q, kv]
+ return score + index(bias, [b, q, kv])
self.run_test(bias_mod, dtype)
@@ -166,7 +168,7 @@ class TestTemplatedSDPA(InductorTestCase):
bias = torch.randn(B, H, S, S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + bias[b, h, q, kv]
+ return score + index(bias, [b, h, q, kv])
self.run_test(bias_mod, dtype)
@@ -176,7 +178,7 @@ class TestTemplatedSDPA(InductorTestCase):
rel_bias = torch.randn(2 * S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + rel_bias[(q - kv) + S]
+ return score + index(rel_bias, [(q - kv) + S])
self.run_test(bias_mod, dtype)
@@ -187,7 +189,7 @@ class TestTemplatedSDPA(InductorTestCase):
def bias_mod(score, b, h, q, kv):
causal_attention = q >= kv
- cur_num_bidirectional = num_bidirectional[b]
+ cur_num_bidirectional = index(num_bidirectional, (b,))
bidirectional_attention_on_video = (q <= cur_num_bidirectional) & (
kv <= cur_num_bidirectional
)
@@ -199,38 +201,6 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(bias_mod, dtype)
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes_fast)
- def test_natten_2d(self, dtype):
- H = 32
- W = S // H
- WINDOW = 3
- assert W * H == S
-
- def get_x_y(idx):
- # This should be a floor divide, but we don't support that properly
- return idx / W, idx % W
-
- def natten_mask(score, b, h, q, kv):
- q_x, q_y = get_x_y(q)
- kv_x, kv_y = get_x_y(kv)
- return torch.where(
- ((q_x - kv_x).abs() <= WINDOW) | ((q_y - kv_y).abs() <= WINDOW),
- score,
- float("-inf"),
- )
-
- self.run_test(natten_mask, dtype)
-
- @supported_platform
- @expectedFailure
- @common_utils.parametrize("dtype", test_dtypes_fast)
- def test_silu_on_score(self, dtype):
- def silu_score(score, b, h, q, kv):
- return torch.nn.functional.silu(score)
-
- self.run_test(silu_score, dtype)
-
@supported_platform
@skip("Triton bug ") # https://github.com/pytorch/pytorch/issues/124571
@common_utils.parametrize("dtype", test_dtypes)
@@ -244,8 +214,8 @@ class TestTemplatedSDPA(InductorTestCase):
def create_njt_wrapper(orig_score_mod, offsets, seq_idx):
def njt_score_mod(qk, b, h, q, kv):
- q_nested = q - offsets[seq_idx[q]]
- kv_nested = kv - offsets[seq_idx[kv]]
+ q_nested = q - index(offsets, [index(seq_idx, [q])])
+ kv_nested = kv - index(offsets, [index(seq_idx, [kv])])
return orig_score_mod(qk, b, h, q_nested, kv_nested)
return njt_score_mod
@@ -304,9 +274,9 @@ class TestTemplatedSDPA(InductorTestCase):
tok_scale = torch.randn(S, device="cuda")
def bias_mod(score, batch, head, token_q, token_kv):
- score = score + tok_scale[token_q]
- score = score + batch_scale[batch]
- score = score + head_scale[head]
+ score = score + index(tok_scale, [token_q])
+ score = score + index(batch_scale, [batch])
+ score = score + index(head_scale, [head])
return score
self.run_test(bias_mod)
diff --git a/test/test_overrides.py b/test/test_overrides.py
index cb46ca6ed8..d79753f78a 100644
--- a/test/test_overrides.py
+++ b/test/test_overrides.py
@@ -1387,28 +1387,6 @@ class TestTorchFunctionMode(TestCase):
self.assertTrue(called)
- def test_getitem_call(self):
- # This failed because the parser thinks the function is called to()
- # but it's actually called _parse_to()
-
- called = False
-
- class A(TorchFunctionMode):
- def __torch_function__(self, func, types, args=(), kwargs=None):
- nonlocal called
- if kwargs is None:
- kwargs = {}
- called = True
- return func(*args, **kwargs)
-
- a = torch.zeros(5)
- b = torch.tensor(0)
- with A():
- a[b]
-
- self.assertTrue(called)
-
-
def test_distributions_bernoulli(self):
# This failed because improper use of has_torch_function when
# is_tensor_like should have been used instead, inside the
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index 26f1eeb91c..a1abcb15fb 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -1475,7 +1475,6 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
self, tx, query: "VariableTracker", score_function: "VariableTracker"
):
from torch._dynamo.symbolic_convert import InstructionTranslator
- from torch._higher_order_ops.templated_attention import TransformGetItemToIndex
from .builder import SourcelessBuilder
tx: InstructionTranslator = tx
@@ -1500,21 +1499,19 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
bhmn = [create_scalar() for _ in range(4)]
new_args = [score, *bhmn]
-
- with TransformGetItemToIndex():
- (
- (body_output, body_treespec),
- body_graph,
- body_lifted_freevars,
- ) = speculate_subgraph(
- tx,
- score_function,
- new_args,
- {}, # expect only args no kwargs for now
- description="templated_attention",
- source_target=self.value,
- set_subgraph_inputs="flatten_manual",
- )
+ (
+ (body_output, body_treespec),
+ body_graph,
+ body_lifted_freevars,
+ ) = speculate_subgraph(
+ tx,
+ score_function,
+ new_args,
+ {}, # expect only args no kwargs for now
+ description="templated_attention",
+ source_target=self.value,
+ set_subgraph_inputs="flatten_manual",
+ )
body_name = add_subgraph(
tx,
diff --git a/torch/_functorch/vmap.py b/torch/_functorch/vmap.py
index 054a40123e..5d05148faf 100644
--- a/torch/_functorch/vmap.py
+++ b/torch/_functorch/vmap.py
@@ -178,7 +178,7 @@ def _maybe_remove_batch_dim(name, batched_output, vmap_level, batch_size, out_di
raise ValueError(
f"vmap({name}, ...): `{name}` must only return "
f"Tensors, got type {type(batched_output)}. "
- "Did you mean to set out_dims= to None for output?"
+ "Did you mean to set out_dim= to None for output?"
)
return _remove_batch_dim(batched_output, vmap_level, batch_size, out_dim)
diff --git a/torch/_higher_order_ops/templated_attention.py b/torch/_higher_order_ops/templated_attention.py
index 52a9156820..388e741837 100644
--- a/torch/_higher_order_ops/templated_attention.py
+++ b/torch/_higher_order_ops/templated_attention.py
@@ -1,4 +1,4 @@
-from typing import Any, Callable, Tuple
+from typing import Callable, Tuple
import torch
import torch.utils._pytree as pytree
@@ -16,29 +16,6 @@ from torch.fx.experimental.proxy_tensor import (
track_tensor_tree,
)
-from torch.overrides import TorchFunctionMode
-
-
-def transform_getitem_args(x: torch.Tensor, index_args) -> Tuple[Any, ...]:
- if isinstance(index_args, tuple):
- return (x, list(index_args))
- elif not isinstance(index_args, (list, tuple)):
- return (x, [index_args])
- return (x, index_args)
-
-
-class TransformGetItemToIndex(TorchFunctionMode):
- # This is needed since we want to support calling
- # A[q_idx], where q_idx is a scalar tensor in score_mod.
- # Today, when q_idx is a scalar tensor, we implicitly convert it to a python
- # scalar and create a view. We do not want that behavior in this case, so we
- # use this torchfunctionmode to override that behavior for score_mod
- # wherever we're running it.
- def __torch_function__(self, func, types, args, kwargs=None):
- if func == torch.Tensor.__getitem__:
- return torch.ops.aten.index(*transform_getitem_args(*args))
- return func(*args, **(kwargs or {}))
-
class TemplatedAttentionHOP(HigherOrderOperator):
def __init__(self):
@@ -96,10 +73,7 @@ def math_attention(
score_mod = torch.vmap(score_mod, in_dims=(0, None, 0, None, None) + in_dim_buffers)
score_mod = torch.vmap(score_mod, in_dims=(0, 0, None, None, None) + in_dim_buffers)
- # todo: We wouldn't need these overrides in this file if Dynamo always did the
- # rewriting.
- with TransformGetItemToIndex():
- scores = score_mod(scores, b, h, m, n, *other_buffers).to(torch.float32)
+ scores = score_mod(scores, b, h, m, n, *other_buffers).to(torch.float32)
# TODO Unconditionally return logsumexp for backwards
# if any(t.requires_grad for t in (query, key, value)):
@@ -148,8 +122,7 @@ def trace_templated_attention(
example_vals = [
torch.zeros((), dtype=query.dtype, requires_grad=query.requires_grad)
] + [torch.zeros((), dtype=torch.int) for _ in range(4)]
- with TransformGetItemToIndex():
- score_graph = make_fx(score_mod)(*example_vals, *other_buffers)
+ score_graph = make_fx(score_mod)(*example_vals, *other_buffers)
proxy_mode.tracer.root.register_module("sdpa_score", score_graph)
node_args = (query, key, value, score_graph, *other_buffers)
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
@@ -214,10 +187,9 @@ def templated_attention_functionalize(
with ctx.redispatch_to_next() as m:
functional_score_mod = ctx.functionalize(score_mod)
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
- with TransformGetItemToIndex():
- mutates = _has_potential_branch_input_mutation(
- functional_score_mod, example_vals, pre_dispatch
- )
+ mutates = _has_potential_branch_input_mutation(
+ functional_score_mod, example_vals, pre_dispatch
+ )
# The only care about mutations of existing buffers since we can't replay these.
# However, we can just error if anything is detected
if mutates:
diff --git a/torch/_prims_common/__init__.py b/torch/_prims_common/__init__.py
index 377fc36830..61d602bd36 100644
--- a/torch/_prims_common/__init__.py
+++ b/torch/_prims_common/__init__.py
@@ -85,7 +85,6 @@ torch_function_passthrough = {
torch.Tensor.__format__,
torch.Tensor.__repr__,
torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
- torch.Tensor.__getitem__,
}
diff --git a/torch/csrc/autograd/python_variable_indexing.cpp b/torch/csrc/autograd/python_variable_indexing.cpp
index e3cdd04f09..87b0e32293 100644
--- a/torch/csrc/autograd/python_variable_indexing.cpp
+++ b/torch/csrc/autograd/python_variable_indexing.cpp
@@ -32,7 +32,8 @@
using namespace at;
using namespace torch::autograd::utils;
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
Py_ssize_t THPVariable_length(PyObject* self) {
HANDLE_TH_ERRORS
@@ -68,7 +69,7 @@ static inline int64_t count_specified_dimensions(PyObject* index) {
for (Py_ssize_t i = 0; i < size; i++) {
PyObject* obj = PyTuple_GET_ITEM(
index, i); // NOLINT(cppcoreguidelines-pro-type-cstyle-cast)
- if (check_has_torch_function(obj))
+ if (!THPVariable_CheckExact(obj) && check_has_torch_function(obj))
return -1;
if (THPVariable_Check(obj)) {
const auto& var = THPVariable_Unpack(obj);
@@ -340,7 +341,7 @@ static inline THPObjectPtr wrapTuple(PyObject* index) {
// indexing is needed, it calls C++ `at::indexing::dispatch_index`.
PyObject* THPVariable_getitem(PyObject* self, PyObject* index) {
HANDLE_TH_ERRORS
- if (check_has_torch_function(self)) {
+ if (!THPVariable_CheckExact(self) && check_has_torch_function(self)) {
return handle_torch_function_indexing(self, index);
}
const auto& self_ = THPVariable_Unpack(self);
@@ -437,8 +438,9 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
if (py_value == nullptr) {
throw TypeError("Tensor does not support deleting items");
}
- if ((check_has_torch_function(self)) ||
- (check_has_torch_function(py_value))) {
+ if ((!THPVariable_CheckExact(self) && check_has_torch_function(self)) ||
+ (!THPVariable_CheckExact(py_value) &&
+ check_has_torch_function(py_value))) {
py::object ret = py::reinterpret_steal<py::object>(
handle_torch_function_indexing(self, index, py_value));
return 0;
@@ -551,4 +553,5 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
END_HANDLE_TH_ERRORS_RET(-1)
}
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 3d77cc1252..46148424e5 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -22133,9 +22133,9 @@ python_ref_db = [
torch_opinfo_name="roll",
validate_view_consistency=False,
skips=(
- # # RuntimeError: no _refs support for torch.Tensor.__getitem__
- # # Leaving it as a ref because fftshift uses it
- # DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
+ # RuntimeError: no _refs support for torch.Tensor.__getitem__
+ # Leaving it as a ref because fftshift uses it
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
),
),
PythonRefInfo(
diff --git a/torch/testing/_internal/opinfo/definitions/fft.py b/torch/testing/_internal/opinfo/definitions/fft.py
index 0601af24bb..3f1d43ee9f 100644
--- a/torch/testing/_internal/opinfo/definitions/fft.py
+++ b/torch/testing/_internal/opinfo/definitions/fft.py
@@ -767,10 +767,18 @@ python_ref_db: List[OpInfo] = [
"_refs.fft.fftshift",
op_db=op_db,
torch_opinfo_name="fft.fftshift",
+ skips=(
+ # TODO Move fftshift to decomps
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
+ ),
),
PythonRefInfo(
"_refs.fft.ifftshift",
op_db=op_db,
torch_opinfo_name="fft.ifftshift",
+ skips=(
+ # TODO Move ifftshift to decomps
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
+ ),
),
]
diff --git a/torch/testing/_internal/opinfo/definitions/linalg.py b/torch/testing/_internal/opinfo/definitions/linalg.py
index 288aaa34f2..a1b6531b15 100644
--- a/torch/testing/_internal/opinfo/definitions/linalg.py
+++ b/torch/testing/_internal/opinfo/definitions/linalg.py
@@ -2389,6 +2389,8 @@ python_ref_db: List[OpInfo] = [
supports_out=True,
op_db=op_db,
skips=(
+ # no _refs support for Tensor.__getitem__
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
# TODO: is this really needed?
DecorateInfo(
unittest.expectedFailure, "TestCommon", "test_python_ref_errors"
|
2.41.0
|
4ed909934925dcd3b99d23758003c08aa581153
|
Thu, 25 Apr 2024 09:46:21 +0000
|
[PATCH 0641/1000] Revert "[CUDA] Fix 64-bit indexing in `vol2col` in conv3d (#124650)"
|
This reverts commit 71d92bace2b9ff6431976cda69c83df668d078f0. Reverted https://github.com/pytorch/pytorch/pull/124650 on behalf of https://github.com/jeanschmidt due to Reverting to check if it introduced regressions for linux-focal-rocm6.0-py3.8 tests ([comment](https://github.com/pytorch/pytorch/pull/124650#issuecomment-2076786795))
|
diff --git a/aten/src/ATen/native/cuda/vol2col.cuh b/aten/src/ATen/native/cuda/vol2col.cuh
index 98ec2c3522..51dbe1c744 100644
--- a/aten/src/ATen/native/cuda/vol2col.cuh
+++ b/aten/src/ATen/native/cuda/vol2col.cuh
@@ -36,7 +36,7 @@ __global__ void vol2col_kernel(
const int height_col,
const int width_col,
T* data_col) {
- CUDA_KERNEL_LOOP_TYPE(index, n, int64_t) {
+ CUDA_KERNEL_LOOP(index, n) {
auto w_out = index % width_col;
index /= width_col;
auto h_out = index % height_col;
diff --git a/test/nn/test_convolution.py b/test/nn/test_convolution.py
index acf83107d2..0bf6065a18 100644
--- a/test/nn/test_convolution.py
+++ b/test/nn/test_convolution.py
@@ -3183,16 +3183,6 @@ class TestConvolutionNNDeviceType(NNTestCase):
output_cpu = model(input_tensor.float().cpu())
self.assertEqual(output.cpu().float(), output_cpu, atol=1e-3, rtol=1e-3)
- @onlyCUDA
- @largeTensorTest("24GB", "cpu")
- @largeTensorTest("20GB", "cuda")
- def test_conv3d_large_batch_1(self, device):
- x = torch.rand(1, 32, 512, 512, 256)
- m = torch.nn.Conv3d(32, 1, kernel_size=1, padding=0, stride=1, bias=False)
- yref = m(x)
- y = m.to(device=device)(x.to(device=device))
- self.assertEqual(yref, y.cpu())
-
@onlyCUDA
@skipCUDAIfNoCudnn
def test_contig_wrong_stride_cudnn(self, device):
|
2.41.0
|
d7f709752ce444e19c9b76e77312150e12ac4d3
|
Wed, 24 Apr 2024 18:10:10 -0700
|
[PATCH 0642/1000] [Inductor] Force the parallel depth as outer loop fusion depth (#123899)
|
**Summary** Fix issue: https://github.com/pytorch/pytorch/issues/123801, which reports a performance regression of `pyhpc_turbulent_kinetic_energy` after outer loop fusion. **Root Cause** - [Generated Kernel before Outer Loop Fusion](https://gist.github.com/leslie-fang-intel/54fe21ac8871fc63b9bf20fdb6edf209) - Taking the two kernels below as an example: - [Kernel 0](https://gist.github.com/leslie-fang-intel/54fe21ac8871fc63b9bf20fdb6edf209#file-pyhpc_turbulent_kinetic_energy-before-outer-loop-fusion-py-L255-L305) has 2 loop levels with size [200, 200]. Parallelization is not feasible because of the insufficient number of elements, as determined by [`decide_parallel_depth`](https://github.com/pytorch/pytorch/blob/aaec97a40364bb6ccfd968f28d309cfff8748d20/torch/_inductor/codegen/cpp.py#L2145-L2164). Therefore, the loop code is generated with the `#pragma omp single` directive. - [Kernel 1](https://gist.github.com/leslie-fang-intel/54fe21ac8871fc63b9bf20fdb6edf209#file-pyhpc_turbulent_kinetic_energy-before-outer-loop-fusion-py-L306-L316) has 3 loop levels with size [200, 200, 26], which has enough elements to be parallelized. - [Generated Kernel after Outer Loop Fusion](https://gist.github.com/leslie-fang-intel/57a497b9d9c6aa82b1c6a686292fc887) - After outer loop fusion, `Kernel0` and `Kernel1` have been fused into one [OuterLoopFusedKernel](https://gist.github.com/leslie-fang-intel/57a497b9d9c6aa82b1c6a686292fc887#file-pyhpc_turbulent_kinetic_energy-after-outer-loop-fusion-py-L261-L497); the outer loop size is [200, 200], which does not contain enough elements to parallelize. In this PR, we propose a fix for `loop_nest` involving `OuterLoopFusedKernel`: add a specific heuristic for `OuterLoopFusedKernel` that determines the parallel depth by combining `outer_loop_fusion_depth` with the internal kernels' parallel depths. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123899 Approved by: https://github.com/jgong5, https://github.com/lezcano
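A minimal sketch of the heuristic described above, using illustrative names (`fused_parallel_depth`, the standalone `decide_parallel_depth` callable) rather than the actual Inductor code — the real change is in the diff below: each inner kernel reports the depth it would parallelize on its own, and the fused outer loop takes the deepest of those, capped at the outer-loop fusion depth.

```python
from typing import Callable, List, Sequence


def fused_parallel_depth(
    inner_kernel_ranges: List[Sequence[int]],  # call_ranges of each inner kernel
    outer_fusion_depth: int,                   # loop levels shared by the fused outer loop
    threads: int,
    decide_parallel_depth: Callable[[Sequence[int], int], int],  # per-kernel heuristic
) -> int:
    # Depth each inner kernel would pick if it were code-generated on its own.
    inner_depths = [decide_parallel_depth(r, threads) for r in inner_kernel_ranges]
    # The fused loop parallelizes as deep as the most parallel inner kernel,
    # but never deeper than the fused outer loop itself.
    return min(outer_fusion_depth, max(inner_depths))


# Toy usage: fusion depth 2; one kernel would parallelize 3 levels, the other only 1.
depth = fused_parallel_depth(
    [[200, 200, 26], [200, 200]], 2, 16,
    lambda ranges, threads: len(ranges) if len(ranges) > 2 else 1,
)
assert depth == 2
```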
|
diff --git a/torch/_inductor/codegen/cpp.py b/torch/_inductor/codegen/cpp.py
index 1cd2903229..f8f18a80b0 100644
--- a/torch/_inductor/codegen/cpp.py
+++ b/torch/_inductor/codegen/cpp.py
@@ -2014,9 +2014,18 @@ class CppKernel(Kernel):
def codegen_loops_impl(self, loop_nest, code, worksharing):
threads = parallel_num_threads()
assert self.call_ranges is not None
- par_depth = self.decide_parallel_depth(
- self.call_ranges[: loop_nest.max_parallel_depth()], threads
- )
+ kernels = loop_nest.get_kernels()
+ if any(isinstance(kernel, OuterLoopFusedKernel) for kernel in kernels):
+ assert len(kernels) == 1
+ assert isinstance(kernels[0], OuterLoopFusedKernel)
+ par_depth = kernels[0].decide_parallel_depth(
+ loop_nest.max_parallel_depth(), threads
+ )
+ else:
+ par_depth = self.decide_parallel_depth(
+ loop_nest.max_parallel_depth(), threads
+ )
+
with contextlib.ExitStack() as stack:
if par_depth:
if loop_nest.is_reduction_only():
@@ -2149,7 +2158,9 @@ class CppKernel(Kernel):
else:
return "TORCH_CHECK"
- def decide_parallel_depth(self, ranges, threads):
+ def decide_parallel_depth(self, max_parallel_depth, threads):
+ assert self.call_ranges is not None
+ ranges = self.call_ranges[:max_parallel_depth]
seq = self.size_hint()
par = 1
depth = 0
@@ -3543,6 +3554,25 @@ class OuterLoopFusedKernel(CppKernel):
super().__init__(kernel_group.args, kernel_group.ws.num_threads)
self.inner: List["LoopLevel"] = []
+ def decide_parallel_depth(self, max_parallel_depth, threads) -> int:
+ kernels_parallel_depth = []
+ nested_kernels: List[List[CppKernel]] = [
+ loop.get_kernels() for loop in self.inner
+ ]
+ for kernels in nested_kernels:
+ # For any ScalarKernel, VecKernel, or Tile2DKernel,
+ # they should all have the same call_ranges
+ call_ranges = kernels[0].call_ranges
+ assert call_ranges is not None
+ assert all(kernel.call_ranges == call_ranges for kernel in kernels)
+ kernels_parallel_depth.append(
+ kernels[0].decide_parallel_depth(len(call_ranges), threads)
+ )
+ return min(
+ max_parallel_depth,
+ max(kernels_parallel_depth),
+ )
+
class ReasonFusedNodes(Enum):
SAME_VARS_REDUCE = "same_vars_reduce"
@@ -4268,3 +4298,13 @@ class LoopNestWithSplit:
if depth == 0:
self.root = split_loops
return split_loops
+
+ def get_kernels(self) -> List[CppKernel]:
+ """Get all kernel objects under this loop nest"""
+ if self.kernel:
+ return [self.kernel]
+ kernels: List[CppKernel] = []
+ assert self.root is not None
+ for loop in self.root:
+ kernels += loop.get_kernels()
+ return kernels
|
2.41.0
|
3af049b8869d6404b2c6aec68ccc6cd68db3c60
|
Wed, 24 Apr 2024 09:50:17 -0700
|
[PATCH 0645/1000] [DDP][PT2D] Fix the import issue (#124846)
|
As title. Differential Revision: [D56521582](https://our.internmc.facebook.com/intern/diff/D56521582/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124846 Approved by: https://github.com/LucasLLC, https://github.com/wz337 ghstack dependencies: #124421, #124422, #123424
|
diff --git a/torch/distributed/_composable/replicate.py b/torch/distributed/_composable/replicate.py
index 6e925c4f22..3409f3a131 100644
--- a/torch/distributed/_composable/replicate.py
+++ b/torch/distributed/_composable/replicate.py
@@ -60,14 +60,17 @@ class _ReplicateState(_State):
prefix=f"{recurse_prefix}{name}",
)
- @torch._dynamo.disable(recursive=True)
def lazy_init(self) -> None:
- self.init(*self._init_args, **self._init_kwargs)
- self.register_comm_hook()
- self._init_args = tuple()
- self._init_kwargs = {}
+ @torch._dynamo.disable(recursive=True)
+ def _lazy_init():
+ assert self._init_args is not None
+ self.init(*self._init_args, **self._init_kwargs)
+ self.register_comm_hook()
+ self._init_args = tuple()
+ self._init_kwargs = {}
+
+ _lazy_init()
- @torch._dynamo.disable(recursive=True)
def init(
self,
module: nn.Module,
@@ -109,7 +112,6 @@ class _ReplicateState(_State):
# Weakref to the DDP instance is currently only used for testing.
replicate.state(self.module)._ddp_weakref = weakref.ref(self._ddp)
- @torch._dynamo.disable(recursive=True)
def register_comm_hook(self) -> None:
for comm_args, comm_kwargs in self._comm_hook_args:
self._ddp.register_comm_hook(*comm_args, **comm_kwargs)
|
2.41.0
|
520233526d0f2965c0d48d425622df0dc50ce8f
|
Thu, 25 Apr 2024 11:14:02 +0000
|
[PATCH 0646/1000] Revert "[dynamo] Refactor into torch/_inductor/runtime/compile_tasks.py (#124681)"
|
This reverts commit 0792ceab4b6a61c6c217f65c3fecf51d75e65a9f. Reverted https://github.com/pytorch/pytorch/pull/124681 on behalf of https://github.com/jeanschmidt due to breaking internal tests, check D56522594 ([comment](https://github.com/pytorch/pytorch/pull/124681#issuecomment-2076937810))
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 86cdef08d5..7c6b6c6165 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -54,13 +54,6 @@ from torch._dynamo.device_interface import get_registered_device_interfaces
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
-from torch._inductor.runtime.compile_tasks import (
- _module_to_triton_kernel,
- _reload_python_module,
- _reload_python_module_in_subproc,
- _set_triton_ptxas_path,
- _worker_compile_triton,
-)
from torch._inductor.runtime.runtime_utils import cache_dir
from torch._inductor.utils import clear_on_fresh_inductor_cache, is_linux
@@ -2357,6 +2350,10 @@ class CppWrapperCodeCache(CppPythonBindingsCodeCache):
)
+def _reload_python_module_in_subproc(key, path):
+ return PyCodeCache.load_by_key_path(key, path)
+
+
@clear_on_fresh_inductor_cache
class PyCodeCache:
cache: Dict[str, ModuleType] = dict()
@@ -2389,21 +2386,31 @@ class PyCodeCache:
if linemap is None:
linemap = []
if key not in cls.cache:
- mod = _reload_python_module(key, path)
-
- # another thread might set this first
- cls.cache.setdefault(key, mod)
- # unzip into separate lines/nodes lists
- cls.linemaps[path] = list(zip(*linemap))
-
- if attrs is not None:
- for k, v in attrs.items():
- setattr(mod, k, v)
-
- if not (linemap or attrs):
- mod._reload_in_subproc = functools.partial( # type: ignore[attr-defined]
- _reload_python_module_in_subproc, key, path
- )
+ with open(path) as f:
+ try:
+ code = compile(f.read(), path, "exec")
+ except Exception as e:
+ raise RuntimeError(
+ f"Failed to import {path}\n{type(e).__name__}: {e}"
+ ) from None
+ mod = ModuleType(f"{__name__}.{key}")
+ mod.__file__ = path
+ mod.key = key # type: ignore[attr-defined]
+ exec(code, mod.__dict__, mod.__dict__)
+ sys.modules[mod.__name__] = mod
+ # another thread might set this first
+ cls.cache.setdefault(key, mod)
+ # unzip into separate lines/nodes lists
+ cls.linemaps[path] = list(zip(*linemap))
+
+ if attrs is not None:
+ for k, v in attrs.items():
+ setattr(mod, k, v)
+
+ if not (linemap or attrs):
+ mod._reload_in_subproc = functools.partial( # type: ignore[attr-defined]
+ _reload_python_module_in_subproc, key, path
+ )
return cls.cache[key]
@@ -2436,10 +2443,25 @@ class PyCodeCache:
return parse_stack_trace(entry)
+def _reload_triton_kernel_in_subproc(reload_module, kernel_name):
+ return TritonCodeCache._mod_to_kernel(reload_module(), kernel_name)
+
+
class TritonCodeCache:
@classmethod
def load(cls, kernel_name: str, source_code: str) -> ModuleType:
- return _module_to_triton_kernel(PyCodeCache.load(source_code), kernel_name)
+ mod = PyCodeCache.load(source_code)
+ return cls._mod_to_kernel(mod, kernel_name)
+
+ @classmethod
+ def _mod_to_kernel(cls, mod, kernel_name):
+ kernel = getattr(mod, kernel_name)
+ kernel._reload_in_subproc = functools.partial(
+ _reload_triton_kernel_in_subproc,
+ mod._reload_in_subproc,
+ kernel_name,
+ )
+ return kernel
def _cuda_compiler() -> Optional[str]:
@@ -2727,6 +2749,28 @@ def caching_device_properties():
device_interface.Worker.get_device_properties()
+@functools.lru_cache(None)
+def _set_triton_ptxas_path() -> None:
+ if os.environ.get("TRITON_PTXAS_PATH") is not None:
+ return
+ ptxas_path = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), "..", "bin", "ptxas")
+ )
+ if not os.path.exists(ptxas_path):
+ return
+ if os.path.isfile(ptxas_path) and os.access(ptxas_path, os.X_OK):
+ os.environ["TRITON_PTXAS_PATH"] = ptxas_path
+ else:
+ warnings.warn(f"{ptxas_path} exists but is not an executable")
+
+
+def _worker_compile_triton(
+ load_kernel: Callable[[], Any],
+):
+ _set_triton_ptxas_path()
+ load_kernel().precompile(warm_cache_only=True)
+
+
class CodeCacheFuture:
def result(self):
raise NotImplementedError
diff --git a/torch/_inductor/runtime/compile_tasks.py b/torch/_inductor/runtime/compile_tasks.py
deleted file mode 100644
index 66a36703da..0000000000
--- a/torch/_inductor/runtime/compile_tasks.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from __future__ import annotations
-
-import functools
-import os
-import sys
-import warnings
-from types import ModuleType
-from typing import Any, Callable
-
-
-def _reload_triton_kernel_in_subproc(reload_module, kernel_name):
- return _module_to_triton_kernel(reload_module(), kernel_name)
-
-
-def _module_to_triton_kernel(mod, kernel_name):
- kernel = getattr(mod, kernel_name)
- kernel._reload_in_subproc = functools.partial(
- _reload_triton_kernel_in_subproc,
- mod._reload_in_subproc,
- kernel_name,
- )
- return kernel
-
-
-def _reload_python_module_in_subproc(key, path):
- codecache = sys.modules.get("torch._inductor.codecache")
- if codecache:
- return codecache.PyCodeCache.load_by_key_path(key, path)
- else:
- return _reload_python_module(key, path)
-
-
-def _reload_python_module(key, path):
- with open(path) as f:
- try:
- code = compile(f.read(), path, "exec")
- except Exception as e:
- raise RuntimeError(
- f"Failed to import {path}\n{type(e).__name__}: {e}"
- ) from None
- mod = ModuleType(f"{__name__}.{key}")
- mod.__file__ = path
- mod.key = key # type: ignore[attr-defined]
- exec(code, mod.__dict__, mod.__dict__)
- sys.modules[mod.__name__] = mod
- return mod
-
-
-@functools.lru_cache(None)
-def _set_triton_ptxas_path() -> None:
- if os.environ.get("TRITON_PTXAS_PATH") is not None:
- return
- ptxas_path = os.path.abspath(
- os.path.join(os.path.dirname(__file__), "..", "bin", "ptxas")
- )
- if not os.path.exists(ptxas_path):
- return
- if os.path.isfile(ptxas_path) and os.access(ptxas_path, os.X_OK):
- os.environ["TRITON_PTXAS_PATH"] = ptxas_path
- else:
- warnings.warn(f"{ptxas_path} exists but is not an executable")
-
-
-def _worker_compile_triton(
- load_kernel: Callable[[], Any],
-):
- _set_triton_ptxas_path()
- load_kernel().precompile(warm_cache_only=True)
|
2.41.0
|
890848ec24bc432ffbf4ae6eb00cd9044cd0182
|
Thu, 25 Apr 2024 11:22:19 +0000
|
[PATCH 0647/1000] Revert "[ROCm] Triton upstream AMD backend integration (#121801)"
|
This reverts commit 9888d7495ece6b6df3b7334fc7c2a9d869359250. Reverted https://github.com/pytorch/pytorch/pull/121801 on behalf of https://github.com/jeanschmidt due to need to revert so I can revert https://github.com/pytorch/pytorch/pull/124592 ([comment](https://github.com/pytorch/pytorch/pull/121801#issuecomment-2076951327))
|
diff --git a/.ci/docker/ci_commit_pins/triton-rocm.txt b/.ci/docker/ci_commit_pins/triton-rocm.txt
index 2df035af1f..e2eb3bdf28 100644
--- a/.ci/docker/ci_commit_pins/triton-rocm.txt
+++ b/.ci/docker/ci_commit_pins/triton-rocm.txt
@@ -1 +1 @@
-bbe6246e37d8aa791c67daaf9d9d61b26c9ccfdc
+0a22a91d04c2b4a029a69a198eac390089c3e891
diff --git a/.ci/docker/common/install_triton.sh b/.ci/docker/common/install_triton.sh
index de009c1a3a..c7e60d7d9e 100755
--- a/.ci/docker/common/install_triton.sh
+++ b/.ci/docker/common/install_triton.sh
@@ -13,7 +13,7 @@ conda_reinstall() {
}
if [ -n "${ROCM_VERSION}" ]; then
- TRITON_REPO="https://github.com/openai/triton"
+ TRITON_REPO="https://github.com/ROCmSoftwarePlatform/triton"
TRITON_TEXT_FILE="triton-rocm"
elif [ -n "${BASEKIT_VERSION}" ]; then
TRITON_REPO="https://github.com/intel/intel-xpu-backend-for-triton"
diff --git a/.github/scripts/amd/package_triton_wheel.sh b/.github/scripts/amd/package_triton_wheel.sh
deleted file mode 100755
index 4295a97a34..0000000000
--- a/.github/scripts/amd/package_triton_wheel.sh
+++ /dev/null
@@ -1,99 +0,0 @@
-set -ex
-
-# Set ROCM_HOME isn't available, use ROCM_PATH if set or /opt/rocm
-ROCM_HOME="${ROCM_HOME:-${ROCM_PATH:-/opt/rocm}}"
-
-# Find rocm_version.h header file for ROCm version extract
-rocm_version_h="${ROCM_HOME}/include/rocm-core/rocm_version.h"
-if [ ! -f "$rocm_version_h" ]; then
- rocm_version_h="${ROCM_HOME}/include/rocm_version.h"
-fi
-
-# Error out if rocm_version.h not found
-if [ ! -f "$rocm_version_h" ]; then
- echo "Error: rocm_version.h not found in expected locations." >&2
- exit 1
-fi
-
-# Extract major, minor and patch ROCm version numbers
-MAJOR_VERSION=$(grep 'ROCM_VERSION_MAJOR' "$rocm_version_h" | awk '{print $3}')
-MINOR_VERSION=$(grep 'ROCM_VERSION_MINOR' "$rocm_version_h" | awk '{print $3}')
-PATCH_VERSION=$(grep 'ROCM_VERSION_PATCH' "$rocm_version_h" | awk '{print $3}')
-ROCM_INT=$(($MAJOR_VERSION * 10000 + $MINOR_VERSION * 100 + $PATCH_VERSION))
-echo "ROCm version: $ROCM_INT"
-
-# Check TRITON_ROCM_DIR is set
-if [[ -z "${TRITON_ROCM_DIR}" ]]; then
- export TRITON_ROCM_DIR=third_party/amd/backend
-fi
-
-# Remove packaged libs and headers
-rm -rf $TRITON_ROCM_DIR/include/*
-
-LIBTINFO_PATH="/usr/lib64/libtinfo.so.5"
-LIBNUMA_PATH="/usr/lib64/libnuma.so.1"
-LIBELF_PATH="/usr/lib64/libelf.so.1"
-
-OS_SO_PATHS=(
- $LIBELF_PATH
- $LIBNUMA_PATH
- $LIBTINFO_PATH
-)
-
-for lib in "${OS_SO_PATHS[@]}"
-do
- cp $lib $TRITON_ROCM_DIR/lib/
-done
-
-# Required ROCm libraries
-if [[ "${MAJOR_VERSION}" == "6" ]]; then
- libamdhip="libamdhip64.so.6"
-else
- libamdhip="libamdhip64.so.5"
-fi
-
-# Required ROCm libraries - ROCm 6.0
-ROCM_SO=(
- "${libamdhip}"
- "libhsa-runtime64.so.1"
- "libamd_comgr.so.2"
- "libdrm.so.2"
- "libdrm_amdgpu.so.1"
-)
-
-if [[ $ROCM_INT -ge 60100 ]]; then
- ROCM_SO+=("librocprofiler-register.so.0")
-fi
-
-for lib in "${ROCM_SO[@]}"
-do
- file_path=($(find $ROCM_HOME/lib/ -name "$lib")) # First search in lib
- if [[ -z $file_path ]]; then
- if [ -d "$ROCM_HOME/lib64/" ]; then
- file_path=($(find $ROCM_HOME/lib64/ -name "$lib")) # Then search in lib64
- fi
- fi
- if [[ -z $file_path ]]; then
- file_path=($(find $ROCM_HOME/ -name "$lib")) # Then search in ROCM_HOME
- fi
- if [[ -z $file_path ]]; then
- file_path=($(find /opt/ -name "$lib")) # Then search in /opt
- fi
- if [[ -z $file_path ]]; then
- echo "Error: Library file $lib is not found." >&2
- exit 1
- fi
-
- cp $file_path $TRITON_ROCM_DIR/lib
- # When running locally, and not building a wheel, we need to satisfy shared objects requests that don't look for versions
- LINKNAME=$(echo $lib | sed -e 's/\.so.*/.so/g')
- ln -sf $lib $TRITON_ROCM_DIR/lib/$LINKNAME
-
-done
-
-# Copy Include Files
-cp -r $ROCM_HOME/include/hip $TRITON_ROCM_DIR/include
-
-# Copy linker
-mkdir -p $TRITON_ROCM_DIR/llvm/bin
-cp $ROCM_HOME/llvm/bin/ld.lld $TRITON_ROCM_DIR/llvm/bin/
diff --git a/.github/scripts/amd/patch_triton_wheel.sh b/.github/scripts/amd/patch_triton_wheel.sh
deleted file mode 100755
index d95ca023ff..0000000000
--- a/.github/scripts/amd/patch_triton_wheel.sh
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/bin/bash
-set -x
-
-WHEELHOUSE_DIR=/artifacts
-PATCHELF_BIN=patchelf
-ROCM_LIB=backends/amd/lib
-ROCM_LD=backends/amd/llvm/bin
-PREFIX=triton
-fname_without_so_number() {
- LINKNAME=$(echo $1 | sed -e 's/\.so.*/.so/g')
- echo "$LINKNAME"
-}
-
-replace_needed_sofiles() {
- find $1 -name '*.so*' -o -name 'ld.lld' | while read sofile; do
- origname=$2
- patchedname=$3
- if [[ "$origname" != "$patchedname" ]]; then
- set +e
- origname=$($PATCHELF_BIN --print-needed $sofile | grep "$origname.*")
- ERRCODE=$?
- set -e
- if [ "$ERRCODE" -eq "0" ]; then
- echo "patching $sofile entry $origname to $patchedname"
- $PATCHELF_BIN --replace-needed $origname $patchedname $sofile
- fi
- fi
- done
-}
-
-mkdir -p "/tmp_dir"
-pushd /tmp_dir
-for pkg in /$WHEELHOUSE_DIR/*triton*.whl; do
- echo "Modifying $pkg"
- rm -rf tmp
- mkdir -p tmp
- cd tmp
- cp $pkg .
- unzip -q $(basename $pkg)
- rm -f $(basename $pkg)
- $PATCHELF_BIN --set-rpath ${LD_SO_RPATH:-'$ORIGIN:$ORIGIN/../../lib'} $PREFIX/$ROCM_LD/ld.lld
- $PATCHELF_BIN --print-rpath $PREFIX/$ROCM_LD/ld.lld
- # Modify libtriton.so as it sits in _C directory apart from its dependencies
- find $PREFIX/_C -type f -name "*.so*" | while read sofile; do
- echo "Setting rpath of $sofile"
- $PATCHELF_BIN --set-rpath ${C_SO_RPATH:-'$ORIGIN:$ORIGIN/'../$ROCM_LIB} ${FORCE_RPATH:-} $sofile
- $PATCHELF_BIN --print-rpath $sofile
- done
-
- # All included dependencies are included in a single lib directory
- deps=()
- deps_soname=()
- while read sofile; do
- echo "Setting rpath of $sofile to ${LIB_SO_RPATH:-'$ORIGIN'}"
- $PATCHELF_BIN --set-rpath ${LIB_SO_RPATH:-'$ORIGIN'} ${FORCE_RPATH:-} $sofile
- $PATCHELF_BIN --print-rpath $sofile
- deps+=("$sofile")
- deps_soname+=("$(basename $sofile)")
- done < <(find $PREFIX/$ROCM_LIB -type f -name "*.so*")
-
- patched=()
- for filepath in "${deps[@]}"; do
- filename=$(basename $filepath)
- destpath=$PREFIX/$ROCM_LIB/$filename
- if [[ "$filepath" != "$destpath" ]]; then
- cp $filepath $destpath
- fi
- patchedpath=$(fname_without_so_number $destpath)
- patchedname=$(basename $patchedpath)
- if [[ "$destpath" != "$patchedpath" ]]; then
- mv $destpath $patchedpath
- fi
- patched+=("$patchedname")
- echo "Copied $filepath to $patchedpath"
- done
-
- # Go through all required shared objects and see if any of our other objects are dependants. If so, replace so.ver wth so
- for ((i=0;i<${#deps[@]};++i)); do
- echo "replacing "${deps_soname[i]} ${patched[i]}
- replace_needed_sofiles $PREFIX/$ROCM_LIB ${deps_soname[i]} ${patched[i]}
- replace_needed_sofiles $PREFIX/_C ${deps_soname[i]} ${patched[i]}
- replace_needed_sofiles $PREFIX/$ROCM_LD ${deps_soname[i]} ${patched[i]}
- done
-
- # Re-bundle whl with so adjustments
- zip -rqy $(basename $pkg) *
-
- if [[ -z "${MANYLINUX_VERSION}" ]]; then
- newpkg=$pkg
- else
- newpkg=$(echo $pkg | sed -e "s/\linux_x86_64/${MANYLINUX_VERSION}/g")
- fi
-
- # Remove original whl
- rm -f $pkg
-
- # Move rebuilt whl to original location with new name.
- mv $(basename $pkg) $newpkg
-done
diff --git a/.github/scripts/build_triton_wheel.py b/.github/scripts/build_triton_wheel.py
index 33a49d788a..624de58d93 100644
--- a/.github/scripts/build_triton_wheel.py
+++ b/.github/scripts/build_triton_wheel.py
@@ -10,6 +10,9 @@ from typing import Optional
SCRIPT_DIR = Path(__file__).parent
REPO_DIR = SCRIPT_DIR.parent.parent
+# TODO: Remove me once Triton version is again in sync for vanilla and ROCm
+ROCM_TRITION_VERSION = "2.1.0"
+
def read_triton_pin(rocm_hash: bool = False) -> str:
triton_file = "triton.txt" if not rocm_hash else "triton-rocm.txt"
@@ -29,6 +32,27 @@ def check_and_replace(inp: str, src: str, dst: str) -> str:
return inp.replace(src, dst)
+def patch_setup_py(
+ path: Path,
+ *,
+ version: str,
+ name: str = "triton",
+ expected_version: Optional[str] = None,
+) -> None:
+ with open(path) as f:
+ orig = f.read()
+ # Replace name
+ orig = check_and_replace(orig, 'name="triton",', f'name="{name}",')
+ # Replace version
+ if not expected_version:
+ expected_version = read_triton_version()
+ orig = check_and_replace(
+ orig, f'version="{expected_version}",', f'version="{version}",'
+ )
+ with open(path, "w") as f:
+ f.write(orig)
+
+
def patch_init_py(
path: Path, *, version: str, expected_version: Optional[str] = None
) -> None:
@@ -68,10 +92,11 @@ def build_triton(
with TemporaryDirectory() as tmpdir:
triton_basedir = Path(tmpdir) / "triton"
triton_pythondir = triton_basedir / "python"
- triton_repo = "https://github.com/openai/triton"
if build_rocm:
+ triton_repo = "https://github.com/ROCmSoftwarePlatform/triton"
triton_pkg_name = "pytorch-triton-rocm"
else:
+ triton_repo = "https://github.com/openai/triton"
triton_pkg_name = "pytorch-triton"
check_call(["git", "clone", triton_repo], cwd=tmpdir)
if release:
@@ -137,15 +162,18 @@ def build_triton(
patch_init_py(
triton_pythondir / "triton" / "__init__.py",
version=f"{version}",
- expected_version=None,
+ expected_version=ROCM_TRITION_VERSION if build_rocm else None,
)
if build_rocm:
- check_call(
- [f"{SCRIPT_DIR}/amd/package_triton_wheel.sh"],
- cwd=triton_basedir,
- shell=True,
+ # TODO: Remove me when ROCM triton is updated
+ patch_setup_py(
+ triton_pythondir / "setup.py",
+ name=triton_pkg_name,
+ version=f"{version}",
+ expected_version=ROCM_TRITION_VERSION,
)
+ check_call("scripts/amd/setup_rocm_libs.sh", cwd=triton_basedir, shell=True)
print("ROCm libraries setup for triton installation...")
check_call(
@@ -156,11 +184,8 @@ def build_triton(
shutil.copy(whl_path, Path.cwd())
if build_rocm:
- check_call(
- [f"{SCRIPT_DIR}/amd/patch_triton_wheel.sh"],
- cwd=triton_basedir,
- shell=True,
- )
+ check_call("scripts/amd/fix_so.sh", cwd=triton_basedir, shell=True)
+
return Path.cwd() / whl_path.name
diff --git a/test/inductor/test_select_algorithm.py b/test/inductor/test_select_algorithm.py
index ca5b99f02c..48713bb63e 100644
--- a/test/inductor/test_select_algorithm.py
+++ b/test/inductor/test_select_algorithm.py
@@ -109,8 +109,6 @@ class TestSelectAlgorithm(TestCase):
)
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
- # FIXME: Investigate why _int_mm_out_cuda is not compiled on ROCm
- @skipIfRocm
@patches
def test__int_mm(self):
@torch.compile
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 9614eae4f3..7a99b1f31e 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -9354,8 +9354,6 @@ class CommonTemplate:
b = torch.randn(65, 2**24, device=self.device)
fn(a, b)
- # Skipped on ROCm until https://github.com/ROCm/triton/issues/443 resolved
- @skipIfRocm
def test_fuse_large_params(self):
def pt2_optimizer_step(optimizer):
@torch.compile()
diff --git a/torch/_dynamo/device_interface.py b/torch/_dynamo/device_interface.py
index d93a265466..d2944bc9c0 100644
--- a/torch/_dynamo/device_interface.py
+++ b/torch/_dynamo/device_interface.py
@@ -202,11 +202,8 @@ class CudaInterface(DeviceInterface):
@staticmethod
def get_compute_capability(device: _device_t = None):
- if torch.version.hip is None:
- major, min = torch.cuda.get_device_capability(device)
- return major * 10 + min
- else:
- return torch.cuda.get_device_properties(device).gcnArchName.split(":", 1)[0]
+ major, min = torch.cuda.get_device_capability(device)
+ return major * 10 + min
get_xpu_stream: Optional[Callable[[int], int]]
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index 49508e9214..d1572f0648 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -332,18 +332,7 @@ class CachingAutotuner(KernelInterface):
),
)
- cc_str = str(compile_meta["cc"])
- if "gfx10" in cc_str or "gfx11" in cc_str:
- rocm_warp_size = 32
- else:
- rocm_warp_size = 64
-
- target = (
- (compile_meta["device_type"], compile_meta["cc"])
- if not torch.version.hip
- else [compile_meta["device_type"], compile_meta["cc"], rocm_warp_size]
- )
-
+ target = (compile_meta["device_type"], compile_meta["cc"])
options = {
"num_warps": compile_meta["num_warps"],
"num_stages": compile_meta["num_stages"],
@@ -705,12 +694,18 @@ class CachingAutotuner(KernelInterface):
from torch._inductor.codecache import CudaKernelParamCache
- binary = (
- launcher.bin.asm["cubin"]
- if torch.version.hip is None
- else launcher.bin.asm["hsaco"]
- )
- CudaKernelParamCache.set(key, params, binary)
+ if self.device_props.type != "hip":
+ CudaKernelParamCache.set(key, params, launcher.bin.asm["cubin"])
+ else:
+ # There is some divergence between CUDA and ROCm here.
+ # On ROCm's triton we only have the the path to the binary, not the binary itself.
+ # For ROCm we will copy the binary to the new location instead of writing to file
+ import pathlib
+
+ launcher.bin.asm["hsaco"] = pathlib.Path(
+ launcher.bin.asm["hsaco_path"]
+ ).read_bytes()
+ CudaKernelParamCache.set(key, params, launcher.bin.asm["hsaco"])
self.cuda_kernel_saved = True
diff --git a/torch/_utils_internal.py b/torch/_utils_internal.py
index fe7d30fc6b..ae6fd7086b 100644
--- a/torch/_utils_internal.py
+++ b/torch/_utils_internal.py
@@ -151,29 +151,9 @@ def justknobs_getval_int(name: str) -> int:
@functools.lru_cache(None)
def max_clock_rate():
- if not torch.version.hip:
- from triton.testing import nvsmi
+ from triton.testing import nvsmi
- return nvsmi(["clocks.max.sm"])[0]
- else:
- # Manually set max-clock speeds on ROCm until equivalent nvmsi
- # functionality in triton.testing or via pyamdsmi enablement. Required
- # for test_snode_runtime unit tests.
- gcn_arch = str(torch.cuda.get_device_properties(0).gcnArchName.split(":", 1)[0])
- if "gfx94" in gcn_arch:
- return 1700
- elif "gfx90a" in gcn_arch:
- return 1700
- elif "gfx908" in gcn_arch:
- return 1502
- elif "gfx11" in gcn_arch:
- return 1700
- elif "gfx103" in gcn_arch:
- return 1967
- elif "gfx101" in gcn_arch:
- return 1144
- else:
- return 1100
+ return nvsmi(["clocks.max.sm"])[0]
TEST_MASTER_ADDR = "127.0.0.1"
|
2.41.0
|
f9ea261859075eedbc37cb0bc3f482986cb5cf0
|
Thu, 25 Apr 2024 11:24:39 +0000
|
[PATCH 0648/1000] Revert "OSS: Capture triton kernel in ET (#124775)"
|
This reverts commit c55309e58f88dd37e41e80425fd84a71d4b51548. Reverted https://github.com/pytorch/pytorch/pull/124775 on behalf of https://github.com/jeanschmidt due to need to revert so I can revert https://github.com/pytorch/pytorch/pull/124592 ([comment](https://github.com/pytorch/pytorch/pull/124775#issuecomment-2076954322))
|
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py
index 4a41b820c1..61f2ca314a 100644
--- a/test/profiler/test_profiler.py
+++ b/test/profiler/test_profiler.py
@@ -28,7 +28,6 @@ import weakref
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
from unittest.mock import patch
-from warnings import warn
import expecttest
import torch
@@ -36,7 +35,6 @@ import torch.nn as nn
import torch.optim
import torch.utils.data
import torch.utils.data.datapipes as dp
-from torch import _dynamo as torchdynamo
from torch._C._profiler import _TensorMetadata
from torch.autograd import (
_record_function_with_args_enter,
@@ -68,9 +66,7 @@ from torch.profiler._pattern_matcher import (
report_all_anti_patterns,
SynchronizedDataLoaderPattern,
)
-
-from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
-
+from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import skipCUDAVersionIn
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
@@ -88,8 +84,6 @@ from torch.testing._internal.common_utils import (
TestCase,
)
-from torch.utils._triton import has_triton
-
Json = Dict[str, Any]
try:
@@ -533,54 +527,42 @@ class TestExecutionTrace(TestCase):
assert loop_count == expected_loop_events
@unittest.skipIf(IS_WINDOWS, "torch.compile does not support WINDOWS")
- @unittest.skipIf(
- sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
- )
- @unittest.skipIf(not TEST_CUDA or not has_triton(), "need CUDA and triton to run")
def test_execution_trace_with_pt2(self):
- @torchdynamo.optimize("inductor")
- def fn(a, b, c):
- x = torch.nn.functional.linear(a, b)
- x = x + c
- return x.cos()
-
- a, b, c = (torch.randn(4, 4, requires_grad=True).to("cuda") for _ in range(3))
+ class ConvAndRelu(nn.Module):
+ def __init__(self) -> None:
+ super().__init__()
+ self.linear = nn.Linear(4096, 4096)
+ self.relu = nn.ReLU(inplace=True)
- inputs = [a, b, c]
- with torch._inductor.config.patch(compile_threads=1):
- fn(*inputs)
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.linear(x)
+ x = self.relu(x)
+ return x
# Create a temp file to save execution trace data.
fp = tempfile.NamedTemporaryFile("w+t", suffix="_et.json", delete=False)
fp.close()
- with profile(
- activities=torch.profiler.supported_activities(),
- record_shapes=True,
- schedule=torch.profiler.schedule(
- skip_first=3, wait=1, warmup=1, active=2, repeat=1
- ),
- execution_trace_observer=(
- ExecutionTraceObserver().register_callback(fp.name)
- ),
- ) as p:
- for idx in range(10):
- with record_function(f"## LOOP {idx} ##"):
- fn(*inputs)
- p.step()
+ with torch._inductor.config.patch(compile_threads=1):
+ test_module = torch.compile(ConvAndRelu())
+ x = torch.rand(128, 4096)
+ et = ExecutionTraceObserver().register_callback(fp.name)
+ et.start()
+ test_module.forward(x)
+ et.stop()
+
+ assert fp.name == et.get_output_file_path()
+ et.unregister_callback()
nodes = self.get_execution_trace_root(fp.name)
- found_captured_triton_kernel_node = False
+
+ found_root_node = False
for n in nodes:
assert "name" in n
- if "triton_" in n["name"]:
- for attr in n["attrs"]:
- if attr["name"] == "kernel_file" and attr["value"] != "":
- found_captured_triton_kernel_node = True
- assert len(n["inputs"]["values"]) > 0
- assert len(n["outputs"]["values"]) == 0
- if not found_captured_triton_kernel_node:
- warn("triton kernels not found")
+ if "[pytorch|profiler|execution_trace|process]" in n["name"]:
+ found_root_node = True
+
+ assert found_root_node
def test_execution_trace_start_stop(self):
use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index d1572f0648..85587c2093 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -801,7 +801,7 @@ class CachingAutotuner(KernelInterface):
args,
{
"kernel_file": self.filename,
- "kernel_backend": "triton",
+ "kernel_type": "triton",
"grid": grid_info,
"stream": stream,
},
diff --git a/torch/csrc/profiler/standalone/execution_trace_observer.cpp b/torch/csrc/profiler/standalone/execution_trace_observer.cpp
index 346dd0718d..763f449c23 100644
--- a/torch/csrc/profiler/standalone/execution_trace_observer.cpp
+++ b/torch/csrc/profiler/standalone/execution_trace_observer.cpp
@@ -236,8 +236,6 @@ const ExecutionTraceObserver::ID root_id{1};
struct FunctionCallContext : public ObserverContext {
std::string name;
- std::string kernel_backend;
- std::string kernel_file;
ExecutionTraceObserver::ID op_id{uninitialized_id};
ExecutionTraceObserver::ID parent_id{uninitialized_id};
ExecutionTraceObserver::ID fw_parent_id{uninitialized_id};
@@ -275,24 +273,14 @@ static void writeJsonNode(
const std::string& outputs = "[]",
const std::string& output_shapes = "[]",
const std::string& output_types = "[]",
- const std::string& operator_schema = "",
- const std::string& kernel_backend = "",
- const std::string& kernel_file = "") {
+ const std::string& operator_schema = "") {
out << fmt::format(
R"JSON(
{{
"id": {}, "name": "{}", "ctrl_deps": {},
"inputs": {{"values": {}, "shapes": {}, "types": {}}},
"outputs": {{"values": {}, "shapes": {}, "types": {}}},
- "attrs": [{{"name": "rf_id", "type": "uint64", "value": {}}},
- {{"name": "fw_parent", "type": "uint64", "value": {}}},
- {{"name": "seq_id", "type": "int64", "value": {}}},
- {{"name": "scope", "type": "uint64", "value": {}}},
- {{"name": "tid", "type": "uint64", "value": {}}},
- {{"name": "fw_tid", "type": "uint64", "value": {}}},
- {{"name": "op_schema", "type": "string", "value": "{}"}},
- {{"name": "kernel_backend", "type": "string", "value": "{}"}},
- {{"name": "kernel_file", "type": "string", "value": "{}"}}]
+ "attrs": [{{"name": "rf_id", "type": "uint64", "value": {}}}, {{"name": "fw_parent", "type": "uint64", "value": {}}}, {{"name": "seq_id", "type": "int64", "value": {}}}, {{"name": "scope", "type": "uint64", "value": {}}}, {{"name": "tid", "type": "uint64", "value": {}}}, {{"name": "fw_tid", "type": "uint64", "value": {}}}, {{"name": "op_schema", "type": "string", "value": "{}"}}]
}})JSON",
id,
name,
@@ -309,9 +297,7 @@ static void writeJsonNode(
scope,
tid,
fw_tid,
- operator_schema,
- kernel_backend,
- kernel_file);
+ operator_schema);
}
inline std::string timeString(const std::time_t timepoint) {
@@ -340,7 +326,7 @@ static bool initExecutionTraceStart(ExecutionTraceObserver& ob) {
ob.out << fmt::format(
R"JSON({{
- "schema": "1.0.4-chakra.0.0.4", "pid": {}, "time": "{}", "start_ts": {},
+ "schema": "1.0.3-chakra.0.0.4", "pid": {}, "time": "{}", "start_ts": {},
"nodes": [)JSON",
ob.pid,
ob.record_time,
@@ -456,44 +442,6 @@ inline void appendValueInfo(
shapes.push_back(getValueShape(val));
}
-inline void handleKernelBackendInfo(
- FunctionCallContext& fc,
- const RecordFunction& fn) {
- // triton kernel related information are in kwinputs
- const auto& kwinputs = fn.kwinputs();
- if (kwinputs.find("kernel_backend") != kwinputs.end()) {
- fc.kernel_backend = kwinputs.at("kernel_backend").toStringRef();
- if (fc.kernel_backend == "triton") {
- fc.kernel_file = kwinputs.at("kernel_file").toStringRef();
- TORCH_INTERNAL_ASSERT(
- kwinputs.find("kernel_file") != kwinputs.end(),
- "kernel file is missing in triton kernel");
- // Remove the path of the file name
- if (fc.kernel_file.find_last_of('/') != std::string::npos)
- fc.kernel_file =
- fc.kernel_file.substr(fc.kernel_file.find_last_of('/') + 1);
-
- // get grid information
- TORCH_INTERNAL_ASSERT(
- kwinputs.find("grid") != kwinputs.end(),
- "grid is missing in triton kernel");
- fc.input_values.emplace_back(
- "\"" + kwinputs.at("grid").toStringRef() + "\"");
- fc.input_types.emplace_back("\"String\"");
- fc.input_shapes.emplace_back("[]");
-
- // get stream information
- TORCH_INTERNAL_ASSERT(
- kwinputs.find("stream") != kwinputs.end(),
- "stream is missing in triton kernel");
- fc.input_values.emplace_back(
- std::to_string(kwinputs.at("stream").toInt()));
- fc.input_types.emplace_back("\"Int\"");
- fc.input_shapes.emplace_back("[]");
- }
- }
-}
-
static void recordOperatorStart(
ExecutionTraceObserver& ob,
FunctionCallContext& fc,
@@ -543,9 +491,6 @@ static void recordOperatorStart(
appendValueInfo(
ob, inputs[i], fc.input_values, fc.input_types, fc.input_shapes);
}
-
- handleKernelBackendInfo(fc, fn);
-
fc.parent_id = ob.op_stack[tid].top();
// get parent id from the forward stack, this can be different for
// autograd ops, which may execute on a different thread than the original
@@ -670,9 +615,7 @@ static void onFunctionExit(const RecordFunction& fn, ObserverContext* ctx_ptr) {
vectorToString(output_values),
vectorToString(output_shapes),
vectorToString(output_types),
- op_schema_str,
- fc.kernel_backend,
- fc.kernel_file);
+ op_schema_str);
ob->out << ",";
} catch (const std::exception& e) {
LOG(WARNING) << "Exception in execution trace observer: [" << fc.name
diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py
index 81f1d2c2f1..82daffcdcb 100644
--- a/torch/profiler/profiler.py
+++ b/torch/profiler/profiler.py
@@ -1,7 +1,6 @@
import gzip
import json
import os
-import shutil
import tempfile
from abc import ABC, abstractmethod
from enum import Enum
@@ -793,36 +792,8 @@ class ExecutionTraceObserver(_ITraceObserver):
"""
Removes ET observer from record function callbacks.
"""
-
- def _save_triton_kernels():
- # Save the kernel paths for the generated kernels
- from torch._inductor.codecache import PyCodeCache as PyCodeCache
-
- kernel_files = [
- v.__file__
- for v in PyCodeCache.cache.values()
- if getattr(v, "__file__", None) is not None
- ]
- work_dir, file_name = os.path.split(self._output_file_path)
- resource_dir = os.path.join(
- work_dir, os.path.splitext(file_name)[0] + "_resources"
- )
- if not os.path.exists(resource_dir):
- os.mkdir(resource_dir)
-
- for kernel_file in kernel_files:
- if kernel_file is None:
- continue
- path, name = os.path.split(kernel_file)
- dst = os.path.join(resource_dir, name)
- shutil.copyfile(kernel_file, dst)
-
if self._registered:
self.stop()
- try:
- _save_triton_kernels()
- except Exception as e:
- warn(f"Execution trace failed to save kernels: {e}")
_remove_execution_trace_observer()
self._registered = False
|
2.41.0
|
8806d65317c1a890d536c0c347598bfb1ee337f
|
Wed, 24 Apr 2024 16:40:01 +0100
|
[PATCH 0649/1000] [decomp] Remove dead device_hint function (#124849)
|
The only use of this function is in `_to_copy` but the result is never used, so this is just dead code. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124849 Approved by: https://github.com/lezcano
|
diff --git a/torch/_decomp/decompositions.py b/torch/_decomp/decompositions.py
index 3b69cc5b91..0780df14a8 100644
--- a/torch/_decomp/decompositions.py
+++ b/torch/_decomp/decompositions.py
@@ -2057,13 +2057,6 @@ def _fused_dropout_decomposition(input, p, generator=None):
return (res, mask)
-def device_hint(tensor):
- if isinstance(tensor, torch._subclasses.FakeTensor):
- return tensor.fake_device
- else:
- return None
-
-
@register_decomposition(aten._to_copy)
@out_wrapper()
def _to_copy(
@@ -2081,7 +2074,6 @@ def _to_copy(
if device is None and dtype is None and memory_format is None:
return x.clone()
dtype_converted = False
- common_device = device_hint(x)
if device is not None and device != x.device:
# avoid conversions on cpu
|
2.41.0
|
6ce94dca5732eb19e65e612c22b0b86aa1146b5
|
Thu, 25 Apr 2024 11:28:21 +0000
|
[PATCH 0650/1000] Revert "[inductor] Remove usage of device_interface from _inductor.runtime (#124592)"
|
This reverts commit 5d45eb77f1aeb57f13391990215b518a607b3c7e. Reverted https://github.com/pytorch/pytorch/pull/124592 on behalf of https://github.com/jeanschmidt due to breaking internal tests, check D56522594 ([comment](https://github.com/pytorch/pytorch/pull/124592#issuecomment-2076957668))
|
diff --git a/test/inductor/test_cuda_repro.py b/test/inductor/test_cuda_repro.py
index db02d19310..684f3cef8f 100644
--- a/test/inductor/test_cuda_repro.py
+++ b/test/inductor/test_cuda_repro.py
@@ -14,7 +14,6 @@ from torch._dynamo.testing import rand_strided
from torch._dynamo.utils import same
from torch._inductor import config
from torch._inductor.compile_fx import compile_fx_inner
-from torch._inductor.runtime.hints import DeviceProperties
from torch._inductor.utils import run_and_get_code
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing import FileCheck
@@ -406,7 +405,7 @@ class CudaReproTests(TestCase):
],
meta={
"signature": {0: "*fp32", 1: "*fp32", 2: "i32"},
- "device": DeviceProperties.create(torch.device("cuda")),
+ "device": 0,
"configs": [instance_descriptor(divisible_by_16=(0, 1), equal_to_1=())],
"constants": {},
},
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 7c6b6c6165..72ec8eb0c5 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -45,12 +45,16 @@ from typing import (
Optional,
Set,
Tuple,
+ Type,
TYPE_CHECKING,
Union,
)
import torch
-from torch._dynamo.device_interface import get_registered_device_interfaces
+from torch._dynamo.device_interface import (
+ get_interface_for_device,
+ get_registered_device_interfaces,
+)
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
@@ -66,6 +70,7 @@ from torch._subclasses.fake_tensor import (
from torch.fx.experimental.symbolic_shapes import has_hint, hint_int, ShapeEnv
if TYPE_CHECKING:
+ from torch._dynamo.device_interface import DeviceInterface
from torch._inductor.graph import GraphLowering
from torch._inductor.ir import ChoiceCaller
@@ -2766,9 +2771,14 @@ def _set_triton_ptxas_path() -> None:
def _worker_compile_triton(
load_kernel: Callable[[], Any],
+ cc: int,
+ device: torch.device,
+ device_interface: Type[DeviceInterface],
):
_set_triton_ptxas_path()
- load_kernel().precompile(warm_cache_only=True)
+ device_interface.Worker.set_device(device.index)
+ kernel = load_kernel()
+ kernel.precompile(warm_cache_only_with_cc=cc)
class CodeCacheFuture:
@@ -2931,13 +2941,17 @@ class AsyncCompile:
kernel = TritonCodeCache.load(kernel_name, source_code)
if config.compile_threads > 1:
- return TritonFuture(
- kernel,
- self.process_pool().submit(
- _worker_compile_triton,
- kernel._reload_in_subproc,
- ),
+ device_interface = get_interface_for_device(device_str)
+ device = torch.device(device_str, device_interface.current_device())
+ cc = device_interface.get_compute_capability(device)
+ future = self.process_pool().submit(
+ _worker_compile_triton,
+ kernel._reload_in_subproc,
+ cc,
+ device,
+ device_interface,
)
+ return TritonFuture(kernel, future)
else:
kernel.precompile()
return kernel
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index df669d10d2..4950f5e802 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -34,7 +34,7 @@ import torch.utils._pytree as pytree
from torch._dynamo.utils import preserve_rng_state
from torch._inductor.metrics import is_metric_table_enabled, log_kernel_metadata
-from torch._inductor.runtime.hints import AutotuneHint, DeviceProperties
+from torch._inductor.runtime.hints import AutotuneHint
from torch._prims_common import is_integer_dtype
from torch.utils._sympy.functions import FloorDiv, ModularIndexing
from torch.utils._sympy.value_ranges import ValueRanges
@@ -125,7 +125,7 @@ def gen_common_triton_imports():
"""
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
- from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
+ from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor
"""
)
return imports.getvalue()
@@ -2833,7 +2833,8 @@ class TritonKernel(Kernel):
)
triton_meta = {
"signature": triton_meta_signature,
- "device": DeviceProperties.create(V.graph.scheduler.current_device),
+ "device": V.graph.scheduler.current_device.index,
+ "device_type": V.graph.scheduler.current_device.type,
"constants": {},
}
diff --git a/torch/_inductor/codegen/triton_foreach.py b/torch/_inductor/codegen/triton_foreach.py
index 210ab6b50a..a0acdcdae0 100644
--- a/torch/_inductor/codegen/triton_foreach.py
+++ b/torch/_inductor/codegen/triton_foreach.py
@@ -6,7 +6,6 @@ from typing import Dict, List, Tuple
from sympy import Integer
from .. import metrics
-from ..runtime.hints import DeviceProperties
from ..scheduler import SchedulerNode
from ..utils import ceildiv, Placeholder
from ..virtualized import V
@@ -158,7 +157,8 @@ class ForeachKernel(Kernel):
_, _, signature = self.args.python_argdefs()
triton_meta = {
"signature": signature_to_meta(signature, size_dtype=size_dtype),
- "device": DeviceProperties.create(V.graph.scheduler.current_device),
+ "device": V.graph.scheduler.current_device.index,
+ "device_type": V.graph.scheduler.current_device.type,
"constants": {},
}
triton_meta["configs"] = [config_of(signature)]
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index adbc93f0dc..93c5ee5ad0 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -35,7 +35,6 @@ from torch.utils._sympy.singleton_int import SingletonInt
from .. import codecache, config, ir
from ..ir import ReinterpretView
from ..runtime import triton_heuristics
-from ..runtime.hints import DeviceProperties
from ..utils import (
cache_on_self,
get_benchmark_name,
@@ -1107,7 +1106,8 @@ class WrapperCodeGen(CodeGen):
size_dtype=index_dtype,
indices=non_constant_indices,
),
- "device": DeviceProperties.create(V.graph.scheduler.current_device),
+ "device": V.graph.scheduler.current_device.index,
+ "device_type": V.graph.scheduler.current_device.type,
# Triton compiler includes equal_to_1 args into constants even
# when they are not constexpr. otherwise there may be a segfault
# during launching the Inductor-compiled Triton kernel.
diff --git a/torch/_inductor/runtime/hints.py b/torch/_inductor/runtime/hints.py
index 325f37ae25..5b2b53ebff 100644
--- a/torch/_inductor/runtime/hints.py
+++ b/torch/_inductor/runtime/hints.py
@@ -1,8 +1,6 @@
import collections
-import typing
from dataclasses import fields
from enum import auto, Enum
-from typing import Optional
# NOTE: if these fail asserts submit a PR to increase them
@@ -91,39 +89,3 @@ class AutotuneHint(Enum):
# which isn't valid python.
# Enum.__str__ will just return "AutotuneHint.ELEMENTS_PER_WARP_32".
__repr__ = Enum.__str__
-
-
-class DeviceProperties(typing.NamedTuple):
- """Copy device properties into a data structure not requiring torch to be imported"""
-
- type: str # type: ignore[assignment]
- index: int # type: ignore[assignment]
- cc: int
- major: Optional[int] = None
- regs_per_multiprocessor: Optional[int] = None
- max_threads_per_multi_processor: Optional[int] = None
- multi_processor_count: Optional[int] = None
-
- @classmethod
- def create(cls, device):
- import torch
- from torch._dynamo.device_interface import get_interface_for_device
-
- device_type = device.type if torch.version.hip is None else "hip"
- device_interface = get_interface_for_device(device)
- if device_type == "cuda":
- props = device_interface.get_device_properties(device)
- return cls(
- type=device_type,
- index=device.index,
- cc=device_interface.get_compute_capability(device),
- major=props.major,
- regs_per_multiprocessor=props.regs_per_multiprocessor,
- max_threads_per_multi_processor=props.max_threads_per_multi_processor,
- multi_processor_count=props.multi_processor_count,
- )
- return cls(
- type=device_type,
- index=device.index,
- cc=device_interface.get_compute_capability(device),
- )
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index 85587c2093..1b042d4f4a 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -16,12 +16,12 @@ from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import torch
+from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device
from .coordinate_descent_tuner import CoordescTuner
from .hints import (
_NUM_THREADS_PER_WARP,
AutotuneHint,
- DeviceProperties,
HeuristicType,
ReductionHint,
TileHint,
@@ -144,12 +144,7 @@ class CachingAutotuner(KernelInterface):
assert len(configs) > 0, "Non-empty TritonConfig list required for compiling"
self.fn = fn
- self.device_props: DeviceProperties = triton_meta["device"]
- self.triton_meta = {
- **triton_meta,
- "device": self.device_props.index,
- "device_type": self.device_props.type,
- }
+ self.triton_meta = triton_meta
self.inductor_meta = {} if inductor_meta is None else inductor_meta
self.save_cache_hook = save_cache_hook
self.mutated_arg_names = mutated_arg_names
@@ -157,6 +152,13 @@ class CachingAutotuner(KernelInterface):
self.heuristic_type = heuristic_type
self.custom_kernel = custom_kernel
self.cuda_kernel_saved = False
+
+ # Align the default design that default as cuda
+ self.device_type = (
+ triton_meta["device_type"] if "device_type" in triton_meta else "cuda"
+ )
+ self.device_interface = get_interface_for_device(self.device_type)
+
if log.isEnabledFor(logging.DEBUG):
log.debug(
"CachingAutotuner gets %d configs for %s",
@@ -184,7 +186,7 @@ class CachingAutotuner(KernelInterface):
)
self.filename = filename
- def precompile(self, warm_cache_only=False):
+ def precompile(self, warm_cache_only_with_cc=None):
with self.lock:
if self.launchers:
return
@@ -196,7 +198,7 @@ class CachingAutotuner(KernelInterface):
for c in self.configs:
try:
compiled_binary, launcher = self._precompile_config(
- c, warm_cache_only
+ c, warm_cache_only_with_cc
)
except OutOfResources as e:
if len(self.configs) == 1:
@@ -217,19 +219,19 @@ class CachingAutotuner(KernelInterface):
seen_configs = set(self.configs)
- device_prop = self.device_props
+ device_prop = self.device_interface.Worker.get_device_properties(
+ self.triton_meta["device"]
+ )
if (
self.inductor_meta.get("dynamic_scale_rblock", True)
and self.heuristic_type == HeuristicType.REDUCTION
and self.size_hints is not None
- # Disable for AMDGPU/Intel as Triton is not ready to return n_regs for a compiled_binary.
- and device_prop.type == "cuda"
- and device_prop.major
+ # Disable for AMDGPU as Triton is not ready to return n_regs for a compiled_binary.
+ and not self.inductor_meta.get("is_hip")
+ # Disable for Intel GPU as Triton is not ready to return n_regs for a compiled_binary.
+ and self.device_type != "xpu"
and device_prop.major >= 8
):
- assert device_prop.regs_per_multiprocessor
- assert device_prop.max_threads_per_multi_processor
- assert device_prop.multi_processor_count
for triton_config, compiled_binary in zip(
self.configs, compiled_binaries
):
@@ -290,21 +292,15 @@ class CachingAutotuner(KernelInterface):
continue
seen_configs.add(new_config)
self.launchers.append(
- self._precompile_config(new_config, warm_cache_only)[1]
+ self._precompile_config(new_config, warm_cache_only_with_cc)[1]
)
self.configs = None
- def get_device_interface(self):
- # this code cannot run in compile workers, because it imports from torch
- from torch._dynamo.device_interface import get_interface_for_device
-
- return get_interface_for_device(self.device_props.type.replace("hip", "cuda"))
-
- def _precompile_config(self, cfg: Config, warm_cache_only: bool):
+ def _precompile_config(self, cfg: Config, warm_cache_only_with_cc: Optional[int]):
"""Ahead of time compile a given autotuner config."""
compile_meta = copy.deepcopy(self.triton_meta)
for k, v in cfg.kwargs.items():
- if self.device_props.type != "hip":
+ if torch.version.hip is not None:
if k == "matrix_instr_nonkdim":
compile_meta["matrix_instr_nonkdim"] = v
continue
@@ -318,9 +314,22 @@ class CachingAutotuner(KernelInterface):
"assert_indirect_indexing", True
) and not self.inductor_meta.get("is_hip", False)
- # device type will be "hip" rather than "cuda" here
- compile_meta["device_type"] = self.device_props.type
- compile_meta["cc"] = self.device_props.cc
+ # Setting device_type="hip" required on ROCm to pass down to triton
+ compile_meta["device_type"] = (
+ self.device_type if torch.version.hip is None else "hip"
+ )
+
+ if warm_cache_only_with_cc:
+ cc = warm_cache_only_with_cc
+ else:
+ # Use device_type 'cuda' for both cuda and hip devices to retrieve
+ # the compute capability.
+ device_type = self.device_type if torch.version.hip is None else "cuda"
+ device_id = compile_meta["device"]
+ device = torch.device(device_type, device_id)
+ cc = self.device_interface.get_compute_capability(device)
+
+ compile_meta["cc"] = cc
if ASTSource:
compile_args = (
@@ -332,13 +341,13 @@ class CachingAutotuner(KernelInterface):
),
)
- target = (compile_meta["device_type"], compile_meta["cc"])
+ target = (compile_meta["device_type"], cc)
options = {
"num_warps": compile_meta["num_warps"],
"num_stages": compile_meta["num_stages"],
"debug": compile_meta["debug"],
}
- if self.device_props.type != "hip":
+ if torch.version.hip is not None:
if "waves_per_eu" in compile_meta:
options["waves_per_eu"] = compile_meta["waves_per_eu"]
if "matrix_instr_nonkdim" in compile_meta:
@@ -353,21 +362,16 @@ class CachingAutotuner(KernelInterface):
compile_args = (self.fn,)
compile_kwargs = compile_meta
- if warm_cache_only:
+ if warm_cache_only_with_cc:
return (
triton.compile(*compile_args, **compile_kwargs),
None,
)
- # importing from torch is safe now that precompile has returned
- from torch._dynamo.device_interface import DeviceGuard
-
- device_interface = self.get_device_interface()
-
# load binary to the correct device
- with DeviceGuard(device_interface, compile_meta["device"]): # type: ignore[attr-defined]
+ with DeviceGuard(self.device_interface, compile_meta["device"]): # type: ignore[attr-defined]
# need to initialize context
- device_interface.synchronize(device_interface.current_device())
+ self.device_interface.synchronize(self.device_interface.current_device())
try:
binary = triton.compile(*compile_args, **compile_kwargs)
@@ -585,9 +589,8 @@ class CachingAutotuner(KernelInterface):
)
return float("inf")
- device_interface = self.get_device_interface()
- stream = device_interface.get_raw_stream( # type: ignore[call-arg]
- device_interface.current_device()
+ stream = self.device_interface.get_raw_stream( # type: ignore[call-arg]
+ self.device_interface.current_device()
)
def kernel_call():
@@ -694,7 +697,7 @@ class CachingAutotuner(KernelInterface):
from torch._inductor.codecache import CudaKernelParamCache
- if self.device_props.type != "hip":
+ if torch.version.hip is None:
CudaKernelParamCache.set(key, params, launcher.bin.asm["cubin"])
else:
# There is some divergence between CUDA and ROCm here.
@@ -732,7 +735,7 @@ class CachingAutotuner(KernelInterface):
def benchmark_one_config(config):
with self.lock:
- _, launcher = self._precompile_config(config, False)
+ _, launcher = self._precompile_config(config, None)
config2launcher[config] = launcher
out = self.bench(launcher, *cloned_args, **kwargs)
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index dbabf90ea2..8ba22061a6 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -35,7 +35,6 @@ from .codegen.triton import (
from .codegen.triton_utils import config_of, signature_to_meta
from .exc import CUDACompileError
from .ir import ChoiceCaller, PrimitiveInfoType
-from .runtime.hints import DeviceProperties
from .runtime.runtime_utils import do_bench
from .utils import get_dtype_size, Placeholder, sympy_dot, sympy_product, unique
from .virtualized import V
@@ -148,7 +147,8 @@ class TritonTemplateKernel(TritonKernel):
argdefs, _, signature = self.args.python_argdefs()
triton_meta = {
"signature": signature_to_meta(signature, size_dtype=self.index_dtype),
- "device": DeviceProperties.create(self.output_node.get_device()),
+ "device": self.output_node.get_device().index,
+ "device_type": self.output_node.get_device().type,
"constants": {},
}
triton_meta["configs"] = [config_of(signature)]
|
2.41.0
|
ac402a96ca54e2d882b6daae4bf9e8c956ae03e
|
Thu, 25 Apr 2024 11:39:20 +0000
|
[PATCH 0651/1000] [Distributed] [6/N] Fix clang-tidy warnings in torch/csrc/distributed/c10d (#124701)
|
This PR continues to fix some clang-tidy warnings in distributed/c10d code, following https://github.com/pytorch/pytorch/pull/124043. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124701 Approved by: https://github.com/ezyang
|
diff --git a/torch/csrc/distributed/c10d/Ops.cpp b/torch/csrc/distributed/c10d/Ops.cpp
index 32e0afc6a8..cf8b7cd966 100644
--- a/torch/csrc/distributed/c10d/Ops.cpp
+++ b/torch/csrc/distributed/c10d/Ops.cpp
@@ -220,6 +220,7 @@ IMPL_ALLREDUCE_COALESCED(PrivateUse1)
output_tensors, work); \
}
+// NOLINTBEGIN(cppcoreguidelines-pro-type-const-cast)
IMPL_ALLGATHER(CPU)
IMPL_ALLGATHER(CUDA)
IMPL_ALLGATHER(PrivateUse1)
@@ -440,6 +441,7 @@ IMPL_ALLTOALL_BASE(PrivateUse1)
IMPL_BARRIER(CPU)
IMPL_BARRIER(CUDA)
IMPL_BARRIER(PrivateUse1)
+// NOLINTEND(cppcoreguidelines-pro-type-const-cast)
void monitored_barrier_CPU(
at::Tensor /* unused */,
diff --git a/torch/csrc/distributed/c10d/ParamCommsUtils.cpp b/torch/csrc/distributed/c10d/ParamCommsUtils.cpp
index fe12092ee9..de29c56895 100644
--- a/torch/csrc/distributed/c10d/ParamCommsUtils.cpp
+++ b/torch/csrc/distributed/c10d/ParamCommsUtils.cpp
@@ -19,7 +19,7 @@ ParamCommsDebugInfo::ParamCommsDebugInfo(
int globalRankStart,
int globalRankStride,
int worldSize)
- : pgName_(pgName),
+ : pgName_(std::move(pgName)),
rank_(rank),
worldSize_(worldSize),
collectiveName_(std::move(collName)),
diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp
index 50e1b95a30..e95191436b 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp
@@ -20,6 +20,7 @@
#include <sys/types.h>
#include <type_traits>
+#include <utility>
#include <gloo/allgather.h>
#include <gloo/allgatherv.h>
@@ -171,9 +172,7 @@ void checkRemainingTime(
typedef void (*ReduceFunc)(void*, const void*, const void*, size_t);
-template <
- typename T,
- typename std::enable_if<!std::is_integral<T>::value, int>::type = 0>
+template <typename T, std::enable_if_t<!std::is_integral_v<T>, int> = 0>
ReduceFunc toFunction(const ReduceOp& r) {
switch (r) {
case ReduceOp::SUM:
@@ -207,9 +206,7 @@ ReduceFunc toFunction(const ReduceOp& r) {
}
// Bitwise AND with SFINAE guard for integral types.
-template <
- typename T,
- typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
void band(void* c, const void* a, const void* b, size_t n) {
auto tc = static_cast<T*>(c);
auto ta = static_cast<const T*>(a);
@@ -220,9 +217,7 @@ void band(void* c, const void* a, const void* b, size_t n) {
}
// Bitwise OR with SFINAE guard for integral types.
-template <
- typename T,
- typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
void bor(void* c, const void* a, const void* b, size_t n) {
auto tc = static_cast<T*>(c);
auto ta = static_cast<const T*>(a);
@@ -233,9 +228,7 @@ void bor(void* c, const void* a, const void* b, size_t n) {
}
// Bitwise XOR with SFINAE guard for integral types.
-template <
- typename T,
- typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
void bxor(void* c, const void* a, const void* b, size_t n) {
auto tc = static_cast<T*>(c);
auto ta = static_cast<const T*>(a);
@@ -245,9 +238,7 @@ void bxor(void* c, const void* a, const void* b, size_t n) {
}
}
-template <
- typename T,
- typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
ReduceFunc toFunction(const ReduceOp& r) {
switch (r) {
case ReduceOp::SUM:
@@ -321,8 +312,8 @@ at::Tensor pinnedLike(at::Tensor& tensor) {
auto* allocator = at::detail::getCUDAHooks().getPinnedMemoryAllocator();
auto storage = c10::Storage(
c10::Storage::use_byte_size_t(),
- at::detail::computeStorageNbytes(
- tensor.sizes(), tensor.strides(), tensor.dtype().itemsize()),
+ static_cast<int64_t>(at::detail::computeStorageNbytes(
+ tensor.sizes(), tensor.strides(), tensor.dtype().itemsize())),
allocator,
/*resizable=*/false);
return at::empty({0}, tensor.options().device(at::kCPU))
@@ -425,7 +416,8 @@ const auto kLoopbackAddress = "127.0.0.1";
} // namespace
// static
-void ProcessGroupGloo::AsyncWork::execute(c10::intrusive_ptr<AsyncWork> work) {
+void ProcessGroupGloo::AsyncWork::execute(
+ const c10::intrusive_ptr<AsyncWork>& work) {
if (work->recordFunctionBeforeCallback_) {
work->recordFunctionBeforeCallback_();
}
@@ -536,7 +528,8 @@ uint64_t ProcessGroupGloo::AsyncWork::getSequencenumber() const {
return seq_;
}
-void ProcessGroupGloo::AsyncWork::finishWorkGlooError(std::exception_ptr eptr) {
+void ProcessGroupGloo::AsyncWork::finishWorkGlooError(
+ const std::exception_ptr& eptr) {
future_->setError(eptr);
finish(eptr);
}
@@ -758,10 +751,10 @@ ProcessGroupGloo::ProcessGroupGloo(
c10::intrusive_ptr<Options> options)
: Backend(rank, size),
store_(new GlooStore(store)),
- options_(options),
+ options_(std::move(options)),
stop_(false),
collectiveCounter_(0) {
- auto& devices = options->devices;
+ auto& devices = options_->devices;
if (devices.empty()) {
TORCH_CHECK(false, "No device(s) specified");
}
@@ -778,13 +771,13 @@ ProcessGroupGloo::ProcessGroupGloo(
// option is needed if you have a fast NIC that cannot be saturated
// by a single I/O thread.
//
- contexts_.reserve(options->devices.size());
- for (const auto i : c10::irange(options->devices.size())) {
+ contexts_.reserve(options_->devices.size());
+ for (const auto i : c10::irange(options_->devices.size())) {
auto context = std::make_shared<::gloo::rendezvous::Context>(rank_, size_);
auto store = ::gloo::rendezvous::PrefixStore(std::to_string(i), *store_);
- context->setTimeout(options->timeout);
+ context->setTimeout(options_->timeout);
try {
- context->connectFullMesh(store, options->devices[i]);
+ context->connectFullMesh(store, options_->devices[i]);
} catch (const std::runtime_error& e) {
auto err = e.what();
// TORCH_CHECK to print the cpp stacktrace.
@@ -798,9 +791,9 @@ ProcessGroupGloo::ProcessGroupGloo(
// working on in the workInProgress_ vector. It must have size equal
// to the number of workers such that they can simply index into it
// using the worker index they are started with.
- workInProgress_.resize(options->threads);
+ workInProgress_.resize(options_->threads);
- threads_.resize(options->threads);
+ threads_.resize(options_->threads);
for (const auto i : c10::irange(threads_.size())) {
threads_[i] = std::thread(&ProcessGroupGloo::runLoop, this, i);
}
@@ -852,7 +845,7 @@ void ProcessGroupGloo::runLoop(int workerIndex) {
// does not immediately block.
workConsumeCV_.notify_one();
- AsyncWork::execute(std::move(work));
+ AsyncWork::execute(work);
lock.lock();
workInProgress_[workerIndex].reset();
}
@@ -892,7 +885,7 @@ class AsyncBroadcastWork : public ProcessGroupGloo::AsyncWork {
tag(tag) {}
std::shared_ptr<gloo::Context> context;
- std::vector<at::Tensor> inputs;
+ std::vector<at::Tensor> inputs{};
const int rootRank;
const int rootTensor;
const uint32_t tag;
@@ -968,8 +961,8 @@ class AsyncBroadcastCUDAWork : public AsyncBroadcastWork {
}
at::Tensor tmp;
- std::vector<c10::Stream> streams;
- std::vector<c10::Event> events;
+ std::vector<c10::Stream> streams{};
+ std::vector<c10::Event> events{};
};
} // namespace
@@ -1034,11 +1027,11 @@ class AsyncAllreduceWork : public ProcessGroupGloo::AsyncWork {
inputs),
context(context),
inputs(inputs),
- reduceOp(reduceOp),
+ reduceOp(std::move(reduceOp)),
tag(tag) {}
std::shared_ptr<gloo::Context> context;
- std::vector<at::Tensor> inputs;
+ std::vector<at::Tensor> inputs{};
const ReduceOp reduceOp;
const uint32_t tag;
@@ -1062,7 +1055,7 @@ class AsyncAllreduceWork : public ProcessGroupGloo::AsyncWork {
gloo::AllreduceOptions::Func getFunction(
const at::ScalarType& dtype,
- const ReduceOp op) {
+ const ReduceOp& op) {
gloo::AllreduceOptions::Func fn;
GENERATE_ALL_TYPES(dtype, getFunction, fn, op);
return fn;
@@ -1077,7 +1070,7 @@ class AsyncAllreduceCoalescedWork : public AsyncAllreduceWork {
ReduceOp reduceOp,
uint32_t tag,
uint64_t seq)
- : AsyncAllreduceWork(context, inputs, reduceOp, tag, seq) {}
+ : AsyncAllreduceWork(context, inputs, std::move(reduceOp), tag, seq) {}
void run() override {
allreduceCoalesced(inputs);
@@ -1120,7 +1113,7 @@ class AsyncSparseAllreduceWork : public ProcessGroupGloo::AsyncWork {
tag(tag) {}
std::shared_ptr<gloo::Context> context;
- std::vector<at::Tensor> inputs;
+ std::vector<at::Tensor> inputs{};
const uint32_t tag;
// We share dimensionality about the sparse tensors before collecting
@@ -1141,10 +1134,11 @@ class AsyncSparseAllreduceWork : public ProcessGroupGloo::AsyncWork {
// Construct from an existing metadata tensor to facilitate structured
// access to metadata from peers, after gathering it.
explicit SparseTensorMetadata(at::Tensor metadata)
- : metadata_(metadata), data_(metadata_.mutable_data_ptr<int64_t>()) {
- AT_ASSERT(metadata.scalar_type() == at::kLong);
- AT_ASSERT(metadata.dim() == 1);
- AT_ASSERT(metadata.size(0) == dim);
+ : metadata_(std::move(metadata)),
+ data_(metadata_.mutable_data_ptr<int64_t>()) {
+ AT_ASSERT(metadata_.scalar_type() == at::kLong);
+ AT_ASSERT(metadata_.dim() == 1);
+ AT_ASSERT(metadata_.size(0) == dim);
}
// Populate the metadata.
@@ -1291,13 +1285,13 @@ class AsyncSparseAllreduceWork : public ProcessGroupGloo::AsyncWork {
const auto sparseDim = tensor.sparse_dim();
std::vector<size_t> counts(context->size);
- int64_t totalSize = 0;
+ size_t totalSize = 0;
for (const auto i : c10::irange(metadata.size())) {
counts[i] = metadata[i].nnz() * sparseDim;
totalSize += counts[i];
}
- auto output = at::empty({totalSize}, at::kLong);
+ auto output = at::empty({static_cast<int64_t>(totalSize)}, at::kLong);
// tensors copied from cuda may not be contiguous, get a contiguous
// tensor before use its data_ptr
@@ -1384,7 +1378,7 @@ class AsyncAllreduceCUDAWork : public AsyncAllreduceWork {
ReduceOp reduceOp,
uint32_t tag,
uint64_t seq)
- : AsyncAllreduceWork(context, inputs, reduceOp, tag, seq) {
+ : AsyncAllreduceWork(context, inputs, std::move(reduceOp), tag, seq) {
initializeStreamsEvents(inputs, streams, events);
// Kick off copy from CUDA tensors to pinned CPU tensors.
@@ -1423,8 +1417,8 @@ class AsyncAllreduceCUDAWork : public AsyncAllreduceWork {
}
std::vector<at::Tensor> tmp;
- std::vector<c10::Stream> streams;
- std::vector<c10::Event> events;
+ std::vector<c10::Stream> streams{};
+ std::vector<c10::Event> events{};
};
class AsyncSparseAllreduceCUDAWork : public AsyncSparseAllreduceWork {
@@ -1476,9 +1470,9 @@ class AsyncSparseAllreduceCUDAWork : public AsyncSparseAllreduceWork {
}
}
- std::vector<at::Tensor> tmp;
- std::vector<c10::Stream> streams;
- std::vector<c10::Event> events;
+ std::vector<at::Tensor> tmp{};
+ std::vector<c10::Stream> streams{};
+ std::vector<c10::Event> events{};
};
} // namespace
@@ -1636,11 +1630,11 @@ class AsyncReduceWork : public ProcessGroupGloo::AsyncWork {
inputs(inputs),
rootRank(rootRank),
rootTensor(rootTensor),
- reduceOp(reduceOp),
+ reduceOp(std::move(reduceOp)),
tag(tag) {}
std::shared_ptr<gloo::Context> context;
- std::vector<at::Tensor> inputs;
+ std::vector<at::Tensor> inputs{};
const int rootRank;
const int rootTensor;
const ReduceOp reduceOp;
@@ -1668,7 +1662,7 @@ class AsyncReduceWork : public ProcessGroupGloo::AsyncWork {
gloo::ReduceOptions::Func getFunction(
const at::ScalarType& dtype,
- const ReduceOp op) {
+ const ReduceOp& op) {
gloo::ReduceOptions::Func fn;
GENERATE_ALL_TYPES(dtype, getFunction, fn, op);
return fn;
@@ -1690,7 +1684,7 @@ class AsyncReduceCUDAWork : public AsyncReduceWork {
inputs,
rootRank,
rootTensor,
- reduceOp,
+ std::move(reduceOp),
tag,
seq) {
initializeStreamsEvents(inputs, streams, events);
@@ -1731,9 +1725,9 @@ class AsyncReduceCUDAWork : public AsyncReduceWork {
}
}
- std::vector<at::Tensor> tmp;
- std::vector<c10::Stream> streams;
- std::vector<c10::Event> events;
+ std::vector<at::Tensor> tmp{};
+ std::vector<c10::Stream> streams{};
+ std::vector<c10::Event> events{};
};
} // namespace
@@ -1813,8 +1807,8 @@ class AsyncAllgatherWork : public ProcessGroupGloo::AsyncWork {
tag(tag) {}
std::shared_ptr<gloo::Context> context;
- std::vector<std::vector<at::Tensor>> outputs;
- std::vector<at::Tensor> inputs;
+ std::vector<std::vector<at::Tensor>> outputs{};
+ std::vector<at::Tensor> inputs{};
const uint32_t tag;
void allgather(
@@ -1912,13 +1906,13 @@ class AsyncAllgatherCUDAWork : public AsyncAllgatherWork {
}
}
- std::vector<at::Tensor> tmpInputs;
- std::vector<c10::Stream> inputStreams;
- std::vector<c10::Event> inputEvents;
+ std::vector<at::Tensor> tmpInputs{};
+ std::vector<c10::Stream> inputStreams{};
+ std::vector<c10::Event> inputEvents{};
- std::vector<std::vector<at::Tensor>> tmpOutputs;
- std::vector<c10::Stream> outputStreams;
- std::vector<c10::Event> outputEvents;
+ std::vector<std::vector<at::Tensor>> tmpOutputs{};
+ std::vector<c10::Stream> outputStreams{};
+ std::vector<c10::Event> outputEvents{};
};
// A work that takes an lambda on construction and calls it on wait.
@@ -1926,7 +1920,7 @@ class AsyncAllgatherCUDAWork : public AsyncAllgatherWork {
// composing multiple works together.
class LambdaWork : public Work {
public:
- LambdaWork(std::function<void(void)> fn) : fn_(fn) {}
+ LambdaWork(std::function<void(void)> fn) : fn_(std::move(fn)) {}
bool wait(std::chrono::milliseconds /* unused */) override {
fn_();
@@ -2085,8 +2079,8 @@ class AsyncAllgatherCoalescedWork : public ProcessGroupGloo::AsyncWork {
tag(tag) {}
std::shared_ptr<gloo::Context> context;
- std::vector<std::vector<at::Tensor>> output_lists;
- std::vector<at::Tensor> input_list;
+ std::vector<std::vector<at::Tensor>> output_lists{};
+ std::vector<at::Tensor> input_list{};
const uint32_t tag;
void allgather_coalesced() {
@@ -2229,8 +2223,8 @@ class AsyncGatherWork : public ProcessGroupGloo::AsyncWork {
tag(tag) {}
std::shared_ptr<gloo::Context> context;
- std::vector<std::vector<at::Tensor>> outputs;
- std::vector<at::Tensor> inputs;
+ std::vector<std::vector<at::Tensor>> outputs{};
+ std::vector<at::Tensor> inputs{};
const int root;
const uint32_t tag;
@@ -2335,13 +2329,13 @@ class AsyncGatherCUDAWork : public AsyncGatherWork {
}
}
- std::vector<at::Tensor> tmpInputs;
- std::vector<c10::Stream> inputStreams;
- std::vector<c10::Event> inputEvents;
+ std::vector<at::Tensor> tmpInputs{};
+ std::vector<c10::Stream> inputStreams{};
+ std::vector<c10::Event> inputEvents{};
- std::vector<std::vector<at::Tensor>> tmpOutputs;
- std::vector<c10::Stream> outputStreams;
- std::vector<c10::Event> outputEvents;
+ std::vector<std::vector<at::Tensor>> tmpOutputs{};
+ std::vector<c10::Stream> outputStreams{};
+ std::vector<c10::Event> outputEvents{};
};
} // namespace
@@ -2435,8 +2429,8 @@ class AsyncScatterWork : public ProcessGroupGloo::AsyncWork {
tag(tag) {}
std::shared_ptr<gloo::Context> context;
- std::vector<at::Tensor> outputs;
- std::vector<std::vector<at::Tensor>> inputs;
+ std::vector<at::Tensor> outputs{};
+ std::vector<std::vector<at::Tensor>> inputs{};
const int root;
const uint32_t tag;
@@ -2524,13 +2518,13 @@ class AsyncScatterCUDAWork : public AsyncScatterWork {
}
}
- std::vector<at::Tensor> tmpOutputs;
- std::vector<c10::Stream> outputStreams;
- std::vector<c10::Event> outputEvents;
+ std::vector<at::Tensor> tmpOutputs{};
+ std::vector<c10::Stream> outputStreams{};
+ std::vector<c10::Event> outputEvents{};
- std::vector<std::vector<at::Tensor>> tmpInputs;
- std::vector<c10::Stream> inputStreams;
- std::vector<c10::Event> inputEvents;
+ std::vector<std::vector<at::Tensor>> tmpInputs{};
+ std::vector<c10::Stream> inputStreams{};
+ std::vector<c10::Event> inputEvents{};
};
} // namespace
@@ -2633,8 +2627,8 @@ class AsyncAlltoallWork : public ProcessGroupGloo::AsyncWork {
std::shared_ptr<gloo::Context> context;
at::Tensor outputTensor;
at::Tensor inputTensor;
- std::vector<int64_t> outputCounts;
- std::vector<int64_t> inputCounts;
+ std::vector<int64_t> outputCounts{};
+ std::vector<int64_t> inputCounts{};
const uint32_t tag;
void alltoall(at::Tensor& outputTensor, at::Tensor& inputTensor) {
@@ -2724,12 +2718,12 @@ class AsyncAlltoallCUDAWork : public AsyncAlltoallWork {
}
at::Tensor cpuOutput;
- std::vector<c10::Stream> outputStreams;
- std::vector<c10::Event> outputEvents;
+ std::vector<c10::Stream> outputStreams{};
+ std::vector<c10::Event> outputEvents{};
at::Tensor cpuInput;
- std::vector<c10::Stream> inputStreams;
- std::vector<c10::Event> inputEvents;
+ std::vector<c10::Stream> inputStreams{};
+ std::vector<c10::Event> inputEvents{};
};
} // namespace
@@ -2895,7 +2889,7 @@ class AsyncBarrierWork : public ProcessGroupGloo::AsyncWork {
tag(tag) {}
std::shared_ptr<gloo::Context> context;
- std::vector<c10::weak_intrusive_ptr<AsyncWork>> priorWork;
+ std::vector<c10::weak_intrusive_ptr<AsyncWork>> priorWork{};
const uint32_t tag;
void run() override {
diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp
index 3bfbbeaa2b..d40b205c25 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp
@@ -78,7 +78,7 @@ class TORCH_API ProcessGroupGloo : public Backend {
~AsyncWork() override = default;
- static void execute(c10::intrusive_ptr<AsyncWork> work);
+ static void execute(const c10::intrusive_ptr<AsyncWork>& work);
virtual void run() = 0;
@@ -92,7 +92,7 @@ class TORCH_API ProcessGroupGloo : public Backend {
private:
void finishWorkGloo();
- void finishWorkGlooError(std::exception_ptr eptr);
+ void finishWorkGlooError(const std::exception_ptr& eptr);
inline void recordAsyncWorkProfilingInfo(
const char* profilingTitle,
const c10::optional<std::vector<at::Tensor>>& inputTensors);
diff --git a/torch/csrc/distributed/c10d/Utils.hpp b/torch/csrc/distributed/c10d/Utils.hpp
index 3e9bdf8d6f..36add3ad15 100644
--- a/torch/csrc/distributed/c10d/Utils.hpp
+++ b/torch/csrc/distributed/c10d/Utils.hpp
@@ -280,7 +280,7 @@ inline void assertLayoutMatch(
}
inline void assertNonEmpty(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors) {
if (tensors.empty()) {
fn("requires non-empty tensor list");
@@ -288,7 +288,7 @@ inline void assertNonEmpty(
}
inline void assertSingleElement(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors) {
if (tensors.size() != 1) {
fn("requires a single-element tensor list");
@@ -296,7 +296,7 @@ inline void assertSingleElement(
}
inline void assertSingleElementInput(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors) {
if (tensors.size() != 1) {
fn("requires a single-element input tensor list");
@@ -304,7 +304,7 @@ inline void assertSingleElementInput(
}
inline void assertSingleElementOutput(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors) {
if (tensors.size() != 1) {
fn("requires a single-element output tensor list");
@@ -312,25 +312,25 @@ inline void assertSingleElementOutput(
}
inline void assertRootRank(
- std::function<void(const std::string&)> fn,
- int rank,
- int size) {
+ const std::function<void(const std::string&)>& fn,
+ int64_t rank,
+ int64_t size) {
if (rank < 0 || rank >= size) {
fn("invalid root rank: " + std::to_string(rank));
}
}
inline void assertRootTensor(
- std::function<void(const std::string&)> fn,
- int rank,
- int size) {
+ const std::function<void(const std::string&)>& fn,
+ int64_t rank,
+ int64_t size) {
if (rank < 0 || rank >= size) {
fn("invalid root tensor: " + std::to_string(rank));
}
}
inline void assertDense(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors) {
const auto& layout = tensors[0].layout();
if (layout != at::kStrided) {
@@ -339,7 +339,7 @@ inline void assertDense(
}
inline void assertCPU(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors) {
const auto& device = tensors[0].device();
if (device.type() != at::kCPU) {
@@ -348,7 +348,7 @@ inline void assertCPU(
}
inline void assertSameDevice(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors) {
if (tensors.size() < 2) {
return;
diff --git a/torch/csrc/distributed/c10d/reducer.cpp b/torch/csrc/distributed/c10d/reducer.cpp
index dd90fadb11..dd5deab25c 100644
--- a/torch/csrc/distributed/c10d/reducer.cpp
+++ b/torch/csrc/distributed/c10d/reducer.cpp
@@ -186,7 +186,8 @@ Reducer::Reducer(
hooks_.emplace_back(
grad_accumulator->add_post_hook(
std::make_unique<torch::autograd::utils::LambdaPostHook>(
- [=](const torch::autograd::variable_list& outputs,
+ [this, variable_index](
+ const torch::autograd::variable_list& outputs,
const torch::autograd::variable_list& /* unused */) {
#ifndef _WIN32
this->rpc_context_.set(
@@ -529,7 +530,7 @@ void Reducer::push_rebuilt_params_for_all_indices() {
void Reducer::push_rebuilt_params(const size_t& index) {
rebuilt_params_.push_back(params_[index]);
- rebuilt_param_indices_.push_back(index);
+ rebuilt_param_indices_.push_back(static_cast<int64_t>(index));
}
void Reducer::set_divide_factor() {
@@ -652,7 +653,7 @@ void Reducer::autograd_hook(size_t index) {
return;
}
- grad_ready_order_indices_.push_back(index);
+ grad_ready_order_indices_.push_back(static_cast<int64_t>(index));
// See Note [Skip allreducing local_used_map_dev]
if (dynamic_graph_find_unused() || static_graph_first_iteration()) {
@@ -667,7 +668,7 @@ void Reducer::autograd_hook(size_t index) {
auto& variable = get_param_from_index(index);
runGradCallbackForVariable(variable, [&](auto& grad) {
if (grad.defined()) {
- local_used_map_[index] = 1;
+ local_used_map_[static_cast<int64_t>(index)] = 1;
}
// The gradient is never modified.
return false;
@@ -911,7 +912,7 @@ void Reducer::mark_variable_ready(size_t variable_index) {
all_reduce_local_used_map();
}
- torch::autograd::Engine::get_default_engine().queue_callback([=] {
+ torch::autograd::Engine::get_default_engine().queue_callback([this] {
std::lock_guard<std::mutex> lock(this->mutex_);
if (should_collect_runtime_stats()) {
record_backward_compute_end_time();
@@ -965,7 +966,9 @@ void Reducer::all_reduce_bucket(Bucket& bucket) {
const auto offset = bucket.offsets[i];
const auto length = bucket.lengths[i];
if (!bucket.bucket_views_in[i].is_alias_of(tensor)) {
- tensor.narrow(0, offset, length)
+ tensor
+ .narrow(
+ 0, static_cast<int64_t>(offset), static_cast<int64_t>(length))
.copy_(bucket.bucket_views_in[i].flatten());
}
}
@@ -1242,7 +1245,10 @@ void Reducer::initialize_bucket_views(Reducer::Bucket& bucket) {
// AccumulateGrad will do the same when stashing grads for non-dense
// params.
bucket.bucket_views_in.push_back(
- gradients.narrow(0, offset, length).view(v.sizes()));
+ gradients
+ .narrow(
+ 0, static_cast<int64_t>(offset), static_cast<int64_t>(length))
+ .view(v.sizes()));
}
// By default `bucket_views_out` and `bucket_views_in` are
// essentially the same thing.
@@ -1298,7 +1304,10 @@ void Reducer::populate_bucket_views_out(
// AccumulateGrad will do the same when stashing grads for non-dense
// params.
bucket.bucket_views_out.push_back(
- tensor.narrow(0, offset, length).view(v.sizes()));
+ tensor
+ .narrow(
+ 0, static_cast<int64_t>(offset), static_cast<int64_t>(length))
+ .view(v.sizes()));
}
}
}
@@ -1515,7 +1524,8 @@ void Reducer::finalize_bucket_dense(Bucket& bucket) {
// parameters are always used. Then we only pay the overhead cost if
// there is indeed a parameter that is locally unused, because we need
// to check if it's also globally unused.
- size_t variable_index = bucket.variable_indices[intra_bucket_index];
+ int64_t variable_index =
+ static_cast<int64_t>(bucket.variable_indices[intra_bucket_index]);
// Note: global_unused might not be global yet. As we lazily wait for
// the reduction to complete, it becomes really global only if we get to
// the point as below where we wait for the reduction work, make D2H
@@ -1715,7 +1725,7 @@ void Reducer::sync_bucket_indices(
for (const auto i : c10::irange(num_buckets)) {
auto bucket_size = bucket_indices.at(i).size();
bucket_sizes.push_back(bucket_size);
- total_size += bucket_size;
+ total_size += static_cast<int64_t>(bucket_size);
}
at::TensorOptions options;
@@ -1730,10 +1740,11 @@ void Reducer::sync_bucket_indices(
for (const auto i : c10::irange(num_buckets)) {
const auto& bucket_size = bucket_indices.at(i).size();
for (const auto j : c10::irange(bucket_size)) {
- indices_accessor[indices_accessor_Index++] = bucket_indices[i][j];
+ indices_accessor[indices_accessor_Index++] =
+ static_cast<int>(bucket_indices[i][j]);
}
}
- indices_accessor[indices_accessor_Index] = num_buckets;
+ indices_accessor[indices_accessor_Index] = static_cast<int>(num_buckets);
// Copy CPU tensor to device tensor, as the process_group_ could be NCCL and
// it can only broadcast device tensors.
@@ -1769,7 +1780,7 @@ void Reducer::sync_bucket_indices(
bucket_indices.reserve(num_buckets);
indices_accessor_Index = 0;
for (const auto i : c10::irange(num_buckets)) {
- const auto& bucket_size = bucket_sizes_accessor[i];
+ const auto& bucket_size = bucket_sizes_accessor[static_cast<int64_t>(i)];
std::vector<size_t> bucket;
bucket.reserve(bucket_size);
for (const auto j : c10::irange(bucket_size)) {
@@ -2061,7 +2072,9 @@ struct BucketKey {
BucketKey(c10::ScalarType type, c10::Device device)
: type(type), device(device) {}
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const*)
const c10::ScalarType type;
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const*)
const c10::Device device;
// See torch/csrc/utils/hash.h for dispatch code.
@@ -2263,10 +2276,10 @@ void verify_params_across_processes(
i = 0;
for (const auto& t : params) {
for (const auto& sz : t.sizes()) {
- metadata_accessor[i++] = sz;
+ metadata_accessor[static_cast<int64_t>(i++)] = sz;
}
for (const auto& str : t.strides()) {
- metadata_accessor[i++] = str;
+ metadata_accessor[static_cast<int64_t>(i++)] = str;
}
}
diff --git a/torch/csrc/distributed/c10d/reducer.hpp b/torch/csrc/distributed/c10d/reducer.hpp
index 863aeecb4e..e940a56bd6 100644
--- a/torch/csrc/distributed/c10d/reducer.hpp
+++ b/torch/csrc/distributed/c10d/reducer.hpp
@@ -538,7 +538,7 @@ class TORCH_API Reducer {
std::unordered_map<size_t, std::string> param_names_;
// Variable indices stored sequentially in order of when the gradient is ready
// for the current backwards pass.
- std::vector<int> grad_ready_order_indices_;
+ std::vector<int64_t> grad_ready_order_indices_;
// Bytes capacity of first bucket, can be configured by user
int64_t first_bucket_bytes_cap_;
// Per iteration set of parameter indices that have been marked ready.
|
2.41.0
|
c44e2b23691843466eb7ad1a43afeacff561711
|
Wed, 24 Apr 2024 13:50:18 -0700
|
[PATCH 0652/1000] Improved unbacked SymInt input support in Inductor (#124739)
|
This is a subset of changes extracted from https://github.com/pytorch/pytorch/pull/124683/

This PR contains modifications to make Inductor work with unbacked symbol inputs, which can occur when a data-dependent sized tensor is saved for backwards. The problems to be fixed:

* When binding initial symbols, we unconditionally bind unbacked symbols (instead of computing whether they are needed, which only looks at backed symbols).
* Benchmark generation code doesn't work with unbacked symints, as we have no hints to feed in real values; so I pick an arbitrary number and you are expected to fix it if it doesn't work.
* We need to make sure we don't install dependencies on unbacked SymInt inputs: that puts us down the "promptly deallocate the input" path, which is pointless for an unbacked SymInt.

Fixes https://github.com/pytorch/pytorch/issues/124652

Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124739
Approved by: https://github.com/jansel
ghstack dependencies: #124310, #124314, #124316, #124394
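As a hedged illustration (not code from this PR), the first bullet above boils down to the following binding rule in the wrapper codegen; `should_bind` is an invented name, while `free_unbacked_symbols` is the real helper the diff imports:

    import sympy
    from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols

    def should_bind(shape, needed):
        # Old rule: emit "s0 = arg.size(i)" only if some other expression
        # still needs s0.  New rule: also emit it when the size is an
        # unbacked symbol, because nothing else in the graph defines it.
        is_unbacked = isinstance(shape, sympy.Symbol) and bool(free_unbacked_symbols(shape))
        return shape in needed or is_unbacked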
|
diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py
index 9c083668d3..d6eb7f998f 100644
--- a/test/inductor/test_torchinductor_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_dynamic_shapes.py
@@ -401,6 +401,29 @@ class TestInductorDynamic(TestCase):
arg = torch.tensor([4, 6], device="cuda")
self.assertEqual(f(arg), cf(arg))
+ @torch._dynamo.config.patch(
+ capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
+ )
+ def test_unbacked_cat_backwards(self, device):
+ def f(x, w):
+ device = w.device
+ a, b = x.tolist()
+ ta = torch.ones(a, device=device)
+ tb = torch.ones(b, device=device)
+ pa = ta * w # make it require gradients
+ pb = tb * w
+ r = torch.cat([pa, pb])
+ return r.sum()
+
+ x = torch.tensor([4, 9])
+ w = torch.randn(1, requires_grad=True)
+ f(x, w).backward()
+ orig_w = w.grad
+ w.grad = None
+
+ torch.compile(fullgraph=True)(f)(x, w).backward()
+ self.assertEqual(orig_w, w.grad)
+
@torch._dynamo.config.patch(
capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
)
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index 93c5ee5ad0..f709aab575 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -28,7 +28,12 @@ import torch._ops
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor.codegen.multi_kernel import MultiKernelState
-from torch.fx.experimental.symbolic_shapes import ConvertIntKey, DivideByKey, SymTypes
+from torch.fx.experimental.symbolic_shapes import (
+ ConvertIntKey,
+ DivideByKey,
+ free_unbacked_symbols,
+ SymTypes,
+)
from torch.fx.node import _get_qualified_name
from torch.utils._sympy.singleton_int import SingletonInt
@@ -868,18 +873,23 @@ class WrapperCodeGen(CodeGen):
filter(lambda x: not is_expr(x), graph_inputs.items())
)
+ def is_unbacked_symbol(s):
+ return isinstance(s, sympy.Symbol) and free_unbacked_symbols(s)
+
for name, shape in graph_inputs_expr:
shape = V.graph.sizevars.simplify(shape) # type: ignore[arg-type]
- if shape in needed:
- needed.remove(shape) # type: ignore[arg-type]
+ if (b := shape in needed) or is_unbacked_symbol(shape):
+ if b:
+ needed.remove(shape) # type: ignore[arg-type]
code.writeline(f"{self.declare}{shape} = {name}{self.ending}")
for name, value in graph_inputs_tensors:
shapes = value.get_size()
for dim, shape in enumerate(shapes):
shape = V.graph.sizevars.simplify(shape) # type: ignore[arg-type]
- if shape in needed:
- needed.remove(shape) # type: ignore[arg-type]
+ if (b := shape in needed) or is_unbacked_symbol(shape):
+ if b:
+ needed.remove(shape) # type: ignore[arg-type]
code.writeline(
f"{self.declare}{shape} = {sizeof(name)}[{dim}]{self.ending}"
)
@@ -888,8 +898,9 @@ class WrapperCodeGen(CodeGen):
shapes = value.get_stride()
for dim, shape in enumerate(shapes):
shape = V.graph.sizevars.simplify(shape) # type: ignore[arg-type]
- if shape in needed:
- needed.remove(shape) # type: ignore[arg-type]
+ if (b := shape in needed) or is_unbacked_symbol(shape):
+ if b:
+ needed.remove(shape) # type: ignore[arg-type]
code.writeline(
f"{self.declare}{shape} = {strideof(name)}[{dim}]{self.ending}"
)
@@ -1014,10 +1025,20 @@ class WrapperCodeGen(CodeGen):
# the subclass.
continue
if isinstance(value, sympy.Expr): # Don't need to add symbolic
- add_expr_input(name, V.graph.sizevars.size_hint(value))
+ # TODO: this fallback and those below actually will generate possibly
+ # invalid benchmark code, because it's not guaranteed 42
+ # is actually a valid value for the kernel in question.
+ # See https://github.com/pytorch/pytorch/issues/124686
+ add_expr_input(name, V.graph.sizevars.size_hint(value, fallback=42))
else:
- shape = [V.graph.sizevars.size_hint(x) for x in value.get_size()]
- stride = [V.graph.sizevars.size_hint(x) for x in value.get_stride()]
+ shape = [
+ V.graph.sizevars.size_hint(x, fallback=42)
+ for x in value.get_size()
+ ]
+ stride = [
+ V.graph.sizevars.size_hint(x, fallback=42)
+ for x in value.get_stride()
+ ]
add_fake_input(
name, shape, stride, value.get_device(), value.get_dtype()
)
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index 30cc4e525f..4c896137fc 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -1489,6 +1489,13 @@ class Scheduler:
unbacked_symbol_to_origin_node = {}
+ # NB: None means that the dependency is on an input. Don't actually
+ # generate a dependency because if we do, Inductor will start trying
+ # to free the unbacked int but that's pointless
+ for name, val in V.graph.graph_inputs.items():
+ if isinstance(val, sympy.Symbol):
+ unbacked_symbol_to_origin_node[val] = None
+
for node in self.nodes:
log.debug("scheduling %s", node.node)
@@ -1503,7 +1510,7 @@ class Scheduler:
# because if a MultiOutputLayout buffer propagates an unbacked
# symint to multiple outputs, they will all claim to def it.
if s not in unbacked_symbol_to_origin_node:
- unbacked_symbol_to_origin_node[s] = node
+ unbacked_symbol_to_origin_node[s] = node.get_name()
unbacked_symbol_uses = sorted(
node.node.get_unbacked_symbol_uses(), key=lambda x: x.name
@@ -1513,7 +1520,8 @@ class Scheduler:
assert (
s in unbacked_symbol_to_origin_node
), f"{s} not in {unbacked_symbol_to_origin_node}"
- node.add_fake_dep(StarDep(unbacked_symbol_to_origin_node[s].get_name()))
+ if (r := unbacked_symbol_to_origin_node[s]) is not None:
+ node.add_fake_dep(StarDep(r))
# a node will mutate either 0 or 1 buffers
assert len(node.get_mutations()) <= 1
@@ -1558,9 +1566,11 @@ class Scheduler:
assert (
s in unbacked_symbol_to_origin_node
), f"{s} not in {unbacked_symbol_to_origin_node.keys()}"
- node_name = unbacked_symbol_to_origin_node[s].node.name
- log.debug("scheduling output %s for unbacked symint %s", node_name, s)
- add_user(node_name, OutputNode(StarDep(node_name)))
+ if (node_name := unbacked_symbol_to_origin_node[s]) is not None:
+ log.debug(
+ "scheduling output %s for unbacked symint %s", node_name, s
+ )
+ add_user(node_name, OutputNode(StarDep(node_name)))
# make sure input mutation isn't dead-code-eliminated
for name in self.mutation_renames:
diff --git a/torchgen/gen.py b/torchgen/gen.py
index dee23957e3..5c9b156b50 100644
--- a/torchgen/gen.py
+++ b/torchgen/gen.py
@@ -1399,7 +1399,9 @@ def get_grouped_by_view_native_functions(
assert kind not in grouped_by_views[schema]
grouped_by_views[schema][kind] = f
else:
- assert view_kind not in grouped_by_views[schema]
+ assert (
+ view_kind not in grouped_by_views[schema]
+ ), f"{view_kind} already in {grouped_by_views[schema].keys()}"
grouped_by_views[schema][view_kind] = f
return list(concatMap(maybe_create_view_group, grouped_by_views.values()))
diff --git a/torchgen/gen_functionalization_type.py b/torchgen/gen_functionalization_type.py
index 191d939d2d..92e2ff8ad9 100644
--- a/torchgen/gen_functionalization_type.py
+++ b/torchgen/gen_functionalization_type.py
@@ -44,6 +44,7 @@ from torchgen.native_function_generation import (
)
from torchgen.selective_build.selector import SelectiveBuilder
+from torchgen.utils import dataclass_repr
# Note: [Mutable Ops Not Using Functionalization]
@@ -779,7 +780,7 @@ def gen_functionalization_definition(
if not g.composite:
# invariant: NativeFunctionsViewGroup's always have a view_copy operator
# if the view is not composite (implicit autograd)
- assert g.view_copy is not None
+ assert g.view_copy is not None, dataclass_repr(g, indent=1)
view_defs.append(emit_view_functionalization_body(g, view_inplace=False))
if g.view_inplace is not None:
view_defs.append(emit_view_functionalization_body(g, view_inplace=True))
|
2.41.0
|
4597fffce3d2c276dd41cb2b60cde3aff4a209e
|
Thu, 25 Apr 2024 06:36:43 -0700
|
[PATCH 0653/1000] Try to reuse old symbol name rather than new symbol name when renaming (#124782)
|
Previously, unbacked SymInts would gradually get larger and larger as we kept rebinding them. Now, we do the replacement to preserve the old symbol. Actually doing this is a bit tricky. Here's the order in which things happen when retracing data-dependent code:

1. Run fake tensor prop: allocate new unbacked SymInts.
2. Run proxy tensor mode: calculate bindings and associate them with the FX node.
3. Run PropagateUnbackedSymInts: rename unbacked bindings to their old names so they are consistent.

The problem is that when we calculate bindings in step (2), we don't know what the original names are yet; we only find out later at (3). But by the time (3) runs, we've already stuffed the new bindings into meta["unbacked_bindings"] and we don't know how to update them. To fix this, I introduce resolve_unbacked_bindings, which post facto applies any of the renamings we discovered in (3).

Signed-off-by: Edward Z. Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124782
Approved by: https://github.com/lezcano
ghstack dependencies: #124310, #124314, #124316, #124394, #124739
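A minimal sketch of what such a resolution step can look like, under the assumption that shape_env.unbacked_renamings maps the fresh symbols from step (1) to the old names reinstated in step (3) (the graph.py hunk below reads that mapping the same way); the real helper lives in torch.fx.experimental.symbolic_shapes and handles more edge cases:

    def resolve_unbacked_bindings(shape_env, bindings):
        # Rewrite each stale binding key through the renamings recorded by
        # PropagateUnbackedSymInts, so entries in meta["unbacked_bindings"]
        # point at the symbols the rest of the graph actually uses.
        if bindings is None:
            return None
        return {
            shape_env.unbacked_renamings.get(symbol, symbol): keypath
            for symbol, keypath in bindings.items()
        }

Callers in the diff then consume the resolved dict exactly as they consumed the raw one before, e.g. taking .keys() in get_unbacked_symbol_defs.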
|
diff --git a/docs/source/fx.experimental.rst b/docs/source/fx.experimental.rst
index ae8dc7ef4e..6abfb89971 100644
--- a/docs/source/fx.experimental.rst
+++ b/docs/source/fx.experimental.rst
@@ -49,3 +49,4 @@ torch.fx.experimental.symbolic_shapes
check_consistent
compute_unbacked_bindings
rebind_unbacked
+ resolve_unbacked_bindings
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index c98598f226..b6e53ba681 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -25,6 +25,7 @@ from torch.fx.experimental.sym_node import magic_methods, method_to_operator
from torch.fx.experimental.symbolic_shapes import (
free_unbacked_symbols,
has_free_symbols,
+ resolve_unbacked_bindings,
ShapeEnv,
SymTypes,
)
@@ -1326,7 +1327,9 @@ class GraphLowering(torch.fx.Interpreter):
return "***\n".join(r)
if n.op != "placeholder":
- unbacked_bindings = n.meta.get("unbacked_bindings", {})
+ unbacked_bindings = resolve_unbacked_bindings(
+ V.graph.sizevars.shape_env, n.meta.get("unbacked_bindings", {})
+ )
# When we do lowering, it is possible we reallocate unbacked SymInts.
# So we need to line up the unbacked SymInts when performing the test
# here
@@ -1341,11 +1344,12 @@ class GraphLowering(torch.fx.Interpreter):
# end up needing to test equalities on the symbols, and a fresh
# symbol is likely to hit lots of GuardOnDataDependent errors that
# we already know facts for.
- assert new_unbacked_defs >= {
+ renamed_unbacked_bindings = {
V.fake_mode.shape_env.unbacked_renamings.get(s, s)
for s in unbacked_bindings.keys()
- }, (
- f"{unbacked_bindings} != {new_unbacked_defs} (fx != inductor)\n"
+ }
+ assert new_unbacked_defs >= renamed_unbacked_bindings, (
+ f"failed {new_unbacked_defs} >= {renamed_unbacked_bindings} (inductor >= fx)\n"
f"fx node is: {n.format_node()}\n"
f"new buffers are:\n\n{format_buffers()}"
)
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 129e569537..6d6d513977 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -54,6 +54,7 @@ from torch.fx.experimental.symbolic_shapes import (
DivideByKey,
free_unbacked_symbols,
rebind_unbacked,
+ resolve_unbacked_bindings,
SymTypes,
)
from torch.utils._sympy.functions import CleanDiv, FloorDiv, ModularIndexing
@@ -5229,7 +5230,9 @@ class FallbackKernel(ExternKernelAlloc):
if not hasattr(self, "unbacked_bindings"):
return
- unbacked_bindings = self.unbacked_bindings
+ unbacked_bindings = resolve_unbacked_bindings(
+ V.graph.sizevars.shape_env, self.unbacked_bindings
+ )
if not unbacked_bindings:
return
@@ -5279,7 +5282,9 @@ class FallbackKernel(ExternKernelAlloc):
def get_unbacked_symbol_defs(self) -> Set[sympy.Symbol]:
if unbacked_bindings := getattr(self, "unbacked_bindings", None):
- return unbacked_bindings.keys()
+ return resolve_unbacked_bindings(
+ V.graph.sizevars.shape_env, unbacked_bindings
+ ).keys()
else:
return set()
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index eea5333451..0e579feb6b 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -2350,6 +2350,8 @@ def long_tensor(data):
@register_lowering(aten._local_scalar_dense)
def _local_scalar_dense(data):
+ from torch.fx.experimental.symbolic_shapes import resolve_unbacked_bindings
+
# This is interesting! Most lowerings return tensors, so you can just
# return the buffer you allocated and it will get used (or not used, if
# it's dead.) But _local_scalar_dense (aka item) returns an int,
@@ -2360,7 +2362,9 @@ def _local_scalar_dense(data):
# solely responsible for generating this .item(). The buffer is
# not used for anything (notice we discard it); at codegen time,
# the "buffer" just gets assigned None.
- unbacked_bindings = V.graph.current_node.meta["unbacked_bindings"]
+ unbacked_bindings = resolve_unbacked_bindings(
+ V.graph.sizevars.shape_env, V.graph.current_node.meta["unbacked_bindings"]
+ )
assert len(unbacked_bindings) == 1, unbacked_bindings
# NB: Have to be very careful here. V.graph.current_node.meta["val"]
# seemingly also contains a symbol which you want to do binding for,
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index ae2ddddb0e..8d61e3205f 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -97,7 +97,7 @@ __all__ = [
"StatefulSymbolicContext", "SubclassSymbolicContext", "statically_known_true",
"guard_size_oblivious", "check_consistent",
"compute_unbacked_bindings", "ConvertIntKey",
- "rebind_unbacked",
+ "rebind_unbacked", "resolve_unbacked_bindings",
]
# FX node metadata keys for symbolic shape FX graph.
@@ -266,6 +266,14 @@ def check_consistent(new, old) -> None:
assert isinstance(old, scalar_types) and not isinstance(old, bool), f"{old} != {new}"
torch._check(old == new, lambda: f"{old} != {new} (old != new)")
+def resolve_unbacked_bindings(shape_env, bindings):
+ if bindings is None:
+ return None
+ return {
+ shape_env.unbacked_renamings.get(k, k): v
+ for k, v in bindings.items()
+ }
+
def rebind_unbacked(shape_env, n: torch.fx.Node, result):
"""
Suppose we are retracing a pre-existing FX graph that previously had
@@ -282,7 +290,7 @@ def rebind_unbacked(shape_env, n: torch.fx.Node, result):
if n.op == "placeholder":
return
- if bindings := n.meta.get("unbacked_bindings"):
+ if bindings := resolve_unbacked_bindings(shape_env, n.meta.get("unbacked_bindings")):
for raw_u0, path in bindings.items():
u1 = pytree.key_get(result, path)
# tensor_version ops get specialized after AOTAutograd, it's OK,
@@ -313,7 +321,8 @@ def rebind_unbacked(shape_env, n: torch.fx.Node, result):
# The old and new could be the same if you improperly hit the memo
# while retracing. Make sure you updated FakeTensorMode.epoch
assert raw_u0 != raw_u1, f"{raw_u0} possible memo disaster"
- shape_env._rename_unbacked_to(raw_u0, raw_u1)
+ # Reuse the OLD symbol name
+ shape_env._rename_unbacked_to(raw_u1, raw_u0)
def canonicalize_bool_expr(expr: SympyBoolean) -> SympyBoolean:
r""" Canonicalize a boolean expression by transforming it into a lt / le
@@ -2447,7 +2456,11 @@ class ShapeEnv:
# Unlike set_replacement, this records a shapeenv event
@record_shapeenv_event()
- def _rename_unbacked_to(self, orig_s: sympy.Expr, new_s: sympy.Expr):
+ def _rename_unbacked_to(self, orig_s: sympy.Symbol, new_s: sympy.Symbol):
+ assert isinstance(orig_s, sympy.Symbol), orig_s
+ assert isinstance(new_s, sympy.Symbol), new_s
+ assert free_unbacked_symbols(new_s), new_s
+ assert free_unbacked_symbols(orig_s), orig_s
if self._ignore_fresh_unbacked_symbols_tls():
return
dest = self.replacements.get(orig_s)
|
2.41.0
|
1e05f2fb4b3b58702e47928da86fde6f00f6a99
|
Thu, 25 Apr 2024 06:36:44 -0700
|
[PATCH 0654/1000] Don't ignore fresh unbacked symbols in AOTAutograd forward analysis (#124785)
|
This ensures we have correct SymInts when we allocate tangents. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124785 Approved by: https://github.com/lezcano ghstack dependencies: #124310, #124314, #124316, #124394, #124739, #124782
|
diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py
index d6eb7f998f..adacfd46fe 100644
--- a/test/inductor/test_torchinductor_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_dynamic_shapes.py
@@ -424,6 +424,29 @@ class TestInductorDynamic(TestCase):
torch.compile(fullgraph=True)(f)(x, w).backward()
self.assertEqual(orig_w, w.grad)
+ @torch._dynamo.config.patch(
+ capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
+ )
+ def test_unbacked_cat_backwards_save_data_dependent(self, device):
+ def f(x, w):
+ device = w.device
+ a, b = x.tolist()
+ ta = torch.ones(a, device=device)
+ tb = torch.ones(b, device=device)
+ pa = ta * w # make it require gradients
+ pb = tb * w
+ r = torch.cat([pa, pb])
+ return r
+
+ x = torch.tensor([4, 9])
+ w = torch.randn(1, requires_grad=True)
+ f(x, w).sum().backward()
+ orig_w = w.grad
+ w.grad = None
+
+ torch.compile(fullgraph=True)(f)(x, w).sum().backward()
+ self.assertEqual(orig_w, w.grad)
+
@torch._dynamo.config.patch(
capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
)
diff --git a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
index ac86e1822f..4a0aae0484 100644
--- a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
+++ b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
@@ -8,7 +8,6 @@ a functionalized version of the graph under compilation.
"""
import collections
-import contextlib
import logging
from functools import wraps
from typing import Callable, DefaultDict, Dict, List
@@ -144,25 +143,17 @@ def run_functionalized_fw_and_collect_metadata(
torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
)
- # AOTAutograd collect metadata will do fake tensor propagation, but it
- # throws out all the resulting fake tensors and doesn't save anything
- # about sizes (TODO: Actually, the subclass metadata does save size
- # info, this is likely to be incorrect if unbacked SymInts are
- # allowed). The net effect is we generate a bunch of fresh unbacked
- # symbols that we immediately throw out and don't use. NB: we don't
- # want to rename into these symbols, because we aren't going to have
- # binding sites for them.
- ignore_fresh_unbacked = contextlib.nullcontext()
- if (fake_mode := detect_fake_mode()) and fake_mode.shape_env:
- ignore_fresh_unbacked = fake_mode.shape_env.ignore_fresh_unbacked_symbols()
-
# It doesn't matter if we run this under predispatch or not because it is
# only for figuring out metadata
mode = FunctionalTensorMode(_allow_token_discovery=True)
- with disable_above, mode, ignore_fresh_unbacked:
+ with disable_above, mode:
# precondition: The passed in function already handles unflattening inputs + flattening outputs
flat_f_args = pytree.tree_map(_to_fun, flat_args)
flat_f_outs = f(*flat_f_args)
+ # We didn't do any tracing, so we don't need to process the
+ # unbacked symbols, they will just disappear into the ether
+ if (fake_mode := detect_fake_mode()) and (shape_env := fake_mode.shape_env):
+ shape_env.pending_fresh_unbacked_symbols.clear()
if prior_autocast_states != _get_autocast_states():
raise RuntimeError(
|
2.41.0
|
7bec7db4e55f329e077eb7003af2f4817cd4210
|
Thu, 25 Apr 2024 06:36:44 -0700
|
[PATCH 0655/1000] Refactor all top level usages of record_shapeenv_event to ShapeEnv class (#123735)
|
This ensures that first argument to record_shapeenv_event is a ShapeEnv so we can appropriately short circuit when recording is not in progress. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123735 Approved by: https://github.com/ysiraichi, https://github.com/zou3519, https://github.com/albanD ghstack dependencies: #124310, #124314, #124316, #124394, #124739, #124782, #124785
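A standalone sketch of the invariant this refactor enforces (illustrative names only, not the PyTorch API; the real decorator is record_shapeenv_event in torch/fx/experimental/recording.py): check at decoration time that the wrapped callable is a method whose first parameter is self, so that args[0] is always the receiver and recording can be skipped cheaply when it is already in progress.

import functools
import inspect

def record_event(fn):
    # Fail fast at decoration time: the wrapped callable must be a method,
    # i.e. its first positional parameter is literally named "self".
    params = inspect.getfullargspec(fn).args
    assert params and params[0] == "self", (
        "record_event should only wrap methods; refactor free functions into "
        "methods so the receiver is always available as args[0]"
    )

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        self = args[0]
        # Short circuit: if this receiver is already recording/replaying,
        # skip the bookkeeping and call straight through.
        if getattr(self, "is_recording", False):
            return fn(*args, **kwargs)
        self.events.append((fn.__name__, args[1:], kwargs))
        return fn(*args, **kwargs)

    return wrapper

class ToyEnv:
    def __init__(self):
        self.is_recording = False
        self.events = []

    @record_event
    def constrain(self, lo, hi):
        return (lo, hi)

env = ToyEnv()
env.constrain(0, 10)
print(env.events)  # [('constrain', (0, 10), {})]

This mirrors the shape of the change below: module-level helpers that used to be decorated directly are turned into thin wrappers that delegate to decorated ShapeEnv methods.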
|
diff --git a/torch/_export/serde/serialize.py b/torch/_export/serde/serialize.py
index b71a221a53..aa9d69236e 100644
--- a/torch/_export/serde/serialize.py
+++ b/torch/_export/serde/serialize.py
@@ -1421,8 +1421,7 @@ class GraphModuleDeserializer(metaclass=Final):
self.shape_env.add_var_to_val(sym, hint)
if vr := self.symbol_name_to_range.get(val.expr_str):
- symbolic_shapes._constrain_symbol_range(
- self.shape_env,
+ self.shape_env.constrain_symbol_range(
sym,
compiler_min=vr.lower, # type: ignore[arg-type]
compiler_max=vr.upper, # type: ignore[arg-type]
@@ -1437,8 +1436,7 @@ class GraphModuleDeserializer(metaclass=Final):
if s.name not in self.symbol_name_to_symbol:
self.symbol_name_to_symbol[s.name] = s
if vr := self.symbol_name_to_range.get(s.name):
- symbolic_shapes._constrain_symbol_range(
- self.shape_env,
+ self.shape_env.constrain_symbol_range(
s,
compiler_min=vr.lower, # type: ignore[arg-type]
compiler_max=vr.upper, # type: ignore[arg-type]
diff --git a/torch/_logging/_registrations.py b/torch/_logging/_registrations.py
index 5ff3372feb..4b87a8b592 100644
--- a/torch/_logging/_registrations.py
+++ b/torch/_logging/_registrations.py
@@ -1,7 +1,11 @@
# flake8: noqa: B950
from ._internal import register_artifact, register_log
-DYNAMIC = ["torch.fx.experimental.symbolic_shapes", "torch.fx.experimental.sym_node"]
+DYNAMIC = [
+ "torch.fx.experimental.symbolic_shapes",
+ "torch.fx.experimental.sym_node",
+ "torch.fx.experimental.recording",
+]
DISTRIBUTED = [
"torch.distributed",
"torch._dynamo.backends.distributed",
diff --git a/torch/fx/experimental/recording.py b/torch/fx/experimental/recording.py
index c200c10e6f..4bf9ebab17 100644
--- a/torch/fx/experimental/recording.py
+++ b/torch/fx/experimental/recording.py
@@ -1,4 +1,5 @@
import functools
+import inspect
import itertools
import logging
from dataclasses import dataclass
@@ -220,52 +221,64 @@ def _extract_shape_env_and_assert_equal(args, kwargs):
def record_shapeenv_event(*, save_tracked_fakes: bool = False) -> Callable:
def decorator(fn: Callable) -> Callable:
assert callable(fn)
+ args = inspect.getfullargspec(fn).args
+ assert args and args[0] == "self", (
+ "record_shapeenv_event should only wrap methods on ShapeEnv; refactor your "
+ "code so that it calls into a method on ShapeEnv"
+ )
name = fn.__name__
@functools.wraps(fn)
def wrapper(*args, **kwargs):
from torch.fx.experimental.symbolic_shapes import ShapeEnv
- if isinstance(args[0], ShapeEnv) and args[0].is_recording: # type: ignore[has-type]
- # If ShapeEnv is already recording an event, call the wrapped
- # function directly.
- #
- # NB: here, we skip the check of whether all ShapeEnv instances
- # are equal, in favor of a faster dispatch.
- return fn(*args, **kwargs)
-
- # Retrieve an instance of ShapeEnv.
- # Assumption: the collection of args and kwargs may not reference
- # different ShapeEnv instances.
- self = _extract_shape_env_and_assert_equal(args, kwargs)
-
- # If we are calling this function without any ShapeEnv instance
- # alive in its arguments, we don't record and call the original.
- if self is None:
- return fn(*args, **kwargs)
-
- # Otherwise, start recording and call the function.
- with self._recording():
- # Take a snapshot of the current tracked_fakes.
- tracked_fakes = (
- self._snapshot_tracked_fakes() if save_tracked_fakes else None
- )
- # Record the event for 'fn'.
- event = ShapeEnvEvent(
- fn, list(args), kwargs, tracked_fakes, name=fn.__name__
- )
- # Play the event on this ShapeEnv.
- # NB: It's important to put the event first, because running
- # the event can trigger internal events that must be ordered
- # after this event. However, if an exception happens, we do
- # NOT want to have the event in the list, so pop it off from
- # the record if an error happened
- self.events.append(event)
- try:
- return event.run(self)
- except Exception:
- self.events.pop()
- raise
+ assert isinstance(args[0], ShapeEnv)
+
+ try:
+ if args[0].is_recording: # type: ignore[has-type]
+ # If ShapeEnv is already recording an event, call the wrapped
+ # function directly.
+ #
+ # NB: here, we skip the check of whether all ShapeEnv instances
+ # are equal, in favor of a faster dispatch.
+ return fn(*args, **kwargs)
+
+ # Retrieve an instance of ShapeEnv.
+ # Assumption: the collection of args and kwargs may not reference
+ # different ShapeEnv instances.
+ self = _extract_shape_env_and_assert_equal(args, kwargs)
+
+ # If we are calling this function without any ShapeEnv instance
+ # alive in its arguments, we don't record and call the original.
+ if self is None:
+ return fn(*args, **kwargs)
+
+ # Otherwise, start recording and call the function.
+ with self._recording():
+ # Take a snapshot of the current tracked_fakes.
+ tracked_fakes = (
+ self._snapshot_tracked_fakes() if save_tracked_fakes else None
+ )
+ # Record the event for 'fn'.
+ event = ShapeEnvEvent(
+ fn, list(args), kwargs, tracked_fakes, name=fn.__name__
+ )
+ # Play the event on this ShapeEnv.
+ # NB: It's important to put the event first, because running
+ # the event can trigger internal events that must be ordered
+ # after this event. However, if an exception happens, we do
+ # NOT want to have the event in the list, so pop it off from
+ # the record if an error happened
+ self.events.append(event)
+ try:
+ return event.run(self)
+ except Exception:
+ self.events.pop()
+ raise
+
+ except Exception:
+ log.error("failed while running %s(*%s, **%s)", name, args[1:], kwargs)
+ raise
return wrapper
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 8d61e3205f..842843895c 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -725,10 +725,6 @@ def guard_scalar(a):
raise AssertionError(f"unrecognized scalar {a}")
-def _constrain_symbol_range(shape_env, s: sympy.Symbol, compiler_min: int, compiler_max: int):
- shape_env.constrain_symbol_range(s, compiler_min, compiler_max)
-
-
def _advise_is_size(a):
"""
Don't use this directly; use torch._check_is_size instead.
@@ -770,7 +766,6 @@ def _advise_is_size(a):
):
_constrain_range_for_size(a)
-@record_shapeenv_event()
def _constrain_range_for_size(a, min: Optional[int] = None, max: Optional[int] = None):
"""
This function is NOT INTENDED to be used by itself.
@@ -782,27 +777,10 @@ def _constrain_range_for_size(a, min: Optional[int] = None, max: Optional[int] =
assert isinstance(a, SymInt), "can only constrain range for SymInt"
assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
- if min is None:
- min = 0
- if max is None:
- max = sys.maxsize - 1
-
- if max < min:
- raise ValueError(
- "Maximum value to constrain_as_size can't be less than the specified min value, "
- "received min={min} and max={max}"
- )
-
- a.node.shape_env.constrain_symbol_range(
- a.node.expr,
- compiler_min=min,
- compiler_max=max,
- )
- a.node.shape_env.size_like.add(a.node.expr)
+ a.node.shape_env._constrain_range_for_size(a.node.expr, min, max)
# inclusive both ways
-@record_shapeenv_event()
def constrain_range(a, *, min: Optional[int], max: Optional[int] = None):
"""
Applies a constraint that the passed in SymInt must lie between min-max
@@ -844,54 +822,24 @@ def constrain_range(a, *, min: Optional[int], max: Optional[int] = None):
raise ValueError(f"Invalid value {a} for range [{min}:{max}]")
return
- if isinstance(a.node.expr, sympy.Integer):
- if not (min <= int(a.node.expr) <= max):
- raise ValueRangeError(f"Invalid value {int(a.node.expr)} for range [{min}:{max}]")
- return
- assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
+ a.node.shape_env._constrain_range(a.node.expr, min, max)
- # TODO: Shouldn't we install a guard if the symbol is backed? Or is the
- # semantics that this is an "unchecked" assert (but it this actually
- # something useful? Might be better to restrict only for unbacked
- # SymInt).
- _constrain_symbol_range(
- a.node.shape_env,
- a.node.expr,
- compiler_min=min,
- compiler_max=max,
- )
-
-
-@record_shapeenv_event()
-def constrain_unify(a, b):
+def constrain_unify(a: torch.SymInt, b: torch.SymInt) -> None:
"""
Given two SymInts, constrain them so that they must be equal. NB:
this will not work with SymInts that represent nontrivial expressions
(yet!)
"""
- # TODO: this does not install a deferred runtime assert yet
-
- # TODO: Maybe dedupe this with _maybe_guard_rel?
if not isinstance(a, SymInt):
if not isinstance(b, SymInt):
assert a == b
+ return
else:
- assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
shape_env = b.node.shape_env
- shape_env.replacements[b.node.expr] = sympy.Integer(a)
else:
- # TODO: Actually, we can support this as long as one of them is a symbol.
- # NB: We can't actually do "unification" as our operators are not
- # injective
- assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
shape_env = a.node.shape_env
- if not isinstance(b, SymInt):
- shape_env.replacements[a.node.expr] = sympy.Integer(b)
- else:
- assert a.node.shape_env is b.node.shape_env
- assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
- new_var = shape_env._find(a.node.expr)
- shape_env.replacements[b.node.expr] = new_var
+
+ shape_env._constrain_unify(a, b)
# Assume that a boolean is true for the purposes of subsequent symbolic
# reasoning. This will keep track of corresponding runtime checks to verify
@@ -2470,6 +2418,78 @@ class ShapeEnv:
if dest is not None:
self._set_replacement(new_s, dest, "rename_unbacked_to_dest")
+ @record_shapeenv_event()
+ def _constrain_range_for_size(self, a: sympy.Symbol, min: Optional[int] = None, max: Optional[int] = None):
+ if min is None:
+ min = 0
+ if max is None:
+ max = sys.maxsize - 1
+
+ if max < min:
+ raise ValueError(
+ "Maximum value to constrain_as_size can't be less than the specified min value, "
+ "received min={min} and max={max}"
+ )
+
+ self.constrain_symbol_range(
+ a,
+ compiler_min=min,
+ compiler_max=max,
+ )
+ self.size_like.add(a)
+
+ @record_shapeenv_event()
+ def _constrain_range(self, a: sympy.Expr, min: int, max: int):
+ if isinstance(a, sympy.Integer):
+ if not (min <= int(a) <= max):
+ raise ValueRangeError(f"Invalid value {int(a)} for range [{min}:{max}]")
+ return
+ assert isinstance(a, sympy.Symbol), "constraining non-Symbols NYI"
+
+ # TODO: Shouldn't we install a guard if the symbol is backed? Or is the
+ # semantics that this is an "unchecked" assert (but it this actually
+ # something useful? Might be better to restrict only for unbacked
+ # SymInt).
+ self.constrain_symbol_range(
+ a,
+ compiler_min=min,
+ compiler_max=max,
+ )
+
+ @record_shapeenv_event()
+ def _constrain_unify(self, a, b):
+ """
+ Given two SymInts, constrain them so that they must be equal. NB:
+ this will not work with SymInts that represent nontrivial expressions
+ (yet!)
+ """
+ # TODO: this does not install a deferred runtime assert yet
+
+ # TODO: Maybe dedupe this with _maybe_guard_rel?
+ # Update Feb 2024: this is extra important to do, this doesn't handle
+ # unbacked replacements properly nor does it generate deferred runtime
+ # asserts
+ if not isinstance(a, SymInt):
+ if not isinstance(b, SymInt):
+ assert a == b
+ else:
+ assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
+ assert b.node.shape_env is self
+ self.replacements[b.node.expr] = sympy.Integer(a)
+ else:
+ # TODO: Actually, we can support this as long as one of them is a symbol.
+ # NB: We can't actually do "unification" as our operators are not
+ # injective
+ assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
+ assert a.node.shape_env is self
+ if not isinstance(b, SymInt):
+ self.replacements[a.node.expr] = sympy.Integer(b)
+ else:
+ assert a.node.shape_env is b.node.shape_env
+ assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
+ new_var = self._find(a.node.expr)
+ self.replacements[b.node.expr] = new_var
+
def _ignore_fresh_unbacked_symbols_tls(self):
return getattr(TLS, "ignore_fresh_unbacked_symbols", False)
|
2.41.0
|
5c0d3f3f0b19b7ca88bc92e9dc56e391d18e010
|
Wed, 24 Apr 2024 20:22:24 -0700
|
[PATCH 0656/1000] Fix mypy issues in fake_tensor.py (#124428)
|
fake_tensor.py had mypy errors ignored. That seems less than desirable. Also added SafePyObjectT<T>, which is a tagged wrapper around a SafePyObject but provides static type checking (with no other guarantees). Used `SafePyObjectT<TorchDispatchModeKey>` on some of the TorchDispatchModeTLS API to ensure that we don't accidentally inject a different type than expected into the stack. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124428 Approved by: https://github.com/malfet
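The SafePyObjectT<T> change in the diff below is C++, but the same newtype-tag idea can be sketched in Python for intuition (hypothetical names, not a PyTorch API): a generic wrapper whose type parameter is never used at runtime lets a static checker keep differently-tagged values apart.

from typing import Generic, TypeVar

Tag = TypeVar("Tag")

class Tagged(Generic[Tag]):
    # Tag is a phantom parameter: it is never stored or inspected at runtime,
    # it only lets a type checker distinguish Tagged[A] from Tagged[B].
    def __init__(self, value: object) -> None:
        self._value = value

    def get(self) -> object:
        return self._value

class DispatchModeTag: ...
class SomeOtherTag: ...

def push_mode(mode: Tagged[DispatchModeTag]) -> None:
    print("pushed", mode.get())

push_mode(Tagged[DispatchModeTag]("fake mode"))  # fine at runtime and for mypy
# push_mode(Tagged[SomeOtherTag]("oops"))        # same runtime object, but mypy rejects it

The actual C++ version additionally inherits privately from SafePyObject and re-exposes only ptr, pyinterpreter, and release; the Python sketch does not try to mirror that part.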
|
diff --git a/c10/core/SafePyObject.h b/c10/core/SafePyObject.h
index 4e56384b2f..19f8f62c71 100644
--- a/c10/core/SafePyObject.h
+++ b/c10/core/SafePyObject.h
@@ -55,6 +55,22 @@ struct C10_API SafePyObject {
c10::impl::PyInterpreter* pyinterpreter_;
};
+// A newtype wrapper around SafePyObject for type safety when a python object
+// represents a specific type. Note that `T` is only used as a tag and isn't
+// actually used for any true purpose.
+template <typename T>
+struct SafePyObjectT : private SafePyObject {
+ SafePyObjectT(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
+ : SafePyObject(data, pyinterpreter) {}
+ SafePyObjectT(SafePyObjectT&& other) noexcept : SafePyObject(other) {}
+ SafePyObjectT(SafePyObjectT const&) = delete;
+ SafePyObjectT& operator=(SafePyObjectT const&) = delete;
+
+ using SafePyObject::ptr;
+ using SafePyObject::pyinterpreter;
+ using SafePyObject::release;
+};
+
// Like SafePyObject, but non-owning. Good for references to global PyObjects
// that will be leaked on interpreter exit. You get a copy constructor/assign
// this way.
diff --git a/c10/core/impl/TorchDispatchModeTLS.cpp b/c10/core/impl/TorchDispatchModeTLS.cpp
index 37c75003e2..e558a70522 100644
--- a/c10/core/impl/TorchDispatchModeTLS.cpp
+++ b/c10/core/impl/TorchDispatchModeTLS.cpp
@@ -25,7 +25,7 @@ bool TorchDispatchModeTLS::any_modes_set(bool skip_infra_modes) {
}
void TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
- std::shared_ptr<SafePyObject> mode) {
+ std::shared_ptr<PyObject_TorchDispatchMode> mode) {
if (!any_modes_set()) {
c10::impl::tls_set_dispatch_key_included(DispatchKey::Python, true);
c10::impl::tls_set_dispatch_key_included(
@@ -34,8 +34,9 @@ void TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
torchDispatchModeState.stack_.push_back(std::move(mode));
}
-const std::shared_ptr<SafePyObject> TorchDispatchModeTLS::pop_stack() {
- std::shared_ptr<SafePyObject> out;
+const std::shared_ptr<PyObject_TorchDispatchMode> TorchDispatchModeTLS::
+ pop_stack() {
+ std::shared_ptr<PyObject_TorchDispatchMode> out;
if (!torchDispatchModeState.stack_.empty()) {
out = torchDispatchModeState.stack_.back();
torchDispatchModeState.stack_.pop_back();
@@ -60,8 +61,9 @@ const std::shared_ptr<SafePyObject> TorchDispatchModeTLS::pop_stack() {
}
return out;
}
-const std::tuple<std::shared_ptr<SafePyObject>, TorchDispatchModeKey>
-TorchDispatchModeTLS::pop_highest_infra_mode() {
+const std::
+ tuple<std::shared_ptr<PyObject_TorchDispatchMode>, TorchDispatchModeKey>
+ TorchDispatchModeTLS::pop_highest_infra_mode() {
for (int64_t i = static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS) - 1;
i >= 0;
--i) {
@@ -82,8 +84,8 @@ TorchDispatchModeTLS::pop_highest_infra_mode() {
false, "Called pop_highest_infra_mode, but no infra modes were active.")
}
-const std::shared_ptr<SafePyObject>& TorchDispatchModeTLS::get_stack_at(
- int64_t idx) {
+const std::shared_ptr<PyObject_TorchDispatchMode>& TorchDispatchModeTLS::
+ get_stack_at(int64_t idx) {
TORCH_CHECK(idx < stack_len(), "Tried to get stack at idx that's too big");
// Our "logical" stack includes both:
// - any user modes (the entire torchDispatchModeState.stack_)
@@ -119,13 +121,13 @@ int64_t TorchDispatchModeTLS::stack_len() {
return stack_len + infra_modes_len;
}
-const c10::optional<std::shared_ptr<SafePyObject>> TorchDispatchModeTLS::
- get_mode(TorchDispatchModeKey mode_key) {
+const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
+TorchDispatchModeTLS::get_mode(TorchDispatchModeKey mode_key) {
return torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)];
}
void TorchDispatchModeTLS::set_mode(
- const std::shared_ptr<SafePyObject>& mode,
+ const std::shared_ptr<PyObject_TorchDispatchMode>& mode,
TorchDispatchModeKey mode_key) {
TORCH_CHECK(
torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] ==
@@ -143,8 +145,8 @@ void TorchDispatchModeTLS::set_mode(
torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] = mode;
}
-const c10::optional<std::shared_ptr<SafePyObject>> TorchDispatchModeTLS::
- unset_mode(TorchDispatchModeKey mode_key) {
+const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
+TorchDispatchModeTLS::unset_mode(TorchDispatchModeKey mode_key) {
auto out = torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)];
torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] =
c10::nullopt;
diff --git a/c10/core/impl/TorchDispatchModeTLS.h b/c10/core/impl/TorchDispatchModeTLS.h
index 50a92459e6..d9ac8d8449 100644
--- a/c10/core/impl/TorchDispatchModeTLS.h
+++ b/c10/core/impl/TorchDispatchModeTLS.h
@@ -12,31 +12,35 @@ enum class TorchDispatchModeKey : int8_t {
NUM_MODE_KEYS
};
+using PyObject_TorchDispatchMode = SafePyObjectT<TorchDispatchModeKey>;
+
struct C10_API TorchDispatchModeTLS {
// This API is NOT invariant safe.
// It must not take in an infra mode that uses TorchDispatchModeKey
// If you're pushing an infra mode onto the stack, we expect
// you to use set_mode
static void push_non_infra_mode_onto_stack(
- std::shared_ptr<SafePyObject> mode);
+ std::shared_ptr<PyObject_TorchDispatchMode> mode);
// Pops the top mode of the stack,
// giving precedence to user modes before attempting to pop
// any infra modes
- static const std::shared_ptr<SafePyObject> pop_stack();
+ static const std::shared_ptr<PyObject_TorchDispatchMode> pop_stack();
// Returns the highest-priority infra mode on the stack,
// along with its mode key.
- static const std::tuple<std::shared_ptr<SafePyObject>, TorchDispatchModeKey>
- pop_highest_infra_mode();
+ static const std::
+ tuple<std::shared_ptr<PyObject_TorchDispatchMode>, TorchDispatchModeKey>
+ pop_highest_infra_mode();
- static const std::shared_ptr<SafePyObject>& get_stack_at(int64_t idx);
+ static const std::shared_ptr<PyObject_TorchDispatchMode>& get_stack_at(
+ int64_t idx);
static int64_t stack_len();
- static const c10::optional<std::shared_ptr<SafePyObject>> get_mode(
- TorchDispatchModeKey mode_key);
- static const c10::optional<std::shared_ptr<SafePyObject>> unset_mode(
- TorchDispatchModeKey mode_key);
+ static const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
+ get_mode(TorchDispatchModeKey mode_key);
+ static const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
+ unset_mode(TorchDispatchModeKey mode_key);
static void set_mode(
- const std::shared_ptr<SafePyObject>& mode,
+ const std::shared_ptr<PyObject_TorchDispatchMode>& mode,
TorchDispatchModeKey mode_key);
static const TorchDispatchModeTLS& get_state();
@@ -45,13 +49,13 @@ struct C10_API TorchDispatchModeTLS {
static bool any_modes_set(bool skip_infra_modes = false);
private:
- std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
+ std::vector<std::shared_ptr<PyObject_TorchDispatchMode>> stack_;
// Users are allowed to push multiple ProxyTorchDispatchMode objects onto the
// stack
// However, we only allow a single FakeTensorMode onto the stack at a time
// (Pushing additional FakeTensorModes onto the stack is a no-op)
std::array<
- c10::optional<std::shared_ptr<c10::SafePyObject>>,
+ c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>,
static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS)>
infra_modes_;
};
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index 9bbc721dad..fad41ec848 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -55,6 +55,7 @@ from torch.types import (
)
from torch._prims_common import DeviceLikeType
+from torch.utils._python_dispatch import TorchDispatchMode
# This module is defined in torch/csrc/Module.cpp
@@ -1331,11 +1332,11 @@ def _pop_torch_function_stack() -> Any: ...
def _get_function_stack_at(idx: _int) -> Any: ...
def _len_torch_function_stack() -> _int: ...
def _set_torch_dispatch_mode(cls: Any) -> None: ...
-def _push_on_torch_dispatch_stack(cls: Any) -> None: ...
+def _push_on_torch_dispatch_stack(cls: TorchDispatchMode) -> None: ...
def _pop_torch_dispatch_stack(mode_key: Optional[torch._C._TorchDispatchModeKey] = None) -> Any: ...
def _get_dispatch_mode(mode_key: Optional[torch._C._TorchDispatchModeKey]) -> Any: ...
-def _unset_dispatch_mode(mode: torch._C._TorchDispatchModeKey) -> Any: ...
-def _set_dispatch_mode(mode: Any) -> None: ...
+def _unset_dispatch_mode(mode: torch._C._TorchDispatchModeKey) -> Optional[TorchDispatchMode]: ...
+def _set_dispatch_mode(mode: TorchDispatchMode) -> None: ...
def _get_dispatch_stack_at(idx: _int) -> Any: ...
def _len_torch_dispatch_stack() -> _int: ...
def _activate_gpu_trace() -> None: ...
@@ -1548,6 +1549,8 @@ def _dispatch_pystub(name: str, overload: str) -> Optional[Tuple[str, str]]: ...
def _dispatch_is_alias_key(dispatch: _dispatchkey) -> _bool: ...
def _functionality_to_backend_keys(dispatch: _dispatchkey) -> List[DispatchKey]: ...
def _functionalization_reapply_views_tls() -> _bool: ...
+def _only_lift_cpu_tensors() -> _bool: ...
+def _set_only_lift_cpu_tensors(value: _bool) -> None: ...
def _set_throw_on_mutable_data_ptr(tensor: Tensor) -> None: ...
def _set_warn_deprecated_on_mutable_data_ptr(tensor: Tensor) -> None: ...
@@ -2277,6 +2280,7 @@ def _register_py_class_for_device(device: str, cls: Any) -> None: ...
# Defined in torch/csrc/Module.cpp
def _current_graph_task_id() -> _int: ...
def _current_autograd_node() -> _Node: ...
+def _dispatch_key_set(Tensor) -> str: ...
# Defined in torch/csrc/Exceptions.cpp
class OutOfMemoryError(RuntimeError): ...
diff --git a/torch/_ops.py b/torch/_ops.py
index 774b6753c9..1b230b929c 100644
--- a/torch/_ops.py
+++ b/torch/_ops.py
@@ -1254,4 +1254,4 @@ class _Ops(types.ModuleType):
# The ops "namespace"
-ops = _Ops()
+ops: _Ops = _Ops()
diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py
index 8174f0658f..d291605d58 100644
--- a/torch/_subclasses/fake_tensor.py
+++ b/torch/_subclasses/fake_tensor.py
@@ -1,5 +1,3 @@
-# mypy: ignore-errors
-
import contextlib
import functools
import logging
@@ -8,7 +6,18 @@ import traceback
import weakref
from collections import defaultdict
from dataclasses import dataclass
-from typing import Any, Dict, List, Optional, Tuple, Type, TYPE_CHECKING, TypeVar
+from typing import (
+ Any,
+ cast,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Type,
+ TYPE_CHECKING,
+ TypeVar,
+ Union,
+)
from weakref import ReferenceType
import torch
@@ -30,6 +39,7 @@ from torch._utils import render_call
from torch.fx.operator_schemas import normalize_function
from torch.multiprocessing.reductions import StorageWeakRef
from torch.overrides import TorchFunctionMode
+from torch.types import _bool
from torch.utils._mode_utils import no_dispatch
from torch.utils._python_dispatch import (
is_traceable_wrapper_subclass,
@@ -42,6 +52,13 @@ from torch.utils._traceback import CapturedTraceback
if TYPE_CHECKING:
from torch.fx.experimental.symbolic_shapes import ShapeEnv
+
+class _Unassigned:
+ pass
+
+
+_UNASSIGNED = _Unassigned()
+
DimList = List
log = logging.getLogger(__name__)
@@ -718,7 +735,7 @@ def extract_tensor_metadata(t: torch.Tensor) -> "TensorMetadata":
"""
Extract the TensorMetadata of a tensor.
"""
- memory_format = suggest_memory_format(t)
+ memory_format: Optional[torch.memory_format] = suggest_memory_format(t)
if is_sparse_any(t) or not t.is_contiguous(memory_format=memory_format):
memory_format = None
@@ -806,10 +823,11 @@ class FakeTensorMode(TorchDispatchMode):
cache: Dict[_DispatchCacheKey, _DispatchCacheEntry] = {}
cache_hits: int = 0
cache_misses: int = 0
- cache_bypasses = defaultdict(int)
+ cache_bypasses: Dict[str, int] = defaultdict(int)
# Every time you retrace using the same fake tensor mode, you should
# advance the epoch so we don't reuse unbacked memos
epoch: int = 0
+ in_kernel_invocation: bool = False
def __init__(
self,
@@ -860,7 +878,9 @@ class FakeTensorMode(TorchDispatchMode):
# in_kernel_invocation
# If another fake mode was already active when we enter, we also stash it here.
# That way when we exit, we know to re-enable the previous fake mode.
- self.enter_stack: List[Tuple[bool, Optional[FakeTensorMode]]] = []
+ self.enter_stack: List[
+ Tuple[bool, Optional[TorchDispatchMode], Optional[_bool]]
+ ] = []
self.shape_env: ShapeEnv = shape_env
@@ -972,7 +992,7 @@ class FakeTensorMode(TorchDispatchMode):
Lookup a cache entry for the given arguments. If none exists, dispatch
and cache the result (if the result is eligible for caching).
"""
- output = unassigned = object()
+ output: Union[FakeTensor, _Unassigned] = _UNASSIGNED
try:
key = self._cache_key(func, args, kwargs)
entry = FakeTensorMode.cache.get(key, None)
@@ -991,7 +1011,7 @@ class FakeTensorMode(TorchDispatchMode):
except _BypassDispatchCache as e:
FakeTensorMode.cache_bypasses[e.reason] += 1
- if output is unassigned:
+ if output is _UNASSIGNED:
output = self._dispatch_impl(func, types, args, kwargs)
return output
@@ -1066,7 +1086,7 @@ class FakeTensorMode(TorchDispatchMode):
if isinstance(args, dict):
args = list(args.keys()) + list(args.values())
- result = []
+ result: List[Any] = []
for arg in args:
if isinstance(arg, FakeTensor):
if not self.is_our_fake(arg):
@@ -1177,7 +1197,7 @@ class FakeTensorMode(TorchDispatchMode):
# Synthesize a new FakeTensor with the cached metadata.
metadata = entry.metadata
- assert not metadata.is_sparse
+ assert metadata and not metadata.is_sparse
empty = torch.empty_strided(
metadata.shape,
@@ -1195,7 +1215,7 @@ class FakeTensorMode(TorchDispatchMode):
if func.is_view:
# For view ops, the storage should be the same as the tensor input.
- storage = args[entry.view_idx].untyped_storage()
+ storage = args[cast(int, entry.view_idx)].untyped_storage()
with in_kernel_invocation_manager(self):
empty.set_(
storage, metadata.storage_offset, metadata.shape, metadata.stride
@@ -1263,7 +1283,7 @@ class FakeTensorMode(TorchDispatchMode):
else:
return self._dispatch_impl(func, types, args, kwargs)
- def _dispatch_impl(self, func, types, args, kwargs):
+ def _dispatch_impl(self, func, types, args, kwargs) -> FakeTensor:
flat_args, args_spec = pytree.tree_flatten((args, kwargs))
flat_arg_fake_tensors = [
@@ -1557,7 +1577,7 @@ class FakeTensorMode(TorchDispatchMode):
If not, try to convert them to fake tensors.
Returns the original args, kwargs, and a flattened list of (args, kwargs) that are fake tensors.
"""
- flat_arg_fake_tensors = []
+ flat_arg_fake_tensors: List[Any] = []
def validate(x):
if not isinstance(x, torch.Tensor):
@@ -1684,7 +1704,7 @@ class FakeTensorMode(TorchDispatchMode):
source: Optional[Source] = None,
symbolic_context=None,
):
- shape_env = self.shape_env
+ shape_env: Optional[ShapeEnv] = self.shape_env
if static_shapes is None:
static_shapes = self.static_shapes
if static_shapes:
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index 81fc50f100..3f2bbf344a 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -1079,11 +1079,13 @@ static PyObject* push_on_torch_dispatch_stack(
if (maybe_mode_key_obj) {
mode_key = py::cast<c10::impl::TorchDispatchModeKey>(maybe_mode_key_obj);
c10::impl::TorchDispatchModeTLS::set_mode(
- std::make_shared<c10::SafePyObject>(arg, getPyInterpreter()),
+ std::make_shared<c10::impl::PyObject_TorchDispatchMode>(
+ arg, getPyInterpreter()),
mode_key.value());
} else {
c10::impl::TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
- std::make_shared<c10::SafePyObject>(arg, getPyInterpreter()));
+ std::make_shared<c10::impl::PyObject_TorchDispatchMode>(
+ arg, getPyInterpreter()));
}
Py_INCREF(arg);
}
@@ -1147,7 +1149,9 @@ static PyObject* set_dispatch_mode(PyObject* _unused, PyObject* mode) {
Py_INCREF(mode);
c10::impl::TorchDispatchModeTLS::set_mode(
- std::make_shared<c10::SafePyObject>(mode, getPyInterpreter()), mode_key);
+ std::make_shared<c10::impl::PyObject_TorchDispatchMode>(
+ mode, getPyInterpreter()),
+ mode_key);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
diff --git a/torch/csrc/utils/torch_dispatch_mode.h b/torch/csrc/utils/torch_dispatch_mode.h
index 81729f27df..79173aeb3e 100644
--- a/torch/csrc/utils/torch_dispatch_mode.h
+++ b/torch/csrc/utils/torch_dispatch_mode.h
@@ -29,12 +29,12 @@ struct StashTorchDispatchModeGuard {
}
}
- const std::shared_ptr<c10::SafePyObject>& get_cur_mode() {
+ const std::shared_ptr<c10::impl::PyObject_TorchDispatchMode>& get_cur_mode() {
return saved_mode_;
}
private:
- std::shared_ptr<at::SafePyObject> saved_mode_;
+ std::shared_ptr<c10::impl::PyObject_TorchDispatchMode> saved_mode_;
c10::optional<c10::impl::TorchDispatchModeKey> saved_mode_key_;
};
diff --git a/torch/utils/_python_dispatch.py b/torch/utils/_python_dispatch.py
index f5f830c2f1..ec24f006a7 100644
--- a/torch/utils/_python_dispatch.py
+++ b/torch/utils/_python_dispatch.py
@@ -159,7 +159,7 @@ def _get_current_dispatch_mode_stack():
return [_get_dispatch_stack_at(i) for i in range(stack_len)]
-def _push_mode(mode):
+def _push_mode(mode: TorchDispatchMode):
k = mode._dispatch_key if hasattr(mode, "_dispatch_key") else None
assert k is None or k == torch._C.DispatchKey.PreDispatch
if k is None:
|
2.41.0
|
4bb5da529b03aa23410b777df06656c55c1151e
|
Thu, 25 Apr 2024 14:21:15 +0000
|
[PATCH 0657/1000] Fix mkl cmake not supporting static mkl on Windows. (#124925)
|
Fixes #124869: MKL cmake did not support linking against a static MKL library on Windows. Local test: an MKL static build (including the MKL backend check) and an MKL shared build with the original path were both verified; screenshots omitted. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124925 Approved by: https://github.com/jgong5, https://github.com/ezyang
|
diff --git a/cmake/Modules/FindMKL.cmake b/cmake/Modules/FindMKL.cmake
index a02f3e092d..01de7c7cec 100644
--- a/cmake/Modules/FindMKL.cmake
+++ b/cmake/Modules/FindMKL.cmake
@@ -263,10 +263,13 @@ MACRO(CHECK_ALL_LIBRARIES LIBRARIES OPENMP_TYPE OPENMP_LIBRARY _name _list _flag
ELSE()
IF(MSVC)
SET(lib_names ${_library}_dll)
+ SET(lib_names_static ${_library})
+ # Both seek shared and static mkl library.
+ FIND_LIBRARY(${_prefix}_${_library}_LIBRARY NAMES ${lib_names} ${lib_names_static})
ELSE()
SET(lib_names ${_library})
+ FIND_LIBRARY(${_prefix}_${_library}_LIBRARY NAMES ${lib_names})
ENDIF()
- FIND_LIBRARY(${_prefix}_${_library}_LIBRARY NAMES ${lib_names})
ENDIF()
MARK_AS_ADVANCED(${_prefix}_${_library}_LIBRARY)
IF(NOT (${_library} STREQUAL "tbb"))
|
2.41.0
|
01275934bfa1ff358b1c01d3754f2807cd04ee2
|
Wed, 24 Apr 2024 20:22:25 -0700
|
[PATCH 0658/1000] Fix global flake8 issues (#124771)
|
Prior to this `lintrunner --all-files --take FLAKE8` failed. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124771 Approved by: https://github.com/Skylion007 ghstack dependencies: #124428
|
diff --git a/.github/scripts/cherry_pick.py b/.github/scripts/cherry_pick.py
index fe53fb99cd..4c892de21d 100755
--- a/.github/scripts/cherry_pick.py
+++ b/.github/scripts/cherry_pick.py
@@ -29,7 +29,7 @@ def parse_args() -> Any:
"--onto-branch", type=str, required=True, help="the target release branch"
)
parser.add_argument(
- "--github-actor", type=str, required=True, help="all the world’s a stage"
+ "--github-actor", type=str, required=True, help="all the world's a stage"
)
parser.add_argument(
"--classification",
diff --git a/benchmarks/transformer/better_transformer_vs_mha_functional.py b/benchmarks/transformer/better_transformer_vs_mha_functional.py
index 5b4f794d0f..71be7db456 100644
--- a/benchmarks/transformer/better_transformer_vs_mha_functional.py
+++ b/benchmarks/transformer/better_transformer_vs_mha_functional.py
@@ -152,8 +152,8 @@ def run(
result_entry["sequence_length"] = sequence_length
result_entry["n_heads"] = num_heads
result_entry["embed_dim"] = embed_dim
- result_entry["time_native_mha_slow(μs)"] = f"{time_native_mha_slow:.3f}"
- result_entry["time_native_mha_fast (μs)"] = f"{time_native_mha_fast:.3f}"
+ result_entry["time_native_mha_slow(\u00B5s)"] = f"{time_native_mha_slow:.3f}"
+ result_entry["time_native_mha_fast (\u00B5s)"] = f"{time_native_mha_fast:.3f}"
result_entry["speedup flash_mha v native_mha"] = f"{speedup_fast_internal:.3f}"
result_entry["padding"] = f"{padding:.3f}"
return result_entry
diff --git a/benchmarks/transformer/sdp.py b/benchmarks/transformer/sdp.py
index ede28d5df4..c79ab8358b 100644
--- a/benchmarks/transformer/sdp.py
+++ b/benchmarks/transformer/sdp.py
@@ -81,10 +81,10 @@ class ExperimentResults:
@classmethod
def get_entry_names(cls) -> List[str]:
return [
- "nn_mha_time (μs)",
- "compiled_nn_mha_time (μs)",
- "composite_mha_time (μs)",
- "compiled_composite_mha_time (μs)",
+ "nn_mha_time (\u00B5s)",
+ "compiled_nn_mha_time (\u00B5s)",
+ "composite_mha_time (\u00B5s)",
+ "compiled_composite_mha_time (\u00B5s)",
]
diff --git a/functorch/einops/_parsing.py b/functorch/einops/_parsing.py
index 63adcb6e5a..25f86ec6fe 100644
--- a/functorch/einops/_parsing.py
+++ b/functorch/einops/_parsing.py
@@ -28,7 +28,7 @@ import keyword
import warnings
from typing import Collection, List, Mapping, Optional, Set, Tuple, Union
-_ellipsis: str = "…" # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated
+_ellipsis: str = "\u2026" # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated
class AnonymousAxis:
diff --git a/test/distributions/test_distributions.py b/test/distributions/test_distributions.py
index 3df26dec7a..cd9a0d39bb 100644
--- a/test/distributions/test_distributions.py
+++ b/test/distributions/test_distributions.py
@@ -3752,11 +3752,11 @@ class TestDistributions(DistributionsTestCase):
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_dirichlet_log_prob_zero(self):
- # Specifically test the special case where x=0 and α=1. The PDF is
- # proportional to x**(α-1), which in this case works out to 0**0=1.
+ # Specifically test the special case where x=0 and alpha=1. The PDF is
+ # proportional to x**(alpha-1), which in this case works out to 0**0=1.
# The log PDF of this term should therefore be 0. However, it's easy
# to accidentally introduce NaNs by calculating log(x) without regard
- # for the value of α-1.
+ # for the value of alpha-1.
alpha = torch.tensor([1, 2])
dist = Dirichlet(alpha)
x = torch.tensor([0, 1])
diff --git a/test/functorch/test_parsing.py b/test/functorch/test_parsing.py
index ec0f16c724..2b4d4e5e60 100644
--- a/test/functorch/test_parsing.py
+++ b/test/functorch/test_parsing.py
@@ -107,7 +107,7 @@ class TestParsedExpression(TestCase):
ParsedExpression("(a) ((b c) (d ...))")
# invalid identifiers
- ParsedExpression("camelCase under_scored cApiTaLs ß ...")
+ ParsedExpression("camelCase under_scored cApiTaLs \u00DF ...")
with self.assertRaises(ValueError):
ParsedExpression("1a")
with self.assertRaises(ValueError):
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index 1d8af04074..0382cb4e84 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -190,8 +190,8 @@ class TestTemplatedSDPA(InductorTestCase):
# this means that the base for the LSE computed by ref is e while for the compiled
# version it is 2. To compare we use the change of base formula
# log_2(x_compiled) = log_e(x_ref) * log_2(e) where
- # x_ref = ∑_i e^(scores[i])
- # x_compiled = ∑_i 2^(log2(e) * scores[i])
+ # x_ref = sum(_i e^(scores[i]))
+ # x_compiled = sum(_i 2^(log2(e) * scores[i]))
self.assertTrue(ref_lse.dtype == torch.float32)
self.assertTrue(compiled_lse.dtype == torch.float32)
diff --git a/test/package/test_directory_reader.py b/test/package/test_directory_reader.py
index f98289345d..e5854b2954 100644
--- a/test/package/test_directory_reader.py
+++ b/test/package/test_directory_reader.py
@@ -111,16 +111,16 @@ class DirectoryReaderTest(PackageTestCase):
with PackageExporter(filename) as pe:
# Layout looks like:
# package
- # ├── one/
- # │ ├── a.txt
- # │ ├── b.txt
- # │ ├── c.txt
- # │ └── three/
- # │ ├── d.txt
- # │ └── e.txt
- # └── two/
- # ├── f.txt
- # └── g.txt
+ # |-- one/
+ # | |-- a.txt
+ # | |-- b.txt
+ # | |-- c.txt
+ # | +-- three/
+ # | |-- d.txt
+ # | +-- e.txt
+ # +-- two/
+ # |-- f.txt
+ # +-- g.txt
pe.save_text("one", "a.txt", "hello, a!")
pe.save_text("one", "b.txt", "hello, b!")
pe.save_text("one", "c.txt", "hello, c!")
diff --git a/test/package/test_misc.py b/test/package/test_misc.py
index 59b25ca2e6..d97eaec3ac 100644
--- a/test/package/test_misc.py
+++ b/test/package/test_misc.py
@@ -38,46 +38,46 @@ class TestMisc(PackageTestCase):
export_plain = dedent(
"""\
- ├── .data
- │ ├── extern_modules
- │ ├── python_version
- │ ├── serialization_id
- │ └── version
- ├── main
- │ └── main
- ├── obj
- │ └── obj.pkl
- ├── package_a
- │ ├── __init__.py
- │ └── subpackage.py
- ├── byteorder
- └── module_a.py
+ \u251c\u2500\u2500 .data
+ \u2502 \u251c\u2500\u2500 extern_modules
+ \u2502 \u251c\u2500\u2500 python_version
+ \u2502 \u251c\u2500\u2500 serialization_id
+ \u2502 \u2514\u2500\u2500 version
+ \u251c\u2500\u2500 main
+ \u2502 \u2514\u2500\u2500 main
+ \u251c\u2500\u2500 obj
+ \u2502 \u2514\u2500\u2500 obj.pkl
+ \u251c\u2500\u2500 package_a
+ \u2502 \u251c\u2500\u2500 __init__.py
+ \u2502 \u2514\u2500\u2500 subpackage.py
+ \u251c\u2500\u2500 byteorder
+ \u2514\u2500\u2500 module_a.py
"""
)
export_include = dedent(
"""\
- ├── obj
- │ └── obj.pkl
- └── package_a
- └── subpackage.py
+ \u251c\u2500\u2500 obj
+ \u2502 \u2514\u2500\u2500 obj.pkl
+ \u2514\u2500\u2500 package_a
+ \u2514\u2500\u2500 subpackage.py
"""
)
import_exclude = dedent(
"""\
- ├── .data
- │ ├── extern_modules
- │ ├── python_version
- │ ├── serialization_id
- │ └── version
- ├── main
- │ └── main
- ├── obj
- │ └── obj.pkl
- ├── package_a
- │ ├── __init__.py
- │ └── subpackage.py
- ├── byteorder
- └── module_a.py
+ \u251c\u2500\u2500 .data
+ \u2502 \u251c\u2500\u2500 extern_modules
+ \u2502 \u251c\u2500\u2500 python_version
+ \u2502 \u251c\u2500\u2500 serialization_id
+ \u2502 \u2514\u2500\u2500 version
+ \u251c\u2500\u2500 main
+ \u2502 \u2514\u2500\u2500 main
+ \u251c\u2500\u2500 obj
+ \u2502 \u2514\u2500\u2500 obj.pkl
+ \u251c\u2500\u2500 package_a
+ \u2502 \u251c\u2500\u2500 __init__.py
+ \u2502 \u2514\u2500\u2500 subpackage.py
+ \u251c\u2500\u2500 byteorder
+ \u2514\u2500\u2500 module_a.py
"""
)
diff --git a/test/package/test_resources.py b/test/package/test_resources.py
index 208917be77..2f30c0aeae 100644
--- a/test/package/test_resources.py
+++ b/test/package/test_resources.py
@@ -25,16 +25,16 @@ class TestResources(PackageTestCase):
with PackageExporter(buffer) as pe:
# Layout looks like:
# package
- # ├── one/
- # │ ├── a.txt
- # │ ├── b.txt
- # │ ├── c.txt
- # │ └── three/
- # │ ├── d.txt
- # │ └── e.txt
- # └── two/
- # ├── f.txt
- # └── g.txt
+ # |-- one/
+ # | |-- a.txt
+ # | |-- b.txt
+ # | |-- c.txt
+ # | +-- three/
+ # | |-- d.txt
+ # | +-- e.txt
+ # +-- two/
+ # |-- f.txt
+ # +-- g.txt
pe.save_text("one", "a.txt", "hello, a!")
pe.save_text("one", "b.txt", "hello, b!")
pe.save_text("one", "c.txt", "hello, c!")
diff --git a/test/test_jit.py b/test/test_jit.py
index 6f79267a63..bb6f4e2558 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -15679,7 +15679,7 @@ dedent """
def test_unicode_comments(self):
@torch.jit.script
def test(self, a):
- # 🤷🤷🤷🤷
+ # shrug
return torch.nn.functional.relu(a)
def test_get_set_state_with_tensors(self):
diff --git a/test/test_jit_fuser.py b/test/test_jit_fuser.py
index 6e342ea4f5..9d59dcce08 100644
--- a/test/test_jit_fuser.py
+++ b/test/test_jit_fuser.py
@@ -70,7 +70,7 @@ class TestFuser(JitTestCase):
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_abs_cpu_unicode_temp_dir(self):
- with TemporaryDirectoryName(suffix='中文') as dname:
+ with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname:
shell_env = os.environ.copy()
shell_env['TMP'] = dname
cmd = [sys.executable, os.path.basename(__file__), type(self).__name__ + '.test_abs_cpu']
diff --git a/test/test_linalg.py b/test/test_linalg.py
index e22dabcf56..5ddeac9aa3 100644
--- a/test/test_linalg.py
+++ b/test/test_linalg.py
@@ -1950,7 +1950,7 @@ class TestLinalg(TestCase):
# if out tensor with floating dtype is passed for complex output an error is thrown
if not dtype.is_complex:
- # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
+ # The characteristic equation is p(lambda) = lambda^2 - 2lambda + 5 = 0, with roots lambda = 1[+-]2i
a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
out0 = torch.empty(0, device=device, dtype=dtype)
out1 = torch.empty(0, device=device, dtype=dtype)
@@ -2117,7 +2117,7 @@ class TestLinalg(TestCase):
# if out tensor with floating dtype is passed for complex output an error is thrown
if not dtype.is_complex:
- # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
+ # The characteristic equation is p(lambda) = lambda^2 - 2lambda + 5 = 0, with roots lambda = 1[+-]2i
a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
out = torch.empty(0, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
diff --git a/test/test_public_bindings.py b/test/test_public_bindings.py
index 16ed16d11a..59e535e144 100644
--- a/test/test_public_bindings.py
+++ b/test/test_public_bindings.py
@@ -428,7 +428,7 @@ class TestPublicBindings(TestCase):
def test_correct_module_names(self):
'''
An API is considered public, if its `__module__` starts with `torch.`
- and there is no name in `__module__` or the object itself that starts with “_”.
+ and there is no name in `__module__` or the object itself that starts with "_".
Each public package should either:
- (preferred) Define `__all__` and all callables and classes in there must have their
`__module__` start with the current submodule's path. Things not in `__all__` should
diff --git a/test/test_serialization.py b/test/test_serialization.py
index 164bfdddb1..0779b744de 100644
--- a/test/test_serialization.py
+++ b/test/test_serialization.py
@@ -924,7 +924,7 @@ class TestSerialization(TestCase, SerializationMixin):
test(fname)
if IS_FILESYSTEM_UTF8_ENCODING:
- with TemporaryDirectoryName(suffix='非ASCIIパス') as dname:
+ with TemporaryDirectoryName(suffix='\u975eASCII\u30d1\u30b9') as dname:
with TemporaryFileName(dir=dname) as fname:
test(fname)
diff --git a/test/test_torch.py b/test/test_torch.py
index 25d1cc14ed..641dac417f 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -8046,7 +8046,7 @@ class TestTorch(TestCase):
assert_with_filename(fname)
if IS_FILESYSTEM_UTF8_ENCODING:
- with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname:
+ with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname, TemporaryFileName(dir=dname) as fname:
assert_with_filename(fname)
def test_torch_from_file(self):
@@ -8077,7 +8077,7 @@ class TestTorch(TestCase):
assert_with_filename(fname)
if IS_FILESYSTEM_UTF8_ENCODING:
- with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname:
+ with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname, TemporaryFileName(dir=dname) as fname:
assert_with_filename(fname)
def test_print(self):
diff --git a/torch/_decomp/decompositions.py b/torch/_decomp/decompositions.py
index 0780df14a8..124ed8fb72 100644
--- a/torch/_decomp/decompositions.py
+++ b/torch/_decomp/decompositions.py
@@ -744,7 +744,7 @@ def slice_forward(
raise RuntimeError("slice step must be positive")
start_val = start if start is not None else 0
- end_val = end if end is not None else sys.maxsize # 2^63 – 1
+ end_val = end if end is not None else sys.maxsize # 2^63 - 1
if start_val < 0:
start_val += sizes[dim]
diff --git a/torch/_dynamo/polyfill.py b/torch/_dynamo/polyfill.py
index d6bbb59692..18aaa067a3 100644
--- a/torch/_dynamo/polyfill.py
+++ b/torch/_dynamo/polyfill.py
@@ -57,7 +57,7 @@ def list_cmp(op: Callable[[Any, Any], bool], left: Sequence[Any], right: Sequenc
def dropwhile(predicate, iterable):
- # dropwhile(lambda x: x<5, [1,4,6,4,1]) → 6 4 1
+ # dropwhile(lambda x: x<5, [1,4,6,4,1]) -> 6 4 1
iterable = iter(iterable)
for x in iterable:
if not predicate(x):
diff --git a/torch/_export/error.py b/torch/_export/error.py
index 9fc55092fd..03b7f52fb9 100644
--- a/torch/_export/error.py
+++ b/torch/_export/error.py
@@ -5,13 +5,13 @@ class ExportErrorType(Enum):
# User providing invalid inputs to either tracer, or other public facing APIs
INVALID_INPUT_TYPE = 1
- # User returning values from their models that we don’t support.
+ # User returning values from their models that we don't support.
INVALID_OUTPUT_TYPE = 2
# Generated IR does not conform to Export IR Specification.
VIOLATION_OF_SPEC = 3
- # User’s code contains types and functionalities we don’t support.
+ # User's code contains types and functionalities we don't support.
NOT_SUPPORTED = 4
# User's code didn't provide necessary details for us to successfully trace and export.
diff --git a/torch/_functorch/autograd_function.py b/torch/_functorch/autograd_function.py
index 5017a25022..98ffe6dd16 100644
--- a/torch/_functorch/autograd_function.py
+++ b/torch/_functorch/autograd_function.py
@@ -498,7 +498,7 @@ def get_tangents_in_dims(input_dims, tangents):
# in_dims = 0
# vmap(Sum.apply, in_dims)(x)
#
-# Let’s assume for a moment that we didn’t vmap setup_context in VmappedSum:
+# Let's assume for a moment that we didn't vmap setup_context in VmappedSum:
#
# class VmappedSum(torch.autograd.Function):
# @staticmethod
@@ -519,7 +519,7 @@ def get_tangents_in_dims(input_dims, tangents):
# return gx
#
# We end up saving [B, 4] as x_shape. In the backward, gy has shape [B],
-# and we’re doing:
+# and we're doing:
#
# def backward_no_context(gy):
# return gy.expand([B, 4])
diff --git a/torch/_inductor/codegen/memory_planning.py b/torch/_inductor/codegen/memory_planning.py
index 8b58fe049e..2aade2a297 100644
--- a/torch/_inductor/codegen/memory_planning.py
+++ b/torch/_inductor/codegen/memory_planning.py
@@ -62,8 +62,8 @@ class LiveRange:
Invariant: begin <= end
"""
- begin: float # int | ±inf
- end: float # int | ±inf
+ begin: float # int | +/-inf
+ end: float # int | +/-inf
def contains(self, other: LiveRange):
"""Is other entirely within self"""
diff --git a/torch/_meta_registrations.py b/torch/_meta_registrations.py
index 6245f908db..85fd7c3c5f 100644
--- a/torch/_meta_registrations.py
+++ b/torch/_meta_registrations.py
@@ -5373,7 +5373,7 @@ def meta__scaled_dot_product_flash_attention_for_cpu_backward(
scale: Optional[float] = None,
):
# cpus's grad layout is different from cuda's,
- # i.e. (batch_size, seq_len,num_heads, head_dim)
+ # i.e. (batch_size, seq_len,num_heads, head_dim)
batch_size = query.size(0)
num_heads = query.size(1)
head_dim = query.size(3)
diff --git a/torch/_numpy/_funcs_impl.py b/torch/_numpy/_funcs_impl.py
index 7c09288f45..8f3a70589a 100644
--- a/torch/_numpy/_funcs_impl.py
+++ b/torch/_numpy/_funcs_impl.py
@@ -2008,7 +2008,7 @@ def min_scalar_type(a: ArrayLike, /):
from ._dtypes import DType
if a.numel() > 1:
- # numpy docs: "For non-scalar array a, returns the vector’s dtype unmodified."
+ # numpy docs: "For non-scalar array a, returns the vector's dtype unmodified."
return DType(a.dtype)
if a.dtype == torch.bool:
diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index fc84b8381b..144419eb5b 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -485,7 +485,7 @@ def _make_alias(fn, name):
"""
This function defines an alias of another function and sets its __name__ argument.
It also sets its __module__ argument to the module of the caller.
- Note that when naïvely doing `alias = fn`, we have that `alias.__name__ == "fn"`, and
+ Note that when naively doing `alias = fn`, we have that `alias.__name__ == "fn"`, and
`alias.__module__ == fn.__module__`.
"""
diff --git a/torch/_refs/nn/functional/__init__.py b/torch/_refs/nn/functional/__init__.py
index e1548518cb..dd06febbcd 100644
--- a/torch/_refs/nn/functional/__init__.py
+++ b/torch/_refs/nn/functional/__init__.py
@@ -600,7 +600,7 @@ def margin_ranking_loss(
margin: float = 0.0,
reduction: str = "mean",
) -> TensorLikeType:
- # loss_without_reduction = max(0, −target * (input1 − input2) + margin)
+ # loss_without_reduction = max(0, -target * (input1 - input2) + margin)
if input1.ndim != input2.ndim or input1.ndim != target.ndim:
raise RuntimeError(
"margin_ranking_loss : All input tensors should have same dimension but got sizes: "
diff --git a/torch/_refs/special/__init__.py b/torch/_refs/special/__init__.py
index 048de83506..14ec33cf20 100644
--- a/torch/_refs/special/__init__.py
+++ b/torch/_refs/special/__init__.py
@@ -116,7 +116,7 @@ def i1e(a: TensorLikeType) -> TensorLikeType:
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def log_ndtr(a: TensorLikeType) -> TensorLikeType:
- # Note: M_SQRT1_2 is the value of 1 / √2
+ # Note: M_SQRT1_2 is the value of 1 / sqrt(2)
M_SQRT1_2 = 0.707106781186547524400844362104849039
t = a * M_SQRT1_2
return torch.where(
@@ -185,7 +185,7 @@ def multigammaln(a: TensorLikeType, p: int) -> TensorLikeType:
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def ndtr(a: TensorLikeType) -> TensorLikeType:
- # Note: M_SQRT1_2 is the value of 1 / √2
+ # Note: M_SQRT1_2 is the value of 1 / sqrt(2)
M_SQRT1_2 = 0.707106781186547524400844362104849039
a_sqrt_2 = a * M_SQRT1_2
return (1 + torch.erf(a_sqrt_2)) * 0.5
diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
index d8b864118a..f429fee683 100644
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -2305,8 +2305,8 @@ Keyword Args:
times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
- These relative weights are typically large for observations considered “important” and smaller for
- observations considered less “important”. Its numel must equal the number of columns of :attr:`input`.
+ These relative weights are typically large for observations considered "important" and smaller for
+ observations considered less "important". Its numel must equal the number of columns of :attr:`input`.
Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
Returns:
@@ -4773,7 +4773,7 @@ This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
-is estimated using `Taylor’s theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
+is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
diff --git a/torch/ao/quantization/backend_config/backend_config.py b/torch/ao/quantization/backend_config/backend_config.py
index e5a4d2f3af..2e5b90e23e 100644
--- a/torch/ao/quantization/backend_config/backend_config.py
+++ b/torch/ao/quantization/backend_config/backend_config.py
@@ -79,12 +79,12 @@ class DTypeWithConstraints:
* `quant_min_lower_bound` and `quant_max_upper_bound`: Lower and upper
bounds for the minimum and maximum quantized values respectively. If
- the QConfig’s `quant_min` and `quant_max` fall outside this range,
+ the QConfig's `quant_min` and `quant_max` fall outside this range,
then the QConfig will be ignored.
* `scale_min_lower_bound` and `scale_max_upper_bound`: Lower and upper
bounds for the minimum and maximum scale values respectively. If the
- QConfig’s minimum scale value (currently exposed as `eps`) falls below
+ QConfig's minimum scale value (currently exposed as `eps`) falls below
the lower bound, then the QConfig will be ignored. Note that the upper
bound is currently not enforced.
@@ -130,7 +130,7 @@ class DTypeConfig:
dtypes here are the same as the semantics of the dtypes specified in
the observers.
- These dtypes are matched against the ones specified in the user’s
+ These dtypes are matched against the ones specified in the user's
QConfig. If there is a match, and the QConfig satisfies the constraints
specified in the DTypeConfig (if any), then we will quantize the given
pattern using this DTypeConfig. Otherwise, the QConfig is ignored and
diff --git a/torch/distributed/_shard/sharded_tensor/__init__.py b/torch/distributed/_shard/sharded_tensor/__init__.py
index 152c287ee7..602f751637 100644
--- a/torch/distributed/_shard/sharded_tensor/__init__.py
+++ b/torch/distributed/_shard/sharded_tensor/__init__.py
@@ -187,7 +187,7 @@ def full(sharding_spec: ShardingSpec,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
- Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype
+ Creates a :class:`ShardedTensor` filled with fill_value. The tensor's dtype
is inferred from fill_value. If dtype is specified, it will override the
inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion.
Args:
@@ -195,7 +195,7 @@ def full(sharding_spec: ShardingSpec,
describing how to shard the Tensor.
size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
output tensor.
- fill_value (Scalar) – the value to fill the output tensor with.
+ fill_value (Scalar) - the value to fill the output tensor with.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py
index c421fa327d..c869b71d69 100644
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py
@@ -117,7 +117,7 @@ def _handle_col_wise_sharding_base(
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed “pad”.
+ i.e. it remains as a fixed "pad".
Note that the embedding vector at padding_idx is
excluded from the reduction.
@@ -312,7 +312,7 @@ def _handle_row_wise_mask(gather_inp, padding_idx, weight, world_size, rank):
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed “pad”.
+ i.e. it remains as a fixed "pad".
Note that the embedding vector at padding_idx is
excluded from the reduction.
weight: weight tensor of Embedding look-up table.
diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py
index e1c1cb6380..c9cfcba1fe 100644
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py
@@ -202,7 +202,7 @@ def _handle_col_wise_sharding(
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed “pad”.
+ i.e. it remains as a fixed "pad".
pg: process group.
Returns: final result of lookup.
@@ -250,7 +250,7 @@ def _handle_row_wise_sharding(
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed “pad”.
+ i.e. it remains as a fixed "pad".
rank: # of cuda process.
pg: process group.
diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py
index 2d6ea1d705..2f954398f9 100644
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py
@@ -268,7 +268,7 @@ def _handle_col_wise_sharding(
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed “pad”.
+ i.e. it remains as a fixed "pad".
Note that the embedding vector at padding_idx is
excluded from the reduction.
pg: process group.
@@ -342,7 +342,7 @@ def _handle_row_wise_sharding(
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed “pad”.
+ i.e. it remains as a fixed "pad".
Note that the embedding vector at padding_idx is
excluded from the reduction.
rank: # of cuda process.
diff --git a/torch/distributed/elastic/rendezvous/etcd_rendezvous.py b/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
index 4ece7819c9..8997c592f5 100644
--- a/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
+++ b/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
@@ -124,7 +124,7 @@ class EtcdRendezvousHandler(RendezvousHandler):
| | (default 600s) |
+--------------------------------------------+--------------------------+
| last_call_timeout | additional wait amount |
- | | (“last call”) after min |
+ | | ("last call") after min |
| | number of workers has |
| | been reached (defaults |
| | to 30s) |
diff --git a/torch/distributed/pipeline/sync/_balance/blockpartition.py b/torch/distributed/pipeline/sync/_balance/blockpartition.py
index 7afe782f6a..ccdf5fe4df 100644
--- a/torch/distributed/pipeline/sync/_balance/blockpartition.py
+++ b/torch/distributed/pipeline/sync/_balance/blockpartition.py
@@ -4,7 +4,7 @@
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
-"""Implements "Block Partitions of Sequences" by Imre Bárány et al.
+"""Implements "Block Partitions of Sequences" by Imre B\u00e1r\u00e1ny et al.
Paper: https://arxiv.org/pdf/1308.2452.pdf
@@ -18,7 +18,7 @@ def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
"""Splits a sequence into several partitions to minimize variance for each
partition.
- The result might not be optimal. However, it can be done only in O(kn³),
+ The result might not be optimal. However, it can be done only in O(kn\u00b3),
where k is the number of partitions and n is the length of the sequence.
"""
@@ -51,14 +51,14 @@ def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
while True:
"""
- (1) Fix p ∈ [k] with M(P) = bp. So Bp is a maximal block of P.
+ (1) Fix p element-of [k] with M(P) = bp. So Bp is a maximal block of P.
"""
# max_size: M(P)
max_size, p = max(leaderboard())
while True:
"""
- (2) If M(P) ≤ m(P) + 1, then stop.
+ (2) If M(P) <= m(P) + 1, then stop.
"""
# min_size: m(P)
min_size, q = min(leaderboard())
@@ -67,7 +67,7 @@ def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
return [sequence[i:j] for i, j in zip([0] + splits[:-1], splits)]
"""
- (3) If M(P) > m(P) + 1, then let m(P) = bq for the q ∈ [k] which is
+ (3) If M(P) > m(P) + 1, then let m(P) = bq for the q element-of [k] which is
closest to p (ties broken arbitrarily). Thus Bq is a minimal block
of P. Let Bh be the block next to Bq between Bp and Bq. (Note that
Bh is a non-empty block: if it were, then m(P) = 0 and we should
@@ -75,21 +75,21 @@ def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
"""
if p < q:
"""
- So either p < q and then h = q−1 and we define P ∗ by moving
- the last element from Bh = Bq−1 to Bq,
+ So either p < q and then h = q-1 and we define P * by moving
+ the last element from Bh = Bq-1 to Bq,
"""
h = q - 1
splits[h] -= 1
else:
"""
- or q < p, and then h = q + 1 and P ∗ is obtained by moving the
+ or q < p, and then h = q + 1 and P * is obtained by moving the
first element of Bh = Bq+1 to Bq.
"""
h = q + 1
splits[q] += 1
"""
- Set P = P ∗ . If p = h, then go to (1), else go to (2).
+ Set P = P * . If p = h, then go to (1), else go to (2).
"""
if p == h:
break
diff --git a/torch/distributed/pipeline/sync/pipeline.py b/torch/distributed/pipeline/sync/pipeline.py
index 8eccc68183..7cd5e58311 100644
--- a/torch/distributed/pipeline/sync/pipeline.py
+++ b/torch/distributed/pipeline/sync/pipeline.py
@@ -157,30 +157,30 @@ class Pipeline:
exc_info: Optional[ExcInfo] = None
# With checkpointing, the autograd graph looks like this diagram:
- # ┌─────┸──────┐
- # │ Copy │
- # └─────┰──────┘ (fence)
- # ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─
- # ┃ (compute)
- # ┌─────┸──────┐
- # │ Wait │ [1] Synchronize the current stream with the copy stream.
- # └─────┰──────┘
- # ┌─────┸──────┐
- # │ Checkpoint │ [2] Compute a partition within checkpointing.
- # └─────┰──────┘
- # ┌─────┸──────┐
- # │ Wait │ [3] Synchronize the copy stream with the current stream.
- # └─────┰──────┘
- # ┠ ─ ─ ─ ┐
- # ┃ ┌─────┴─────┐
- # ┃ │ Recompute │ [4] Schedule the recomputation at backpropagation.
- # ┃ └─────┬─────┘
- # ┠ ─ ─ ─ ┘
- # ┃
- # ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─
- # ┌─────┸──────┐ (fence)
- # │ Copy │
- # └─────┰──────┘
+ # +-----+------+
+ # | Copy |
+ # +-----+------+ (fence)
+ # - - - + - - - - - - - - -
+ # | (compute)
+ # +-----+------+
+ # | Wait | [1] Synchronize the current stream with the copy stream.
+ # +-----+------+
+ # +-----+------+
+ # | Checkpoint | [2] Compute a partition within checkpointing.
+ # +-----+------+
+ # +-----+------+
+ # | Wait | [3] Synchronize the copy stream with the current stream.
+ # +-----+------+
+ # + - - - +
+ # | +-----+-----+
+ # | | Recompute | [4] Schedule the recomputation at backpropagation.
+ # | +-----+-----+
+ # + - - - +
+ # |
+ # - - - + - - - - - - - - -
+ # +-----+------+ (fence)
+ # | Copy |
+ # +-----+------+
for i, j in schedule:
batch = batches[i]
partition = partitions[j]
diff --git a/torch/distributed/pipeline/sync/skip/portal.py b/torch/distributed/pipeline/sync/skip/portal.py
index f3484a1b69..335793f4cc 100644
--- a/torch/distributed/pipeline/sync/skip/portal.py
+++ b/torch/distributed/pipeline/sync/skip/portal.py
@@ -9,7 +9,7 @@ autograd engine. The shared context of three functions (:class:`PortalBlue`,
:class:`PortalOrange`, and :class:`PortalCopy`) out of the computation graph is
one of the most important feature of :mod:`torchpipe.skip`.
-The metaphor is inspired by Portal™ from Valve.
+The metaphor is inspired by Portal(tm) from Valve.
"""
from typing import List, Optional, Tuple
diff --git a/torch/distributed/pipeline/sync/skip/skippable.py b/torch/distributed/pipeline/sync/skip/skippable.py
index 0c01a198f8..aa20792c84 100644
--- a/torch/distributed/pipeline/sync/skip/skippable.py
+++ b/torch/distributed/pipeline/sync/skip/skippable.py
@@ -362,16 +362,16 @@ def verify_skippables(module: nn.Sequential) -> None:
# Layer3 pops "1to3".
nn.Sequential(Layer1(), Layer2())
- # └──── ?
+ # +---- ?
nn.Sequential(Layer2(), Layer3())
- # ? ────┘
+ # ? ----+
nn.Sequential(Layer1(), Layer2(), Layer3(), Layer3())
- # └───────────────────┘ ^^^^^^
+ # +-------------------+ ^^^^^^
nn.Sequential(Layer1(), Layer1(), Layer2(), Layer3())
- # ^^^^^^ └───────────────────┘
+ # ^^^^^^ +-------------------+
To use the same name for multiple skip tensors, they must be isolated by
different namespaces. See :meth:`isolate()
diff --git a/torch/fx/experimental/migrate_gradual_types/constraint.py b/torch/fx/experimental/migrate_gradual_types/constraint.py
index 0f0d23d018..3c1f724d26 100644
--- a/torch/fx/experimental/migrate_gradual_types/constraint.py
+++ b/torch/fx/experimental/migrate_gradual_types/constraint.py
@@ -152,7 +152,7 @@ class TGreatestUpperBound(Constraint):
self.rhs2 = rhs2
def __repr__(self):
- return f'{self.res} = {self.rhs1}⊔*{self.rhs2}'
+ return f'{self.res} = {self.rhs1}\u2294*{self.rhs2}'
def __eq__(self, other):
if isinstance(other, TGreatestUpperBound):
@@ -180,7 +180,7 @@ class DGreatestUpperBound(Constraint):
self.rhs2 = rhs2
def __repr__(self):
- return f'{self.res} = {self.rhs1}⊔{self.rhs2}'
+ return f'{self.res} = {self.rhs1}\u2294{self.rhs2}'
def __eq__(self, other):
if isinstance(other, DGreatestUpperBound):
diff --git a/torch/fx/experimental/migrate_gradual_types/operation.py b/torch/fx/experimental/migrate_gradual_types/operation.py
index ec2cb91bbc..432cd570be 100644
--- a/torch/fx/experimental/migrate_gradual_types/operation.py
+++ b/torch/fx/experimental/migrate_gradual_types/operation.py
@@ -5,10 +5,10 @@ op_div = '/'
op_eq = '='
op_neq = '!='
op_imp = '=>'
-op_matching = '⊳'
+op_matching = '\u22b3' # (contains)
op_consistency = '~'
-op_precision = '⊑'
-op_leq = '≤'
+op_precision = '\u2291' # (square image of or equal to)
+op_leq = '\u2264' # less-than or equal to
op_lt = '<'
op_gt = '>'
op_mod = '%'
diff --git a/torch/linalg/__init__.py b/torch/linalg/__init__.py
index e47bb55ef7..29df838bab 100644
--- a/torch/linalg/__init__.py
+++ b/torch/linalg/__init__.py
@@ -1450,7 +1450,7 @@ Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
dtype (:class:`torch.dtype`, optional): type used to perform the accumulation and the return.
If specified, :attr:`x` is cast to :attr:`dtype` before performing the operation,
- and the returned tensor’s type will be :attr:`dtype` if real and of its real counterpart if complex.
+ and the returned tensor's type will be :attr:`dtype` if real and of its real counterpart if complex.
:attr:`dtype` may be complex if :attr:`x` is complex, otherwise it must be real.
:attr:`x` should be convertible without narrowing to :attr:`dtype`. Default: None
diff --git a/torch/masked/_docs.py b/torch/masked/_docs.py
index bf96b49e3e..fa130bbefb 100644
--- a/torch/masked/_docs.py
+++ b/torch/masked/_docs.py
@@ -1012,7 +1012,7 @@ Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
- unbiased (bool): when True, use Bessel’s correction, otherwise, compute
+ unbiased (bool): when True, use Bessel's correction, otherwise, compute
the uncorrected sample variance.
Keyword args:
@@ -1148,7 +1148,7 @@ Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
- unbiased (bool): when True, use Bessel’s correction, otherwise, compute
+ unbiased (bool): when True, use Bessel's correction, otherwise, compute
the uncorrected sample variance.
Keyword args:
diff --git a/torch/masked/_ops.py b/torch/masked/_ops.py
index 2a2ff3fd6f..b7872a6d4c 100644
--- a/torch/masked/_ops.py
+++ b/torch/masked/_ops.py
@@ -210,7 +210,7 @@ ord (int, float, optional): the order of vector norm. Default: 2.
ord (int, float): the order of vector norm. Default: 2.
See :func:`torch.linalg.vector_norm` for a list of supported norms.""",
unbiased="""\
-unbiased (bool): when True, use Bessel’s correction, otherwise, compute
+unbiased (bool): when True, use Bessel's correction, otherwise, compute
the uncorrected sample variance.""",
eps="""\
eps (float, optional): small value to avoid division by zero. Default: {default}.""",
diff --git a/torch/nested/__init__.py b/torch/nested/__init__.py
index e990510ed0..ea1cce5950 100644
--- a/torch/nested/__init__.py
+++ b/torch/nested/__init__.py
@@ -186,7 +186,7 @@ Example::
def nested_tensor(tensor_list, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor:
r"""
-Constructs a nested tensor with no autograd history (also known as a “leaf tensor”, see
+Constructs a nested tensor with no autograd history (also known as a "leaf tensor", see
:ref:`Autograd mechanics <autograd-mechanics>`) from :attr:`tensor_list` a list of tensors.
Args:
diff --git a/torch/nn/modules/adaptive.py b/torch/nn/modules/adaptive.py
index 3d61e9d8f5..83b37696c8 100644
--- a/torch/nn/modules/adaptive.py
+++ b/torch/nn/modules/adaptive.py
@@ -20,7 +20,7 @@ class AdaptiveLogSoftmaxWithLoss(Module):
As described in
`Efficient softmax approximation for GPUs by Edouard Grave, Armand Joulin,
- Moustapha Cissé, David Grangier, and Hervé Jégou
+ Moustapha Ciss\u00e9, David Grangier, and Herv\u00e9 J\u00e9gou
<https://arxiv.org/abs/1609.04309>`__.
Adaptive softmax is an approximate strategy for training models with large
diff --git a/torch/nn/modules/conv.py b/torch/nn/modules/conv.py
index b3d5770e7b..075d5e9865 100644
--- a/torch/nn/modules/conv.py
+++ b/torch/nn/modules/conv.py
@@ -204,7 +204,7 @@ class Conv1d(_ConvNd):
amount of implicit padding applied on both sides.
* :attr:`dilation` controls the spacing between the kernel points; also
- known as the à trous algorithm. It is harder to describe, but this `link`_
+ known as the \u00e0 trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
{groups_note}
@@ -341,7 +341,7 @@ class Conv2d(_ConvNd):
amount of implicit padding applied on both sides.
* :attr:`dilation` controls the spacing between the kernel points; also
- known as the à trous algorithm. It is harder to describe, but this `link`_
+ known as the \u00e0 trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
{groups_note}
@@ -483,7 +483,7 @@ class Conv3d(_ConvNd):
can be either a string {{'valid', 'same'}} or a tuple of ints giving the
amount of implicit padding applied on both sides.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
@@ -690,7 +690,7 @@ class ConvTranspose1d(_ConvTransposeNd):
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
@@ -821,7 +821,7 @@ class ConvTranspose2d(_ConvTransposeNd):
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
@@ -978,7 +978,7 @@ class ConvTranspose3d(_ConvTransposeNd):
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
diff --git a/torch/nn/modules/fold.py b/torch/nn/modules/fold.py
index 8ae911252f..f8cb083623 100644
--- a/torch/nn/modules/fold.py
+++ b/torch/nn/modules/fold.py
@@ -41,7 +41,7 @@ class Fold(Module):
sides for :attr:`padding` number of points for each dimension before
reshaping.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Args:
@@ -186,7 +186,7 @@ class Unfold(Module):
sides for :attr:`padding` number of points for each dimension before
reshaping.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Args:
diff --git a/torch/onnx/_internal/diagnostics/infra/_infra.py b/torch/onnx/_internal/diagnostics/infra/_infra.py
index 2a522b61f8..c118f3e5ae 100644
--- a/torch/onnx/_internal/diagnostics/infra/_infra.py
+++ b/torch/onnx/_internal/diagnostics/infra/_infra.py
@@ -49,7 +49,7 @@ class Tag(enum.Enum):
class PatchedPropertyBag(sarif.PropertyBag):
"""Key/value pairs that provide additional information about the object.
- The definition of PropertyBag via SARIF spec is "A property bag is an object (§3.6)
+ The definition of PropertyBag via SARIF spec is "A property bag is an object (section 3.6)
containing an unordered set of properties with arbitrary names." However it is not
reflected in the json file, and therefore not captured by the python representation.
This patch adds additional **kwargs to the `__init__` method to allow recording
diff --git a/torch/onnx/_internal/onnx_proto_utils.py b/torch/onnx/_internal/onnx_proto_utils.py
index 7fb79e5b20..b33b4a77f4 100644
--- a/torch/onnx/_internal/onnx_proto_utils.py
+++ b/torch/onnx/_internal/onnx_proto_utils.py
@@ -26,13 +26,13 @@ def export_as_test_case(
is as follows:
dir
- ├── test_<name>
- │ ├── model.onnx
- │ └── test_data_set_0
- │ ├── input_0.pb
- │ ├── input_1.pb
- │ ├── output_0.pb
- │ └── output_1.pb
+ \u251c\u2500\u2500 test_<name>
+ \u2502 \u251c\u2500\u2500 model.onnx
+ \u2502 \u2514\u2500\u2500 test_data_set_0
+ \u2502 \u251c\u2500\u2500 input_0.pb
+ \u2502 \u251c\u2500\u2500 input_1.pb
+ \u2502 \u251c\u2500\u2500 output_0.pb
+ \u2502 \u2514\u2500\u2500 output_1.pb
Args:
model_bytes: The ONNX model in bytes.
@@ -80,13 +80,13 @@ def load_test_case(dir: str) -> Tuple[bytes, Any, Any]:
should be as follows:
dir
- ├── test_<name>
- │ ├── model.onnx
- │ └── test_data_set_0
- │ ├── input_0.pb
- │ ├── input_1.pb
- │ ├── output_0.pb
- │ └── output_1.pb
+ \u251c\u2500\u2500 test_<name>
+ \u2502 \u251c\u2500\u2500 model.onnx
+ \u2502 \u2514\u2500\u2500 test_data_set_0
+ \u2502 \u251c\u2500\u2500 input_0.pb
+ \u2502 \u251c\u2500\u2500 input_1.pb
+ \u2502 \u251c\u2500\u2500 output_0.pb
+ \u2502 \u2514\u2500\u2500 output_1.pb
Args:
dir: The directory containing the test case.
diff --git a/torch/onnx/symbolic_opset10.py b/torch/onnx/symbolic_opset10.py
index d35c9e9845..6fd576822e 100644
--- a/torch/onnx/symbolic_opset10.py
+++ b/torch/onnx/symbolic_opset10.py
@@ -785,7 +785,7 @@ def nan_to_num(g: jit_utils.GraphContext, input, nan, posinf, neginf):
)
# For None values of posinf, neginf we use the greatest/lowest finite
- # value representable by input’s dtype.
+ # value representable by input's dtype.
finfo = torch.finfo(input_dtype)
if posinf is None:
posinf = finfo.max
diff --git a/torch/onnx/symbolic_opset11.py b/torch/onnx/symbolic_opset11.py
index b03918c6cc..0282c38f73 100644
--- a/torch/onnx/symbolic_opset11.py
+++ b/torch/onnx/symbolic_opset11.py
@@ -1379,10 +1379,10 @@ def normal(
pin_memory=None,
):
# If you can sample from a given distribution with mean 0 and variance 1, then you can easily sample from a
- # scale-location transformation of that distribution, which has mean μ and variance σ's square. If x is a sample
+ # scale-location transformation of that distribution, which has mean mu and variance sigma's square. If x is a sample
# from a mean 0 and variance 1 distribution then
- # σx+μ
- # is a sample with mean μ and variance σ's square.
+ # sigma x+mu
+ # is a sample with mean mu and variance sigma's square.
if sizes is not None and not symbolic_helper._is_none(sizes):
mean = opset9.expand(g, mean, sizes, None)
result = opset9.mul(g, std, g.op("RandomNormalLike", mean))
diff --git a/torch/onnx/verification.py b/torch/onnx/verification.py
index b60dfe8e1c..6b49e7fc72 100644
--- a/torch/onnx/verification.py
+++ b/torch/onnx/verification.py
@@ -1020,7 +1020,7 @@ class GraphInfoPrettyPrinter:
else ""
)
- return f"{node_count} {'X' if has_mismatch else '✓'} {error_node_kind}"
+ return f"{node_count} {'X' if has_mismatch else chr(0x2713)} {error_node_kind}"
@_beartype.beartype
def _graph_id_segment_str(self) -> str:
@@ -1148,13 +1148,13 @@ class OnnxTestCaseRepro:
structure is as follows:
dir
- ├── test_<name>
- │ ├── model.onnx
- │ └── test_data_set_0
- │ ├── input_0.pb
- │ ├── input_1.pb
- │ ├── output_0.pb
- │ └── output_1.pb
+ \u251c\u2500\u2500 test_<name>
+ \u2502 \u251c\u2500\u2500 model.onnx
+ \u2502 \u2514\u2500\u2500 test_data_set_0
+ \u2502 \u251c\u2500\u2500 input_0.pb
+ \u2502 \u251c\u2500\u2500 input_1.pb
+ \u2502 \u251c\u2500\u2500 output_0.pb
+ \u2502 \u2514\u2500\u2500 output_1.pb
Args:
proto: ONNX model proto.
@@ -1244,19 +1244,19 @@ class GraphInfo:
Example::
==================================== Tree: =====================================
- 5 X __2 X __1 ✓
+ 5 X __2 X __1 \u2713
id: | id: 0 | id: 00
| |
| |__1 X (aten::relu)
| id: 01
|
- |__3 X __1 ✓
+ |__3 X __1 \u2713
id: 1 | id: 10
|
|__2 X __1 X (aten::relu)
id: 11 | id: 110
|
- |__1 ✓
+ |__1 \u2713
id: 111
=========================== Mismatch leaf subgraphs: ===========================
['01', '110']
@@ -1354,13 +1354,13 @@ class GraphInfo:
The repro directory will contain the following files::
dir
- ├── test_<name>
- │ ├── model.onnx
- │ └── test_data_set_0
- │ ├── input_0.pb
- │ ├── input_1.pb
- │ ├── output_0.pb
- │ └── output_1.pb
+ \u251c\u2500\u2500 test_<name>
+ \u2502 \u251c\u2500\u2500 model.onnx
+ \u2502 \u2514\u2500\u2500 test_data_set_0
+ \u2502 \u251c\u2500\u2500 input_0.pb
+ \u2502 \u251c\u2500\u2500 input_1.pb
+ \u2502 \u251c\u2500\u2500 output_0.pb
+ \u2502 \u2514\u2500\u2500 output_1.pb
Args:
repro_dir: The directory to export the repro files to. Defaults to current
@@ -1825,19 +1825,19 @@ def find_mismatch(
Greatest absolute difference: 0.2328854203224182 at index (1, 2) (up to 1e-07 allowed)
Greatest relative difference: 0.699536174352349 at index (1, 3) (up to 0.001 allowed)
==================================== Tree: =====================================
- 5 X __2 X __1 ✓
+ 5 X __2 X __1 \u2713
id: | id: 0 | id: 00
| |
| |__1 X (aten::relu)
| id: 01
|
- |__3 X __1 ✓
+ |__3 X __1 \u2713
id: 1 | id: 10
|
|__2 X __1 X (aten::relu)
id: 11 | id: 110
|
- |__1 ✓
+ |__1 \u2713
id: 111
=========================== Mismatch leaf subgraphs: ===========================
['01', '110']
diff --git a/torch/package/file_structure_representation.py b/torch/package/file_structure_representation.py
index cc5f055c1a..1453ad3a5d 100644
--- a/torch/package/file_structure_representation.py
+++ b/torch/package/file_structure_representation.py
@@ -67,13 +67,16 @@ class Directory:
return "".join(str_list)
def _stringify_tree(
- self, str_list: List[str], preamble: str = "", dir_ptr: str = "─── "
+ self,
+ str_list: List[str],
+ preamble: str = "",
+ dir_ptr: str = "\u2500\u2500\u2500 ",
):
"""Recursive method to generate print-friendly version of a Directory."""
space = " "
- branch = "│ "
- tee = "├── "
- last = "└── "
+ branch = "\u2502 "
+ tee = "\u251c\u2500\u2500 "
+ last = "\u2514\u2500\u2500 "
# add this directory's representation
str_list.append(f"{preamble}{dir_ptr}{self.name}\n")
diff --git a/torch/signal/windows/windows.py b/torch/signal/windows/windows.py
index f2cbe3247c..d86a1245dc 100644
--- a/torch/signal/windows/windows.py
+++ b/torch/signal/windows/windows.py
@@ -748,7 +748,7 @@ Computes the minimum 4-term Blackman-Harris window according to Nuttall.
.. math::
w_n = 1 - 0.36358 \cos{(z_n)} + 0.48917 \cos{(2z_n)} - 0.13659 \cos{(3z_n)} + 0.01064 \cos{(4z_n)}
-where ``z_n = 2 π n/ M``.
+where ``z_n = 2 \u03c0 n/ M``.
""",
"""
@@ -766,12 +766,12 @@ Keyword args:
References::
- - A. Nuttall, “Some windows with very good sidelobe behavior,”
+ - A. Nuttall, "Some windows with very good sidelobe behavior,"
IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 29, no. 1, pp. 84-91,
Feb 1981. https://doi.org/10.1109/TASSP.1981.1163506
- - Heinzel G. et al., “Spectrum and spectral density estimation by the Discrete Fourier transform (DFT),
- including a comprehensive list of window functions and some new flat-top windows”,
+ - Heinzel G. et al., "Spectrum and spectral density estimation by the Discrete Fourier transform (DFT),
+ including a comprehensive list of window functions and some new flat-top windows",
February 15, 2002 https://holometer.fnal.gov/GH_FFT.pdf
Examples::
diff --git a/torch/special/__init__.py b/torch/special/__init__.py
index a25f0f7c03..07e104c409 100644
--- a/torch/special/__init__.py
+++ b/torch/special/__init__.py
@@ -1036,7 +1036,7 @@ hermite_polynomial_h = _add_docstr(_special.special_hermite_polynomial_h,
r"""
hermite_polynomial_h(input, n, *, out=None) -> Tensor
-Physicist’s Hermite polynomial :math:`H_{n}(\text{input})`.
+Physicist's Hermite polynomial :math:`H_{n}(\text{input})`.
If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
is returned. Otherwise, the recursion:
@@ -1059,7 +1059,7 @@ hermite_polynomial_he = _add_docstr(_special.special_hermite_polynomial_he,
r"""
hermite_polynomial_he(input, n, *, out=None) -> Tensor
-Probabilist’s Hermite polynomial :math:`He_{n}(\text{input})`.
+Probabilist's Hermite polynomial :math:`He_{n}(\text{input})`.
If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
is returned. Otherwise, the recursion:
diff --git a/torch/utils/jit/__init__.py b/torch/utils/jit/__init__.py
index 8b13789179..e69de29bb2 100644
--- a/torch/utils/jit/__init__.py
+++ b/torch/utils/jit/__init__.py
@@ -1 +0,0 @@
-
|
2.41.0
|
dc66e9dc3a3f15de2eb467262ab75e5c362063f
|
Tue, 23 Apr 2024 09:34:46 +0000
|
[PATCH 0659/1000] refactor autocast python APIs (#124479)
|
# Motivation Refactor the autocast usage in `torch/amp/autocast_mode.py` and `torch/utils/checkpoint.py` to fix a bug: a naming-convention conflict between `torch.xxx.get_autocast_xxx_dtype` defined in `autocast_mode.py` and `torch.xxx.get_autocast_dtype` defined in `checkpoint.py`. # Solution Use device-agnostic APIs such as `torch.get_autocast_dtype`, ..., instead. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124479 Approved by: https://github.com/jgong5, https://github.com/gujinghui, https://github.com/EikanWang, https://github.com/albanD ghstack dependencies: #124359
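As a rough, non-authoritative sketch of the device-agnostic flow this refactor standardizes on (the calls below are the ones used in the diff that follows; the device string and dtype choice are illustrative, and ordinary user code would simply use `with torch.autocast(device_type=...)`):

```python
import torch

# Sketch only: the low-level, device-agnostic autocast state APIs used by the
# refactored context manager, replacing per-device variants such as
# torch.get_autocast_gpu_dtype() / torch.xpu.get_autocast_xpu_dtype().
device_type = "cuda" if torch.cuda.is_available() else "cpu"  # illustrative

assert torch._C._is_autocast_available(device_type)  # unified capability check

prev_enabled = torch.is_autocast_enabled(device_type)
prev_dtype = torch.get_autocast_dtype(device_type)

torch.set_autocast_enabled(device_type, True)
torch.set_autocast_dtype(device_type, torch.bfloat16)  # illustrative dtype
try:
    pass  # autocast-eligible ops would run here
finally:
    # Restore the previous state, mirroring what autocast.__exit__ now does.
    torch.set_autocast_enabled(device_type, prev_enabled)
    torch.set_autocast_dtype(device_type, prev_dtype)
```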
|
diff --git a/aten/src/ATen/autocast_mode.h b/aten/src/ATen/autocast_mode.h
index f4dd7d8766..a2d7b8a24a 100644
--- a/aten/src/ATen/autocast_mode.h
+++ b/aten/src/ATen/autocast_mode.h
@@ -174,12 +174,20 @@ inline DispatchKey get_autocast_dispatch_key_from_device_type(
}
}
-inline at::ScalarType get_lower_precision_fp_from_device_type(
- c10::DeviceType device_type) {
+inline bool is_autocast_available(c10::DeviceType device_type) {
if (device_type == at::kCPU || device_type == at::kCUDA ||
device_type == at::kXPU || device_type == at::kIPU ||
device_type == at::kHPU || device_type == at::kXLA ||
device_type == at::kPrivateUse1) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+inline at::ScalarType get_lower_precision_fp_from_device_type(
+ c10::DeviceType device_type) {
+ if (is_autocast_available(device_type)) {
return get_autocast_dtype(device_type);
} else {
throw std::runtime_error(
diff --git a/test/test_autocast.py b/test/test_autocast.py
index 2f788b7f65..5054944932 100644
--- a/test/test_autocast.py
+++ b/test/test_autocast.py
@@ -336,7 +336,7 @@ class TestTorchAutocast(TestCase):
def test_invalid_device(self):
dev = "not a real device"
- msg = f"unsupported autocast device_type '{dev}'"
+ msg = f"Invalid device string: '{dev}'"
with self.assertRaisesRegex(RuntimeError, msg):
with torch.autocast(device_type=dev):
_ = torch.tensor(1)
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index fad41ec848..34e49e15d8 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -1301,6 +1301,7 @@ def clear_autocast_cache() -> None: ...
def set_autocast_cpu_enabled(enabled: _bool) -> None: ...
def is_autocast_cpu_enabled() -> _bool: ...
def _is_any_autocast_enabled() -> _bool: ...
+def _is_autocast_available(device_type: str) -> _bool: ...
def set_autocast_cpu_dtype(dtype: _dtype) -> None: ...
def set_autocast_gpu_dtype(dtype: _dtype) -> None: ...
def get_autocast_cpu_dtype() -> _dtype: ...
diff --git a/torch/amp/autocast_mode.py b/torch/amp/autocast_mode.py
index 30c6aefcf1..87ff709fcf 100644
--- a/torch/amp/autocast_mode.py
+++ b/torch/amp/autocast_mode.py
@@ -199,35 +199,20 @@ class autocast:
assert dtype is not None
return
self.device = device_type
+ if not torch._C._is_autocast_available(self.device):
+ raise RuntimeError(
+ f"User specified an unsupported autocast device_type '{self.device}'"
+ )
self.custom_backend_name = torch._C._get_privateuse1_backend_name()
- if self.device == "cuda":
- self.fast_dtype = torch.get_autocast_gpu_dtype()
- elif self.device == "cpu":
- self.fast_dtype = torch.get_autocast_cpu_dtype()
- elif self.device == "xpu":
- self.fast_dtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined]
- elif self.device == "ipu":
- self.fast_dtype = torch.get_autocast_ipu_dtype() # type: ignore[attr-defined]
- elif self.device == "hpu":
- self.fast_dtype = torch.hpu.get_autocast_hpu_dtype() # type: ignore[attr-defined]
- elif self.device == "xla":
- self.fast_dtype = torch.get_autocast_xla_dtype() # type: ignore[attr-defined]
- elif self.device == self.custom_backend_name:
+ self.fast_dtype = torch.get_autocast_dtype(self.device)
+ if self.device == self.custom_backend_name:
necessary_funcs = [
- "is_autocast_enabled",
- "set_autocast_enabled",
- "get_autocast_dtype",
- "set_autocast_dtype",
"get_amp_supported_dtype",
]
message = f"Tried to use AMP with the `{self.custom_backend_name}` backend, but the backend has not "
message += "registered a module or the module miss some necessary funcs. The backend should register "
message += "a module by `torch._register_device_module`, and the module must have these funcs: \n"
- message += "`is_autocast_enabled() -> bool`, `set_autocast_enabled(bool) -> None`, "
- message += "`get_autocast_dtype() -> torch.dtype`, `set_autocast_dtype(torch.dtype) "
- message += (
- "-> None` and `get_amp_supported_dtype() -> List[torch.dtype]`. \n"
- )
+ message += "`get_amp_supported_dtype() -> List[torch.dtype]`. \n"
assert hasattr(torch, self.custom_backend_name), message
self.custom_device_mod = getattr(torch, self.custom_backend_name)
@@ -236,11 +221,6 @@ class autocast:
message + f"But the func `{func}` is missing. \n"
)
- self.fast_dtype = self.custom_device_mod.get_autocast_dtype()
- else:
- raise RuntimeError(
- f"User specified an unsupported autocast device_type '{self.device}'"
- )
self._cache_enabled = torch.is_autocast_cache_enabled()
if (
enabled
@@ -323,48 +303,11 @@ class autocast:
return self
self.prev_cache_enabled = torch.is_autocast_cache_enabled()
- if self.device == "cpu":
- self.prev = torch.is_autocast_cpu_enabled()
- self.prev_fastdtype = torch.get_autocast_cpu_dtype()
- torch.set_autocast_cpu_enabled(self._enabled)
- torch.set_autocast_cpu_dtype(self.fast_dtype) # type: ignore[arg-type]
- torch.autocast_increment_nesting()
- elif self.device == "xpu":
- self.prev = torch.xpu.is_autocast_xpu_enabled() # type: ignore[attr-defined]
- self.prev_fastdtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined]
- torch.xpu.set_autocast_xpu_enabled(self._enabled) # type: ignore[attr-defined]
- torch.xpu.set_autocast_xpu_dtype(self.fast_dtype) # type: ignore[attr-defined]
- torch.autocast_increment_nesting()
- elif self.device == "ipu":
- self.prev = torch.is_autocast_ipu_enabled() # type: ignore[attr-defined]
- self.prev_fastdtype = torch.get_autocast_ipu_dtype() # type: ignore[attr-defined]
- torch.set_autocast_ipu_enabled(self._enabled) # type: ignore[attr-defined]
- torch.set_autocast_ipu_dtype(self.fast_dtype) # type: ignore[attr-defined]
- torch.autocast_increment_nesting()
- elif self.device == "hpu":
- self.prev = torch.hpu.is_autocast_hpu_enabled() # type: ignore[attr-defined]
- self.prev_fastdtype = torch.hpu.get_autocast_hpu_dtype() # type: ignore[attr-defined]
- torch.hpu.set_autocast_hpu_enabled(self._enabled) # type: ignore[attr-defined]
- torch.hpu.set_autocast_hpu_dtype(self.fast_dtype) # type: ignore[attr-defined]
- torch.autocast_increment_nesting()
- elif self.device == "xla":
- self.prev = torch.is_autocast_xla_enabled() # type: ignore[attr-defined]
- self.prev_fastdtype = torch.get_autocast_xla_dtype() # type: ignore[attr-defined]
- torch.set_autocast_xla_enabled(self._enabled) # type: ignore[attr-defined]
- torch.set_autocast_xla_dtype(self.fast_dtype) # type: ignore[attr-defined]
- torch.autocast_increment_nesting()
- elif self.device == self.custom_backend_name:
- self.prev = self.custom_device_mod.is_autocast_enabled()
- self.prev_fastdtype = self.custom_device_mod.get_autocast_dtype()
- self.custom_device_mod.set_autocast_enabled(self._enabled)
- self.custom_device_mod.set_autocast_dtype(self.fast_dtype)
- torch.autocast_increment_nesting()
- else:
- self.prev = torch.is_autocast_enabled()
- self.prev_fastdtype = torch.get_autocast_gpu_dtype()
- torch.set_autocast_gpu_dtype(self.fast_dtype) # type: ignore[arg-type]
- torch.set_autocast_enabled(self._enabled)
- torch.autocast_increment_nesting()
+ self.prev = torch.is_autocast_enabled(self.device)
+ self.prev_fastdtype = torch.get_autocast_dtype(self.device)
+ torch.set_autocast_enabled(self.device, self._enabled)
+ torch.set_autocast_dtype(self.device, self.fast_dtype) # type: ignore[arg-type]
+ torch.autocast_increment_nesting()
torch.set_autocast_cache_enabled(self._cache_enabled)
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override]
@@ -372,41 +315,10 @@ class autocast:
return
# Drop the cache when we exit to a nesting level that's outside any instance of autocast.
- if self.device == "cpu":
- if torch.autocast_decrement_nesting() == 0:
- torch.clear_autocast_cache()
- torch.set_autocast_cpu_enabled(self.prev)
- torch.set_autocast_cpu_dtype(self.prev_fastdtype)
- elif self.device == "xpu":
- if torch.autocast_decrement_nesting() == 0:
- torch.clear_autocast_cache()
- torch.xpu.set_autocast_xpu_enabled(self.prev) # type: ignore[attr-defined]
- torch.xpu.set_autocast_xpu_dtype(self.prev_fastdtype) # type: ignore[attr-defined]
- elif self.device == "ipu":
- if torch.autocast_decrement_nesting() == 0:
- torch.clear_autocast_cache()
- torch.set_autocast_ipu_enabled(self.prev) # type: ignore[attr-defined]
- torch.set_autocast_ipu_dtype(self.prev_fastdtype) # type: ignore[attr-defined]
- elif self.device == "hpu":
- if torch.autocast_decrement_nesting() == 0:
- torch.clear_autocast_cache()
- torch.hpu.set_autocast_hpu_enabled(self.prev) # type: ignore[attr-defined]
- torch.hpu.set_autocast_hpu_dtype(self.prev_fastdtype) # type: ignore[attr-defined]
- elif self.device == "xla":
- if torch.autocast_decrement_nesting() == 0:
- torch.clear_autocast_cache()
- torch.set_autocast_xla_enabled(self.prev) # type: ignore[attr-defined]
- torch.set_autocast_xla_dtype(self.prev_fastdtype) # type: ignore[attr-defined]
- elif self.device == self.custom_backend_name:
- if torch.autocast_decrement_nesting() == 0:
- torch.clear_autocast_cache()
- self.custom_device_mod.set_autocast_enabled(self.prev)
- self.custom_device_mod.set_autocast_dtype(self.prev_fastdtype)
- else:
- if torch.autocast_decrement_nesting() == 0:
- torch.clear_autocast_cache()
- torch.set_autocast_enabled(self.prev)
- torch.set_autocast_gpu_dtype(self.prev_fastdtype)
+ if torch.autocast_decrement_nesting() == 0:
+ torch.clear_autocast_cache()
+ torch.set_autocast_enabled(self.device, self.prev)
+ torch.set_autocast_dtype(self.device, self.prev_fastdtype)
torch.set_autocast_cache_enabled(self.prev_cache_enabled)
return False
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index 3f2bbf344a..6c9870a5c4 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -574,6 +574,24 @@ static PyObject* is_any_autocast_enabled(PyObject* _unused, PyObject* arg) {
END_HANDLE_TH_ERRORS
}
+static PyObject* is_autocast_available(
+ PyObject* _unused,
+ PyObject* args,
+ PyObject* kwargs) {
+ HANDLE_TH_ERRORS
+ static PythonArgParser parser(
+ {"_is_autocast_available(c10::string_view device_type)"});
+ ParsedArgs<1> parsed_args;
+ auto r = parser.parse(args, kwargs, parsed_args);
+ auto device_type = at::Device(r.string(0)).type();
+ if (at::autocast::is_autocast_available(device_type)) {
+ Py_RETURN_TRUE;
+ } else {
+ Py_RETURN_FALSE;
+ }
+ END_HANDLE_TH_ERRORS
+}
+
static PyObject* set_autocast_cpu_enabled(PyObject* _unused, PyObject* arg) {
HANDLE_TH_ERRORS
TORCH_CHECK_TYPE(
@@ -1235,6 +1253,10 @@ static PyMethodDef methods[] = { // NOLINT
METH_VARARGS | METH_KEYWORDS,
nullptr},
{"_is_any_autocast_enabled", is_any_autocast_enabled, METH_NOARGS, nullptr},
+ {"_is_autocast_available",
+ castPyCFunctionWithKeywords(is_autocast_available),
+ METH_VARARGS | METH_KEYWORDS,
+ nullptr},
{"clear_autocast_cache", clear_autocast_cache, METH_NOARGS, nullptr},
{"set_autocast_cpu_enabled", set_autocast_cpu_enabled, METH_O, nullptr},
{"is_autocast_cpu_enabled", is_autocast_cpu_enabled, METH_NOARGS, nullptr},
diff --git a/torch/utils/backend_registration.py b/torch/utils/backend_registration.py
index d2d2b1cb89..1fda089204 100644
--- a/torch/utils/backend_registration.py
+++ b/torch/utils/backend_registration.py
@@ -36,20 +36,6 @@ def rename_privateuse1_backend(backend_name: str) -> None:
(1) ``get_amp_supported_dtype() -> List[torch.dtype]``
get the supported dtypes on your "foo" device in AMP, maybe the "foo" device supports one more dtype.
- (2) ``is_autocast_enabled() -> bool``
- check the AMP is enabled or not on your "foo" device.
-
- (3) ``get_autocast_dtype() -> torch.dtype``
- get the supported dtype on your "foo" device in AMP, which is set by ``set_autocast_dtype`` or the
- default dtype, and the default dtype is ``torch.float16``.
-
- (4) ``set_autocast_enabled(bool) -> None``
- enable the AMP or not on your "foo" device.
-
- (5) ``set_autocast_dtype(dtype) -> None``
- set the supported dtype on your "foo" device in AMP, and the dtype be contained in the dtypes got
- from ``get_amp_supported_dtype``.
-
Note(random): If you want to support to set seed for your device, BackendModule needs to have the following API's:
(1) ``_is_in_bad_fork() -> bool``
diff --git a/torch/utils/checkpoint.py b/torch/utils/checkpoint.py
index fc536dd546..ca0e39d537 100644
--- a/torch/utils/checkpoint.py
+++ b/torch/utils/checkpoint.py
@@ -194,7 +194,7 @@ def set_device_states(devices, states) -> None:
def _get_autocast_kwargs(device="cuda"):
- if _supports_autocast(device):
+ if torch._C._is_autocast_available(device):
device_autocast_kwargs = {
"enabled": torch.is_autocast_enabled(device),
"dtype": torch.get_autocast_dtype(device),
@@ -211,10 +211,6 @@ def _get_autocast_kwargs(device="cuda"):
return device_autocast_kwargs, cpu_autocast_kwargs
-def _supports_autocast(device):
- device_module = _get_device_module(device)
- return device == "cuda" or (hasattr(device_module, "is_autocast_enabled")
- and hasattr(device_module, "get_autocast_dtype"))
class CheckpointFunction(torch.autograd.Function):
@staticmethod
@@ -293,7 +289,7 @@ class CheckpointFunction(torch.autograd.Function):
device_autocast_ctx = device_module.amp.autocast(
**ctx.device_autocast_kwargs
- ) if _supports_autocast(ctx.device) else contextlib.nullcontext()
+ ) if torch._C._is_autocast_available(ctx.device) else contextlib.nullcontext()
with torch.enable_grad(), device_autocast_ctx, \
torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs):
outputs = ctx.run_function(*detached_inputs)
@@ -1400,7 +1396,7 @@ def _checkpoint_without_reentrant_generator(
device_autocast_ctx = device_module.amp.autocast(
**device_autocast_kwargs
- ) if _supports_autocast(device) else contextlib.nullcontext()
+ ) if torch._C._is_autocast_available(device) else contextlib.nullcontext()
with device_autocast_ctx, torch.cpu.amp.autocast(**cpu_autocast_kwargs), \
recompute_context:
fn(*args, **kwargs)
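As a hedged sketch (the variable names and fallback device string are illustrative, not part of the patch), the `torch/utils/checkpoint.py` hunks above boil down to: ask `torch._C._is_autocast_available` whether the device type supports autocast, capture the ambient state if it does, and otherwise fall back to a `nullcontext` instead of probing the backend module for autocast helpers.

```python
import contextlib

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"  # illustrative

# Capture the ambient autocast state only when autocast exists for this device type.
device_autocast_kwargs = (
    {
        "enabled": torch.is_autocast_enabled(device),
        "dtype": torch.get_autocast_dtype(device),
        "cache_enabled": torch.is_autocast_cache_enabled(),
    }
    if torch._C._is_autocast_available(device)
    else None
)

# Re-enter the same autocast state during recomputation, or do nothing at all.
device_autocast_ctx = (
    torch.amp.autocast(device_type=device, **device_autocast_kwargs)
    if device_autocast_kwargs is not None
    else contextlib.nullcontext()
)

with device_autocast_ctx:
    pass  # the recomputed forward would run here
```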
|
2.41.0
|
de78a1b48d2bfdf2dba89c983df518f2120fecf
|
Wed, 24 Apr 2024 20:26:19 -0700
|
[PATCH 0661/1000] [dynamo][cpp-guards] EQUALS MATCH - Cache first passing value (#124627)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124627 Approved by: https://github.com/jansel ghstack dependencies: #124779
|
diff --git a/torch/csrc/dynamo/guards.cpp b/torch/csrc/dynamo/guards.cpp
index 892197a2d7..d61ac4219a 100644
--- a/torch/csrc/dynamo/guards.cpp
+++ b/torch/csrc/dynamo/guards.cpp
@@ -994,8 +994,9 @@ class EQUALS_MATCH : public LeafGuard {
_value_type(Py_TYPE(value.ptr())) {}
bool check_nopybind(PyObject* value) override { // borrowed ref
- // Fast path - pointer equality check.
- if (value != _value.ptr()) {
+ // Fast path - pointer equality check. Pointer equality checks are ok
+ // because objects guarded with EQUALS_MATCH are immutable.
+ if (value != _value.ptr() && value != _first_passing_value.ptr()) {
// Check type
if (Py_TYPE(value) != _value_type) {
return false;
@@ -1006,6 +1007,11 @@ class EQUALS_MATCH : public LeafGuard {
PyErr_Clear();
return false;
}
+
+ // Cache the value here.
+ if (!_first_passing_value && result) {
+ _first_passing_value = py::cast<py::object>(value);
+ }
return result;
}
return true;
@@ -1018,6 +1024,11 @@ class EQUALS_MATCH : public LeafGuard {
// these objects is ok.
py::object _value;
+ // Cache the first value whose pointer is not equal to value.ptr(). This is
+ // useful in nn module guards where getattr name is a string, which is same as
+ // a key in the __dict__ but the pointer is different.
+ py::object _first_passing_value;
+
// Type of the value
PyTypeObject* _value_type;
};
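To make the caching above concrete, here is a plain-Python sketch of the idea (not the C++ guard itself; the names are illustrative): two equal `str` objects can live at different addresses, so the pointer fast path alone would miss them even though the rich comparison succeeds. Remembering the first object that passed only by equality turns repeated checks on that same object into cheap pointer comparisons.

```python
# Plain-Python sketch of EQUALS_MATCH with a cached "first passing value".
guarded = "weight"                        # immutable value captured when the guard is built
runtime = "".join(["wei", "ght"])         # equal string built at runtime; usually a distinct object in CPython

_first_passing_value = None

def equals_match(value):
    """Pointer fast path against the guarded and cached values, else a full == compare."""
    global _first_passing_value
    if value is guarded or value is _first_passing_value:
        return True                       # fast path: identity, no rich comparison needed
    if type(value) is not type(guarded):
        return False                      # mirrors the Py_TYPE check in the C++ guard
    if value == guarded:                  # slow path: rich comparison
        if _first_passing_value is None:
            _first_passing_value = value  # cache the first object that passed only by equality
        return True
    return False

print(equals_match(runtime))  # True via the == slow path; caches `runtime`
print(equals_match(runtime))  # True via the identity fast path on the cached object
```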
|
2.41.0
|
a1299cc0ef481f8c14934b2ca98b4ca0280d819
|
Thu, 25 Apr 2024 16:06:46 +0000
|
[PATCH 0662/1000] Revert "Add test_cpp_extensions tests for stream_and_event and mita_backend (#123614)"
|
This reverts commit 355dc34f865036c4c625fcdafe54db846b2be2c2. Reverted https://github.com/pytorch/pytorch/pull/123614 on behalf of https://github.com/jeffdaily because this PR broke ROCm with the error `RuntimeError: Cannot have MTIA with other devices` ([comment](https://github.com/pytorch/pytorch/pull/123612#issuecomment-2077649762))
|
diff --git a/test/cpp_extensions/mtia_extension.cpp b/test/cpp_extensions/mtia_extension.cpp
deleted file mode 100644
index 3b02d3968e..0000000000
--- a/test/cpp_extensions/mtia_extension.cpp
+++ /dev/null
@@ -1,219 +0,0 @@
-#include <ATen/detail/MTIAHooksInterface.h>
-#include <c10/core/Device.h>
-#include <c10/core/Stream.h>
-#include <c10/core/impl/DeviceGuardImplInterface.h>
-#include <c10/util/Logging.h>
-#include <torch/csrc/utils/device_lazy_init.h>
-#include <thread>
-namespace torch::mtia {
-
-constexpr c10::DeviceType kMTIADeviceType = c10::DeviceType::MTIA;
-constexpr c10::DeviceIndex kMTIADeviceCount = 2;
-static thread_local c10::DeviceIndex current_device = 0;
-static thread_local std::array<c10::Stream, kMTIADeviceCount> current_streams =
- {c10::Stream::unpack3(0, 0, c10::DeviceType::MTIA),
- c10::Stream::unpack3(0, 1, c10::DeviceType::MTIA)};
-static int64_t stream_id_gen = 1;
-static int64_t event_id_gen = 1;
-static std::array<c10::Stream, kMTIADeviceCount> default_streams = {
- c10::Stream::unpack3(0, 0, c10::DeviceType::MTIA),
- c10::Stream::unpack3(0, 1, c10::DeviceType::MTIA)};
-struct MTIAGuardImpl final : public c10::impl::DeviceGuardImplInterface {
- MTIAGuardImpl() = default;
- explicit MTIAGuardImpl(c10::DeviceType t) {
- TORCH_INTERNAL_ASSERT(t == kMTIADeviceType);
- }
- c10::DeviceType type() const override {
- return kMTIADeviceType;
- }
- c10::Device exchangeDevice(c10::Device d) const override {
- c10::Device old_device = getDevice();
- if (old_device.index() != d.index()) {
- setDevice(d);
- }
- return old_device;
- }
- c10::Device getDevice() const override {
- return c10::Device(kMTIADeviceType, current_device);
- }
-
- void setDevice(c10::Device d) const override {
- c10::Device current_device = getDevice();
- if (current_device.index() != d.index()) {
- current_device = d;
- }
- }
- void uncheckedSetDevice(c10::Device d) const noexcept override {
- (void)d;
- }
- c10::Stream getStream(c10::Device d) const noexcept override {
- return current_streams[d.index()];
- }
- c10::Stream getNewStream(c10::Device d, int priority = 0) const override {
- (void)priority;
- return c10::Stream::unpack3(stream_id_gen++, d.index(), d.type());
- }
- c10::Stream getDefaultStream(c10::Device d) const override {
- return default_streams[d.index()];
- }
- c10::Stream getStreamFromGlobalPool(
- c10::Device d,
- bool isHighPriority = false) const override {
- return c10::Stream::unpack3(stream_id_gen++, d.index(), d.type());
- }
- // NB: These do NOT set the current device
- c10::Stream exchangeStream(c10::Stream s) const noexcept override {
- c10::Stream old_stream = getStream(s.device());
- return old_stream;
- }
- c10::DeviceIndex deviceCount() const noexcept override {
- return kMTIADeviceCount;
- }
-
- void destroyEvent(void* event, const c10::DeviceIndex device_index)
- const noexcept override {
- (void)device_index;
- }
-
- void record(
- void** event,
- const c10::Stream& stream,
- const c10::DeviceIndex device_index,
- const c10::EventFlag flag) const override {
- TORCH_CHECK(
- device_index == -1 || device_index == stream.device_index(),
- "Event device index ",
- device_index,
- " does not match recording stream's device index ",
- stream.device_index(),
- ".");
-
- const auto orig_device = getDevice();
-
- setDevice(stream.device());
-
- if (*event == nullptr) {
- *event = reinterpret_cast<void*>(event_id_gen++);
- }
- setDevice(orig_device);
- }
-
- void block(void* event, const c10::Stream& stream) const override {
- (void)event;
- (void)stream;
- }
-
- // May be called from any device
- bool queryEvent(void* event) const override {
- (void)event;
- return true;
- }
-
- // Stream-related functions
- bool queryStream(const c10::Stream& stream) const override {
- (void)stream;
- return true;
- }
-
- void synchronizeStream(const c10::Stream& stream) const override {
- (void)stream;
- }
-
- void recordDataPtrOnStream(
- const c10::DataPtr& data_ptr,
- const c10::Stream& stream) const override {
- (void)data_ptr;
- (void)stream;
- }
-
- double elapsedTime(void* event1, void* event2) const override {
- uint64_t elapsed_time = 1e6;
- return (double)(elapsed_time / 1e6);
- }
-
- void synchronizeEvent(void* event) const override {
- (void)event;
- }
-};
-
-struct MTIAHooks : public at::MTIAHooksInterface {
- explicit MTIAHooks(at::MTIAHooksArgs) {}
- void initMTIA() const override {}
-
- bool hasMTIA() const override {
- return true;
- }
-
- c10::DeviceIndex deviceCount() const override {
- torch::utils::device_lazy_init(at::kMTIA);
- return c10::DeviceIndex(2);
- }
-
- void deviceSynchronize(c10::DeviceIndex device_index) const override {
- torch::utils::device_lazy_init(at::kMTIA);
- (void)device_index;
- }
-
- std::string showConfig() const override {
- return "None config";
- }
-
- c10::DeviceIndex exchangeDevice(c10::DeviceIndex device) const override {
- torch::utils::device_lazy_init(at::kMTIA);
- auto orig_device = current_device;
- if (current_device != device) {
- current_device = device;
- }
- return orig_device;
- }
-
- c10::DeviceIndex maybeExchangeDevice(c10::DeviceIndex device) const override {
- torch::utils::device_lazy_init(at::kMTIA);
-
- auto orig_device = current_device;
- if (current_device != device) {
- current_device = device;
- }
- return orig_device;
- }
-
- c10::Stream getDefaultStream(c10::DeviceIndex device) const override {
- torch::utils::device_lazy_init(at::kMTIA);
-
- return default_streams[device];
- }
-
- c10::Stream getCurrentStream(c10::DeviceIndex device) const override {
- torch::utils::device_lazy_init(at::kMTIA);
-
- return current_streams[device];
- }
-
- void setCurrentStream(const c10::Stream& stream) const override {
- torch::utils::device_lazy_init(at::kMTIA);
-
- current_streams[stream.device_index()] = stream;
- }
-
- c10::DeviceIndex getCurrentDevice() const override {
- torch::utils::device_lazy_init(at::kMTIA);
-
- return current_device;
- }
-
- void setCurrentDevice(c10::DeviceIndex device) const override {
- torch::utils::device_lazy_init(at::kMTIA);
-
- if (current_device != device) {
- current_device = device;
- }
- }
-};
-
-using at::MTIAHooksRegistry;
-using at::RegistererMTIAHooksRegistry;
-
-REGISTER_MTIA_HOOKS(MTIAHooks);
-C10_REGISTER_GUARD_IMPL(MTIA, MTIAGuardImpl);
-
-} // namespace torch::mtia
diff --git a/test/run_test.py b/test/run_test.py
index 516dbc753f..3626d31fc2 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -191,8 +191,6 @@ XPU_TEST = [
RUN_PARALLEL_BLOCKLIST = [
"test_cpp_extensions_jit",
"test_cpp_extensions_open_device_registration",
- "test_cpp_extensions_stream_and_event",
- "test_cpp_extensions_mtia_backend",
"test_jit_disabled",
"test_mobile_optimizer",
"test_multiprocessing",
diff --git a/test/test_cpp_extensions_mtia_backend.py b/test/test_cpp_extensions_mtia_backend.py
deleted file mode 100644
index e2ebbf702d..0000000000
--- a/test/test_cpp_extensions_mtia_backend.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Owner(s): ["module: mtia"]
-
-import os
-import shutil
-import sys
-import tempfile
-import unittest
-
-import torch
-import torch.testing._internal.common_utils as common
-import torch.utils.cpp_extension
-from torch.testing._internal.common_utils import (
- IS_ARM64,
- IS_LINUX,
- skipIfTorchDynamo,
- TEST_CUDA,
- TEST_PRIVATEUSE1,
-)
-from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
-
-
-TEST_CUDA = TEST_CUDA and CUDA_HOME is not None
-TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None
-
-
-def remove_build_path():
- if sys.platform == "win32":
- # Not wiping extensions build folder because Windows
- return
- default_build_root = torch.utils.cpp_extension.get_default_build_root()
- if os.path.exists(default_build_root):
- shutil.rmtree(default_build_root, ignore_errors=True)
-
-
-@unittest.skipIf(
- IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1,
- "Only on linux platform and mutual exclusive to other backends",
-)
-@torch.testing._internal.common_utils.markDynamoStrictTest
-class TestCppExtensionMTIABackend(common.TestCase):
- """Tests MTIA backend with C++ extensions."""
-
- module = None
-
- def setUp(self):
- super().setUp()
- # cpp extensions use relative paths. Those paths are relative to
- # this file, so we'll change the working directory temporarily
- self.old_working_dir = os.getcwd()
- os.chdir(os.path.dirname(os.path.abspath(__file__)))
-
- def tearDown(self):
- super().tearDown()
- # return the working directory (see setUp)
- os.chdir(self.old_working_dir)
-
- @classmethod
- def tearDownClass(cls):
- remove_build_path()
-
- @classmethod
- def setUpClass(cls):
- remove_build_path()
- build_dir = tempfile.mkdtemp()
- # Load the fake device guard impl.
- cls.module = torch.utils.cpp_extension.load(
- name="mtia_extension",
- sources=["cpp_extensions/mtia_extension.cpp"],
- build_directory=build_dir,
- extra_include_paths=[
- "cpp_extensions",
- "path / with spaces in it",
- "path with quote'",
- ],
- is_python_module=False,
- verbose=True,
- )
-
- @skipIfTorchDynamo("Not a TorchDynamo suitable test")
- def test_get_device_module(self):
- device = torch.device("mtia:0")
- default_stream = torch.get_device_module(device).current_stream()
- self.assertEqual(
- default_stream.device_type, int(torch._C._autograd.DeviceType.MTIA)
- )
- print(torch._C.Stream.__mro__)
- print(torch.cuda.Stream.__mro__)
-
- @skipIfTorchDynamo("Not a TorchDynamo suitable test")
- def test_stream_basic(self):
- default_stream = torch.mtia.current_stream()
- user_stream = torch.mtia.Stream()
- self.assertEqual(torch.mtia.current_stream(), default_stream)
- self.assertNotEqual(default_stream, user_stream)
- # Check mtia_extension.cpp, default stream id starts from 0.
- self.assertEqual(default_stream.stream_id, 0)
- self.assertNotEqual(user_stream.stream_id, 0)
- with torch.mtia.stream(user_stream):
- self.assertEqual(torch.mtia.current_stream(), user_stream)
- self.assertTrue(user_stream.query())
- default_stream.synchronize()
- self.assertTrue(default_stream.query())
-
- @skipIfTorchDynamo("Not a TorchDynamo suitable test")
- def test_stream_context(self):
- mtia_stream_0 = torch.mtia.Stream(device="mtia:0")
- mtia_stream_1 = torch.mtia.Stream(device="mtia:0")
- print(mtia_stream_0)
- print(mtia_stream_1)
- with torch.mtia.stream(mtia_stream_0):
- current_stream = torch.mtia.current_stream()
- msg = f"current_stream {current_stream} should be {mtia_stream_0}"
- self.assertTrue(current_stream == mtia_stream_0, msg=msg)
-
- with torch.mtia.stream(mtia_stream_1):
- current_stream = torch.mtia.current_stream()
- msg = f"current_stream {current_stream} should be {mtia_stream_1}"
- self.assertTrue(current_stream == mtia_stream_1, msg=msg)
-
- @skipIfTorchDynamo("Not a TorchDynamo suitable test")
- def test_stream_context_different_device(self):
- device_0 = torch.device("mtia:0")
- device_1 = torch.device("mtia:1")
- mtia_stream_0 = torch.mtia.Stream(device=device_0)
- mtia_stream_1 = torch.mtia.Stream(device=device_1)
- print(mtia_stream_0)
- print(mtia_stream_1)
- orig_current_device = torch.mtia.current_device()
- with torch.mtia.stream(mtia_stream_0):
- current_stream = torch.mtia.current_stream()
- self.assertTrue(torch.mtia.current_device() == device_0.index)
- msg = f"current_stream {current_stream} should be {mtia_stream_0}"
- self.assertTrue(current_stream == mtia_stream_0, msg=msg)
- self.assertTrue(torch.mtia.current_device() == orig_current_device)
- with torch.mtia.stream(mtia_stream_1):
- current_stream = torch.mtia.current_stream()
- self.assertTrue(torch.mtia.current_device() == device_1.index)
- msg = f"current_stream {current_stream} should be {mtia_stream_1}"
- self.assertTrue(current_stream == mtia_stream_1, msg=msg)
- self.assertTrue(torch.mtia.current_device() == orig_current_device)
-
- @skipIfTorchDynamo("Not a TorchDynamo suitable test")
- def test_device_context(self):
- device_0 = torch.device("mtia:0")
- device_1 = torch.device("mtia:1")
- with torch.mtia.device(device_0):
- self.assertTrue(torch.mtia.current_device() == device_0.index)
-
- with torch.mtia.device(device_1):
- self.assertTrue(torch.mtia.current_device() == device_1.index)
-
-
-if __name__ == "__main__":
- common.run_tests()
diff --git a/test/test_cpp_extensions_stream_and_event.py b/test/test_cpp_extensions_stream_and_event.py
deleted file mode 100644
index 0be81dd492..0000000000
--- a/test/test_cpp_extensions_stream_and_event.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Owner(s): ["module: mtia"]
-
-import os
-import shutil
-import sys
-import tempfile
-import unittest
-
-import torch
-import torch.testing._internal.common_utils as common
-import torch.utils.cpp_extension
-from torch.testing._internal.common_utils import (
- IS_ARM64,
- IS_LINUX,
- skipIfTorchDynamo,
- TEST_CUDA,
- TEST_PRIVATEUSE1,
-)
-from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
-
-
-TEST_CUDA = TEST_CUDA and CUDA_HOME is not None
-TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None
-
-
-def remove_build_path():
- if sys.platform == "win32":
- # Not wiping extensions build folder because Windows
- return
- default_build_root = torch.utils.cpp_extension.get_default_build_root()
- if os.path.exists(default_build_root):
- shutil.rmtree(default_build_root, ignore_errors=True)
-
-
-# Since we use a fake MTIA device backend to test generic Stream/Event, device backends are mutual exclusive to each other.
-# The test will be skipped if any of the following conditions are met:
-@unittest.skipIf(
- IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1,
- "Only on linux platform and mutual exclusive to other backends",
-)
-@torch.testing._internal.common_utils.markDynamoStrictTest
-class TestCppExtensionStreamAndEvent(common.TestCase):
- """Tests Stream and Event with C++ extensions."""
-
- module = None
-
- def setUp(self):
- super().setUp()
- # cpp extensions use relative paths. Those paths are relative to
- # this file, so we'll change the working directory temporarily
- self.old_working_dir = os.getcwd()
- os.chdir(os.path.dirname(os.path.abspath(__file__)))
-
- def tearDown(self):
- super().tearDown()
- # return the working directory (see setUp)
- os.chdir(self.old_working_dir)
-
- @classmethod
- def tearDownClass(cls):
- remove_build_path()
-
- @classmethod
- def setUpClass(cls):
- remove_build_path()
- build_dir = tempfile.mkdtemp()
- # Load the fake device guard impl.
- src = f"{os.path.abspath(os.path.dirname(__file__))}/cpp_extensions/mtia_extension.cpp"
- cls.module = torch.utils.cpp_extension.load(
- name="mtia_extension",
- sources=[src],
- build_directory=build_dir,
- extra_include_paths=[
- "cpp_extensions",
- "path / with spaces in it",
- "path with quote'",
- ],
- is_python_module=False,
- verbose=True,
- )
-
- @skipIfTorchDynamo("Not a TorchDynamo suitable test")
- def test_stream_event(self):
- s = torch.Stream()
- self.assertTrue(s.device_type, int(torch._C._autograd.DeviceType.MTIA))
- e = torch.Event()
- self.assertTrue(e.device.type, "mtia")
- # Should be nullptr by default
- self.assertTrue(e.event_id == 0)
- s.record_event(event=e)
- print(f"recorded event 1: {e}")
- self.assertTrue(e.event_id != 0)
- e2 = s.record_event()
- print(f"recorded event 2: {e2}")
- self.assertTrue(e2.event_id != 0)
- self.assertTrue(e2.event_id != e.event_id)
- e.synchronize()
- e2.synchronize()
- time_elapsed = e.elapsed_time(e2)
- print(f"time elapsed between e1 and e2: {time_elapsed}")
- old_event_id = e.event_id
- e.record(stream=s)
- print(f"recorded event 1: {e}")
- self.assertTrue(e.event_id == old_event_id)
-
-
-if __name__ == "__main__":
- common.run_tests()
diff --git a/tools/testing/modulefinder_determinator.py b/tools/testing/modulefinder_determinator.py
index ba58d75c57..ce55fdb424 100644
--- a/tools/testing/modulefinder_determinator.py
+++ b/tools/testing/modulefinder_determinator.py
@@ -21,8 +21,6 @@ TARGET_DET_LIST = [
"test_cpp_extensions_aot_no_ninja",
"test_cpp_extensions_jit",
"test_cpp_extensions_open_device_registration",
- "test_cpp_extensions_stream_and_event",
- "test_cpp_extensions_mtia_backend",
"test_cuda",
"test_cuda_primary_ctx",
"test_dataloader",
|
2.41.0
|
3a319a4fc3b2aac0c9d35d36e76dbfde37746f0
|
Thu, 25 Apr 2024 16:58:57 +0000
|
[PATCH 0665/1000] [export] kill _process_constraints() (#123985)
|
The process for populating range_constraints follows separate methods for non-strict (`make_constraints`) and strict (`_process_constraints`). The strict method is somewhat more convoluted, and the analysis that Dynamo performs for strict is already present as part of the non-strict process in make_constraints (produce_guards(), running the export constraint solver). This PR kills _process_constraints() and replaces its calls with make_constraints, without duplicating the work that Dynamo already does. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123985 Approved by: https://github.com/avikchaudhuri
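For orientation, a minimal sketch of the unified flow after this change; `build_range_constraints` is a hypothetical wrapper written only for illustration, while the two imported helpers and their argument orders are taken from the diff below.

```python
# Sketch only: strict and non-strict export now share make_constraints(); the
# guard-producing/solving half of the old non-strict path lives in
# produce_guards_and_solve_constraints().
from torch._export.non_strict_utils import (
    make_constraints,
    produce_guards_and_solve_constraints,
)


def build_range_constraints(
    fake_mode, gm, equalities_inputs, original_signature,
    dynamic_shapes, num_lifted_inputs, strict,
):
    if not strict:
        # Non-strict must produce guards and run the export constraint solver
        # itself; for strict export Dynamo has already done the equivalent work.
        produce_guards_and_solve_constraints(
            fake_mode, gm, equalities_inputs, original_signature
        )
    # Shared tail: turn user-specified dynamic_shapes plus the shape env's
    # inline constraints into the ExportedProgram's range_constraints.
    return make_constraints(fake_mode, gm, dynamic_shapes, num_lifted_inputs)
```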
|
diff --git a/torch/_export/__init__.py b/torch/_export/__init__.py
index 650e1c0711..5591b40e2f 100644
--- a/torch/_export/__init__.py
+++ b/torch/_export/__init__.py
@@ -28,6 +28,7 @@ from torch._dispatch.python import enable_python_dispatcher
from torch._dynamo.exc import UserError, UserErrorType
from torch._dynamo.source import ConstantSource
from torch._export.passes.collect_tracepoints_pass import CollectTracepointsPass
+from torch._export.non_strict_utils import make_constraints
from torch._functorch.aot_autograd import aot_export_module, GraphSignature
from torch._functorch.eager_transforms import functionalize
from torch._guards import detect_fake_mode
@@ -39,7 +40,6 @@ from torch._utils_internal import log_export_usage
from torch.export._tree_utils import reorder_kwargs
from torch.export._unlift import _create_stateful_graph_module
from torch.export.dynamic_shapes import (
- _process_constraints,
Constraint,
dims,
dynamic_dim,
@@ -175,7 +175,12 @@ def capture_pre_autograd_graph(
_restore_state_dict(f, m)
flat_args, _ = pytree.tree_flatten((args, kwargs or {}))
- range_constraints = _process_constraints(fake_mode, m, 0, flat_args)
+ range_constraints = make_constraints(
+ fake_mode,
+ m,
+ dynamic_shapes,
+ 0,
+ )
module = _create_stateful_graph_module(
m,
diff --git a/torch/_export/non_strict_utils.py b/torch/_export/non_strict_utils.py
index 425f37ba64..56812fe191 100644
--- a/torch/_export/non_strict_utils.py
+++ b/torch/_export/non_strict_utils.py
@@ -16,8 +16,7 @@ from torch._guards import Source
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.export import Constraint
from torch.export.dynamic_shapes import _Dim
-from torch.export.exported_program import InputKind
-from torch.export.graph_signature import CustomObjArgument, InputSpec, TensorArgument
+from torch.export.graph_signature import CustomObjArgument
from torch.fx.experimental.symbolic_shapes import (
ConstraintViolationError,
DimDynamic,
@@ -174,51 +173,47 @@ def make_fake_inputs(nn_module, args, kwargs, dynamic_shapes):
return fake_mode, fake_args, fake_kwargs, equalities_inputs, original_signature
-def make_constraints(
+def _flatten_dynamic_shapes(
+ dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any]]
+):
+ def _is_dynamic_shape_leaf(x):
+ if isinstance(x, dict):
+ x = list(x.values())
+ return x is None or all(isinstance(y, (_Dim, int)) or y is None for y in x)
+
+ if isinstance(dynamic_shapes, (list, tuple)):
+ flat_dynamic_shapes = []
+ for item in dynamic_shapes:
+ flat_shapes, _ = tree_flatten(
+ dynamic_shapes, is_leaf=_is_dynamic_shape_leaf
+ )
+ flat_dynamic_shapes += flat_shapes
+ else:
+ flat_dynamic_shapes, _ = tree_flatten(
+ dynamic_shapes, is_leaf=_is_dynamic_shape_leaf
+ )
+ return flat_dynamic_shapes
+
+
+def produce_guards_and_solve_constraints(
fake_mode: FakeTensorMode,
+ gm: torch.fx.GraphModule,
equalities_inputs: EqualityConstraint,
- dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any]],
- input_specs: List[InputSpec],
original_signature: inspect.Signature,
- gm: torch.fx.GraphModule,
):
"""
Given a fake mode, sources pairs corresponding to equal dynamic shape dimensions,
and a graph module, produce guards on the fake mode's shape env (raising constraint
- violations if any), solve (to suggest simplifications or fixes), and return the
- resulting range constraints and equality constraints.
- """
- # TODO(avik): refactor Dynamo to avoid duplication of the following code
- # between non-strict and strict.
- # Specifically, here (non-strict) we do the following post-tracing steps:
- # - Produce guards.
- # - Solve constraints.
- # - Install shape metadata in IR.
- # In strict, these steps are spread across multiple files:
- # - guards.py produces guards.
- # - eval_frame.py solves constraints
- # - _trace.py installs shape metadata in IR.
-
- inline_constraints = gm.meta.get("inline_constraints", [])
- range_constraints = {
- symbol: inline_constraints[symbol] for symbol in inline_constraints
- }
- if dynamic_shapes == []:
- return range_constraints
-
- def _is_dynamic_shape_leaf(x):
- if x is None:
- return True
- if isinstance(x, dict):
- x = list(x.values())
- return all(isinstance(y, (_Dim, int)) or y is None for y in x)
-
- flat_dynamic_shapes, _ = tree_flatten(
- dynamic_shapes, is_leaf=_is_dynamic_shape_leaf
- )
+ violations if any), solve (to suggest simplifications or fixes).
+ Dynamo already performs this, so this is for non-strict mode.
+ Additional inputs:
+ equalities_inputs: the equality constraints to use for guards
+ original_signature: the signature of the forward method
+ """
shape_env = fake_mode.shape_env
assert shape_env.tracked_fakes is not None
+
placeholders = [tf.fake for tf in shape_env.tracked_fakes]
sources = [tf.source for tf in shape_env.tracked_fakes]
input_contexts = [tf.symbolic_context for tf in shape_env.tracked_fakes]
@@ -255,23 +250,41 @@ def make_constraints(
if constraint_violation_error:
raise constraint_violation_error
- user_tensor_input_names = {
- spec.arg.name
- for spec in input_specs
- if spec.kind == InputKind.USER_INPUT and isinstance(spec.arg, TensorArgument)
+
+def make_constraints(
+ fake_mode: FakeTensorMode,
+ gm: torch.fx.GraphModule,
+ dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None],
+ num_lifted_inputs: int,
+):
+ """
+ Given a fake mode's shape env and user-specified dynamic shapes,
+ return the resulting range constraints and equality constraints.
+
+ Additional args:
+ num_lifted_inputs: the number of non-user-input placeholder nodes in the graph
+ (used only to enumerate the user-input nodes)
+ """
+
+ shape_env = fake_mode.shape_env
+ inline_constraints = gm.meta.get("inline_constraints", [])
+ range_constraints = {
+ symbol: inline_constraints[symbol] for symbol in inline_constraints
}
+ if not dynamic_shapes:
+ return range_constraints
+ flat_dynamic_shapes = _flatten_dynamic_shapes(dynamic_shapes)
input_dims = defaultdict(list)
free_symbols = set()
- input_index = 0
- for node in gm.graph.nodes:
- if node.name not in user_tensor_input_names:
+ for input_index, node in enumerate(gm.graph.nodes):
+ if input_index < num_lifted_inputs or node.op != "placeholder":
continue
if _is_constant_argument(node.meta["val"]) or isinstance(
node.meta["val"], CustomObjArgument
):
continue
- shape_spec = flat_dynamic_shapes[input_index]
+ shape_spec = flat_dynamic_shapes[input_index - num_lifted_inputs]
for i, d in enumerate(node.meta["val"].shape):
if isinstance(d, torch.SymInt):
# Look up the range constraint for the symbol corresponding to this shape dimension
@@ -290,7 +303,6 @@ def make_constraints(
]
input_dims[d.node.expr].append(InputDim(input_name=node.name, dim=i))
free_symbols.update(d.node.expr.free_symbols)
- input_index += 1
for symbol in free_symbols:
if symbol not in range_constraints:
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index e27bf9a016..918e10cb90 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -18,6 +18,7 @@ from torch._export.non_strict_utils import (
make_constraints,
make_fake_inputs,
make_fake_params_buffers,
+ produce_guards_and_solve_constraints,
)
from torch._export.passes.add_runtime_assertions_for_constraints_pass import (
_AddRuntimeAssertionsForInlineConstraintsPass,
@@ -50,7 +51,6 @@ from torch.utils._sympy.value_ranges import ValueRangeError
from ._safeguard import AutogradStateOpsFailSafeguard
-from .dynamic_shapes import _process_constraints
from .exported_program import (
_disable_prexisiting_fake_mode,
ExportedProgram,
@@ -1005,18 +1005,30 @@ def _export(
for k, v in fake_mode.shape_env.var_to_range.items()
if free_unbacked_symbols(k)
}
+ num_lifted = len(
+ [
+ spec
+ for spec in ep_non_strict.sig.input_specs
+ if spec.kind != InputKind.USER_INPUT
+ ]
+ )
try:
- range_constraints = make_constraints(
+ produce_guards_and_solve_constraints(
fake_mode,
+ ep_non_strict.gm,
equalities_inputs,
- dynamic_shapes if dynamic_shapes else [],
- ep_non_strict.sig.input_specs,
original_signature,
- ep_non_strict.gm,
)
except (ConstraintViolationError, ValueRangeError) as e:
raise UserError(UserErrorType.CONSTRAINT_VIOLATION, str(e)) # noqa: TRY200
+ range_constraints = make_constraints(
+ fake_mode,
+ ep_non_strict.gm,
+ dynamic_shapes,
+ num_lifted,
+ )
+
assert out_spec is not None
gm = ep_non_strict.gm
@@ -1216,11 +1228,11 @@ def _export(
),
len(export_graph_signature.input_specs),
)
- range_constraints = _process_constraints(
+ range_constraints = make_constraints(
dynamo_fake_mode,
gm,
+ dynamic_shapes,
num_lifted,
- flat_args,
)
# Do some cleanups on the graph module to restore the state dict to the
diff --git a/torch/export/dynamic_shapes.py b/torch/export/dynamic_shapes.py
index 4571781147..806a7bef9a 100644
--- a/torch/export/dynamic_shapes.py
+++ b/torch/export/dynamic_shapes.py
@@ -5,10 +5,9 @@ import math
import sys
import weakref
from collections import defaultdict
-from typing import Any, Callable, Dict, List, Optional, Set, Tuple, TYPE_CHECKING, Union
+from typing import Any, Callable, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
import torch
-from torch._subclasses.fake_tensor import FakeTensor
from torch.utils._pytree import SUPPORTED_NODES
from .exported_program import ExportedProgram
@@ -560,7 +559,6 @@ def _process_dynamic_shapes(
kwargs: Optional[Dict[str, Any]] = None,
dynamic_shapes: Optional[Union[Dict[str, Any], Tuple[Any], List[Any]]] = None,
) -> Optional[List[Constraint]]:
- from collections import defaultdict
from collections.abc import Mapping, Sequence
from torch._dynamo.exc import UserError, UserErrorType
@@ -810,106 +808,3 @@ def _process_dynamic_shapes(
constraints.append(primary)
return constraints # type: ignore[return-value]
-
-
-def _process_constraints(
- fake_mode,
- graph_module: torch.fx.GraphModule,
- num_lifted_params_buffers: int,
- example_inputs: List[torch.Tensor],
-) -> Dict:
- """
- Process the constraints stored in the graph module to return something more readable.
-
- Args:
- graph_module (torch.fx.GraphModule): GraphModule returned from
- dynamo.export, which contains the "input_shape_constraints" and
- "inline_constraints" metadata
-
- example_inputs: Flattened list of example inputs used to export the graph module
-
- Returns:
- range_constraints (Dict[sympy.Symbol, ValueRanges]): Mapping of
- symbols (from SymInts) appearing in the fake tensors in
- node.meta["val"] to their range constraints, which are a tuple
- containing (lower, upper) constraints.
- """
- from torch._export.passes.add_runtime_assertions_for_constraints_pass import (
- InputDim,
- )
-
- # Import sympy locally
- from torch.fx.experimental.symbolic_shapes import SymInt
- from torch.utils._sympy.value_ranges import ValueRanges
-
- input_shape_constraints = graph_module.meta.get("input_shape_constraints", [])
- inline_constraints = graph_module.meta.get("inline_constraints", [])
-
- # Create dict mapping tensor_id to node names
- tensor_id_to_nodes: Dict[int, List[str]] = defaultdict(list)
- # Create dict mapping placeholder node names to their nodes
- placeholder_nodes: Dict[str, torch.fx.Node] = {}
- for i, node in enumerate(graph_module.graph.nodes):
- if node.op != "placeholder":
- # All placeholder nodes should be together in the beginning of the
- # graph
- break
- if i >= num_lifted_params_buffers:
- example_input = example_inputs[i - num_lifted_params_buffers]
- tensor_id_to_nodes[id(example_input)].append(node.name)
- placeholder_nodes[node.name] = node
-
- # Create dict mapping (node name, dim) a list of range (lower, upper)
- # constraints
- multi_range_constraints: Dict[InputDim, List[ValueRanges]] = defaultdict(list)
- for constraint in input_shape_constraints:
- for node in tensor_id_to_nodes[constraint["t_id"]]:
- # skip static shape constraints
- if constraint["min"] == constraint["max"]:
- continue
- node_dim = InputDim(node, constraint["dim"])
-
- # Accumulate range constraints
- multi_range_constraints[node_dim].append(
- ValueRanges(constraint["min"], constraint["max"])
- )
-
- # Create dict mapping symbol to a singular range (lower, upper)
- range_constraints: Dict[Any, ValueRanges] = {}
-
- # Add inline constraints to range_constraints
- range_constraints = {
- symbol: inline_constraints[symbol] for symbol in inline_constraints
- }
-
- free_symbols: Set["Symbol"] = set()
- # Add input range constraints to range_constraints
- for input_dim, multi_range_constraint in multi_range_constraints.items(): # type: ignore[assignment]
- # Simplify the range constraints into a single range constraint
- # Ex. ranges [2, 10] and [3, 11] would get merged to [3, 10]
- min_vals = [rc.lower for rc in multi_range_constraint]
- max_vals = [rc.upper for rc in multi_range_constraint]
- min_val = max(min_vals) # type: ignore[type-var]
- max_val = min(max_vals) # type: ignore[type-var]
- assert min_val <= max_val # type: ignore[operator]
-
- # Add input node range constraints
- val = placeholder_nodes[input_dim.input_name].meta["val"]
- assert isinstance(val, FakeTensor)
- symint = val.shape[input_dim.dim]
- assert isinstance(
- symint, SymInt
- ), f"Expected SymInt but got {symint}: {type(symint)}"
- symbol = symint.node.expr
- range_constraints[symbol] = ValueRanges(min_val, max_val)
- free_symbols.update(symbol.free_symbols)
-
- for symbol in free_symbols:
- if symbol not in range_constraints:
- # Placeholders can have symbolic shapes that are derived expressions.
- # The above code will record direct range constraints for them
- # so that we can do runtime assertions. In addition, for serde checks
- # we want to record range constraints for their root symbols.
- range_constraints[symbol] = fake_mode.shape_env.var_to_range[symbol]
-
- return range_constraints
diff --git a/torch/export/exported_program.py b/torch/export/exported_program.py
index 64844ea20d..7a829df900 100644
--- a/torch/export/exported_program.py
+++ b/torch/export/exported_program.py
@@ -651,8 +651,7 @@ class ExportedProgram:
new_range_constraints = _get_updated_range_constraints(
gm,
- self._num_lifted_params_buffers(),
- pytree.tree_leaves(self.example_inputs),
+ self.range_constraints,
_is_executorch=False,
)
@@ -764,8 +763,7 @@ class ExportedProgram:
state_dict=self.state_dict,
range_constraints=_get_updated_range_constraints(
transformed_gm,
- self._num_lifted_params_buffers(),
- pytree.tree_leaves(self.example_inputs),
+ self.range_constraints,
_is_executorch=False,
),
module_call_graph=copy.deepcopy(self._module_call_graph),
@@ -812,8 +810,7 @@ class ExportedProgram:
def _get_updated_range_constraints(
gm: torch.fx.GraphModule,
- num_lifted: Optional[int] = None,
- example_inputs: Optional[List[Any]] = None,
+ old_range_constraints: "Optional[Dict[sympy.Symbol, Any]]" = None,
_is_executorch: bool = True,
) -> "Dict[sympy.Symbol, Any]":
def get_shape_env(gm):
@@ -833,8 +830,7 @@ def _get_updated_range_constraints(
# FIXME(tmanlaibaatar) Remove this whole branch once https://github.com/pytorch/pytorch/pull/123764
if _is_executorch:
- assert num_lifted is None
- assert example_inputs is None
+ assert old_range_constraints is None
shape_env, _ = get_shape_env(gm)
if shape_env is None:
return {}
@@ -851,17 +847,13 @@ def _get_updated_range_constraints(
range_constraints[k] = v
return range_constraints
- assert num_lifted is not None
- assert example_inputs is not None
+ assert old_range_constraints is not None
shape_env, fake_mode = get_shape_env(gm)
if shape_env is None:
return {}
- from torch.export.dynamic_shapes import _process_constraints
-
- range_constraints = _process_constraints(fake_mode, gm, num_lifted, example_inputs)
-
+ range_constraints = copy.copy(old_range_constraints)
range_constraints = {
k: v for k, v in range_constraints.items() if k not in shape_env.replacements
}
|
2.41.0
|
9b22fbef9186be31467c3da790e4220ae029ac1
|
Thu, 25 Apr 2024 09:41:55 -0400
|
[PATCH 0666/1000] Typo fix: s/nonzero/unique/ (#124935)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124935 Approved by: https://github.com/albanD, https://github.com/Skylion007
|
diff --git a/torch/_subclasses/fake_impls.py b/torch/_subclasses/fake_impls.py
index b7977049e7..dba028b4df 100644
--- a/torch/_subclasses/fake_impls.py
+++ b/torch/_subclasses/fake_impls.py
@@ -285,8 +285,8 @@ def unique2(
# symint cannot equal zero). We could also unconditionally
# allocate an unbacked SymInt and not refine its range,
# but this seems more precise.
- nnz = arg._nonzero_memo = 0
- arg._nonzero_memo_vc = arg._version
+ nnz = arg._unique_memo = 0
+ arg._unique_memo_vc = arg._version
else:
nnz = fake_mode.shape_env.create_unbacked_symint()
|
2.41.0
|
8aed4ce3f67aa4236e80a11209a40dd0c4ebc87
|
Thu, 25 Apr 2024 17:27:25 +0000
|
[PATCH 0667/1000] Fix MPI_Group initialization errors (#124824)
|
Fixes MPI_Group initialization errors introduced in #124156, since MPI_Group is not a pointer in some MPI implementations. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124824 Approved by: https://github.com/ezyang
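For context, a short sketch of why value-initialization is used here; the typedefs in the comments are simplified stand-ins for what real MPI headers declare, not quotes from any implementation.

```cpp
#include <mpi.h>
#include <vector>

// MPI_Group is an opaque handle whose concrete type varies by implementation,
// roughly (simplified, illustrative only):
//   typedef int MPI_Group;                   // integer handle (MPICH-style)
//   typedef struct some_group_t* MPI_Group;  // pointer handle (Open MPI-style)
// `MPI_Group g = nullptr;` only compiles for the pointer flavor, whereas
// value-initialization is valid for both.
void makeSubGroup(const std::vector<int>& ranks) {
  MPI_Group worldGroup{};  // zero/value-initialized handle
  MPI_Group ranksGroup{};
  MPI_Comm_group(MPI_COMM_WORLD, &worldGroup);
  MPI_Group_incl(
      worldGroup, static_cast<int>(ranks.size()), ranks.data(), &ranksGroup);
  MPI_Group_free(&ranksGroup);
  MPI_Group_free(&worldGroup);
}
```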
|
diff --git a/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp b/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp
index 90031f4a93..94d7cd9cca 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp
@@ -267,8 +267,8 @@ c10::intrusive_ptr<ProcessGroupMPI> ProcessGroupMPI::createProcessGroupMPI(
// If no ranks are specified, assume we're creating the root group
if (!ranks.empty()) {
- MPI_Group worldGroup = nullptr;
- MPI_Group ranksGroup = nullptr;
+ MPI_Group worldGroup{};
+ MPI_Group ranksGroup{};
MPI_CHECK(MPI_Comm_group(MPI_COMM_WORLD, &worldGroup));
MPI_CHECK(
MPI_Group_incl(worldGroup, ranks.size(), ranks.data(), &ranksGroup));
|
2.41.0
|
51d9a319d5e703c2b726d39a5711c0112ff5648
|
Tue, 23 Apr 2024 16:10:51 +0000
|
[PATCH 0668/1000] [AOTI] Add a unit test (#124486)
|
Summary: from https://github.com/pytorch/pytorch/issues/123745, the issue appears to be already fixed in the nightly, but the test is still worth adding. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124486 Approved by: https://github.com/chenyang78
|
diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py
index 24d2ba8e2a..e48f4ed2ef 100644
--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py
@@ -10,6 +10,7 @@ from unittest import skip
import torch
import torch._inductor
+import torch.nn as nn
from torch._dynamo.testing import rand_strided, same
from torch._dynamo.utils import counters
from torch._inductor import config
@@ -2668,6 +2669,29 @@ class AOTInductorTestsTemplate:
example_inputs = (torch.randn(16, 16, 16, device=self.device),)
self.check_model(Model(), example_inputs)
+ def test_misc_1(self):
+ class Model(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.mlp = nn.Sequential(
+ nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 32), nn.Sigmoid()
+ )
+ self.emb = nn.EmbeddingBag(num_embeddings=128, embedding_dim=32)
+ self.over_arch = nn.Sequential(
+ nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, 32), nn.Sigmoid()
+ )
+
+ def forward(self, x, y):
+ mlp_output = self.mlp(x)
+ emb_output = self.emb(y)
+ return self.over_arch(torch.concat([mlp_output, emb_output], dim=1))
+
+ example_inputs = (
+ torch.randn(16, 128, device=self.device),
+ torch.randint(0, 128, (16, 10), device=self.device),
+ )
+ self.check_model(Model(), example_inputs)
+
common_utils.instantiate_parametrized_tests(AOTInductorTestsTemplate)
@@ -2874,6 +2898,7 @@ if not IS_FBCODE:
"test_empty_graph": fail_minimal_arrayref_interface(is_skip=True),
"test_large": fail_minimal_arrayref_interface(is_skip=True),
"test_large_mmaped_weights": fail_minimal_arrayref_interface(is_skip=True),
+ "test_misc_1": fail_minimal_arrayref_interface(is_skip=True),
"test_missing_output": fail_minimal_arrayref_interface(is_skip=True),
"test_model_modified_weights": fail_minimal_arrayref_interface(
is_skip=True
|
2.41.0
|
5182bb75bbc109cb327212e7205981fbf72cb5e
|
Thu, 25 Apr 2024 18:31:03 +0000
|
[PATCH 0670/1000] Enable UFMT on `test/test_cuda*.py` (#124352)
|
Part of: #123062

Ran lintrunner on:
- test/test_cuda.py
- test/test_cuda_expandable_segments.py
- test/test_cuda_multigpu.py
- test/test_cuda_nvml_based_avail.py
- test/test_cuda_primary_ctx.py
- test/test_cuda_sanitizer.py
- test/test_cuda_trace.py

Detail:
```bash
$ lintrunner -a --take UFMT --all-files
ok No lint issues.
Successfully applied all patches.
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124352 Approved by: https://github.com/ezyang
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 55fb5970ec..7ea6532ccf 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1051,13 +1051,6 @@ exclude_patterns = [
'test/quantization/fx/test_numeric_suite_fx.py',
'test/quantization/fx/test_quantize_fx.py',
'test/quantization/fx/test_subgraph_rewriter.py',
- 'test/test_cuda.py',
- 'test/test_cuda_expandable_segments.py',
- 'test/test_cuda_multigpu.py',
- 'test/test_cuda_nvml_based_avail.py',
- 'test/test_cuda_primary_ctx.py',
- 'test/test_cuda_sanitizer.py',
- 'test/test_cuda_trace.py',
'test/test_custom_op_testing.py',
'test/test_dataloader.py',
'test/test_datapipe.py',
diff --git a/test/test_cuda.py b/test/test_cuda.py
index 800c94cefb..24acfb0dc2 100644
--- a/test/test_cuda.py
+++ b/test/test_cuda.py
@@ -1,41 +1,71 @@
# Owner(s): ["module: cuda"]
-from itertools import product
import collections
import contextlib
-from copy import deepcopy
import gc
+import json
import os
import pickle
+import random
+import subprocess
import sys
import tempfile
import threading
import unittest
import warnings
-import subprocess
-import random
+from copy import deepcopy
+from itertools import product
from random import randint
-import json
import torch
import torch.cuda
-from torch.cuda._memory_viz import profile_plot, _profile_to_snapshot
-from torch.cuda._memory_viz import trace_plot
-from torch.cuda._memory_viz import segment_plot
from torch import inf, nan
-from torch.utils.checkpoint import checkpoint_sequential
-from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
- NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_WINDOWS, \
- slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_CUDA, TEST_CUDA_GRAPH, TEST_WITH_ROCM, TEST_NUMPY, \
- get_cycles_per_ms, parametrize, instantiate_parametrized_tests, subtest, IS_JETSON, gcIfJetson, NoTest, IS_LINUX, IS_ARM64, \
- serialTest
-from torch.testing._internal.common_cuda import TEST_CUDNN, TEST_MULTIGPU, \
- _create_scaling_case, _get_torch_cuda_version
-from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA
-from torch.testing._internal.common_optimizers import (
- optim_db, optims)
+from torch.cuda._memory_viz import (
+ _profile_to_snapshot,
+ profile_plot,
+ segment_plot,
+ trace_plot,
+)
from torch.testing._internal.autocast_test_lists import AutocastTestLists
+from torch.testing._internal.common_cuda import (
+ _create_scaling_case,
+ _get_torch_cuda_version,
+ TEST_CUDNN,
+ TEST_MULTIGPU,
+)
+from torch.testing._internal.common_device_type import (
+ instantiate_device_type_tests,
+ onlyCUDA,
+)
+from torch.testing._internal.common_optimizers import optim_db, optims
+from torch.testing._internal.common_utils import (
+ freeze_rng_state,
+ gcIfJetson,
+ get_cycles_per_ms,
+ instantiate_parametrized_tests,
+ IS_ARM64,
+ IS_JETSON,
+ IS_LINUX,
+ IS_WINDOWS,
+ load_tests,
+ NO_MULTIPROCESSING_SPAWN,
+ NoTest,
+ parametrize,
+ run_tests,
+ serialTest,
+ skipCUDAMemoryLeakCheckIf,
+ skipCUDANonDefaultStreamIf,
+ skipIfRocm,
+ slowTest,
+ subtest,
+ TEST_CUDA,
+ TEST_CUDA_GRAPH,
+ TEST_NUMPY,
+ TEST_WITH_ROCM,
+ TestCase,
+)
+from torch.utils.checkpoint import checkpoint_sequential
from torch.utils.viz._cycles import observe_tensor_cycles
# load_tests from common_utils is used to automatically filter tests for
@@ -43,7 +73,7 @@ from torch.utils.viz._cycles import observe_tensor_cycles
load_tests = load_tests
if not TEST_CUDA:
- print('CUDA not available, skipping tests', file=sys.stderr)
+ print("CUDA not available, skipping tests", file=sys.stderr)
TestCase = NoTest # noqa: F811
try:
@@ -55,7 +85,9 @@ except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
-TEST_CUDAMALLOCASYNC = TEST_CUDA and (torch.cuda.get_allocator_backend() == "cudaMallocAsync")
+TEST_CUDAMALLOCASYNC = TEST_CUDA and (
+ torch.cuda.get_allocator_backend() == "cudaMallocAsync"
+)
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_BF16 = False
@@ -76,14 +108,16 @@ class TestCuda(TestCase):
def setUp(self):
super().setUp()
- self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
+ self.autocast_lists = AutocastTestLists(torch.device("cuda:0"))
def tearDown(self):
del self.autocast_lists
super().tearDown()
def test_pinned_memory_with_cudaregister(self):
- torch.cuda.memory._set_allocator_settings("pinned_use_cuda_host_register:True,pinned_num_register_threads:8")
+ torch.cuda.memory._set_allocator_settings(
+ "pinned_use_cuda_host_register:True,pinned_num_register_threads:8"
+ )
t = torch.ones(20)
self.assertFalse(t.is_pinned())
try:
@@ -97,8 +131,10 @@ class TestCuda(TestCase):
def test_pinned_memory_with_cudaregister_multithread(self):
num_threads = 4
- threads = [threading.Thread(target=self.test_pinned_memory_with_cudaregister)
- for t in range(num_threads)]
+ threads = [
+ threading.Thread(target=self.test_pinned_memory_with_cudaregister)
+ for t in range(num_threads)
+ ]
for thread in threads:
thread.start()
for thread in threads:
@@ -134,8 +170,9 @@ class TestCuda(TestCase):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
- with self.assertRaisesRegex(torch.cuda.CudaError,
- "out of memory|hipErrorOutOfMemory"):
+ with self.assertRaisesRegex(
+ torch.cuda.CudaError, "out of memory|hipErrorOutOfMemory"
+ ):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
@@ -161,33 +198,45 @@ class TestCuda(TestCase):
self.assertEqual(current_device_capability, device_capability_no_argument)
def test_out_of_memory(self):
- tensor = torch.zeros(1024, device='cuda')
+ tensor = torch.zeros(1024, device="cuda")
- oom_regex = "would exceed allowed memory" if TEST_CUDAMALLOCASYNC else \
- "Tried to allocate 800000000.00 GiB"
+ oom_regex = (
+ "would exceed allowed memory"
+ if TEST_CUDAMALLOCASYNC
+ else "Tried to allocate 800000000.00 GiB"
+ )
with self.assertRaisesRegex(RuntimeError, oom_regex):
- torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
+ torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device="cuda")
- with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
- torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
+ with self.assertRaisesRegex(
+ RuntimeError, "Tried to allocate more than 1EB memory"
+ ):
+ torch.empty(
+ 1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device="cuda"
+ )
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
- @unittest.skipIf(TEST_CUDAMALLOCASYNC or IS_JETSON, "Segmentation fault (core dumped)")
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC or IS_JETSON, "Segmentation fault (core dumped)"
+ )
@serialTest()
def test_out_of_memory_retry(self):
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
- oom_regex = "would exceed allowed memory" if TEST_CUDAMALLOCASYNC else \
- "Tried to allocate"
+ oom_regex = (
+ "would exceed allowed memory"
+ if TEST_CUDAMALLOCASYNC
+ else "Tried to allocate"
+ )
size = int(total_memory * 0.5)
- a = torch.empty(size , dtype=torch.int8, device='cuda')
+ a = torch.empty(size, dtype=torch.int8, device="cuda")
with self.assertRaisesRegex(RuntimeError, oom_regex):
- b = torch.empty(size, dtype=torch.int8, device='cuda')
+ b = torch.empty(size, dtype=torch.int8, device="cuda")
del a
- b = torch.empty(size, dtype=torch.int8, device='cuda')
+ b = torch.empty(size, dtype=torch.int8, device="cuda")
del b
# We used a lot of memory here, clean up so we don't affect other tests too much
torch.cuda.empty_cache()
@@ -202,23 +251,24 @@ class TestCuda(TestCase):
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
- tensor = torch.zeros(1024, device='cuda')
+ tensor = torch.zeros(1024, device="cuda")
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
# test 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
- tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
+ tmp_tensor = torch.empty(application, dtype=torch.int8, device="cuda")
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
# it will get OOM when try to allocate more than half memory.
- oom_regex = "would exceed allowed memory" if TEST_CUDAMALLOCASYNC else \
- "out of memory"
+ oom_regex = (
+ "would exceed allowed memory" if TEST_CUDAMALLOCASYNC else "out of memory"
+ )
with self.assertRaisesRegex(RuntimeError, oom_regex):
- torch.empty(application, dtype=torch.int8, device='cuda')
+ torch.empty(application, dtype=torch.int8, device="cuda")
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
@@ -277,9 +327,11 @@ class TestCuda(TestCase):
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
- src = torch.randn(1000000,
- device="cuda" if dst == "cpu" else "cpu",
- pin_memory=True if dst == "cuda" else False)
+ src = torch.randn(
+ 1000000,
+ device="cuda" if dst == "cpu" else "cpu",
+ pin_memory=True if dst == "cuda" else False,
+ )
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
@@ -310,10 +362,15 @@ class TestCuda(TestCase):
q_copy[1].fill_(10)
self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
- @unittest.skipIf(TEST_CUDAMALLOCASYNC or TEST_WITH_ROCM, "temporarily disabled for async")
- @unittest.skipIf(_get_torch_cuda_version() >= (12, 2), "skipped as explicit workspace allocation is removed")
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC or TEST_WITH_ROCM, "temporarily disabled for async"
+ )
+ @unittest.skipIf(
+ _get_torch_cuda_version() >= (12, 2),
+ "skipped as explicit workspace allocation is removed",
+ )
def test_cublas_workspace_explicit_allocation(self):
- a = torch.randn(7, 7, device='cuda', requires_grad=False)
+ a = torch.randn(7, 7, device="cuda", requires_grad=False)
default_workspace_size = 4096 * 2 * 1024 + 16 * 8 * 1024 # :4096:2:16:8
# different size (32 MiB) expected on Hopper GPU
if torch.cuda.get_device_capability() == (9, 0):
@@ -321,29 +378,30 @@ class TestCuda(TestCase):
def check_workspace_size(inp):
torch._C._cuda_clearCublasWorkspaces()
- start = torch.torch.cuda.memory_stats()['active_bytes.all.allocated']
+ start = torch.torch.cuda.memory_stats()["active_bytes.all.allocated"]
with torch.no_grad():
torch.matmul(inp, inp)
- finish = torch.torch.cuda.memory_stats()['active_bytes.all.allocated']
+ finish = torch.torch.cuda.memory_stats()["active_bytes.all.allocated"]
return finish - start
# check default
- os.environ['CUBLAS_WORKSPACE_CONFIG'] = ''
+ os.environ["CUBLAS_WORKSPACE_CONFIG"] = ""
self.assertTrue(abs(check_workspace_size(a) - default_workspace_size) < 524288)
# check default with bad user config
- os.environ['CUBLAS_WORKSPACE_CONFIG'] = '-1'
+ os.environ["CUBLAS_WORKSPACE_CONFIG"] = "-1"
self.assertTrue(abs(check_workspace_size(a) - default_workspace_size) < 524288)
# check valid config
- os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':128:8:64:16:32:32'
+ os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":128:8:64:16:32:32"
self.assertTrue(abs(check_workspace_size(a) - (3072 * 1024)) < 524288)
torch._C._cuda_clearCublasWorkspaces()
def test_cublas_allow_tf32_get_set(self):
- skip_tf32_cublas = 'TORCH_ALLOW_TF32_CUBLAS_OVERRIDE' in os.environ and\
- int(os.environ['TORCH_ALLOW_TF32_CUBLAS_OVERRIDE'])
+ skip_tf32_cublas = "TORCH_ALLOW_TF32_CUBLAS_OVERRIDE" in os.environ and int(
+ os.environ["TORCH_ALLOW_TF32_CUBLAS_OVERRIDE"]
+ )
if skip_tf32_cublas:
self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
return
@@ -356,42 +414,55 @@ class TestCuda(TestCase):
def test_float32_matmul_precision_get_set(self):
orig = torch.get_float32_matmul_precision()
- skip_tf32_cublas = 'TORCH_ALLOW_TF32_CUBLAS_OVERRIDE' in os.environ and\
- int(os.environ['TORCH_ALLOW_TF32_CUBLAS_OVERRIDE'])
+ skip_tf32_cublas = "TORCH_ALLOW_TF32_CUBLAS_OVERRIDE" in os.environ and int(
+ os.environ["TORCH_ALLOW_TF32_CUBLAS_OVERRIDE"]
+ )
# this is really just checking that the environment variable is respected during testing
# and not overwritten by another function that doesn't revert it to the intitial value
if not skip_tf32_cublas:
self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
- self.assertEqual(torch.get_float32_matmul_precision(), 'highest')
+ self.assertEqual(torch.get_float32_matmul_precision(), "highest")
else:
self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
- for p in ('medium', 'high'):
+ for p in ("medium", "high"):
torch.set_float32_matmul_precision(p)
self.assertEqual(torch.get_float32_matmul_precision(), p)
self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
- torch.set_float32_matmul_precision('highest')
- self.assertEqual(torch.get_float32_matmul_precision(), 'highest')
+ torch.set_float32_matmul_precision("highest")
+ self.assertEqual(torch.get_float32_matmul_precision(), "highest")
self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
torch.set_float32_matmul_precision(orig)
def test_cublas_allow_fp16_reduced_precision_reduction_get_set(self):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
- self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), orig)
+ self.assertEqual(
+ torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), orig
+ )
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = not orig
- self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), not orig)
+ self.assertEqual(
+ torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), not orig
+ )
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
def test_cublas_allow_bf16_reduced_precision_reduction_get_set(self):
orig = torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction
- self.assertEqual(torch._C._get_cublas_allow_bf16_reduced_precision_reduction(), orig)
+ self.assertEqual(
+ torch._C._get_cublas_allow_bf16_reduced_precision_reduction(), orig
+ )
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = not orig
- self.assertEqual(torch._C._get_cublas_allow_bf16_reduced_precision_reduction(), not orig)
+ self.assertEqual(
+ torch._C._get_cublas_allow_bf16_reduced_precision_reduction(), not orig
+ )
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = orig
def test_cudnn_allow_tf32_get_set(self):
- with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
+ with torch.backends.cudnn.flags(
+ enabled=None, benchmark=None, deterministic=None, allow_tf32=False
+ ):
self.assertFalse(torch.backends.cudnn.allow_tf32)
- with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
+ with torch.backends.cudnn.flags(
+ enabled=None, benchmark=None, deterministic=None, allow_tf32=True
+ ):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
@@ -411,7 +482,7 @@ class TestCuda(TestCase):
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
- x = torch.empty(2**30, device='cuda')
+ x = torch.empty(2**30, device="cuda")
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
@@ -432,9 +503,11 @@ class TestCuda(TestCase):
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
- t = torch.tensor([[False, True], [True, True]], device='cuda')
- self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
- torch.tensor([[False, False], [True, True]], device='cuda'))
+ t = torch.tensor([[False, True], [True, True]], device="cuda")
+ self.assertEqual(
+ torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device="cuda")),
+ torch.tensor([[False, False], [True, True]], device="cuda"),
+ )
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
@@ -463,23 +536,28 @@ class TestCuda(TestCase):
def test_specify_improper_device_name(self):
import os
+
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
- torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
- _use_new_zipfile_serialization=True)
- torch.load(fname, 'cuda0')
+ torch.save(
+ [torch.nn.Parameter(torch.randn(10, 10))],
+ fname,
+ _use_new_zipfile_serialization=True,
+ )
+ torch.load(fname, "cuda0")
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
+
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
- _get_device_index('cuda0', optional=True)
+ _get_device_index("cuda0", optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
- cpu_device = torch.device('cpu')
+ cpu_device = torch.device("cpu")
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
@@ -552,7 +630,9 @@ class TestCuda(TestCase):
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
- self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used to soon')
+ self.assertNotEqual(
+ tmp2.data_ptr(), ptr[0], msg="allocation re-used to soon"
+ )
self.assertEqual(result.tolist(), [1, 2, 3, 4])
@@ -562,7 +642,7 @@ class TestCuda(TestCase):
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
- self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
+ self.assertEqual(tmp3.data_ptr(), ptr[0], msg="allocation not re-used")
def test_record_stream_on_shifted_view(self):
# See issue #27366
@@ -609,7 +689,7 @@ class TestCuda(TestCase):
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
- self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
+ self.assertEqual(t.data_ptr(), ptr, msg="allocation not reused")
# check that the allocation is not re-used if it's in-use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
@@ -617,7 +697,7 @@ class TestCuda(TestCase):
gpu_tensor.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([1]).pin_memory()
- self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
+ self.assertNotEqual(t.data_ptr(), ptr, msg="allocation re-used too soon")
self.assertEqual(list(gpu_tensor), [1])
def test_caching_allocator_record_stream_oom(self):
@@ -626,10 +706,10 @@ class TestCuda(TestCase):
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
- y = torch.zeros(40 * 1024 * 1024, device='cuda')
+ y = torch.zeros(40 * 1024 * 1024, device="cuda")
for _ in range(100):
- x = torch.empty(40 * 1024 * 1024, device='cuda')
+ x = torch.empty(40 * 1024 * 1024, device="cuda")
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
@@ -642,62 +722,101 @@ class TestCuda(TestCase):
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
- x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
+ x = torch.ones(512, 8, dtype=torch.float32, device="cuda")
torch.sum(x, 0)
def test_sum_fp16(self):
- x = torch.zeros(10, device='cuda', dtype=torch.float16)
+ x = torch.zeros(10, device="cuda", dtype=torch.float16)
self.assertEqual(x.sum(), 0)
- x = torch.ones(65504, device='cuda', dtype=torch.float16)
+ x = torch.ones(65504, device="cuda", dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
- x = torch.ones(65536, device='cuda', dtype=torch.float16)
+ x = torch.ones(65536, device="cuda", dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
- x = a.to(device='cuda', dtype=torch.float16)
+ x = a.to(device="cuda", dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
- x = a.to(device='cuda', dtype=torch.float16)
+ x = a.to(device="cuda", dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
- x = torch.ones(65536, device='cuda', dtype=torch.float16)
+ x = torch.ones(65536, device="cuda", dtype=torch.float16)
self.assertEqual(x.mean(), 1)
- x = torch.ones(65536, device='cuda', dtype=torch.float16)
+ x = torch.ones(65536, device="cuda", dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in case of non-zero identity element
- x = torch.ones(240000, device='cuda', dtype=torch.float32)
+ x = torch.ones(240000, device="cuda", dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
- x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
+ x = torch.ones(240000, device="cuda", dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
- freqs = torch.cuda.FloatTensor([
- 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
- 0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
- 0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
- 0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
- 0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
- 0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
- 0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
- 0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
- 0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
- 0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
- 0.00020504408166743815, 0.00012302644609007984, 0.0,
- 0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0])
+ freqs = torch.cuda.FloatTensor(
+ [
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.03178183361887932,
+ 0.027680952101945877,
+ 0.033176131546497345,
+ 0.046052902936935425,
+ 0.07742464542388916,
+ 0.11543981730937958,
+ 0.14148041605949402,
+ 0.15784293413162231,
+ 0.13180233538150787,
+ 0.08271478116512299,
+ 0.049702685326337814,
+ 0.027557924389839172,
+ 0.018125897273421288,
+ 0.011851548217236996,
+ 0.010252203792333603,
+ 0.007422595750540495,
+ 0.005372154992073774,
+ 0.0045109698548913,
+ 0.0036087757907807827,
+ 0.0035267581697553396,
+ 0.0018864056328311563,
+ 0.0024605290964245796,
+ 0.0022964938543736935,
+ 0.0018453967059031129,
+ 0.0010662291897460818,
+ 0.0009842115687206388,
+ 0.00045109697384759784,
+ 0.0007791675161570311,
+ 0.00020504408166743815,
+ 0.00020504408166743815,
+ 0.00020504408166743815,
+ 0.00012302644609007984,
+ 0.0,
+ 0.00012302644609007984,
+ 4.100881778867915e-05,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ 0.0,
+ ]
+ )
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
@@ -711,14 +830,19 @@ class TestCuda(TestCase):
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
- probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
+ probs = torch.randn(1000000, device="cuda").clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
import subprocess
+
try:
- p = subprocess.Popen([sys.executable, '-c', f"""\
+ p = subprocess.Popen(
+ [
+ sys.executable,
+ "-c",
+ f"""\
import sys
import torch
from torch import inf, nan
@@ -729,29 +853,37 @@ try:
sys.exit(-1) # Should not be reached
except RuntimeError as e:
sys.exit(-2)
-"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
+""",
+ ],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ )
out, err = p.communicate(timeout=10)
p.wait(timeout=10)
except subprocess.TimeoutExpired as e:
p.kill()
out, err = p.communicate()
expected_messages = [
- 'device-side assert triggered', # CUDA
- 'Assertion', # CUDA
- 'HSA_STATUS_ERROR_EXCEPTION', # ROCm
- 'Device-side assertion' # ROCm
+ "device-side assert triggered", # CUDA
+ "Assertion", # CUDA
+ "HSA_STATUS_ERROR_EXCEPTION", # ROCm
+ "Device-side assertion", # ROCm
]
self.assertTrue(any(msg in out or msg in err for msg in expected_messages))
@slowTest
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
def test_multinomial_invalid_probs_cuda(self):
- self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])
- self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])
- self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])
- self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])
+ self._spawn_test_multinomial_invalid_probs_cuda([1.0, -1.0, 1.0])
+ self._spawn_test_multinomial_invalid_probs_cuda([1.0, inf, 1.0])
+ self._spawn_test_multinomial_invalid_probs_cuda([1.0, -inf, 1.0])
+ self._spawn_test_multinomial_invalid_probs_cuda([1.0, 1.0, nan])
@staticmethod
def _mute_init():
@@ -762,7 +894,7 @@ except RuntimeError as e:
with ctx.Pool(1, initializer=self._mute_init) as pool:
errors = pool.map(method, [arg])
for e in errors:
- if 'device-side assert triggered' not in str(e):
+ if "device-side assert triggered" not in str(e):
self.fail(e)
@staticmethod
@@ -775,13 +907,18 @@ except RuntimeError as e:
return err
@slowTest
- @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
- don't support multiprocessing with spawn start method")
+ @unittest.skipIf(
+ NO_MULTIPROCESSING_SPAWN,
+ "Disabled for environments that \
+ don't support multiprocessing with spawn start method",
+ )
@skipIfRocm
def test_index_out_of_bounds_exception_cuda(self):
test_method = TestCuda._test_index_bounds_cuda
# Test in-bound access works fine
- self.assertEqual(test_method(1), "x[torch.tensor([1)]=tensor([1], device='cuda:0')")
+ self.assertEqual(
+ test_method(1), "x[torch.tensor([1)]=tensor([1], device='cuda:0')"
+ )
# Test that indexing out of bounds causes assert
self._spawn_method(test_method, 11)
@@ -789,8 +926,10 @@ except RuntimeError as e:
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
@serialTest()
def test_huge_index(self):
- src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
- idx = torch.randperm(src.shape[0], device='cuda')
+ src = torch.empty(15000000, 45, device="cuda", dtype=torch.long).random_(
+ 0, 2**22
+ )
+ idx = torch.randperm(src.shape[0], device="cuda")
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
@@ -819,20 +958,20 @@ except RuntimeError as e:
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (100000,)
- w = torch.randn(input_size, dtype=torch.double, device='cuda')
+ w = torch.randn(input_size, dtype=torch.double, device="cuda")
w_cpu = w.cpu()
# test shared memory impl
- t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
+ t = torch.randint(50, input_size, dtype=torch.int8, device="cuda")
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `CUDAHistogramMemoryType` in SummaryOps.cu
# 50000 * sizeof(int64_t) == 390 KiB, which should exceed smem of any known GPU
- t = torch.randint(50000, input_size, dtype=torch.int64, device='cuda')
+ t = torch.randint(50000, input_size, dtype=torch.int64, device="cuda")
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
- t = torch.zeros([10], dtype=torch.int32, device='cuda')
+ t = torch.zeros([10], dtype=torch.int32, device="cuda")
# 35488 * 65536 as int32 would cause overflow to negative value
# giving negative bin offset
t[0] = 35488
@@ -850,10 +989,12 @@ except RuntimeError as e:
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
def test_cuda_memory_leak_detection_propagates_errors(self):
- with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"The size of tensor a \(3\) must match"
+ ):
with self.assertLeaksNoCudaTensors():
- x = torch.randn(3, 1, device='cuda')
- y = torch.randn(2, 1, device='cuda')
+ x = torch.randn(3, 1, device="cuda")
+ y = torch.randn(2, 1, device="cuda")
z = x + y
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
@@ -911,7 +1052,7 @@ except RuntimeError as e:
# Tests using grads outside the backward() stream context
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
- x = torch.randn(5, 5, device='cuda', requires_grad=True)
+ x = torch.randn(5, 5, device="cuda", requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 2)
@@ -924,7 +1065,7 @@ except RuntimeError as e:
# Tests that using grads in the same stream context as backward()
# is safe regardless what streams bwd ops ran on
bwd_ambient_stream = torch.cuda.Stream()
- x = torch.randn(5, 5, device='cuda', requires_grad=True)
+ x = torch.randn(5, 5, device="cuda", requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 3)
@@ -970,14 +1111,16 @@ except RuntimeError as e:
for x_first_use_on_ambient in (True, False):
# the out_of_place=False, iters=1 case stresses if proper syncs are inserted
# when grads are initially None and stolen by backward ops.
- for out_of_place, iters in ((True, 1),
- (False, 1),
- (False, 5)):
+ for out_of_place, iters in ((True, 1), (False, 1), (False, 5)):
with torch.cuda.stream(stream):
- x = torch.randn(5, 5, device='cuda', requires_grad=True)
+ x = torch.randn(5, 5, device="cuda", requires_grad=True)
model = StreamModel().cuda()
- x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
- stream if x_first_use_on_ambient else model.stream0))
+ x.register_hook(
+ lambda grad: self.assertEqual(
+ torch.cuda.current_stream(),
+ stream if x_first_use_on_ambient else model.stream0,
+ )
+ )
for p in model.parameters():
self.assertTrue(p.grad is None)
for i in range(iters):
@@ -1071,7 +1214,11 @@ except RuntimeError as e:
stash.append(b.grad.clone())
# Use a hook on e to install the callback
- e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))
+ e.register_hook(
+ lambda grad: torch.autograd.Variable._execution_engine.queue_callback(
+ clone_leaf_grads
+ )
+ )
s2.wait_stream(s1)
with torch.cuda.stream(s2):
@@ -1081,11 +1228,19 @@ except RuntimeError as e:
self.assertEqual(stash[0], torch.full_like(a, 6))
self.assertEqual(stash[1], torch.full_like(a, 6))
- @unittest.skipIf(TEST_WITH_ROCM, "In ROCm, kernel asserts are disabled due to performance overhead")
+ @unittest.skipIf(
+ TEST_WITH_ROCM,
+ "In ROCm, kernel asserts are disabled due to performance overhead",
+ )
def test_fixed_cuda_assert_async(self):
- with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Boolean value of Tensor with no values is ambiguous"
+ ):
torch._assert_async(torch.tensor([], device="cuda"))
- with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Boolean value of Tensor with more than one value is ambiguous",
+ ):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
@@ -1102,42 +1257,83 @@ except RuntimeError as e:
]
import subprocess
+
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
- r = subprocess.call([sys.executable, '-c', f"""\
+ r = subprocess.call(
+ [
+ sys.executable,
+ "-c",
+ f"""\
import torch
{stmt}
torch.cuda.synchronize()
-"""])
+""",
+ ]
+ )
self.assertTrue(r != 0)
# Compare non-fused optimizer vs fused one as the fused one unscales gradients
# inside its cuda kernel unlike the other.
def test_grad_scaling_autocast_fused_optimizers(self):
- for optimizer_ctor, optimizer_kwargs, separate_unscale in list(product(
- (torch.optim.Adam, torch.optim.AdamW),
- ({"fused": True, "amsgrad": False}, {"fused": True, "amsgrad": True}),
- (False, True),
- )) + list(product(
- (torch.optim.SGD,),
- [
- {"momentum": 0.0, "dampening": d, "weight_decay": w, "nesterov": n, "fused": True}
- for d, w, n in product((0.0, 0.5), (0.0, 0.5), (False,))
- ] + [
- {"momentum": 0.5, "dampening": d, "weight_decay": w, "nesterov": n, "fused": True}
- for d, w, n in product((0.0,), (0.0, 0.5), (True, False))
- ],
- (False, True),
- )):
- with self.subTest(optim=optimizer_ctor, kwargs=optimizer_kwargs, separate_unscale=separate_unscale):
+ for optimizer_ctor, optimizer_kwargs, separate_unscale in list(
+ product(
+ (torch.optim.Adam, torch.optim.AdamW),
+ ({"fused": True, "amsgrad": False}, {"fused": True, "amsgrad": True}),
+ (False, True),
+ )
+ ) + list(
+ product(
+ (torch.optim.SGD,),
+ [
+ {
+ "momentum": 0.0,
+ "dampening": d,
+ "weight_decay": w,
+ "nesterov": n,
+ "fused": True,
+ }
+ for d, w, n in product((0.0, 0.5), (0.0, 0.5), (False,))
+ ]
+ + [
+ {
+ "momentum": 0.5,
+ "dampening": d,
+ "weight_decay": w,
+ "nesterov": n,
+ "fused": True,
+ }
+ for d, w, n in product((0.0,), (0.0, 0.5), (True, False))
+ ],
+ (False, True),
+ )
+ ):
+ with self.subTest(
+ optim=optimizer_ctor,
+ kwargs=optimizer_kwargs,
+ separate_unscale=separate_unscale,
+ ):
self._grad_scaling_autocast_fused_optimizers(
- optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs, separate_unscale=separate_unscale)
+ optimizer_ctor=optimizer_ctor,
+ optimizer_kwargs=optimizer_kwargs,
+ separate_unscale=separate_unscale,
+ )
- def _grad_scaling_autocast_fused_optimizers(self, optimizer_ctor, optimizer_kwargs, separate_unscale):
+ def _grad_scaling_autocast_fused_optimizers(
+ self, optimizer_ctor, optimizer_kwargs, separate_unscale
+ ):
(
- mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, _,
- ) = _create_scaling_case(optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs)
+ mod_control,
+ mod_scaling,
+ opt_control,
+ opt_scaling,
+ data,
+ loss_fn,
+ _,
+ ) = _create_scaling_case(
+ optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs
+ )
kwargs = deepcopy(optimizer_kwargs)
kwargs["fused"] = False
opt_control = optimizer_ctor(mod_control.parameters(), lr=1.0, **kwargs)
@@ -1146,7 +1342,7 @@ torch.cuda.synchronize()
for input, target in data:
opt_control.zero_grad()
- with torch.autocast('cuda'):
+ with torch.autocast("cuda"):
output_control = mod_control(input)
loss_control = loss_fn(output_control, target)
scaler.scale(loss_control).backward()
@@ -1154,7 +1350,7 @@ torch.cuda.synchronize()
scaler.update()
opt_scaling.zero_grad()
- with torch.autocast('cuda'):
+ with torch.autocast("cuda"):
output_scaling = mod_scaling(input)
loss_scaling = loss_fn(output_scaling, target)
scaler.scale(loss_scaling).backward()
@@ -1164,11 +1360,16 @@ torch.cuda.synchronize()
scaler.update()
self.assertEqual(loss_control, loss_scaling)
- for param_control, param_scaling in zip(mod_control.parameters(), mod_scaling.parameters()):
+ for param_control, param_scaling in zip(
+ mod_control.parameters(), mod_scaling.parameters()
+ ):
self.assertEqual(param_control.grad, param_scaling.grad)
self.assertEqual(param_control, param_scaling)
- state_control, state_scaling = opt_control.state[param_control], opt_scaling.state[param_scaling]
+ state_control, state_scaling = (
+ opt_control.state[param_control],
+ opt_scaling.state[param_scaling],
+ )
for k in state_control:
actual = state_scaling[k]
@@ -1186,7 +1387,7 @@ torch.cuda.synchronize()
trials = 3
test_iters = 100
- weight = torch.ones((size, size), device='cuda')
+ weight = torch.ones((size, size), device="cuda")
results = {}
barrier = threading.Barrier(num_threads)
@@ -1214,10 +1415,11 @@ torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
- results[t] = torch.ones((size, size), device='cuda')
+ results[t] = torch.ones((size, size), device="cuda")
- threads = [threading.Thread(target=_worker,
- args=(t,)) for t in range(num_threads)]
+ threads = [
+ threading.Thread(target=_worker, args=(t,)) for t in range(num_threads)
+ ]
for thread in threads:
thread.start()
@@ -1228,14 +1430,14 @@ torch.cuda.synchronize()
self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
- @unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(IS_WINDOWS, "Test is flaky on Windows (see issue 57401)")
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
- weight = torch.ones((1, 1, 2, 2), device='cuda')
+ weight = torch.ones((1, 1, 2, 2), device="cuda")
results = {}
@@ -1245,6 +1447,7 @@ torch.cuda.synchronize()
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
+
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
@@ -1263,16 +1466,20 @@ torch.cuda.synchronize()
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS is its own stream,
# but now races with its convolution.
- results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
+ results[t] = torch.nn.functional.conv2d(
+ results[t], weight, padding=0
+ )
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
- results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
+ results[t] = torch.ones((1, 1, 2048, 2048), device="cuda")
- threads = [threading.Thread(target=_worker,
- args=(t,)) for t in range(num_threads)]
+ threads = [
+ threading.Thread(target=_worker, args=(t,))
+ for t in range(num_threads)
+ ]
for thread in threads:
thread.start()
@@ -1280,8 +1487,10 @@ torch.cuda.synchronize()
thread.join()
for t in range(num_threads):
- self.assertEqual(results[t].sum().item(),
- (2048 - test_iters) * (2048 - test_iters))
+ self.assertEqual(
+ results[t].sum().item(),
+ (2048 - test_iters) * (2048 - test_iters),
+ )
def test_cusparse_multiple_threads_same_device(self):
size = 1024
@@ -1290,9 +1499,9 @@ torch.cuda.synchronize()
test_iters = 500
def ones_sparse(size):
- a = torch.arange(size, device='cuda')
+ a = torch.arange(size, device="cuda")
indices = torch.cartesian_prod(a, a).t()
- values = torch.ones(size * size, device='cuda')
+ values = torch.ones(size * size, device="cuda")
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
@@ -1323,10 +1532,11 @@ torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
- results[t] = torch.ones((size, size), device='cuda')
+ results[t] = torch.ones((size, size), device="cuda")
- threads = [threading.Thread(target=_worker,
- args=(t,)) for t in range(num_threads)]
+ threads = [
+ threading.Thread(target=_worker, args=(t,)) for t in range(num_threads)
+ ]
for thread in threads:
thread.start()
@@ -1336,7 +1546,9 @@ torch.cuda.synchronize()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
- def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
+ def _run_autocast_outofplace(
+ self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None
+ ):
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
@@ -1350,7 +1562,7 @@ torch.cuda.synchronize()
add_kwargs = {}
fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
self.assertFalse(torch.is_autocast_enabled())
- with torch.autocast('cuda', dtype=fast_dtype):
+ with torch.autocast("cuda", dtype=fast_dtype):
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
@@ -1360,18 +1572,24 @@ torch.cuda.synchronize()
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
- self.assertTrue(out_type == output.dtype,
- f"autocast for torch.{op} produced {output.dtype}, should produce {out_type}")
+ self.assertTrue(
+ out_type == output.dtype,
+ f"autocast for torch.{op} produced {output.dtype}, should produce {out_type}",
+ )
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
- self.assertTrue(out_type == output_method.dtype,
- f"autocast for torch.{op} produced {output_method.dtype}, should produce torch.{out_type}")
+ self.assertTrue(
+ out_type == output_method.dtype,
+ f"autocast for torch.{op} produced {output_method.dtype}, should produce torch.{out_type}",
+ )
- self.assertTrue((output is not None) or (output_method is not None),
- f"{op} not found as an attribute on either Tensor or the requested module {module}")
+ self.assertTrue(
+ (output is not None) or (output_method is not None),
+ f"{op} not found as an attribute on either Tensor or the requested module {module}",
+ )
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
@@ -1387,18 +1605,24 @@ torch.cuda.synchronize()
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
- self.assertTrue(comparison, f"torch.{op} result did not match Tensor.{op} result")
+ self.assertTrue(
+ comparison, f"torch.{op} result did not match Tensor.{op} result"
+ )
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
output_to_compare = output if output is not None else output_method
- with torch.autocast('cuda', enabled=False):
+ with torch.autocast("cuda", enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
- control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
+ control = getattr(module, op)(
+ *cast(args, run_as_type), **add_kwargs
+ )
else:
- control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
+ control = getattr(args[0].to(run_as_type), op)(
+ *cast(args[1:], run_as_type), **add_kwargs
+ )
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, f"torch.{op} result did not match control")
@@ -1411,7 +1635,7 @@ torch.cuda.synchronize()
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
@@ -1422,7 +1646,7 @@ torch.cuda.synchronize()
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_torch_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
@@ -1430,89 +1654,109 @@ torch.cuda.synchronize()
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
- should_error_from_cudnn = 'cudnn' in op and \
- ('TORCH_CUDNN_V8_API_DISABLED' in os.environ and
- int(os.environ['TORCH_CUDNN_V8_API_DISABLED']) or
- torch.cuda.get_device_capability() < (8, 0))
+ should_error_from_cudnn = "cudnn" in op and (
+ "TORCH_CUDNN_V8_API_DISABLED" in os.environ
+ and int(os.environ["TORCH_CUDNN_V8_API_DISABLED"])
+ or torch.cuda.get_device_capability() < (8, 0)
+ )
should_error_from_not_implemented = should_error_from_cudnn
if not skip_test:
if should_error_from_not_implemented:
- with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
+ with self.assertRaises(
+ RuntimeError,
+ msg=str(op) + " should not be supported for bfloat16!",
+ ):
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
- with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
+ with self.assertRaisesRegex(
+ RuntimeError, "Device does not support bfloat16"
+ ):
self._run_autocast_outofplace(op, args, torch.bfloat16)
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
- self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
+ self._run_autocast_outofplace(
+ op, args, torch.float32, add_kwargs=maybe_kwargs
+ )
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
- self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
+ self._run_autocast_outofplace(
+ op, args, torch.float16, module=torch._C._nn
+ )
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_nn_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
if torch.cuda.is_bf16_supported():
- self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
+ self._run_autocast_outofplace(
+ op, args, torch.bfloat16, module=torch._C._nn
+ )
else:
- with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
- self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
-
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ with self.assertRaisesRegex(
+ RuntimeError, "Device does not support bfloat16"
+ ):
+ self._run_autocast_outofplace(
+ op, args, torch.bfloat16, module=torch._C._nn
+ )
+
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
- self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
+ self._run_autocast_outofplace(
+ op, args, torch.float16, module=torch._C._linalg
+ )
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
- self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
+ self._run_autocast_outofplace(
+ op, args, torch.float32, module=None, out_type=out_type
+ )
def test_autocast_banned(self):
- with torch.autocast('cuda'):
+ with torch.autocast("cuda"):
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
- with torch.autocast('cuda'):
+ with torch.autocast("cuda"):
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
@@ -1523,24 +1767,26 @@ torch.cuda.synchronize()
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
- with torch.autocast('cuda', enabled=False):
+ with torch.autocast("cuda", enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
- self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
+ self.assertTrue(
+ torch.mm(a_ignore, b_ignore).dtype is type_no_autocast
+ )
# Tests if CastPolicy::fp32 ops ignore double and int
- with torch.autocast('cuda', enabled=False):
+ with torch.autocast("cuda", enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
- with torch.autocast('cuda', enabled=False):
+ with torch.autocast("cuda", enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
- with torch.autocast('cuda', enabled=False):
+ with torch.autocast("cuda", enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
@@ -1602,9 +1848,18 @@ torch.cuda.synchronize()
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
- y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
+ y = (
+ 0,
+ {
+ 0: torch.randn(
+ (8, 8), device="cuda", dtype=torch.float16, requires_grad=False
+ )
+ },
+ )
- with torch.autocast('cuda', ):
+ with torch.autocast(
+ "cuda",
+ ):
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
@@ -1632,14 +1887,14 @@ torch.cuda.synchronize()
model = Model()
model_jit_script = torch.jit.script(model)
- with torch.autocast('cuda', enabled=True):
+ with torch.autocast("cuda", enabled=True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
- @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
+ @unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
@@ -1648,10 +1903,27 @@ torch.cuda.synchronize()
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
- for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
- input_dtype, hidden_dtype, weight_dtype) in \
- product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
- dtypes, dtypes, dtypes):
+ for (
+ cls,
+ num_layers,
+ bias,
+ input_layout,
+ bidirectional,
+ try_nonpreflattened_weights,
+ input_dtype,
+ hidden_dtype,
+ weight_dtype,
+ ) in product(
+ clses,
+ (1, 2),
+ (True, False),
+ input_layouts,
+ (True, False),
+ (True, False),
+ dtypes,
+ dtypes,
+ dtypes,
+ ):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
@@ -1660,44 +1932,72 @@ torch.cuda.synchronize()
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
- x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
- device="cuda", dtype=input_dtype),
- lengths=(3, 2, 1, 3),
- enforce_sorted=False)
-
- rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
- bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
+ x = torch.nn.utils.rnn.pack_padded_sequence(
+ torch.randn((T, B, F), device="cuda", dtype=input_dtype),
+ lengths=(3, 2, 1, 3),
+ enforce_sorted=False,
+ )
+
+ rnn = (
+ getattr(torch.nn, cls)(
+ F,
+ H,
+ num_layers=num_layers,
+ bidirectional=bidirectional,
+ bias=bias,
+ batch_first=batch_first,
+ )
+ .cuda()
+ .to(dtype=weight_dtype)
+ )
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
- h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
- device="cuda", dtype=hidden_dtype)
+ h = torch.randn(
+ (num_layers * (2 if bidirectional else 1), B, H),
+ device="cuda",
+ dtype=hidden_dtype,
+ )
if cls == "LSTM":
- c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
- device="cuda", dtype=hidden_dtype)
+ c = torch.randn(
+ (num_layers * (2 if bidirectional else 1), B, H),
+ device="cuda",
+ dtype=hidden_dtype,
+ )
h = (h, c)
- with torch.autocast('cuda', ):
+ with torch.autocast(
+ "cuda",
+ ):
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
# Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. This check can't guarantee
# at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
# occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
- self.assertEqual(out.grad_fn.name(), "MiopenRnnBackward0" if torch.version.hip else "CudnnRnnBackward0")
+ self.assertEqual(
+ out.grad_fn.name(),
+ "MiopenRnnBackward0" if torch.version.hip else "CudnnRnnBackward0",
+ )
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
- out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
+ out_control, h_out_control = rnn.to(dtype=torch.float16)(
+ x.half(), (h[0].half(), h[1].half())
+ )
else:
- out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
- out_control = out_control.data if input_layout == "packed" else out_control
+ out_control, h_out_control = rnn.to(dtype=torch.float16)(
+ x.half(), h.half()
+ )
+ out_control = (
+ out_control.data if input_layout == "packed" else out_control
+ )
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
@@ -1706,7 +2006,10 @@ torch.cuda.synchronize()
self.assertEqual(out, out_control)
if cls == "LSTM":
- self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
+ self.assertTrue(
+ h_out[0].dtype is torch.float16
+ and h_out[1].dtype is torch.float16
+ )
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
@@ -1720,10 +2023,12 @@ torch.cuda.synchronize()
# Test is used to check, if autocast recaches the same parameters
# when executed in a `torch.no_grad()` block.
- linear = torch.nn.Linear(10, 10).to('cuda')
- data = torch.randn(1, 10, device='cuda')
+ linear = torch.nn.Linear(10, 10).to("cuda")
+ data = torch.randn(1, 10, device="cuda")
- with torch.autocast('cuda', ):
+ with torch.autocast(
+ "cuda",
+ ):
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
@@ -1732,11 +2037,15 @@ torch.cuda.synchronize()
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
- model = torch.nn.Sequential(torch.nn.Linear(8, 8),
- torch.nn.Linear(8, 8),
- torch.nn.Linear(8, 8)).cuda()
- input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
- with torch.autocast('cuda', ):
+ model = torch.nn.Sequential(
+ torch.nn.Linear(8, 8), torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)
+ ).cuda()
+ input = torch.rand(
+ (8, 8), device="cuda", dtype=torch.float16, requires_grad=True
+ )
+ with torch.autocast(
+ "cuda",
+ ):
output = checkpoint_sequential(model, 2, input, use_reentrant=True)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
@@ -1746,7 +2055,7 @@ torch.cuda.synchronize()
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
@serialTest()
def test_max_large_axis(self):
- x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
+ x = torch.zeros(2**32, device="cuda", dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
@@ -1768,7 +2077,9 @@ torch.cuda.synchronize()
self.assertTrue(torch.cuda.is_current_stream_capturing())
g.capture_end()
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
@@ -1785,13 +2096,12 @@ torch.cuda.synchronize()
g.replay()
- self.assertTrue(b.sum().item() == 11000.)
+ self.assertTrue(b.sum().item() == 11000.0)
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_graphsafe_set_get_rng_state(self):
-
# Define a function to create generator states, with optional graph registration
def create_states(generator):
"""Initializes generator states and registers them with a CUDA graph if provided."""
@@ -1944,7 +2254,9 @@ torch.cuda.synchronize()
test(3, 2)
test(10, 20)
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_capture_reset_recapture(self):
s = torch.cuda.Stream()
@@ -1961,7 +2273,7 @@ torch.cuda.synchronize()
g.replay()
- self.assertTrue(b.sum().item() == 11000.)
+ self.assertTrue(b.sum().item() == 11000.0)
g.reset()
@@ -1974,12 +2286,14 @@ torch.cuda.synchronize()
torch.cuda.current_stream().wait_stream(s)
g.replay()
- self.assertTrue(b.sum().item() == 22000.)
+ self.assertTrue(b.sum().item() == 22000.0)
g.reset()
del g
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_error(self):
# We need to run this test in a separate thread as the error we trigger
# puts the cuda context in a bad state
@@ -1998,20 +2312,28 @@ exit(2)
"""
try:
a = subprocess.check_output(
- [sys.executable, '-c', script],
+ [sys.executable, "-c", script],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
- cwd=os.path.dirname(os.path.realpath(__file__)),)
+ cwd=os.path.dirname(os.path.realpath(__file__)),
+ )
except subprocess.CalledProcessError as e:
if e.returncode == 1:
- self.assertTrue(False, "Error raise by starting capture without a stream is not the expected one")
+ self.assertTrue(
+ False,
+ "Error raise by starting capture without a stream is not the expected one",
+ )
elif e.returncode == 2:
- self.assertTrue(False, "Error raised by starting capture without a stream was not caught")
+ self.assertTrue(
+ False,
+ "Error raised by starting capture without a stream was not caught",
+ )
- @unittest.skipIf((not TEST_CUDA) or
- TEST_WITH_ROCM or
- int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
+ @unittest.skipIf(
+ (not TEST_CUDA) or TEST_WITH_ROCM or int(torch.version.cuda.split(".")[0]) < 11,
+ "CUDA >= 11.0 required for graphs",
+ )
def test_graph_warn_if_has_zero_nodes(self):
with warnings.catch_warnings(record=True) as caught:
g = torch.cuda.CUDAGraph()
@@ -2019,22 +2341,29 @@ exit(2)
with torch.cuda.stream(s):
g.capture_begin()
g.capture_end()
- self.assertTrue(any("The CUDA Graph is empty" in str(w.message) for w in caught))
+ self.assertTrue(
+ any("The CUDA Graph is empty" in str(w.message) for w in caught)
+ )
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_capture_oom(self):
- oom_regex = "would exceed allowed memory" if TEST_CUDAMALLOCASYNC else \
- "out of memory"
+ oom_regex = (
+ "would exceed allowed memory" if TEST_CUDAMALLOCASYNC else "out of memory"
+ )
with self.assertRaisesRegex(RuntimeError, oom_regex):
with torch.cuda.graph(torch.cuda.CUDAGraph()):
- torch.zeros(2 ** 40, device="cuda")
+ torch.zeros(2**40, device="cuda")
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
@serialTest()
def test_repeat_graph_capture_cublas_workspace_memory(self):
(x, y, z) = 1024, 512, 64
- a = torch.rand((x, y), device='cuda')
- b = torch.rand((y, z), device='cuda')
+ a = torch.rand((x, y), device="cuda")
+ b = torch.rand((y, z), device="cuda")
# warmup
torch.mm(a, b)
@@ -2053,10 +2382,14 @@ exit(2)
self.assertFalse(used_gb_before + 0.1 < used_gb_after)
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_rng_functional(self):
- ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
- (torch.nn.functional.rrelu, {"training": True}),)
+ ops_with_kwargs = (
+ (torch.nn.functional.dropout, {"p": 0.1}),
+ (torch.nn.functional.rrelu, {"training": True}),
+ )
size = 10000
def run(op, kwargs):
@@ -2137,42 +2470,48 @@ exit(2)
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
- torch_with_args = (("bernoulli", (input.clone(),), {}),
- # multinomial uses some uncapturable CUDA calls.
- # TODO: reenable multinomial tests if/when the implementation is capturable.
- # ("multinomial", (input.clone(), size, True), {}),
- # ("multinomial", (input.clone(), size // 2, False), {}),
- # TODO: reenable normal test, where std is a device
- # tensor, when graph test failures are fixed
- # ("normal", (input.clone() + 1, input.clone()), {}),
- ("normal", (input.clone() + 1, 1.0), {}),
- ("poisson", (input.clone(),), {}),
- ("rand", (size,), {"device": "cuda", "dtype": torch.float}),
- ("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
- ("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
+ torch_with_args = (
+ ("bernoulli", (input.clone(),), {}),
+ # multinomial uses some uncapturable CUDA calls.
+ # TODO: reenable multinomial tests if/when the implementation is capturable.
+ # ("multinomial", (input.clone(), size, True), {}),
+ # ("multinomial", (input.clone(), size // 2, False), {}),
+ # TODO: reenable normal test, where std is a device
+ # tensor, when graph test failures are fixed
+ # ("normal", (input.clone() + 1, input.clone()), {}),
+ ("normal", (input.clone() + 1, 1.0), {}),
+ ("poisson", (input.clone(),), {}),
+ ("rand", (size,), {"device": "cuda", "dtype": torch.float}),
+ ("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
+ ("randn", (size,), {"device": "cuda", "dtype": torch.float}),
+ )
# Tensor methods to test with sample args (tuple)
- tensor_with_args = (("bernoulli_", (input.clone(),)),
- ("cauchy_", ()),
- ("exponential_", ()),
- ("geometric_", (0.3,)),
- ("log_normal_", ()),
- ("normal_", ()),
- ("random_", ()),
- ("uniform_", ()),)
+ tensor_with_args = (
+ ("bernoulli_", (input.clone(),)),
+ ("cauchy_", ()),
+ ("exponential_", ()),
+ ("geometric_", (0.3,)),
+ ("log_normal_", ()),
+ ("normal_", ()),
+ ("random_", ()),
+ ("uniform_", ()),
+ )
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
- if (module == "torch"):
+ if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
@@ -2191,7 +2530,7 @@ exit(2)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
- if (module == "torch"):
+ if module == "torch":
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
@@ -2224,7 +2563,7 @@ exit(2)
torch.cuda.manual_seed(seed)
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
- if (module == "torch"):
+ if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
@@ -2234,7 +2573,7 @@ exit(2)
getattr(control2, op)(*args)
torch.cuda.manual_seed(seed)
- if (module == "torch"):
+ if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
getattr(dummy, op)(*args)
@@ -2264,7 +2603,9 @@ exit(2)
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_two_successive(self):
torch.cuda.empty_cache()
@@ -2286,7 +2627,11 @@ exit(2)
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
- g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
+ g0_args = (
+ (torch.cuda.graph_pool_handle(),)
+ if share_mem == "via graph_pool_handle()"
+ else ()
+ )
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
@@ -2317,24 +2662,35 @@ exit(2)
if not TEST_CUDAMALLOCASYNC:
# These stat checks are specific to the native allocator.
if share_mem != "Don't share":
- self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"], # noqa: F821
- kSmallBuffer)
+ self.assertEqual(
+ reserved_no_sharing # noqa: F821
+ - torch.cuda.memory_stats()["reserved_bytes.all.current"],
+ kSmallBuffer,
+ )
else:
- reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
+ reserved_no_sharing = torch.cuda.memory_stats()[
+ "reserved_bytes.all.current"
+ ]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
- @unittest.skipIf((not TEST_CUDA_GRAPH) or
- IS_WINDOWS or # appears to still be broken on Windows as of 11.4+
- (torch.version.cuda and
- int(torch.version.cuda.split(".")[0]) == 11 and
- int(torch.version.cuda.split(".")[1]) < 4),
- "Graph bindings disallow concurrent replay for CUDA < 11.4, see " +
- "https://github.com/pytorch/pytorch/pull/57556")
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ (not TEST_CUDA_GRAPH)
+ or IS_WINDOWS
+ or ( # appears to still be broken on Windows as of 11.4+
+ torch.version.cuda
+ and int(torch.version.cuda.split(".")[0]) == 11
+ and int(torch.version.cuda.split(".")[1]) < 4
+ ),
+ "Graph bindings disallow concurrent replay for CUDA < 11.4, see "
+ + "https://github.com/pytorch/pytorch/pull/57556",
+ )
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
@@ -2358,7 +2714,11 @@ exit(2)
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
- g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
+ g0_args = (
+ (torch.cuda.graph_pool_handle(),)
+ if share_mem == "via graph_pool_handle()"
+ else ()
+ )
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
@@ -2403,7 +2763,9 @@ exit(2)
torch.cuda.synchronize()
torch.cuda.empty_cache()
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_three_successive(self):
torch.cuda.empty_cache()
@@ -2420,7 +2782,11 @@ exit(2)
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
- g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
+ g0_args = (
+ (torch.cuda.graph_pool_handle(),)
+ if share_mem == "via graph_pool_handle()"
+ else ()
+ )
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
@@ -2452,10 +2818,14 @@ exit(2)
g2.replay()
g1.replay()
- expect_corruption = (not TEST_CUDAMALLOCASYNC) and (share_mem != "Don't share")
+ expect_corruption = (not TEST_CUDAMALLOCASYNC) and (
+ share_mem != "Don't share"
+ )
# If we used the native allocator and shared mempools, g2's capture should have reused c's memory for f.
# We replayed g2 then g1, so we expect g1's captured "e = c + 3" mistakenly filled e with "f's vals + 3".
- self.assertEqual(e.sum().item(), size * (7 + 3) if expect_corruption else size * 5)
+ self.assertEqual(
+ e.sum().item(), size * (7 + 3) if expect_corruption else size * 5
+ )
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
@@ -2463,8 +2833,10 @@ exit(2)
torch.cuda.synchronize()
torch.cuda.empty_cache()
- @unittest.skipIf((not TEST_CUDA_GRAPH) or
- TEST_CUDAMALLOCASYNC , "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ (not TEST_CUDA_GRAPH) or TEST_CUDAMALLOCASYNC,
+ "CUDA >= 11.0 or ROCM >= 5.3 required for graphs",
+ )
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
@@ -2475,19 +2847,31 @@ exit(2)
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
- cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
- (kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
- ((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
- ((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
- ((kMinLargeAlloc + 512) // elem, 3,
- 3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
- kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
- "large_pool"),)
-
- stats_to_check = ("segment.",
- "reserved_bytes.",
- "active.",
- "active_bytes.")
+ cases = (
+ (512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
+ (kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
+ ((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
+ (
+ (kMinLargeAlloc - 512) // elem,
+ 2,
+ 2 * kLargeBuffer,
+ kLargeBuffer,
+ "large_pool",
+ ),
+ (
+ (kMinLargeAlloc + 512) // elem,
+ 3,
+ 3
+ * (
+ kRoundLarge
+ * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)
+ ),
+ kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
+ "large_pool",
+ ),
+ )
+
+ stats_to_check = ("segment.", "reserved_bytes.", "active.", "active_bytes.")
gc.collect()
torch.cuda.empty_cache()
@@ -2531,17 +2915,24 @@ exit(2)
postcapture_stats = torch.cuda.memory_stats()
- expecteds = (delta_cudaMallocs,
- delta_cudaMalloc_bytes,
- delta_active_blocks,
- delta_active_bytes)
+ expecteds = (
+ delta_cudaMallocs,
+ delta_cudaMalloc_bytes,
+ delta_active_blocks,
+ delta_active_bytes,
+ )
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
- self.assertEqual(current, expected, "Pre to post capture delta of " +
- stat + f" = {current}, expected = {expected}, numel = {numel}")
+ self.assertEqual(
+ current,
+ expected,
+ "Pre to post capture delta of "
+ + stat
+ + f" = {current}, expected = {expected}, numel = {numel}",
+ )
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
@@ -2561,8 +2952,13 @@ exit(2)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
- self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
- stat + f" = {current}, expected = {expected}, numel = {numel}")
+ self.assertEqual(
+ current,
+ expected,
+ "Pre capture to post graph delete delta of "
+ + stat
+ + f" = {current}, expected = {expected}, numel = {numel}",
+ )
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
@@ -2571,7 +2967,9 @@ exit(2)
torch.cuda.synchronize()
torch.cuda.empty_cache()
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
@@ -2588,7 +2986,7 @@ exit(2)
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
- potential_problem.fill_(1.)
+ potential_problem.fill_(1.0)
del potential_problem
with torch.cuda.stream(s1):
@@ -2601,7 +2999,7 @@ exit(2)
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
- b.fill_(1.)
+ b.fill_(1.0)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
@@ -2612,7 +3010,9 @@ exit(2)
c = torch.zeros((3,), device="cuda")
@skipIfRocm
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@@ -2642,7 +3042,9 @@ exit(2)
y = model(x)
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
@parametrize(
"with_amp,cache_enabled,allow_unused_input",
[
@@ -2766,7 +3168,9 @@ exit(2)
model_graphed({"x": real_inputs[0]}), model_control({"x": real_inputs[0]})
)
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_make_graphed_callables_same_pool(self):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
@@ -2786,7 +3190,9 @@ exit(2)
for model in models:
x = torch.randn([64, 32], device="cuda")
graphed_model = deepcopy(model)
- graphed_model = torch.cuda.make_graphed_callables(graphed_model, (x,), pool=mempool)
+ graphed_model = torch.cuda.make_graphed_callables(
+ graphed_model, (x,), pool=mempool
+ )
graphed_models.append(graphed_model)
for model, graphed_model in zip(models, graphed_models):
@@ -2806,15 +3212,20 @@ exit(2)
self.assertNotEqual(p.data_ptr(), pg.data_ptr())
self.assertNotEqual(p.grad.data_ptr, pg.grad.data_ptr)
- def _test_graphed_optimizer(self, steps_warmup, steps_train, optimizer_ctor, kwargs):
+ def _test_graphed_optimizer(
+ self, steps_warmup, steps_train, optimizer_ctor, kwargs
+ ):
for actually_do_graphs in (True, False):
- params = [
- torch.randn((i + 5, i + 5), device="cuda") for i in range(2)
- ] + [torch.randn((), device="cuda")]
+ params = [torch.randn((i + 5, i + 5), device="cuda") for i in range(2)] + [
+ torch.randn((), device="cuda")
+ ]
params_control = [p.clone().requires_grad_() for p in params]
params_graphed = [p.clone().requires_grad_() for p in params]
- grads = [[torch.randn_like(p) for p in params] for _ in range(steps_warmup + steps_train)]
+ grads = [
+ [torch.randn_like(p) for p in params]
+ for _ in range(steps_warmup + steps_train)
+ ]
# Control (capturable=False)
@@ -2854,57 +3265,155 @@ exit(2)
for p_control, p_graphed in zip(params_control, params_graphed):
self.assertEqual(p_control, p_graphed)
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_optims(self):
# Needs generalization if we want to extend this test to non-Adam-like optimizers.
- cases = [
- (optimizer_ctor, {"lr": 0.1, "betas": (0.8, 0.7), "foreach": foreach,
- "decoupled_weight_decay": decoupled_weight_decay, "weight_decay": weight_decay})
- for optimizer_ctor, foreach, decoupled_weight_decay, weight_decay in product(
- (torch.optim.NAdam, torch.optim.RAdam,), (False, True,), (False, True,), (0.0, 0.1,))
- ] + [
- (torch.optim.Rprop, {"lr": 0.1, "foreach": foreach, "maximize": maximize})
- for foreach, maximize in product((False, True,), (False, True,))
- ] + [
- (optimizer_ctor, {"lr": 0.1, "betas": (0.8, 0.7), "foreach": foreach, "amsgrad": amsgrad})
- for optimizer_ctor, foreach, amsgrad in product(
- (torch.optim.Adam, torch.optim.AdamW), (False, True), (False, True),)
- ] + [
- (optimizer_ctor, {"lr": 0.1, "betas": (0.8, 0.7), "fused": True, "amsgrad": amsgrad})
- for optimizer_ctor, amsgrad in product((torch.optim.Adam, torch.optim.AdamW), (False, True))
- ] + [
- (optimizer_ctor, {"lr": 0.1, "foreach": foreach, "maximize": maximize, "weight_decay": weight_decay})
- for optimizer_ctor, foreach, maximize, weight_decay in product((torch.optim.Adamax, torch.optim.ASGD,
- torch.optim.Adadelta, torch.optim.RMSprop),
- (False, True), (False, True), (0, 0.1))
- ]
+ cases = (
+ [
+ (
+ optimizer_ctor,
+ {
+ "lr": 0.1,
+ "betas": (0.8, 0.7),
+ "foreach": foreach,
+ "decoupled_weight_decay": decoupled_weight_decay,
+ "weight_decay": weight_decay,
+ },
+ )
+ for optimizer_ctor, foreach, decoupled_weight_decay, weight_decay in product(
+ (
+ torch.optim.NAdam,
+ torch.optim.RAdam,
+ ),
+ (
+ False,
+ True,
+ ),
+ (
+ False,
+ True,
+ ),
+ (
+ 0.0,
+ 0.1,
+ ),
+ )
+ ]
+ + [
+ (
+ torch.optim.Rprop,
+ {"lr": 0.1, "foreach": foreach, "maximize": maximize},
+ )
+ for foreach, maximize in product(
+ (
+ False,
+ True,
+ ),
+ (
+ False,
+ True,
+ ),
+ )
+ ]
+ + [
+ (
+ optimizer_ctor,
+ {
+ "lr": 0.1,
+ "betas": (0.8, 0.7),
+ "foreach": foreach,
+ "amsgrad": amsgrad,
+ },
+ )
+ for optimizer_ctor, foreach, amsgrad in product(
+ (torch.optim.Adam, torch.optim.AdamW),
+ (False, True),
+ (False, True),
+ )
+ ]
+ + [
+ (
+ optimizer_ctor,
+ {"lr": 0.1, "betas": (0.8, 0.7), "fused": True, "amsgrad": amsgrad},
+ )
+ for optimizer_ctor, amsgrad in product(
+ (torch.optim.Adam, torch.optim.AdamW), (False, True)
+ )
+ ]
+ + [
+ (
+ optimizer_ctor,
+ {
+ "lr": 0.1,
+ "foreach": foreach,
+ "maximize": maximize,
+ "weight_decay": weight_decay,
+ },
+ )
+ for optimizer_ctor, foreach, maximize, weight_decay in product(
+ (
+ torch.optim.Adamax,
+ torch.optim.ASGD,
+ torch.optim.Adadelta,
+ torch.optim.RMSprop,
+ ),
+ (False, True),
+ (False, True),
+ (0, 0.1),
+ )
+ ]
+ )
for optimizer_ctor, kwargs in cases:
with self.subTest(optimizer_ctor=optimizer_ctor, kwargs=kwargs):
self._test_graphed_optimizer(3, 2, optimizer_ctor, kwargs)
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_optims_with_explicitly_capturable_param_groups(self):
# mimicking `_test_graphed_optimizer` maladroitly to pass two param_groups to optimizer.__init__
n_warmup, n_replay = 3, 2
- for optimizer, second_param_group_capturable in product((torch.optim.Adam, torch.optim.AdamW,
- torch.optim.ASGD, torch.optim.Adamax,
- torch.optim.NAdam, torch.optim.RAdam,
- torch.optim.Adadelta, torch.optim.RMSprop,
- torch.optim.Rprop), (True, False)):
- ref_p1, param1 = (torch.nn.Parameter(torch.ones(1, device="cuda")) for _ in range(2))
- ref_p2, param2 = (torch.nn.Parameter(torch.ones(1, device="cuda")) for _ in range(2))
- grads1, grads2 = ([torch.randn_like(param1) for _ in range(n_warmup + n_replay)] for _ in range(2))
- ref_grads1, ref_grads2 = ([t.clone() for t in tensors] for tensors in (grads1, grads2))
+ for optimizer, second_param_group_capturable in product(
+ (
+ torch.optim.Adam,
+ torch.optim.AdamW,
+ torch.optim.ASGD,
+ torch.optim.Adamax,
+ torch.optim.NAdam,
+ torch.optim.RAdam,
+ torch.optim.Adadelta,
+ torch.optim.RMSprop,
+ torch.optim.Rprop,
+ ),
+ (True, False),
+ ):
+ ref_p1, param1 = (
+ torch.nn.Parameter(torch.ones(1, device="cuda")) for _ in range(2)
+ )
+ ref_p2, param2 = (
+ torch.nn.Parameter(torch.ones(1, device="cuda")) for _ in range(2)
+ )
+ grads1, grads2 = (
+ [torch.randn_like(param1) for _ in range(n_warmup + n_replay)]
+ for _ in range(2)
+ )
+ ref_grads1, ref_grads2 = (
+ [t.clone() for t in tensors] for tensors in (grads1, grads2)
+ )
params = [
{"params": [param1], "capturable": True},
{"params": [param2], "capturable": second_param_group_capturable},
]
opt = optimizer(params)
- opt_ = optimizer([
- {"params": [ref_p1], "capturable": False},
- {"params": [ref_p2], "capturable": False},
- ])
+ opt_ = optimizer(
+ [
+ {"params": [ref_p1], "capturable": False},
+ {"params": [ref_p2], "capturable": False},
+ ]
+ )
for i in range(n_warmup + n_replay):
ref_p1.grad = ref_grads1[i]
@@ -2932,21 +3441,45 @@ exit(2)
self.assertEqual(ref_p1, param1)
self.assertEqual(ref_p2, param2)
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_graph_scaling_fused_optimizers(self):
cases = [
- (optimizer_ctor, {"lr": 0.1, "betas": (0.8, 0.7), "fused": True, "amsgrad": amsgrad})
- for optimizer_ctor, amsgrad in product((torch.optim.Adam, torch.optim.AdamW), (False, True))
- ] + list(product(
- (torch.optim.SGD,),
- [
- {"lr": 0.1, "momentum": 0.0, "dampening": d, "weight_decay": w, "nesterov": n, "fused": True}
- for d, w, n in product((0.0, 0.5), (0.0, 0.5), (False,))
- ] + [
- {"lr": 0.1, "momentum": 0.5, "dampening": d, "weight_decay": w, "nesterov": n, "fused": True}
- for d, w, n in product((0.0,), (0.0, 0.5), (True, False))
- ],
- ))
+ (
+ optimizer_ctor,
+ {"lr": 0.1, "betas": (0.8, 0.7), "fused": True, "amsgrad": amsgrad},
+ )
+ for optimizer_ctor, amsgrad in product(
+ (torch.optim.Adam, torch.optim.AdamW), (False, True)
+ )
+ ] + list(
+ product(
+ (torch.optim.SGD,),
+ [
+ {
+ "lr": 0.1,
+ "momentum": 0.0,
+ "dampening": d,
+ "weight_decay": w,
+ "nesterov": n,
+ "fused": True,
+ }
+ for d, w, n in product((0.0, 0.5), (0.0, 0.5), (False,))
+ ]
+ + [
+ {
+ "lr": 0.1,
+ "momentum": 0.5,
+ "dampening": d,
+ "weight_decay": w,
+ "nesterov": n,
+ "fused": True,
+ }
+ for d, w, n in product((0.0,), (0.0, 0.5), (True, False))
+ ],
+ )
+ )
steps_warmup = 3
steps_train = 2
@@ -2959,7 +3492,10 @@ exit(2)
params_graphed = [p.clone().requires_grad_() for p in params]
# `GradScaler` in-place updates gradients thus it's necessary to duplicate gradients.
- grads = [[torch.randn_like(p) for p in params] for _ in range(steps_warmup + steps_train)]
+ grads = [
+ [torch.randn_like(p) for p in params]
+ for _ in range(steps_warmup + steps_train)
+ ]
with torch.no_grad():
grads_control = [[g.clone() for g in gs] for gs in grads]
grads_graphed = [[g.clone() for g in gs] for gs in grads]
@@ -2967,12 +3503,16 @@ exit(2)
# Gradient Scaler
scaler_for_control = torch.cuda.amp.GradScaler(init_scale=128.0)
with torch.no_grad():
- scaler_for_control._lazy_init_scale_growth_tracker(torch.device("cuda"))
+ scaler_for_control._lazy_init_scale_growth_tracker(
+ torch.device("cuda")
+ )
scaler_for_graphed = torch.cuda.amp.GradScaler()
scaler_for_graphed.load_state_dict(scaler_for_control.state_dict())
with torch.no_grad():
- scaler_for_graphed._lazy_init_scale_growth_tracker(torch.device("cuda"))
+ scaler_for_graphed._lazy_init_scale_growth_tracker(
+ torch.device("cuda")
+ )
# Control (capturable=False)
if has_capturable_arg:
@@ -3018,7 +3558,9 @@ exit(2)
for p_control, p_graphed in zip(params_control, params_graphed):
self.assertEqual(p_control, p_graphed)
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_cuda_graph_error_options(self):
def fn():
x = torch.zeros([2000], device="cuda")
@@ -3055,7 +3597,9 @@ exit(2)
torch.cuda.current_stream().wait_stream(stream)
torch.cuda.synchronize()
try:
- with torch.cuda.graph(graph, stream=stream, capture_error_mode=capture_error_mode):
+ with torch.cuda.graph(
+ graph, stream=stream, capture_error_mode=capture_error_mode
+ ):
out = fn()
thread = threading.Thread(target=raw_malloc)
thread.start()
@@ -3073,7 +3617,9 @@ exit(2)
# Exception would Corrupt Process and make other tests fail
# self.assertTrue(throws_on_cuda_event("global"))
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
def test_cuda_graph_allocator_propagates_stream(self):
segments = torch.cuda.memory_snapshot()
existing_pools = {s["segment_pool_id"] for s in segments}
@@ -3093,18 +3639,28 @@ exit(2)
with torch.cuda.stream(s0):
g.capture_end()
segments = torch.cuda.memory_snapshot()
- x = [s["segment_pool_id"] for s in segments if s["segment_pool_id"] not in existing_pools]
+ x = [
+ s["segment_pool_id"]
+ for s in segments
+ if s["segment_pool_id"] not in existing_pools
+ ]
self.assertEqual(len(x), 2)
self.assertEqual(x[0], x[1])
def test_batch_norm_gather_stats(self):
- input = torch.randn(1, 3, 3, 3, device='cuda')
+ input = torch.randn(1, 3, 3, 3, device="cuda")
mean, invstd = torch.batch_norm_gather_stats(
- input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
- running_mean=None, running_var=None , momentum=.1, eps=1e-5, count=2
+ input,
+ mean=torch.ones(2, 3, device="cuda"),
+ invstd=torch.ones(2, 3, device="cuda"),
+ running_mean=None,
+ running_var=None,
+ momentum=0.1,
+ eps=1e-5,
+ count=2,
)
- self.assertEqual(mean, torch.ones(3, device='cuda'))
- self.assertEqual(invstd, torch.ones(3, device='cuda'))
+ self.assertEqual(mean, torch.ones(3, device="cuda"))
+ self.assertEqual(invstd, torch.ones(3, device="cuda"))
def test_matmul_memory_use(self):
def get_max_used():
@@ -3159,7 +3715,7 @@ exit(2)
return MyFunction.apply(x, self.a)
model = MyModule()
- criterion = torch.nn.MSELoss(reduction='sum')
+ criterion = torch.nn.MSELoss(reduction="sum")
optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
x = torch.randn(5, 5)
@@ -3172,25 +3728,38 @@ exit(2)
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
- with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Expected all tensors to be on the same device"
+ ):
cpu @ cuda
- with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Expected all tensors to be on the same device"
+ ):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
- with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Expected all tensors to be on the same device"
+ ):
torch.addmm(s, m1, m2)
@unittest.skipIf(TEST_MULTIGPU, "Testing on one GPU is sufficient")
def test_lazy_init(self):
- """ Validate that no CUDA calls are made during `import torch` call"""
+ """Validate that no CUDA calls are made during `import torch` call"""
+
def check_output(script: str) -> str:
- return subprocess.check_output([sys.executable, "-c", script]).decode("ascii").strip()
+ return (
+ subprocess.check_output([sys.executable, "-c", script])
+ .decode("ascii")
+ .strip()
+ )
- VISIBLE_DEVICES = "HIP_VISIBLE_DEVICES" if TEST_WITH_ROCM else "CUDA_VISIBLE_DEVICES"
+ VISIBLE_DEVICES = (
+ "HIP_VISIBLE_DEVICES" if TEST_WITH_ROCM else "CUDA_VISIBLE_DEVICES"
+ )
test_script = f"import os; import torch;os.environ['{VISIBLE_DEVICES}']='32';print(torch.cuda.device_count())"
rc = check_output(test_script)
self.assertEqual(rc, "0")
@@ -3199,8 +3768,12 @@ exit(2)
# By using ctypes and calling cuDeviceCountGet() and expect CUDA_ERROR_NOT_INITIALIZED == 3
# See https://github.com/pytorch/pytorch/issues/116276 for more details
libcuda_name = "libcuda.so.1" if not IS_WINDOWS else "nvcuda.dll"
- cuda_driver_api_call = f"ctypes.CDLL('{libcuda_name}').cuDeviceGetCount(ctypes.byref(x))"
- rc = check_output(f"import torch; import ctypes;x=ctypes.c_int(-1);print({cuda_driver_api_call})")
+ cuda_driver_api_call = (
+ f"ctypes.CDLL('{libcuda_name}').cuDeviceGetCount(ctypes.byref(x))"
+ )
+ rc = check_output(
+ f"import torch; import ctypes;x=ctypes.c_int(-1);print({cuda_driver_api_call})"
+ )
self.assertEqual(rc, "3")
@unittest.skipIf(not TEST_MULTIGPU, "requires multiple devices")
@@ -3216,7 +3789,11 @@ torch.empty(10, device='cuda')
print(f"{r1}, {r2}")
"""
- r = subprocess.check_output([sys.executable, "-c", test_script]).decode("ascii").strip()
+ r = (
+ subprocess.check_output([sys.executable, "-c", test_script])
+ .decode("ascii")
+ .strip()
+ )
x = torch.cuda.device_count()
self.assertEqual(f"{x}, 1", r)
@@ -3224,49 +3801,54 @@ print(f"{r1}, {r2}")
@torch.testing._internal.common_utils.markDynamoStrictTest
class TestCudaMallocAsync(TestCase):
- @unittest.skipIf(TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync")
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync"
+ )
def test_memory_snapshot(self):
try:
torch.cuda.memory.empty_cache()
torch.cuda.memory._record_memory_history("state", stacks="python")
# make x the second block in a segment
- torch.rand(2 * 311, 411, device='cuda')
- unused = torch.rand(310, 410, device='cuda')
- x = torch.rand(311, 411, device='cuda')
+ torch.rand(2 * 311, 411, device="cuda")
+ unused = torch.rand(310, 410, device="cuda")
+ x = torch.rand(311, 411, device="cuda")
# create a bunch of tensors that all will tile into the
# same segment to exercise the history merging code
# 512B is the minimum block size,
# so we allocate all the tensors to this size to make sure
# they tile evenly
- tensors = [torch.rand(128, device='cuda') for _ in range(1000)]
+ tensors = [torch.rand(128, device="cuda") for _ in range(1000)]
while tensors:
del tensors[randint(0, len(tensors) - 1)]
# exercise the history trimming code
- torch.rand(128 * 5, device='cuda')
+ torch.rand(128 * 5, device="cuda")
ss = torch.cuda.memory._snapshot()
found_it = False
- for seg in ss['segments']:
- self.assertTrue('frames' in seg)
- for b in seg['blocks']:
- if b['requested_size'] == 311 * 411 * 4:
- self.assertTrue('test_cuda' in b['frames'][0]['filename'])
+ for seg in ss["segments"]:
+ self.assertTrue("frames" in seg)
+ for b in seg["blocks"]:
+ if b["requested_size"] == 311 * 411 * 4:
+ self.assertTrue("test_cuda" in b["frames"][0]["filename"])
found_it = True
- self.assertEqual(x.untyped_storage().data_ptr(), b['address'])
+ self.assertEqual(x.untyped_storage().data_ptr(), b["address"])
self.assertTrue(found_it)
if not IS_WINDOWS:
with tempfile.NamedTemporaryFile() as f:
torch.cuda.memory._save_segment_usage(f.name)
with open(f.name) as f2:
- self.assertTrue('test_cuda.py' in f2.read())
+ self.assertTrue("test_cuda.py" in f2.read())
del unused
del x
torch.cuda.empty_cache()
ss = torch.cuda.memory._snapshot()
- self.assertTrue(ss['device_traces'][0][-1]['action'] in ('segment_free', 'segment_unmap'))
+ self.assertTrue(
+ ss["device_traces"][0][-1]["action"]
+ in ("segment_free", "segment_unmap")
+ )
finally:
torch.cuda.memory._record_memory_history(None)
@@ -3274,26 +3856,29 @@ class TestCudaMallocAsync(TestCase):
@unittest.skipIf(IS_ARM64 or not IS_LINUX, "x86 linux only cpp unwinding")
def test_direct_traceback(self):
from torch._C._profiler import gather_traceback, symbolize_tracebacks
+
c = gather_traceback(True, True, True)
- r, = symbolize_tracebacks([c])
+ (r,) = symbolize_tracebacks([c])
r = str(r)
self.assertTrue("test_cuda.py" in r)
self.assertTrue("unwind" in r)
- @unittest.skipIf(TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync")
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync"
+ )
@unittest.skipIf(IS_ARM64 or not IS_LINUX, "cpp contexts are x86 linux only")
def test_memory_snapshot_with_cpp(self):
try:
torch.cuda.memory.empty_cache()
torch.cuda.memory._record_memory_history("state", stacks="all")
- x = torch.rand(311, 411, device='cuda')
+ x = torch.rand(311, 411, device="cuda")
- ss = torch.cuda.memory._snapshot()['segments']
+ ss = torch.cuda.memory._snapshot()["segments"]
found_it = False
for seg in ss:
- for b in seg['blocks']:
- if b['requested_size'] == 311 * 411 * 4:
- self.assertTrue('::rand' in str(b['frames']))
+ for b in seg["blocks"]:
+ if b["requested_size"] == 311 * 411 * 4:
+ self.assertTrue("::rand" in str(b["frames"]))
found_it = True
self.assertTrue(found_it)
@@ -3303,19 +3888,19 @@ class TestCudaMallocAsync(TestCase):
@skipIfRocm
def test_memory_profiler_viz(self):
with torch.profiler.profile(
- with_stack=True,
- profile_memory=True,
- record_shapes=True
+ with_stack=True, profile_memory=True, record_shapes=True
) as prof:
- x = torch.rand(128, 128, device='cuda')
+ x = torch.rand(128, 128, device="cuda")
x * x + x * x
plot = profile_plot(prof)
plot = json.dumps(_profile_to_snapshot(prof))
self.assertTrue("test_cuda.py" in plot)
self.assertTrue("test_memory_profiler_viz" in plot)
- self.assertTrue('category' in plot)
+ self.assertTrue("category" in plot)
- @unittest.skipIf(TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync")
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync"
+ )
@unittest.skipIf(IS_ARM64 or not IS_LINUX, "cpp contexts are x86 linux only")
def test_cycles(self):
fired = False
@@ -3323,9 +3908,9 @@ class TestCudaMallocAsync(TestCase):
def observer(html):
nonlocal fired
fired = True
- self.assertTrue('torch.Tensor' in html)
- self.assertTrue('test_cuda' in html)
- self.assertTrue('cell_contents' in html)
+ self.assertTrue("torch.Tensor" in html)
+ self.assertTrue("test_cuda" in html)
+ self.assertTrue("cell_contents" in html)
disarm = observe_tensor_cycles(observer)
@@ -3333,15 +3918,18 @@ class TestCudaMallocAsync(TestCase):
pass
try:
+
def create():
- x = torch.empty(3, 4, device='cuda')
+ x = torch.empty(3, 4, device="cuda")
def foo(p):
if p:
return foo(not p)
else:
return x
+
return foo
+
create()
gc.collect()
# the callback has to run outside of the collect
@@ -3352,16 +3940,24 @@ class TestCudaMallocAsync(TestCase):
finally:
disarm()
- @unittest.skipIf(TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync")
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync"
+ )
@unittest.skipIf(IS_ARM64 or not IS_LINUX, "cpp contexts are x86 linux only")
def test_memory_plots(self):
- for context, stacks in (("all", "all" if IS_LINUX else "python"), ("all", "python"), (None, "python")):
+ for context, stacks in (
+ ("all", "all" if IS_LINUX else "python"),
+ ("all", "python"),
+ (None, "python"),
+ ):
try:
torch.cuda.memory.empty_cache()
- torch.cuda.memory._record_memory_history("all", context=context, stacks=stacks)
+ torch.cuda.memory._record_memory_history(
+ "all", context=context, stacks=stacks
+ )
def run():
- x = torch.rand(128, 128, device='cuda')
+ x = torch.rand(128, 128, device="cuda")
x * x + x * x
run()
@@ -3380,7 +3976,9 @@ class TestCudaMallocAsync(TestCase):
finally:
torch.cuda.memory._record_memory_history(None)
- @unittest.skipIf(TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync")
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync"
+ )
@unittest.skipIf(IS_ARM64 or not IS_LINUX, "cpp contexts are x86 linux only")
def test_memory_plots_free_stack(self):
for context in ["alloc", "all", "state"]:
@@ -3391,7 +3989,7 @@ class TestCudaMallocAsync(TestCase):
def thealloc():
nonlocal x
- x = torch.rand(3, 4, device='cuda')
+ x = torch.rand(3, 4, device="cuda")
def thefree():
nonlocal x
@@ -3400,12 +3998,14 @@ class TestCudaMallocAsync(TestCase):
thealloc()
thefree()
ss = json.dumps(torch.cuda.memory._snapshot())
- self.assertTrue(('thefree' in ss) == (context == 'all'))
- self.assertTrue(('thealloc' in ss) == (context != 'state'))
+ self.assertTrue(("thefree" in ss) == (context == "all"))
+ self.assertTrue(("thealloc" in ss) == (context != "state"))
finally:
torch.cuda.memory._record_memory_history(None)
- @unittest.skipIf(TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync")
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync"
+ )
@unittest.skipIf(IS_ARM64 or not IS_LINUX, "cpp contexts are x86 linux only")
def test_memory_plots_history_context(self):
try:
@@ -3414,15 +4014,15 @@ class TestCudaMallocAsync(TestCase):
def should_capture1():
nonlocal x
- x = torch.rand(4, 4, device='cuda')
+ x = torch.rand(4, 4, device="cuda")
def should_not_capture():
nonlocal x
- x = torch.rand(3, 4, device='cuda')
+ x = torch.rand(3, 4, device="cuda")
def should_capture2():
nonlocal x
- x = torch.rand(4, 4, device='cuda')
+ x = torch.rand(4, 4, device="cuda")
# Recording with context and python call stacks should capture the call stack.
torch.cuda.memory._record_memory_history(context="all", stacks="python")
@@ -3435,29 +4035,33 @@ class TestCudaMallocAsync(TestCase):
should_capture2()
ss = json.dumps(torch.cuda.memory._snapshot())
- self.assertTrue('should_capture1' in ss)
- self.assertTrue('should_not_capture' not in ss)
- self.assertTrue('should_capture2' in ss)
+ self.assertTrue("should_capture1" in ss)
+ self.assertTrue("should_not_capture" not in ss)
+ self.assertTrue("should_capture2" in ss)
finally:
torch.cuda.memory._record_memory_history(None)
- @unittest.skipIf(TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync")
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync"
+ )
@unittest.skipIf(IS_ARM64 or not IS_LINUX, "cpp contexts are x86 linux only")
def test_memory_plots_free_segment_stack(self):
for context in ["alloc", "all", "state"]:
try:
torch.cuda.memory.empty_cache()
torch.cuda.memory._record_memory_history(context=context)
- x = torch.rand(3, 4, device='cuda')
+ x = torch.rand(3, 4, device="cuda")
del x
torch.cuda.memory.empty_cache()
ss = json.dumps(torch.cuda.memory._snapshot())
- self.assertTrue(('empty_cache' in ss) == (context == 'all'))
+ self.assertTrue(("empty_cache" in ss) == (context == "all"))
finally:
torch.cuda.memory._record_memory_history(None)
- @unittest.skipIf(TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync")
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync"
+ )
def test_memory_snapshot_script(self):
try:
torch.cuda.memory.empty_cache()
@@ -3465,16 +4069,16 @@ class TestCudaMallocAsync(TestCase):
@torch.jit.script
def foo():
- return torch.rand(311, 411, device='cuda')
+ return torch.rand(311, 411, device="cuda")
x = foo()
- ss = torch.cuda.memory._snapshot()['segments']
+ ss = torch.cuda.memory._snapshot()["segments"]
found_it = False
for seg in ss:
- for b in seg['blocks']:
- if b['requested_size'] == 311 * 411 * 4:
- self.assertTrue(b['frames'][0]['name'] == 'foo')
+ for b in seg["blocks"]:
+ if b["requested_size"] == 311 * 411 * 4:
+ self.assertTrue(b["frames"][0]["name"] == "foo")
found_it = True
self.assertTrue(found_it)
@@ -3495,8 +4099,12 @@ class TestCudaMallocAsync(TestCase):
return ret
torch.cuda.memory.empty_cache()
- key_allocated = 'active_bytes.all.allocated' if not TEST_CUDAMALLOCASYNC else 'allocated_bytes.all.current'
- key_requested = 'requested_bytes.all.allocated'
+ key_allocated = (
+ "active_bytes.all.allocated"
+ if not TEST_CUDAMALLOCASYNC
+ else "allocated_bytes.all.current"
+ )
+ key_requested = "requested_bytes.all.allocated"
nelems = 21 * 1024 * 1024
nbytes = 4 * nelems # floats are 4 bytes
@@ -3506,13 +4114,13 @@ class TestCudaMallocAsync(TestCase):
start_mem = torch.cuda.memory_stats()[key_allocated]
torch.cuda.memory._set_allocator_settings("")
- x = torch.rand(nelems, device='cuda')
+ x = torch.rand(nelems, device="cuda")
# test roundup_power2_divisions single value syntax
reg_mem = torch.cuda.memory_stats()[key_allocated]
start_requested = torch.cuda.memory_stats()[key_requested]
torch.cuda.memory._set_allocator_settings("roundup_power2_divisions:4")
- y = torch.rand(nelems, device='cuda')
+ y = torch.rand(nelems, device="cuda")
pow2_div4_mem = torch.cuda.memory_stats()[key_allocated]
current_requested = torch.cuda.memory_stats()[key_requested]
@@ -3524,21 +4132,24 @@ class TestCudaMallocAsync(TestCase):
self.assertTrue(current_requested - start_requested == nbytes)
torch.cuda.memory._set_allocator_settings("garbage_collection_threshold:0.5")
- torch.cuda.memory._set_allocator_settings("garbage_collection_threshold:0.5,max_split_size_mb:40")
+ torch.cuda.memory._set_allocator_settings(
+ "garbage_collection_threshold:0.5,max_split_size_mb:40"
+ )
# should have reset the power2 divisions now
torch.cuda.memory.empty_cache()
start_mem = torch.cuda.memory_stats()[key_allocated]
- z = torch.rand(nelems, device='cuda')
+ z = torch.rand(nelems, device="cuda")
reg_mem = torch.cuda.memory_stats()[key_allocated]
self.assertTrue(reg_mem - start_mem == nbytes)
# roundup_power2_divisions knob array syntax
torch.cuda.memory.empty_cache()
torch.cuda.memory._set_allocator_settings(
- "garbage_collection_threshold:0.5,roundup_power2_divisions:[64:8,128:2,256:2,512:2,1024:1,>:1]")
+ "garbage_collection_threshold:0.5,roundup_power2_divisions:[64:8,128:2,256:2,512:2,1024:1,>:1]"
+ )
start_mem = torch.cuda.memory_stats()[key_allocated]
- w = torch.rand(nelems, device='cuda')
+ w = torch.rand(nelems, device="cuda")
pow2_div8_mem = torch.cuda.memory_stats()[key_allocated]
if not TEST_CUDAMALLOCASYNC:
@@ -3547,7 +4158,7 @@ class TestCudaMallocAsync(TestCase):
torch.cuda.memory.empty_cache()
start_mem = torch.cuda.memory_stats()[key_allocated]
- v = torch.rand(nelems_big, device='cuda')
+ v = torch.rand(nelems_big, device="cuda")
pow2_div2_mem = torch.cuda.memory_stats()[key_allocated]
if not TEST_CUDAMALLOCASYNC:
@@ -3557,7 +4168,7 @@ class TestCudaMallocAsync(TestCase):
torch.cuda.memory.empty_cache()
torch.cuda.memory._set_allocator_settings("release_lock_on_cudamalloc:True")
start_mem = torch.cuda.memory_stats()[key_allocated]
- w = torch.rand(nelems, device='cuda')
+ w = torch.rand(nelems, device="cuda")
reg_mem = torch.cuda.memory_stats()[key_allocated]
self.assertTrue(reg_mem - start_mem == nbytes)
@@ -3565,7 +4176,9 @@ class TestCudaMallocAsync(TestCase):
torch.cuda.memory._set_allocator_settings("foo:1,bar:2")
with self.assertRaises(RuntimeError):
- torch.cuda.memory._set_allocator_settings("garbage_collection_threshold:1.2")
+ torch.cuda.memory._set_allocator_settings(
+ "garbage_collection_threshold:1.2"
+ )
with self.assertRaises(RuntimeError):
torch.cuda.memory._set_allocator_settings("max_split_size_mb:2")
@@ -3574,17 +4187,21 @@ class TestCudaMallocAsync(TestCase):
torch.cuda.memory._set_allocator_settings("release_lock_on_cudamalloc:none")
with self.assertRaises(RuntimeError):
- torch.cuda.memory._set_allocator_settings("pinned_use_cuda_host_register:none")
+ torch.cuda.memory._set_allocator_settings(
+ "pinned_use_cuda_host_register:none"
+ )
with self.assertRaises(RuntimeError):
- torch.cuda.memory._set_allocator_settings("pinned_num_register_threads:none")
+ torch.cuda.memory._set_allocator_settings(
+ "pinned_num_register_threads:none"
+ )
with self.assertRaises(RuntimeError):
- torch.cuda.memory._set_allocator_settings("pinned_num_register_threads:1024")
+ torch.cuda.memory._set_allocator_settings(
+ "pinned_num_register_threads:1024"
+ )
- @parametrize(
- "max_split_size_mb_setting", [False, True]
- )
+ @parametrize("max_split_size_mb_setting", [False, True])
def test_raises_oom(self, max_split_size_mb_setting):
if max_split_size_mb_setting:
# CudaCachingAllocator does early return when searching available blocks
@@ -3593,12 +4210,17 @@ class TestCudaMallocAsync(TestCase):
torch.cuda.memory._set_allocator_settings("max_split_size_mb:1024")
torch.cuda.memory.empty_cache()
with self.assertRaises(torch.cuda.OutOfMemoryError):
- torch.empty(1024 * 1024 * 1024 * 1024, device='cuda')
+ torch.empty(1024 * 1024 * 1024 * 1024, device="cuda")
- @unittest.skipIf(not (IS_LINUX and os.uname().machine == "x86_64"), 'cpp traces only on linux')
- @unittest.skipIf(TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync")
+ @unittest.skipIf(
+ not (IS_LINUX and os.uname().machine == "x86_64"), "cpp traces only on linux"
+ )
+ @unittest.skipIf(
+ TEST_CUDAMALLOCASYNC, "setContextRecorder not supported by CUDAMallocAsync"
+ )
def test_cpp_memory_snapshot_pickle(self):
from torch.utils.cpp_extension import load_inline
+
source = """
#include <torch/csrc/cuda/memory_snapshot.h>
py::object do_snapshot() {
@@ -3609,14 +4231,16 @@ class TestCudaMallocAsync(TestCase):
torch::cuda::_record_memory_history(e, ctx, 10, ctx, ctx);
}
"""
- m = load_inline(name='snapshot', cpp_sources=[source], functions=['do_snapshot', 'record'])
+ m = load_inline(
+ name="snapshot", cpp_sources=[source], functions=["do_snapshot", "record"]
+ )
for ctx in (False, True):
try:
m.record(True, ctx)
@torch.jit.script
def the_script_fn():
- return torch.rand(311, 411, device='cuda')
+ return torch.rand(311, 411, device="cuda")
def run():
t = the_script_fn()
@@ -3624,22 +4248,22 @@ class TestCudaMallocAsync(TestCase):
mem = run()
found = False
- for s in mem['segments']:
- for b in s['blocks']:
- if b['state'] == 'active_allocated':
- if b['requested_size'] == 311 * 411 * 4:
+ for s in mem["segments"]:
+ for b in s["blocks"]:
+ if b["state"] == "active_allocated":
+ if b["requested_size"] == 311 * 411 * 4:
if ctx:
- frame_text = str(b['frames'])
+ frame_text = str(b["frames"])
# C++ frame
- self.assertTrue('::rand' in frame_text)
+ self.assertTrue("::rand" in frame_text)
# script frame
- self.assertTrue('the_script_fn' in frame_text)
+ self.assertTrue("the_script_fn" in frame_text)
# python frame
- self.assertTrue('case.py' in frame_text)
+ self.assertTrue("case.py" in frame_text)
found = True
- last_action = mem['device_traces'][0][-1]
- self.assertTrue(last_action['action'] == 'alloc')
- self.assertTrue(last_action['size'] == 311 * 411 * 4)
+ last_action = mem["device_traces"][0][-1]
+ self.assertTrue(last_action["action"] == "alloc")
+ self.assertTrue(last_action["size"] == 311 * 411 * 4)
self.assertTrue(found)
finally:
m.record(False, False)
@@ -3651,9 +4275,10 @@ class TestCudaMallocAsync(TestCase):
def cb(device, alloc, device_alloc, device_free):
nonlocal x
x = True
+
torch._C._cuda_attach_out_of_memory_observer(cb)
with self.assertRaises(torch.cuda.OutOfMemoryError):
- torch.empty(1024 * 1024 * 1024 * 1024, device='cuda')
+ torch.empty(1024 * 1024 * 1024 * 1024, device="cuda")
self.assertTrue(x)
def test_allocator_fuzz(self):
@@ -3669,7 +4294,7 @@ class TestCudaMallocAsync(TestCase):
def alloc():
nonlocal total, c
b = random.randrange(2 * 1024 * 1024 // 4, 200 * 1024 * 1024 // 4)
- mem.append((c, torch.full((b,), c, dtype=torch.int32, device='cuda')))
+ mem.append((c, torch.full((b,), c, dtype=torch.int32, device="cuda")))
c += 1
total += b
@@ -3684,7 +4309,7 @@ class TestCudaMallocAsync(TestCase):
for i in range(N):
while total >= 1024 * 1024 * 1024 / 4:
free()
- action, = random.choices(choices, weights=[1, 1 if mem else 0, .1])
+ (action,) = random.choices(choices, weights=[1, 1 if mem else 0, 0.1])
action()
finally:
random.setstate(state)
@@ -3711,14 +4336,17 @@ SMALL_SIZE = 1048576
SMALL_BUFFER = 2097152
LARGE_BUFFER = 20971520
+
def get_cudagraph_segments(pool_id):
segments = torch.cuda.memory_snapshot()
return [segment for segment in segments if segment["segment_pool_id"] == pool_id]
+
def get_all_cudagraph_segments():
segments = torch.cuda.memory_snapshot()
return [segment for segment in segments if segment["segment_pool_id"] != (0, 0)]
+
def cudagraphify(fn, inputs, pool=None):
if not TEST_CUDA_GRAPH:
raise unittest.SkipTest("cuda graph test is skipped")
@@ -3738,9 +4366,11 @@ def cudagraphify(fn, inputs, pool=None):
return graph, static_outputs
+
def int8_cuda(size):
return torch.ones([size], device="cuda", dtype=torch.uint8)
+
def live_blocks(pool_id):
blocks = 0
seg = get_cudagraph_segments(pool_id)
@@ -3779,7 +4409,6 @@ def reconstruct_from_tensor_metadata(metadata):
@unittest.skipIf(TEST_CUDAMALLOCASYNC or TEST_WITH_ROCM, "NYI")
@torch.testing._internal.common_utils.markDynamoStrictTest
class TestBlockStateAbsorption(TestCase):
-
def checkCheckpointedBlock(self, before_block, after_block):
for field in ("size", "state"):
self.assertEqual(before_block[field], after_block[field])
@@ -3787,24 +4416,45 @@ class TestBlockStateAbsorption(TestCase):
def checkCheckpointedState(self, before_segments, after_segments):
# after may contain additional segments, but all of the segments in before
# should be exactly equivalent to after
- after_ptr_to_segment = {segment["address"] : segment for segment in after_segments}
+ after_ptr_to_segment = {
+ segment["address"]: segment for segment in after_segments
+ }
for before_segment in before_segments:
self.assertTrue(before_segment["address"] in after_ptr_to_segment)
after_segment = after_ptr_to_segment[before_segment["address"]]
- for field in ("device", "total_size", "allocated_size", "active_size", "segment_type", "segment_pool_id"):
+ for field in (
+ "device",
+ "total_size",
+ "allocated_size",
+ "active_size",
+ "segment_type",
+ "segment_pool_id",
+ ):
self.assertEqual(before_segment[field], after_segment[field])
- self.assertEqual(len(before_segment["blocks"]), len(after_segment["blocks"]))
- for before_block, after_block in zip(before_segment["blocks"], after_segment["blocks"]):
+ self.assertEqual(
+ len(before_segment["blocks"]), len(after_segment["blocks"])
+ )
+ for before_block, after_block in zip(
+ before_segment["blocks"], after_segment["blocks"]
+ ):
self.checkCheckpointedBlock(before_block, after_block)
@staticmethod
- def setCheckpointPoolState(device, state, stale_storages_ptr, storages_deleters=None):
+ def setCheckpointPoolState(
+ device, state, stale_storages_ptr, storages_deleters=None
+ ):
stale_storages_ptr = [t.untyped_storage()._cdata for t in stale_storages_ptr]
- storages_deleters = [] if not storages_deleters else [t.untyped_storage()._cdata for t in storages_deleters]
- torch._C._cuda_setCheckpointPoolState(device, state, stale_storages_ptr, storages_deleters)
+ storages_deleters = (
+ []
+ if not storages_deleters
+ else [t.untyped_storage()._cdata for t in storages_deleters]
+ )
+ torch._C._cuda_setCheckpointPoolState(
+ device, state, stale_storages_ptr, storages_deleters
+ )
def checkFunction(self, fn, inputs, pool=None):
graph, outputs = cudagraphify(fn, inputs, pool=pool)
@@ -3817,7 +4467,9 @@ class TestBlockStateAbsorption(TestCase):
state = torch._C._cuda_getCheckpointState(device, pool_id)
self.setCheckpointPoolState(device, state, [], [])
- self.checkCheckpointedState(segments_before_checkpoint, get_cudagraph_segments(pool_id))
+ self.checkCheckpointedState(
+ segments_before_checkpoint, get_cudagraph_segments(pool_id)
+ )
def setUp(self):
super().setUp()
@@ -3833,7 +4485,6 @@ class TestBlockStateAbsorption(TestCase):
super().tearDown()
def test_simple(self):
-
def foo():
x = torch.zeros([SMALL_SIZE * 8], device="cuda", dtype=torch.uint8)
x = x + x
@@ -3845,7 +4496,6 @@ class TestBlockStateAbsorption(TestCase):
self.checkFunction(foo, [])
def test_allocated_in_middle_of_segment(self):
-
def foo():
small_buffers = [int8_cuda(MIN_BLOCK_SIZE) for _ in range(11)]
return small_buffers[5].add_(2)
@@ -3853,7 +4503,6 @@ class TestBlockStateAbsorption(TestCase):
self.checkFunction(foo, [])
def test_multiple_middle_allocations(self):
-
def foo():
small_buffers = [int8_cuda(MIN_BLOCK_SIZE) for _ in range(11)]
return small_buffers[5], small_buffers[8]
@@ -3868,12 +4517,11 @@ class TestBlockStateAbsorption(TestCase):
self.checkFunction(foo, [])
def test_additional_free_following_checkpoint(self):
-
def foo():
- return int8_cuda(MIN_BLOCK_SIZE),
+ return (int8_cuda(MIN_BLOCK_SIZE),)
def foo2():
- return int8_cuda(MIN_BLOCK_SIZE),
+ return (int8_cuda(MIN_BLOCK_SIZE),)
graph, outputs = cudagraphify(foo, [])
pool_id = graph.pool()
@@ -3884,12 +4532,13 @@ class TestBlockStateAbsorption(TestCase):
graph2, outputs2 = cudagraphify(foo2, [], pool=graph.pool())
-
self.setCheckpointPoolState(outputs[0].device.index, state, outputs2, [])
del outputs2
- self.checkCheckpointedState(segments_before_checkpoint, get_cudagraph_segments(pool_id))
+ self.checkCheckpointedState(
+ segments_before_checkpoint, get_cudagraph_segments(pool_id)
+ )
# TODO: re-enable
# def test_additional_free_error(self):
@@ -3906,12 +4555,11 @@ class TestBlockStateAbsorption(TestCase):
# state = torch._C._cuda_getCheckpointState(outputs[0].device.index, pool_id)
- # graph2, outputs2 = cudagraphify(foo2, [], pool=graph.pool())
- # with self.assertRaisesRegex(Exception, "being manually freed must be passed"):
- # self.setCheckpointPoolState(outputs[0].device.index, state, [], [])
+ # graph2, outputs2 = cudagraphify(foo2, [], pool=graph.pool())
+ # with self.assertRaisesRegex(Exception, "being manually freed must be passed"):
+ # self.setCheckpointPoolState(outputs[0].device.index, state, [], [])
def test_tensor_dies_after_checkpoint(self):
-
def foo():
return int8_cuda(MIN_BLOCK_SIZE), int8_cuda(MIN_BLOCK_SIZE)
@@ -3935,9 +4583,12 @@ class TestBlockStateAbsorption(TestCase):
self.assertEqual(live_blocks(pool_id), 0)
def test_assigning_back_deleter_fns_to_tensor(self):
-
def foo(x):
- return int8_cuda(SMALL_BUFFER) + x, int8_cuda(SMALL_BUFFER) + x, int8_cuda(LARGE_BUFFER) + x
+ return (
+ int8_cuda(SMALL_BUFFER) + x,
+ int8_cuda(SMALL_BUFFER) + x,
+ int8_cuda(LARGE_BUFFER) + x,
+ )
inp = torch.tensor([1], device="cuda")
graph, outputs = cudagraphify(foo, [inp])
@@ -3960,7 +4611,9 @@ class TestBlockStateAbsorption(TestCase):
self.assertEqual(live_blocks(pool_id), 0)
- reconstructed_tensors = [reconstruct_from_tensor_metadata(metadata) for metadata in ten_metadata]
+ reconstructed_tensors = [
+ reconstruct_from_tensor_metadata(metadata) for metadata in ten_metadata
+ ]
for i in range(len(reconstructed_tensors)):
self.assertTrue(reconstructed_tensors[i].mean(dtype=torch.float) == 2)
@@ -3971,7 +4624,9 @@ class TestBlockStateAbsorption(TestCase):
for i in range(len(reconstructed_tensors)):
self.assertTrue(reconstructed_tensors[i].mean(dtype=torch.float) == 3)
- self.setCheckpointPoolState(device, state, [], [reconstructed_tensors[0], reconstructed_tensors[1]])
+ self.setCheckpointPoolState(
+ device, state, [], [reconstructed_tensors[0], reconstructed_tensors[1]]
+ )
self.assertEqual(live_blocks(pool_id), 3)
@@ -3992,6 +4647,7 @@ class TestBlockStateAbsorption(TestCase):
@skipIfNoTorchVision
def test_resnet(self):
import torchvision
+
m = torchvision.models.resnet50()
m.eval()
m = m.cuda()
@@ -4000,7 +4656,6 @@ class TestBlockStateAbsorption(TestCase):
self.checkFunction(m, [inp])
def test_check_pool_live_allocations(self):
-
def foo():
return torch.ones([4], device="cuda")
@@ -4020,9 +4675,7 @@ class TestBlockStateAbsorption(TestCase):
del outputs
self.assertTrue(check(set()))
-
def test_allocate_in_thread_to_pool(self):
-
def foo():
return torch.rand([4], device="cuda")
@@ -4051,7 +4704,6 @@ class TestBlockStateAbsorption(TestCase):
torch._C._cuda_releasePool(device, mem_pool)
stream_context.__exit__(None, None, None)
-
segments = get_cudagraph_segments(pool)
self.assertEqual(len(get_cudagraph_segments(pool)), 1)
@@ -4069,7 +4721,6 @@ class TestBlockStateAbsorption(TestCase):
# three more allocations not in pool
alloc_three()
-
def no_pool():
# two allocations
for _ in range(10):
@@ -4095,33 +4746,44 @@ class TestBlockStateAbsorption(TestCase):
self.assertEqual(len(get_cudagraph_segments(pool)), 0)
-
def test_no_triton_on_import(self):
- """ Test that Trition is not imported on first GPU use """
+ """Test that Trition is not imported on first GPU use"""
script = "import sys; import torch; torch.rand(2, device='cuda'); print('triton' in sys.modules)"
- rc = subprocess.check_output(
- [sys.executable, '-c', script],
- # On Windows, opening the subprocess with the default CWD makes `import torch`
- # fail, so just set CWD to this script's directory
- cwd=os.path.dirname(os.path.realpath(__file__))).strip().decode('ascii')
+ rc = (
+ subprocess.check_output(
+ [sys.executable, "-c", script],
+ # On Windows, opening the subprocess with the default CWD makes `import torch`
+ # fail, so just set CWD to this script's directory
+ cwd=os.path.dirname(os.path.realpath(__file__)),
+ )
+ .strip()
+ .decode("ascii")
+ )
self.assertEqual(rc, "False", "Triton was imported when importing torch!")
+
class TestCudaOptims(TestCase):
# These tests will be instantiate with instantiate_device_type_tests
# to apply the new OptimizerInfo structure.
@onlyCUDA
- @unittest.skipIf(not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs")
+ @unittest.skipIf(
+ not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
+ )
@parametrize("foreach, fused", [(False, False), (True, False), (False, True)])
@optims(
- [optim for optim in optim_db if "foreach" in optim.supported_impls and "fused" in optim.supported_impls],
- dtypes=[torch.float32]
+ [
+ optim
+ for optim in optim_db
+ if "foreach" in optim.supported_impls and "fused" in optim.supported_impls
+ ],
+ dtypes=[torch.float32],
)
def test_graph_grad_scaling(self, device, dtype, optim_info, foreach, fused):
torch.cuda.empty_cache()
- scaler = torch.cuda.amp.GradScaler(init_scale=4.)
+ scaler = torch.cuda.amp.GradScaler(init_scale=4.0)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
@@ -4154,10 +4816,9 @@ class TestCudaOptims(TestCase):
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
- for data, scale, growth_tracker, grad_val in zip(input_vals,
- expected_scales,
- expected_growth_trackers,
- expected_grad_vals):
+ for data, scale, growth_tracker, grad_val in zip(
+ input_vals, expected_scales, expected_growth_trackers, expected_grad_vals
+ ):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
@@ -4166,9 +4827,10 @@ class TestCudaOptims(TestCase):
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
+
instantiate_parametrized_tests(TestCuda)
instantiate_parametrized_tests(TestCudaMallocAsync)
instantiate_device_type_tests(TestCudaOptims, globals())
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/test_cuda_expandable_segments.py b/test/test_cuda_expandable_segments.py
index 8b634b774e..ed1ccc2ca3 100644
--- a/test/test_cuda_expandable_segments.py
+++ b/test/test_cuda_expandable_segments.py
@@ -2,13 +2,14 @@
# run time cuda tests, but with the allocator using expandable segments
import os
+
import torch
from torch.testing._internal.common_cuda import IS_JETSON
if torch.cuda.is_available() and not IS_JETSON:
- torch.cuda.memory._set_allocator_settings('expandable_segments:True')
+ torch.cuda.memory._set_allocator_settings("expandable_segments:True")
current_dir = os.path.dirname(os.path.abspath(__file__))
- filepath = os.path.join(current_dir, 'test_cuda.py')
- exec(compile(open(filepath).read(), filepath, mode='exec'))
+ filepath = os.path.join(current_dir, "test_cuda.py")
+ exec(compile(open(filepath).read(), filepath, mode="exec"))
diff --git a/test/test_cuda_multigpu.py b/test/test_cuda_multigpu.py
index 77e8d5693c..8d101003d7 100644
--- a/test/test_cuda_multigpu.py
+++ b/test/test_cuda_multigpu.py
@@ -3,38 +3,45 @@
import collections
import contextlib
import ctypes
-import io
import gc
+import io
import queue
import sys
import tempfile
import threading
-import torch
-import torch.cuda.comm as comm
import unittest
-from itertools import repeat, chain
+from itertools import chain, repeat
from typing import NamedTuple
+
+import torch
+import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
+from torch.testing._internal.common_cuda import (
+ _create_scaling_case,
+ _create_scaling_models_optimizers,
+ TEST_MULTIGPU,
+)
from torch.testing._internal.common_utils import (
+ get_cycles_per_ms,
+ instantiate_parametrized_tests,
IS_JETSON,
IS_REMOTE_GPU,
IS_SANDCASTLE,
NoTest,
- TEST_CUDA,
- TestCase,
- get_cycles_per_ms,
- instantiate_parametrized_tests,
run_tests,
skipCUDANonDefaultStreamIf,
skipIfRocm,
+ TEST_CUDA,
+ TestCase,
)
-from torch.testing._internal.common_cuda import TEST_MULTIGPU, _create_scaling_case, _create_scaling_models_optimizers
-TEST_CUDAMALLOCASYNC = TEST_CUDA and (torch.cuda.get_allocator_backend() == "cudaMallocAsync")
+TEST_CUDAMALLOCASYNC = TEST_CUDA and (
+ torch.cuda.get_allocator_backend() == "cudaMallocAsync"
+)
if not TEST_CUDA:
- print('CUDA not available, skipping tests', file=sys.stderr)
+ print("CUDA not available, skipping tests", file=sys.stderr)
TestCase = NoTest # noqa: F811
@@ -44,7 +51,9 @@ class TestCudaMultiGPU(TestCase):
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
- expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
+ expected_each_device = collections.defaultdict(
+ lambda: collections.defaultdict(int)
+ )
for segment in snapshot:
expandable = segment["is_expandable"]
@@ -56,7 +65,9 @@ class TestCudaMultiGPU(TestCase):
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
- expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
+ expected["allocated_bytes." + pool_str + ".current"] += segment[
+ "allocated_size"
+ ]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
@@ -65,7 +76,9 @@ class TestCudaMultiGPU(TestCase):
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
expected["requested_bytes.all.current"] += segment["requested_size"]
- expected["requested_bytes." + pool_str + ".current"] += segment["requested_size"]
+ expected["requested_bytes." + pool_str + ".current"] += segment[
+ "requested_size"
+ ]
sum_requested = 0
is_split = len(segment["blocks"]) > 1
@@ -83,7 +96,9 @@ class TestCudaMultiGPU(TestCase):
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
- expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
+ expected["inactive_split_bytes." + pool_str + ".current"] += block[
+ "size"
+ ]
self.assertEqual(sum_requested, segment["requested_size"])
@@ -94,15 +109,15 @@ class TestCudaMultiGPU(TestCase):
def test_cuda_synchronize(self):
torch.cuda.synchronize()
- torch.cuda.synchronize('cuda')
- torch.cuda.synchronize('cuda:0')
+ torch.cuda.synchronize("cuda")
+ torch.cuda.synchronize("cuda:0")
torch.cuda.synchronize(0)
- torch.cuda.synchronize(torch.device('cuda:0'))
+ torch.cuda.synchronize(torch.device("cuda:0"))
if TEST_MULTIGPU:
- torch.cuda.synchronize('cuda:1')
+ torch.cuda.synchronize("cuda:1")
torch.cuda.synchronize(1)
- torch.cuda.synchronize(torch.device('cuda:1'))
+ torch.cuda.synchronize(torch.device("cuda:1"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
@@ -285,8 +300,10 @@ class TestCudaMultiGPU(TestCase):
# interlace
torch.cuda.empty_cache()
- gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
- gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
+ gen0 = self._test_memory_stats_generator(self, device="cuda:0", N=35)
+ gen1 = self._test_memory_stats_generator(
+ self, device=torch.device("cuda:1"), N=35
+ )
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
@@ -295,7 +312,9 @@ class TestCudaMultiGPU(TestCase):
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
- gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
+ gen1 = self._test_memory_stats_generator(
+ self, device=torch.device("cuda:1"), N=35
+ )
end0 = end1 = False
while not (end0 and end1):
@@ -396,10 +415,10 @@ class TestCudaMultiGPU(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
- d0 = torch.device('cuda:0')
+ d0 = torch.device("cuda:0")
x0 = torch.zeros(5, 5, device=d0)
- d1 = torch.device('cuda:1')
+ d1 = torch.device("cuda:1")
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
@@ -416,13 +435,13 @@ class TestCudaMultiGPU(TestCase):
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
- tensor = torch.randn(2, device='cuda')
+ tensor = torch.randn(2, device="cuda")
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
- buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
+ buf = io.BytesIO(buf.getvalue().replace(b"cuda:0", b"cuda:9"))
- msg = r'Attempting to deserialize object on CUDA device 9'
+ msg = r"Attempting to deserialize object on CUDA device 9"
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
@@ -431,7 +450,7 @@ class TestCudaMultiGPU(TestCase):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
- if location == 'cuda:1':
+ if location == "cuda:1":
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
@@ -450,7 +469,7 @@ class TestCudaMultiGPU(TestCase):
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
- x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
+ x_copy = torch.load(f, map_location={"cuda:1": "cuda:0"})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
@@ -458,10 +477,10 @@ class TestCudaMultiGPU(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
- x = torch.randn(4, 4, device='cuda:1').storage()
+ x = torch.randn(4, 4, device="cuda:1").storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
- for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
+ for t in ["byte", "char", "short", "int", "long", "half", "double"]:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@@ -479,8 +498,8 @@ class TestCudaMultiGPU(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
- d0 = torch.device('cuda:0')
- d1 = torch.device('cuda:1')
+ d0 = torch.device("cuda:0")
+ d1 = torch.device("cuda:1")
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
@@ -501,15 +520,14 @@ class TestCudaMultiGPU(TestCase):
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
- with self.assertRaisesRegex(ValueError,
- "Expected a cuda device, but got: cpu"):
- torch.cuda.current_stream(torch.device('cpu'))
+ with self.assertRaisesRegex(ValueError, "Expected a cuda device, but got: cpu"):
+ torch.cuda.current_stream(torch.device("cpu"))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
- d0 = torch.device('cuda:0')
- d1 = torch.device('cuda:1')
+ d0 = torch.device("cuda:0")
+ d1 = torch.device("cuda:1")
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
@@ -533,14 +551,13 @@ class TestCudaMultiGPU(TestCase):
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
- with self.assertRaisesRegex(ValueError,
- "Expected a cuda device, but got: cpu"):
- torch.cuda.default_stream(torch.device('cpu'))
+ with self.assertRaisesRegex(ValueError, "Expected a cuda device, but got: cpu"):
+ torch.cuda.default_stream(torch.device("cpu"))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
- d0 = torch.device('cuda:0')
- d1 = torch.device('cuda:1')
+ d0 = torch.device("cuda:0")
+ d1 = torch.device("cuda:1")
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
@@ -553,10 +570,10 @@ class TestCudaMultiGPU(TestCase):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
- self.assertEqual(s0.device, torch.device('cuda:0'))
- self.assertEqual(e0.device, torch.device('cuda:0'))
- self.assertEqual(s1.device, torch.device('cuda:1'))
- self.assertEqual(e1.device, torch.device('cuda:1'))
+ self.assertEqual(s0.device, torch.device("cuda:0"))
+ self.assertEqual(e0.device, torch.device("cuda:0"))
+ self.assertEqual(s1.device, torch.device("cuda:1"))
+ self.assertEqual(e1.device, torch.device("cuda:1"))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
@@ -592,18 +609,17 @@ class TestCudaMultiGPU(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
- self.assertEqual(default_stream.device, torch.device('cuda:0'))
+ self.assertEqual(default_stream.device, torch.device("cuda:0"))
stream = torch.cuda.Stream(device=1)
- self.assertEqual(stream.device, torch.device('cuda:1'))
+ self.assertEqual(stream.device, torch.device("cuda:1"))
with torch.cuda.device(1):
- self.assertEqual(
- torch.cuda.current_stream().device, torch.device('cuda:1'))
+ self.assertEqual(torch.cuda.current_stream().device, torch.device("cuda:1"))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
- d0 = torch.device('cuda:0')
- d1 = torch.device('cuda:1')
+ d0 = torch.device("cuda:0")
+ d1 = torch.device("cuda:1")
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
@@ -642,8 +658,8 @@ class TestCudaMultiGPU(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
- d0 = torch.device('cuda:0')
- d1 = torch.device('cuda:1')
+ d0 = torch.device("cuda:0")
+ d1 = torch.device("cuda:1")
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
@@ -676,12 +692,12 @@ class TestCudaMultiGPU(TestCase):
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
- self.assertEqual(torch.device('cuda:0'), s0.device)
+ self.assertEqual(torch.device("cuda:0"), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
- self.assertEqual(torch.device('cuda:1'), s1.device)
+ self.assertEqual(torch.device("cuda:1"), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
@@ -754,7 +770,7 @@ class TestCudaMultiGPU(TestCase):
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
- with torch.cuda.device('cuda:1'):
+ with torch.cuda.device("cuda:1"):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCudaMultiGPU.FIFTY_MIL_CYCLES))
@@ -763,9 +779,11 @@ class TestCudaMultiGPU(TestCase):
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
- for sync_func in [TestCudaMultiGPU._stream_synchronize,
- TestCudaMultiGPU._event_synchronize,
- TestCudaMultiGPU._event_wait]:
+ for sync_func in [
+ TestCudaMultiGPU._stream_synchronize,
+ TestCudaMultiGPU._event_synchronize,
+ TestCudaMultiGPU._event_wait,
+ ]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
@@ -773,12 +791,13 @@ class TestCudaMultiGPU(TestCase):
t = threading.Thread(
target=TestCudaMultiGPU._test_stream_event_nogil,
- args=(self, sync_func, p2c, c2p))
+ args=(self, sync_func, p2c, c2p),
+ )
t.daemon = True
t.start()
c2p.get()
- with torch.cuda.device('cuda:0'):
+ with torch.cuda.device("cuda:0"):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCudaMultiGPU.FIFTY_MIL_CYCLES)
@@ -801,8 +820,8 @@ class TestCudaMultiGPU(TestCase):
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
- d0 = torch.device('cuda:0')
- d1 = torch.device('cuda:1')
+ d0 = torch.device("cuda:0")
+ d1 = torch.device("cuda:1")
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
@@ -827,8 +846,8 @@ class TestCudaMultiGPU(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
- d0 = torch.device('cuda:0')
- d1 = torch.device('cuda:1')
+ d0 = torch.device("cuda:0")
+ d1 = torch.device("cuda:1")
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
@@ -869,8 +888,8 @@ class TestCudaMultiGPU(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
- d0 = torch.device('cuda:0')
- d1 = torch.device('cuda:1')
+ d0 = torch.device("cuda:0")
+ d1 = torch.device("cuda:1")
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
@@ -934,8 +953,7 @@ class TestCudaMultiGPU(TestCase):
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
- ext_stream = torch.cuda.ExternalStream(
- stream_v, device=device)
+ ext_stream = torch.cuda.ExternalStream(stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@@ -956,7 +974,7 @@ class TestCudaMultiGPU(TestCase):
del t
t = torch.FloatTensor([2]).pin_memory()
- self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
+ self.assertNotEqual(t.data_ptr(), ptr, msg="allocation re-used too soon")
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
@@ -988,7 +1006,7 @@ class TestCudaMultiGPU(TestCase):
def _test(idx):
before_free_bytes, before_available_bytes = torch.cuda.mem_get_info(idx)
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
- t = torch.randn(1024 * 1024 * 8, device='cuda:' + str(idx))
+ t = torch.randn(1024 * 1024 * 8, device="cuda:" + str(idx))
if IS_JETSON:
# w/o syncing, mem_get_info will run before memory allocated has actually increased.
# This race condition causes consistent failure
@@ -1022,6 +1040,7 @@ class TestCudaMultiGPU(TestCase):
leak_gpu0()
except RuntimeError as e:
import re
+
assert re.match(regex, str(e)), str(e) + "\n does not match: \n" + regex
else:
# assertRaisesRegex does not pass with Python for Jetson,
@@ -1030,12 +1049,15 @@ class TestCudaMultiGPU(TestCase):
leak_gpu0()
if TEST_MULTIGPU:
+
@self.wrap_with_cuda_memory_check
def leak_gpu1():
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:1")))
- with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 1.+"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"CUDA driver API confirmed .+ on device 1.+"
+ ):
leak_gpu1()
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@@ -1071,8 +1093,8 @@ class TestCudaMultiGPU(TestCase):
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
- s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
- s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
+ s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.0
+ s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.0
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
@@ -1085,7 +1107,12 @@ class TestCudaMultiGPU(TestCase):
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
- subprocess.check_call([sys.executable, '-c', """\
+
+ subprocess.check_call(
+ [
+ sys.executable,
+ "-c",
+ """\
import torch
import threading
@@ -1096,7 +1123,9 @@ t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
-"""])
+""",
+ ]
+ )
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
@@ -1128,14 +1157,24 @@ t2.start()
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
- scaler = torch.cuda.amp.GradScaler(init_scale=2.)
+ scaler = torch.cuda.amp.GradScaler(init_scale=2.0)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
- outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
+ outputs = (
+ t1.clone(),
+ (t0.clone(), t1.clone()),
+ [t0.clone(), (t1.clone(), t0.clone())],
+ )
outputs = scaler.scale(outputs)
- self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
- outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
+ self.assertTrue(
+ outputs[0] == 8.0
+ and outputs[1][0] == 8.0
+ and outputs[1][1] == 8.0
+ and outputs[2][0] == 8.0
+ and outputs[2][1][0] == 8.0
+ and outputs[2][1][1] == 8.0
+ )
self.assertTrue(scaler._scale.device == t1.device)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@@ -1148,12 +1187,25 @@ t2.start()
dev1 = torch.device("cuda:1")
for enabled in True, False:
- mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
- _create_scaling_case()
- mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
- _create_scaling_models_optimizers(device=dev1)
-
- scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
+ (
+ mod_control0,
+ mod_scaling0,
+ opt_control0,
+ opt_scaling0,
+ data,
+ loss_fn,
+ skip_iter,
+ ) = _create_scaling_case()
+ (
+ mod_control1,
+ mod_scaling1,
+ opt_control1,
+ opt_scaling1,
+ ) = _create_scaling_models_optimizers(device=dev1)
+
+ scaler = torch.cuda.amp.GradScaler(
+ init_scale=128.0, growth_factor=2.0, enabled=enabled, growth_interval=1
+ )
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
@@ -1162,13 +1214,15 @@ t2.start()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
- loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
+ loss1 = loss_fn(
+ 0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1)
+ )
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
- model1[1].weight.grad.data.fill_(float('inf'))
+ model1[1].weight.grad.data.fill_(float("inf"))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
@@ -1178,11 +1232,20 @@ t2.start()
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
- self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
- self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
- self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
- self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
- float(i == skip_iter))
+ self.assertTrue(
+ len(scaler._found_inf_per_device(optimizer0)) == 1
+ )
+ self.assertTrue(
+ len(scaler._found_inf_per_device(optimizer1)) == 1
+ )
+ self.assertTrue(
+ scaler._found_inf_per_device(optimizer0)[dev0].item()
+ == 0.0
+ )
+ self.assertTrue(
+ scaler._found_inf_per_device(optimizer1)[dev1].item()
+ == float(i == skip_iter)
+ )
scaler.update()
else:
@@ -1196,25 +1259,41 @@ t2.start()
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
- self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
- scaler.get_backoff_factor()**1) if enabled else 1.0)
+ self.assertTrue(
+ scaler.get_scale()
+ == (
+ 128.0
+ * scaler.get_growth_factor() ** 3
+ * scaler.get_backoff_factor() ** 1
+ )
+ if enabled
+ else 1.0
+ )
# Copy mod_control1 and mod_scaling1 back the device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
- for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
- chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
+ for c, s in zip(
+ chain(mod_control0.parameters(), mod_control1.parameters()),
+ chain(mod_scaling0.parameters(), mod_scaling1.parameters()),
+ ):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
+
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertGreater(memory_allocated(0), current_alloc[0])
- self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
+ self.assertTrue(
+ all(
+ memory_allocated(torch.cuda.device(idx)) == current_alloc[idx]
+ for idx in range(1, device_count)
+ )
+ )
class TestCudaComm(TestCase):
@@ -1226,12 +1305,17 @@ class TestCudaComm(TestCase):
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
- if input.is_cuda and input.get_device() == i: # test not copying on same device
+ if (
+ input.is_cuda and input.get_device() == i
+ ): # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
- outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
+ outputs = [
+ torch.empty_like(input, device=0),
+ torch.empty_like(input, device=1),
+ ]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
@@ -1241,13 +1325,19 @@ class TestCudaComm(TestCase):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
- with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Exactly one of 'devices' and 'out'"
+ ):
comm.broadcast(input, (0, 1), out=outputs)
- with self.assertRaisesRegex(RuntimeError,
- r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected all output tensors to be CUDA tensors, but output tensor at index 1",
+ ):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
- with self.assertRaisesRegex(RuntimeError,
- r"Expected all output tensors to have same shape as the source .+ at index 1"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected all output tensors to have same shape as the source .+ at index 1",
+ ):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
@@ -1289,16 +1379,16 @@ class TestCudaComm(TestCase):
numel = 5
num_bytes = numel * 8
tensors = [
- self.genSparseTensor((2, 3), 2, 1, False, 'cuda', torch.float64)[0],
+ self.genSparseTensor((2, 3), 2, 1, False, "cuda", torch.float64)[0],
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
- self.genSparseTensor((2, 3), 2, 10, False, 'cuda', torch.float64)[0],
- self.genSparseTensor((2, 3), 2, 5, False, 'cuda', torch.float64)[0],
- self.genSparseTensor((3, 3), 2, 7, False, 'cuda', torch.int64)[0],
- self.genSparseTensor((2, 3), 2, 2, False, 'cuda', torch.float32)[0],
+ self.genSparseTensor((2, 3), 2, 10, False, "cuda", torch.float64)[0],
+ self.genSparseTensor((2, 3), 2, 5, False, "cuda", torch.float64)[0],
+ self.genSparseTensor((3, 3), 2, 7, False, "cuda", torch.int64)[0],
+ self.genSparseTensor((2, 3), 2, 2, False, "cuda", torch.float32)[0],
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
- self.genSparseTensor((2, 7), 2, 3, False, 'cuda', torch.int64)[0],
+ self.genSparseTensor((2, 7), 2, 3, False, "cuda", torch.int64)[0],
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
@@ -1323,7 +1413,7 @@ class TestCudaComm(TestCase):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
- torch.randn(5).double().cuda()
+ torch.randn(5).double().cuda(),
]
self._test_broadcast_coalesced(tensors, 256)
@@ -1364,16 +1454,16 @@ class TestCudaComm(TestCase):
numel = 5
num_bytes = numel * 8
tensors = [
- self.genSparseTensor((2, 3), 2, 1, False, 'cuda', torch.float64)[0],
+ self.genSparseTensor((2, 3), 2, 1, False, "cuda", torch.float64)[0],
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
- self.genSparseTensor((2, 3), 2, 10, False, 'cuda', torch.float64)[0],
- self.genSparseTensor((2, 3), 2, 5, False, 'cuda', torch.float64)[0],
- self.genSparseTensor((3, 3), 2, 7, False, 'cuda', torch.int64)[0],
- self.genSparseTensor((2, 3), 2, 2, False, 'cuda', torch.float32)[0],
+ self.genSparseTensor((2, 3), 2, 10, False, "cuda", torch.float64)[0],
+ self.genSparseTensor((2, 3), 2, 5, False, "cuda", torch.float64)[0],
+ self.genSparseTensor((3, 3), 2, 7, False, "cuda", torch.int64)[0],
+ self.genSparseTensor((2, 3), 2, 2, False, "cuda", torch.float32)[0],
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
- self.genSparseTensor((2, 7), 2, 3, False, 'cuda', torch.int64)[0],
+ self.genSparseTensor((2, 7), 2, 3, False, "cuda", torch.int64)[0],
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
@@ -1412,7 +1502,9 @@ class TestCudaComm(TestCase):
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
- self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
+ self.assertEqual(
+ r.data_ptr(), input.data_ptr()
+ ) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
@@ -1429,20 +1521,38 @@ class TestCudaComm(TestCase):
# test error msg
if chunk_sizes is not None:
- with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
- comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected devices and chunk_sizes to be of same length"
+ ):
+ comm.scatter(
+ input,
+ [0 for _ in range(len(chunk_sizes) + 1)],
+ dim=dim,
+ chunk_sizes=chunk_sizes,
+ )
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
- with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected at least one device to scatter to"
+ ):
comm.scatter(input, (), dim=dim)
- with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected at least one output tensor to scatter to"
+ ):
comm.scatter(input, dim=dim, out=[])
- with self.assertRaisesRegex(RuntimeError,
- r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected all output tensors to be CUDA tensors, but output tensor at index 0",
+ ):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
- with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Output tensor at index 0 has incorrect shape"
+ ):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
- with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Total size for output tensors along scatter dim \d+ does not match",
+ ):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
@@ -1480,13 +1590,13 @@ class TestCudaComm(TestCase):
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
- destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
+ destinations = [None, torch.device("cuda:0"), torch.device("cpu")]
if torch.cuda.device_count() > 2:
- destinations.append(torch.device('cuda:2'))
+ destinations.append(torch.device("cuda:2"))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
- expected_device = torch.device('cuda', torch.cuda.current_device())
+ expected_device = torch.device("cuda", torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
@@ -1507,15 +1617,31 @@ class TestCudaComm(TestCase):
self.assertEqual(result[tuple(index)], y)
# test error msg
- with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
- comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
- with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"'destination' must not be specified"
+ ):
+ comm.gather(
+ (x, y),
+ dim,
+ destination="cpu",
+ out=torch.empty(expected_size, device="cpu"),
+ )
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected at least one tensor to gather from"
+ ):
comm.gather(())
- with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Expected all input tensors to be CUDA tensors, "
+ ):
comm.gather((x.cpu(), y))
- with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected all input tensors to have the same number of dimensions",
+ ):
comm.gather((x, y.unsqueeze(0)))
- with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"Input tensor at index 1 has invalid shape"
+ ):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
@@ -1532,7 +1658,9 @@ class TestCudaComm(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
- nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
+ nhwc = torch.randn((10, 3, 32, 32), device="cpu").contiguous(
+ memory_format=torch.channels_last
+ )
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
@@ -1541,7 +1669,6 @@ class TestCudaComm(TestCase):
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
-
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
@@ -1589,8 +1716,8 @@ class TestCudaComm(TestCase):
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
- fields = ['a', 'b']
- TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
+ fields = ["a", "b"]
+ TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
@@ -1603,10 +1730,10 @@ class TestCudaComm(TestCase):
outputs = [out1, out2]
- out = scatter_gather.gather(outputs, 'cpu') # test on CPU
+ out = scatter_gather.gather(outputs, "cpu") # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
- cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
+ cat = torch.cat((outputs[0][i].to("cpu"), outputs[1][i].to("cpu")))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
@@ -1635,15 +1762,15 @@ class TestCudaComm(TestCase):
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
- out = scatter_gather.gather(outputs, 'cpu') # test on CPU
+ out = scatter_gather.gather(outputs, "cpu") # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
- cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
+ cat = torch.cat((outputs[0][i].to("cpu"), outputs[1][i].to("cpu")))
self.assertTrue(torch.equal(x, cat))
instantiate_parametrized_tests(TestCudaMultiGPU)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/test_cuda_nvml_based_avail.py b/test/test_cuda_nvml_based_avail.py
index 809e8026dc..4cfe8dfc0e 100644
--- a/test/test_cuda_nvml_based_avail.py
+++ b/test/test_cuda_nvml_based_avail.py
@@ -1,19 +1,28 @@
# Owner(s): ["module: cuda"]
+import multiprocessing
import os
import sys
-import multiprocessing
-import torch
import unittest
from unittest.mock import patch
+import torch
+
# NOTE: Each of the tests in this module need to be run in a brand new process to ensure CUDA is uninitialized
# prior to test initiation.
with patch.dict(os.environ, {"PYTORCH_NVML_BASED_CUDA_CHECK": "1"}):
# Before executing the desired tests, we need to disable CUDA initialization and fork_handler additions that would
# otherwise be triggered by the `torch.testing._internal.common_utils` module import
- from torch.testing._internal.common_utils import (parametrize, instantiate_parametrized_tests, run_tests, TestCase,
- IS_WINDOWS, IS_JETSON, NoTest)
+ from torch.testing._internal.common_utils import (
+ instantiate_parametrized_tests,
+ IS_JETSON,
+ IS_WINDOWS,
+ NoTest,
+ parametrize,
+ run_tests,
+ TestCase,
+ )
+
# NOTE: Because `remove_device_and_dtype_suffixes` initializes CUDA context (triggered via the import of
# `torch.testing._internal.common_device_type` which imports `torch.testing._internal.common_cuda`) we need
# to bypass that method here which should be irrelevant to the parameterized tests in this module.
@@ -21,7 +30,7 @@ with patch.dict(os.environ, {"PYTORCH_NVML_BASED_CUDA_CHECK": "1"}):
TEST_CUDA = torch.cuda.is_available()
if not TEST_CUDA:
- print('CUDA not available, skipping tests', file=sys.stderr)
+ print("CUDA not available, skipping tests", file=sys.stderr)
TestCase = NoTest # type: ignore[misc, assignment] # noqa: F811
@@ -30,11 +39,14 @@ class TestExtendedCUDAIsAvail(TestCase):
SUBPROCESS_REMINDER_MSG = (
"\n REMINDER: Tests defined in test_cuda_nvml_based_avail.py must be run in a process "
"where there CUDA Driver API has not been initialized. Before further debugging, ensure you are either using "
- "run_test.py or have added --subprocess to run each test in a different subprocess.")
+ "run_test.py or have added --subprocess to run each test in a different subprocess."
+ )
def setUp(self):
super().setUp()
- torch.cuda._cached_device_count = None # clear the lru_cache on this method before our test
+ torch.cuda._cached_device_count = (
+ None # clear the lru_cache on this method before our test
+ )
@staticmethod
def in_bad_fork_test() -> bool:
@@ -47,31 +59,33 @@ class TestExtendedCUDAIsAvail(TestCase):
# If the NVML-based assessment is attempted but fails, the CUDA Runtime API check should be executed
@unittest.skipIf(IS_WINDOWS, "Needs fork")
@parametrize("nvml_avail", [True, False])
- @parametrize("avoid_init", ['1', '0', None])
+ @parametrize("avoid_init", ["1", "0", None])
def test_cuda_is_available(self, avoid_init, nvml_avail):
- if IS_JETSON and nvml_avail and avoid_init == '1':
- self.skipTest('Not working for Jetson')
+ if IS_JETSON and nvml_avail and avoid_init == "1":
+ self.skipTest("Not working for Jetson")
patch_env = {"PYTORCH_NVML_BASED_CUDA_CHECK": avoid_init} if avoid_init else {}
with patch.dict(os.environ, **patch_env):
if nvml_avail:
_ = torch.cuda.is_available()
else:
- with patch.object(torch.cuda, '_device_count_nvml', return_value=-1):
+ with patch.object(torch.cuda, "_device_count_nvml", return_value=-1):
_ = torch.cuda.is_available()
with multiprocessing.get_context("fork").Pool(1) as pool:
in_bad_fork = pool.apply(TestExtendedCUDAIsAvail.in_bad_fork_test)
- if os.getenv('PYTORCH_NVML_BASED_CUDA_CHECK') == '1' and nvml_avail:
- self.assertFalse(in_bad_fork, TestExtendedCUDAIsAvail.SUBPROCESS_REMINDER_MSG)
+ if os.getenv("PYTORCH_NVML_BASED_CUDA_CHECK") == "1" and nvml_avail:
+ self.assertFalse(
+ in_bad_fork, TestExtendedCUDAIsAvail.SUBPROCESS_REMINDER_MSG
+ )
else:
assert in_bad_fork
@torch.testing._internal.common_utils.markDynamoStrictTest
class TestVisibleDeviceParses(TestCase):
-
def test_env_var_parsing(self):
def _parse_visible_devices(val):
from torch.cuda import _parse_visible_devices as _pvd
+
with patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": val}, clear=True):
return _pvd()
@@ -96,39 +110,57 @@ class TestVisibleDeviceParses(TestCase):
def test_partial_uuid_resolver(self):
from torch.cuda import _transform_uuid_to_ordinals
- uuids = ['GPU-9942190a-aa31-4ff1-4aa9-c388d80f85f1',
- 'GPU-9e8d35e3-a134-0fdd-0e01-23811fdbd293',
- 'GPU-e429a63e-c61c-4795-b757-5132caeb8e70',
- 'GPU-eee1dfbc-0a0f-6ad8-5ff6-dc942a8b9d98',
- 'GPU-bbcd6503-5150-4e92-c266-97cc4390d04e',
- 'GPU-472ea263-58d7-410d-cc82-f7fdece5bd28',
- 'GPU-e56257c4-947f-6a5b-7ec9-0f45567ccf4e',
- 'GPU-1c20e77d-1c1a-d9ed-fe37-18b8466a78ad']
+
+ uuids = [
+ "GPU-9942190a-aa31-4ff1-4aa9-c388d80f85f1",
+ "GPU-9e8d35e3-a134-0fdd-0e01-23811fdbd293",
+ "GPU-e429a63e-c61c-4795-b757-5132caeb8e70",
+ "GPU-eee1dfbc-0a0f-6ad8-5ff6-dc942a8b9d98",
+ "GPU-bbcd6503-5150-4e92-c266-97cc4390d04e",
+ "GPU-472ea263-58d7-410d-cc82-f7fdece5bd28",
+ "GPU-e56257c4-947f-6a5b-7ec9-0f45567ccf4e",
+ "GPU-1c20e77d-1c1a-d9ed-fe37-18b8466a78ad",
+ ]
self.assertEqual(_transform_uuid_to_ordinals(["GPU-9e8d35e3"], uuids), [1])
- self.assertEqual(_transform_uuid_to_ordinals(["GPU-e4", "GPU-9e8d35e3"], uuids), [2, 1])
- self.assertEqual(_transform_uuid_to_ordinals("GPU-9e8d35e3,GPU-1,GPU-47".split(","), uuids), [1, 7, 5])
+ self.assertEqual(
+ _transform_uuid_to_ordinals(["GPU-e4", "GPU-9e8d35e3"], uuids), [2, 1]
+ )
+ self.assertEqual(
+ _transform_uuid_to_ordinals("GPU-9e8d35e3,GPU-1,GPU-47".split(","), uuids),
+ [1, 7, 5],
+ )
# First invalid UUID aborts parsing
- self.assertEqual(_transform_uuid_to_ordinals(["GPU-123", "GPU-9e8d35e3"], uuids), [])
- self.assertEqual(_transform_uuid_to_ordinals(["GPU-9e8d35e3", "GPU-123", "GPU-47"], uuids), [1])
+ self.assertEqual(
+ _transform_uuid_to_ordinals(["GPU-123", "GPU-9e8d35e3"], uuids), []
+ )
+ self.assertEqual(
+ _transform_uuid_to_ordinals(["GPU-9e8d35e3", "GPU-123", "GPU-47"], uuids),
+ [1],
+ )
# First ambigous UUID aborts parsing
- self.assertEqual(_transform_uuid_to_ordinals(["GPU-9e8d35e3", "GPU-e", "GPU-47"], uuids), [1])
+ self.assertEqual(
+ _transform_uuid_to_ordinals(["GPU-9e8d35e3", "GPU-e", "GPU-47"], uuids), [1]
+ )
# Duplicate UUIDs result in empty set
- self.assertEqual(_transform_uuid_to_ordinals(["GPU-9e8d35e3", "GPU-47", "GPU-9e8"], uuids), [])
+ self.assertEqual(
+ _transform_uuid_to_ordinals(["GPU-9e8d35e3", "GPU-47", "GPU-9e8"], uuids),
+ [],
+ )
def test_ordinal_parse_visible_devices(self):
def _device_count_nvml(val):
from torch.cuda import _device_count_nvml as _dc
+
with patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": val}, clear=True):
return _dc()
- with patch.object(torch.cuda, '_raw_device_count_nvml', return_value=2):
+ with patch.object(torch.cuda, "_raw_device_count_nvml", return_value=2):
self.assertEqual(_device_count_nvml("1, 0"), 2)
# Ordinal out of bounds aborts parsing
self.assertEqual(_device_count_nvml("1, 5, 0"), 1)
-
instantiate_parametrized_tests(TestExtendedCUDAIsAvail)
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
diff --git a/test/test_cuda_primary_ctx.py b/test/test_cuda_primary_ctx.py
index 089d07678c..333d4bbc15 100644
--- a/test/test_cuda_primary_ctx.py
+++ b/test/test_cuda_primary_ctx.py
@@ -1,15 +1,21 @@
# Owner(s): ["module: cuda"]
-import torch
-from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocmVersionLessThan, NoTest
-from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
import sys
import unittest
+import torch
+from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
+from torch.testing._internal.common_utils import (
+ NoTest,
+ run_tests,
+ skipIfRocmVersionLessThan,
+ TestCase,
+)
+
# NOTE: this needs to be run in a brand new process
if not TEST_CUDA:
- print('CUDA not available, skipping tests', file=sys.stderr)
+ print("CUDA not available, skipping tests", file=sys.stderr)
TestCase = NoTest # noqa: F811
@@ -18,17 +24,21 @@ class TestCudaPrimaryCtx(TestCase):
CTX_ALREADY_CREATED_ERR_MSG = (
"Tests defined in test_cuda_primary_ctx.py must be run in a process "
"where CUDA contexts are never created. Use either run_test.py or add "
- "--subprocess to run each test in a different subprocess.")
+ "--subprocess to run each test in a different subprocess."
+ )
@skipIfRocmVersionLessThan((4, 4, 21504))
def setUp(self):
for device in range(torch.cuda.device_count()):
# Ensure context has not been created beforehand
- self.assertFalse(torch._C._cuda_hasPrimaryContext(device), TestCudaPrimaryCtx.CTX_ALREADY_CREATED_ERR_MSG)
+ self.assertFalse(
+ torch._C._cuda_hasPrimaryContext(device),
+ TestCudaPrimaryCtx.CTX_ALREADY_CREATED_ERR_MSG,
+ )
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_str_repr(self):
- x = torch.randn(1, device='cuda:1')
+ x = torch.randn(1, device="cuda:1")
# We should have only created context on 'cuda:1'
self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
@@ -43,13 +53,13 @@ class TestCudaPrimaryCtx(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy(self):
- x = torch.randn(1, device='cuda:1')
+ x = torch.randn(1, device="cuda:1")
# We should have only created context on 'cuda:1'
self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
- y = torch.randn(1, device='cpu')
+ y = torch.randn(1, device="cpu")
y.copy_(x)
# We should still have only created context on 'cuda:1'
@@ -58,7 +68,7 @@ class TestCudaPrimaryCtx(TestCase):
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_pin_memory(self):
- x = torch.randn(1, device='cuda:1')
+ x = torch.randn(1, device="cuda:1")
# We should have only created context on 'cuda:1'
self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
@@ -70,7 +80,7 @@ class TestCudaPrimaryCtx(TestCase):
self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
- x = torch.randn(3, device='cpu').pin_memory()
+ x = torch.randn(3, device="cpu").pin_memory()
# We should still have only created context on 'cuda:1'
self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
@@ -82,19 +92,19 @@ class TestCudaPrimaryCtx(TestCase):
self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
- x = torch.randn(3, device='cpu', pin_memory=True)
+ x = torch.randn(3, device="cpu", pin_memory=True)
# We should still have only created context on 'cuda:1'
self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
- x = torch.zeros(3, device='cpu', pin_memory=True)
+ x = torch.zeros(3, device="cpu", pin_memory=True)
# We should still have only created context on 'cuda:1'
self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
- x = torch.empty(3, device='cpu', pin_memory=True)
+ x = torch.empty(3, device="cpu", pin_memory=True)
# We should still have only created context on 'cuda:1'
self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
@@ -106,5 +116,6 @@ class TestCudaPrimaryCtx(TestCase):
self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
self.assertTrue(torch._C._cuda_hasPrimaryContext(1))
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/test_cuda_sanitizer.py b/test/test_cuda_sanitizer.py
index a117346c7a..4c210fcdb8 100644
--- a/test/test_cuda_sanitizer.py
+++ b/test/test_cuda_sanitizer.py
@@ -7,8 +7,8 @@ from typing import List
import torch
import torch.cuda._sanitizer as csan
-from torch.cuda._sanitizer import StreamId, DataPtr, EventId
-from torch.testing._internal.common_utils import TestCase, run_tests, NoTest, TEST_CUDA
+from torch.cuda._sanitizer import DataPtr, EventId, StreamId
+from torch.testing._internal.common_utils import NoTest, run_tests, TEST_CUDA, TestCase
if not TEST_CUDA:
diff --git a/test/test_cuda_trace.py b/test/test_cuda_trace.py
index 0e2f457a9d..2cbad3d888 100644
--- a/test/test_cuda_trace.py
+++ b/test/test_cuda_trace.py
@@ -6,7 +6,7 @@ import unittest.mock
import torch
import torch.cuda._gpu_trace as gpu_trace
-from torch.testing._internal.common_utils import TestCase, run_tests, NoTest, TEST_CUDA
+from torch.testing._internal.common_utils import NoTest, run_tests, TEST_CUDA, TestCase
# NOTE: Each test needs to be run in a brand new process, to reset the registered hooks
# and make sure the CUDA streams are initialized for each test that uses them.
|
2.41.0
|
8b04b26fbf160874f7f1a9db61e49801fd4fcbe
|
Thu, 25 Apr 2024 18:42:09 +0000
|
[PATCH 0671/1000] Forward fix for D56289438 (#124882)
|
Summary: D56289438 from OSS breaks test deeplearning/aot_inductor/cpu/test:cpu_lowering_utils_test - test_cpu_lower_merge_with_ibb_3 (deeplearning.aot_inductor.cpu.test.test_lowering_utils.CPULoweringTest). The issue is that aten.cat can arrive wrapped in a functools.partial, and the assertion should not reject that case outright. Test Plan: ``` deeplearning/aot_inductor/cpu/test:cpu_lowering_utils_test - test_cpu_lower_merge_with_ibb_3 ``` Differential Revision: D56541352 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124882 Approved by: https://github.com/chenyang78
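For context, a minimal sketch of the unwrap step the fix below performs (the helper names here are illustrative, not the actual lowering code): functools.partial stores the wrapped callable in .func, so the identity check has to look through the wrapper before comparing against aten.cat.
```
import functools

def resolve_target(target):
    # functools.partial keeps the underlying callable in .func;
    # unwrap it so the comparison sees the real op.
    return target.func if isinstance(target, functools.partial) else target

def cat(tensors, dim=0):
    # stand-in for aten.cat in this sketch
    return tensors

wrapped = functools.partial(cat, dim=1)
assert resolve_target(wrapped) is cat   # partial-wrapped target is accepted
assert resolve_target(cat) is cat       # bare target still works
```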
|
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 6d6d513977..9e366d9b26 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -3814,7 +3814,12 @@ class ConcatKernel(NopKernel):
break
any_input_is_storage_and_layout = any(is_storage_and_layout(x) for x in inputs)
fx_node_args = V.graph.current_node.args[0]
- assert V.graph.current_node.target in [aten.cat, aten.cat.default]
+ target_func = (
+ V.graph.current_node.target.func
+ if isinstance(V.graph.current_node.target, functools.partial)
+ else V.graph.current_node.target
+ )
+ assert target_func in [aten.cat, aten.cat.default]
assert isinstance(fx_node_args, list)
# If any of the inputs has meta tensor and the meta tensor is in CL format, use CL format for the output
if any_input_is_storage_and_layout is False and any(
|
2.41.0
|
c515a14fd153bc8d108ec13564366f17ce8c1d0
|
Thu, 25 Apr 2024 21:55:26 +0000
|
[PATCH 0674/1000] [caffe2] Add build configuration for linux-arm64 (#124618)
|
Summary: This diff adds a new build configuration that works on linux-arm64. Test Plan: Before: ``` $ buck2 build @//arvr/mode/linux/jetson/opt :c10_ovrsource BUILD FAILED fbsource//xplat/caffe2/c10:c10_ovrsource is incompatible with cfg:linux-arm64-fbcode-platform010-aarch64-no-san#d47c4385e5d19fe0 (ovr_config//os:android unsatisfied), check the target's compatibility attributes ``` After: ``` $ buck2 build @//arvr/mode/linux/jetson/opt :c10_ovrsource BUILD SUCCEEDED ``` Differential Revision: D56088211 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124618 Approved by: https://github.com/izaitsevfb
|
diff --git a/c10/ovrsource_defs.bzl b/c10/ovrsource_defs.bzl
index 0ca1f72863..66b973de76 100644
--- a/c10/ovrsource_defs.bzl
+++ b/c10/ovrsource_defs.bzl
@@ -8,6 +8,7 @@ cpu_supported_platforms = [
"ovr_config//os:macos",
"ovr_config//os:windows-x86_64",
"ovr_config//runtime:arm64-linux-ubuntu-neon",
+ "ovr_config//os:linux-arm64",
]
cuda_supported_platforms = [
|
2.41.0
|
0c5859aeb73c191eb14361d2d91a97b15068830
|
Thu, 25 Apr 2024 22:07:24 +0000
|
[PATCH 0675/1000] [dynamo] Add support for DELETE_SUBSCR (#123526)
|
Fixes #123317 Co-authored-by: Jason Ansel <jansel@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123526 Approved by: https://github.com/jansel
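For reference, a small illustration in plain CPython (no Dynamo involved) of the bytecode this commit handles: `del x[key]` compiles to DELETE_SUBSCR, which behaves like calling `__delitem__` on the container — the same mapping the new handler in symbolic_convert.py mirrors.
```
import dis

def drop_key(d):
    del d["a"]              # compiles to DELETE_SUBSCR
    return d["b"] + 1

dis.dis(drop_key)           # the listing shows DELETE_SUBSCR for the del statement

d = {"a": 1, "b": 1}
d.__delitem__("a")          # equivalent of `del d["a"]`
print(drop_key({"a": 1, "b": 1}))   # -> 2
```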
|
diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py
index 995c8754f3..93ae6b89a0 100644
--- a/test/dynamo/test_repros.py
+++ b/test/dynamo/test_repros.py
@@ -3300,6 +3300,29 @@ class ReproTests(torch._dynamo.test_case.TestCase):
obj1 = MyObj(x, x)
self.assertRaises(AttributeError, lambda: fn(x, obj1))
+ def test_delsubscr(self):
+ @torch.compile(backend="eager")
+ def fn(x):
+ del x["a"]
+ y = x["b"] + 1
+ return y
+
+ x = {"a": torch.tensor([1]), "b": torch.tensor([1])}
+ result = fn(x)
+ self.assertFalse(hasattr(x, "a"))
+ self.assertEqual(result.item(), 2)
+
+ def test_delsubscr_raises(self):
+ @torch.compile(backend="eager")
+ def fn(x):
+ del x["a"]
+ y = x["a"] + 1 # should raise KeyError
+ return y
+
+ x = {"a": torch.tensor([1]), "b": torch.tensor([1])}
+ # FIXME It should be KeyError here
+ self.assertRaises(torch._dynamo.exc.InternalTorchDynamoError, lambda: fn(x))
+
def test_attached_attribute_in_dir(self):
class MyModule(torch.nn.Module):
def __init__(self):
diff --git a/test/dynamo_expected_failures/TestAOTModuleSimplified.test_aot_module_simplified_fake_tensor_gm_raises b/test/dynamo_expected_failures/TestAOTModuleSimplified.test_aot_module_simplified_fake_tensor_gm_raises
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestReductionsCPU.test_std_vs_numpy_cpu_complex128 b/test/dynamo_expected_failures/TestReductionsCPU.test_std_vs_numpy_cpu_complex128
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestReductionsCPU.test_std_vs_numpy_cpu_complex64 b/test/dynamo_expected_failures/TestReductionsCPU.test_std_vs_numpy_cpu_complex64
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestReductionsCPU.test_std_vs_numpy_cpu_float32 b/test/dynamo_expected_failures/TestReductionsCPU.test_std_vs_numpy_cpu_float32
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestReductionsCPU.test_std_vs_numpy_cpu_float64 b/test/dynamo_expected_failures/TestReductionsCPU.test_std_vs_numpy_cpu_float64
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestReductionsCPU.test_var_vs_numpy_cpu_complex128 b/test/dynamo_expected_failures/TestReductionsCPU.test_var_vs_numpy_cpu_complex128
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestReductionsCPU.test_var_vs_numpy_cpu_complex64 b/test/dynamo_expected_failures/TestReductionsCPU.test_var_vs_numpy_cpu_complex64
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestReductionsCPU.test_var_vs_numpy_cpu_float32 b/test/dynamo_expected_failures/TestReductionsCPU.test_var_vs_numpy_cpu_float32
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_expected_failures/TestReductionsCPU.test_var_vs_numpy_cpu_float64 b/test/dynamo_expected_failures/TestReductionsCPU.test_var_vs_numpy_cpu_float64
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/dynamo_skips/TestLoadStateDict.test_load_state_dict_BC_swap_True b/test/dynamo_skips/TestLoadStateDict.test_load_state_dict_BC_swap_True
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index 0c8e5898e6..78d5b08e68 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -1420,6 +1420,10 @@ class InstructionTranslatorBase(
val, obj, key = self.popn(3)
result = obj.call_method(self, "__setitem__", [key, val], {})
+ def DELETE_SUBSCR(self, inst):
+ obj, key = self.popn(2)
+ obj.call_method(self, "__delitem__", [key], {})
+
def BUILD_TUPLE(self, inst):
items = self.popn(inst.argval)
self.push(TupleVariable(items))
diff --git a/torch/_dynamo/variables/dicts.py b/torch/_dynamo/variables/dicts.py
index f1f7df8ce7..db97004d54 100644
--- a/torch/_dynamo/variables/dicts.py
+++ b/torch/_dynamo/variables/dicts.py
@@ -249,6 +249,10 @@ class ConstDictVariable(VariableTracker):
tx.output.side_effects.mutation(self)
self.items[Hashable(args[0])] = args[1]
return ConstantVariable.create(None)
+ elif name == "__delitem__" and arg_hashable and self.mutable_local:
+ tx.output.side_effects.mutation(self)
+ self.items.__delitem__(Hashable(args[0]))
+ return ConstantVariable.create(None)
elif name in ("pop", "get") and len(args) in (1, 2) and args[0] not in self:
# missing item, return the default value
if len(args) == 1:
@@ -889,18 +893,13 @@ class PythonSysModulesVariable(VariableTracker):
def call_method(
self, tx, name, args: List[VariableTracker], kwargs: Dict[str, VariableTracker]
):
- from .builder import VariableBuilder
-
if name == "__getitem__":
return self.call_getitem(tx, *args, **kwargs)
elif name == "get":
return self.call_get(tx, *args, **kwargs)
elif name == "__contains__":
return self.call_contains(tx, *args, **kwargs)
-
- # Fallback to dict implementation
- real_dict = VariableBuilder(tx, self.source)(sys.modules)
- return real_dict.call_method(tx, name, args, kwargs)
+ unimplemented(f"sys.modules.{name}(*{args}, **{kwargs})")
def _contains_helper(self, tx, key: VariableTracker):
k = key.as_python_constant()
|
2.41.0
|
db42e7688e85ab41803032099c50e265c275c83
|
Thu, 25 Apr 2024 22:24:08 +0000
|
[PATCH 0676/1000] [EZ][GHF] Rephrase cancelled message (#124947)
|
To encourage people to reissue the command if the merge timed out. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124947 Approved by: https://github.com/kit1980, https://github.com/clee2000
|
diff --git a/.github/scripts/comment_on_pr.py b/.github/scripts/comment_on_pr.py
index 88edcce27c..57fce4bf97 100644
--- a/.github/scripts/comment_on_pr.py
+++ b/.github/scripts/comment_on_pr.py
@@ -23,8 +23,10 @@ def main() -> None:
job_link = f"[job]({run_url})" if run_url is not None else "job"
msg = (
- f"The {args.action} {job_link} was canceled. If you believe this is a mistake,"
- + f" then you can re trigger it through [pytorch-bot]({BOT_COMMANDS_WIKI})."
+ f"The {args.action} {job_link} was canceled or timed out. This most often happen if two merge requests were issued"
+ + " for the same PR, or if merge job was waiting for more than 6 hours for tests to finish."
+ + " In later case, please do not hesitate to reissue the merge command\n"
+ + f" For more information see [pytorch-bot wiki]({BOT_COMMANDS_WIKI})."
)
gh_post_pr_comment(org, project, args.pr_num, msg)
|
2.41.0
|
259e5d0e07203122f76009c3d81afd42f0e89ed
|
Wed, 24 Apr 2024 15:30:42 -0700
|
[PATCH 0677/1000] [inductor] Specialize on unguarded alignment of example inputs (#123319)
|
When inductor generates triton code, the triton code can either assume that the inputs given to it are aligned or unaligned. If they are aligned, triton can use more efficient instructions (like vectorized loads or tensor cores). However, if we generate "aligned" code and pass in unaligned inputs, the triton code will error out; to fix this, we clone unaligned inputs that are passed to triton kernels that expect aligned inputs. This can lead to excessive clones if we have inputs that are not expected to be aligned. In this PR, we use the example input to decide whether the generated triton code should assume alignment or not. If the example input is aligned, then we will generate triton code that assumes alignment; if at runtime we receive an unaligned input, we'll make a clone. Meanwhile, if the example input is not aligned, the generated triton code will not assume inputs are aligned and we won't ever need to clone. Note that the alignment of the inputs is not guarded on; we found that adding guards on tensor offsets (a) was slow in cases where we do a lot of comparisons on tensor offsets, and (b) led to a lot of recompilations. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123319 Approved by: https://github.com/eellison
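As a rough sketch of the alignment test this change keys off (it mirrors the tensor_is_aligned helper added in the diff below; the function name used here is illustrative): a tensor counts as aligned when its storage offset, converted to bytes, is a multiple of the 16-byte ALIGNMENT constant.
```
import torch

ALIGNMENT = 16  # bytes, matching the constant added in torch/_inductor/utils.py

def storage_offset_is_aligned(t: torch.Tensor) -> bool:
    # storage_offset() is in elements; convert to bytes via the element size
    return (t.storage_offset() * t.element_size()) % ALIGNMENT == 0

base = torch.randn(64 * 64 + 64, dtype=torch.float32)
aligned = torch.as_strided(base, (64, 64), (64, 1), 4)    # 4 elems * 4 bytes = 16 -> aligned
unaligned = torch.as_strided(base, (64, 64), (64, 1), 1)  # 1 elem * 4 bytes = 4 -> unaligned
print(storage_offset_is_aligned(aligned), storage_offset_is_aligned(unaligned))  # True False
```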
|
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 9614eae4f3..8928fad6bd 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -9044,6 +9044,110 @@ class CommonTemplate:
self.common(fn, (inp, offsets), check_lowp=False)
+ @requires_gpu()
+ @config.patch(assume_aligned_inputs=False)
+ def test_config_option_dont_assume_alignment(self):
+ def fn(x: torch.Tensor) -> torch.Tensor:
+ return x.sin() + x.cos()
+
+ # Inductor specializes on the (unguarded) alignment of the initial input.
+ # Make sure that for different configurations, nothing breaks.
+ for offset in (0, 1, 2, 3, 4):
+ base = torch.randn(64 * 64 + 64, dtype=torch.float32, device=GPU_TYPE)
+ inp = torch.as_strided(base, (64, 64), (64, 1), offset)
+ torch._dynamo.reset()
+ fn_c = torch.compile(fn)
+
+ ref = fn(inp)
+ res = fn_c(inp)
+ self.assertEqual(ref, res)
+
+ for offset2 in (0, 1, 2, 3, 4):
+ base2 = torch.randn(64 * 64 + 64, dtype=torch.float32, device=GPU_TYPE)
+ inp2 = torch.as_strided(base, (64, 64), (64, 1), offset2)
+ ref2 = fn(inp2)
+ res2 = fn_c(inp2)
+ self.assertEqual(ref2, res2)
+
+ @requires_gpu()
+ @config.patch(assume_aligned_inputs=False)
+ def test_config_option_dont_assume_alignment_recompiles(self):
+ # Inputs:
+ # 1. (32, 32) shape
+ # 2. (64, 64) shape -> causes a recompile
+ # 3. (64, 64) shape with different storage offset -> should NOT cause a recompile
+ failed_guards = []
+
+ def fail(guard):
+ nonlocal failed_guards
+ failed_guards.append(guard)
+
+ def fn(x: torch.Tensor) -> torch.Tensor:
+ return x.sin() + x.cos()
+
+ base = torch.randn(64 * 64 + 64, dtype=torch.float32, device=GPU_TYPE)
+
+ inp1 = torch.as_strided(base, (32, 32), (32, 1), 4)
+ inp2 = torch.as_strided(base, (64, 64), (64, 1), 4)
+ inp3 = torch.as_strided(base, (64, 64), (64, 1), 5)
+
+ torch._dynamo.reset()
+
+ fn_c = torch._dynamo.optimize("inductor", guard_fail_fn=fail)(fn)
+
+ ref1 = fn(inp1)
+ res1 = fn_c(inp1)
+ self.assertEqual(ref1, res1)
+ self.assertEqual(0, len(failed_guards))
+
+ ref2 = fn(inp2)
+ res2 = fn_c(inp2)
+ self.assertEqual(ref2, res2)
+ # if dynamic shapes isn't already turned on, we might have a guard failure as we turn
+ # on dynamic shapes
+ self.assertLessEqual(len(failed_guards), 1)
+ failed_guard_count_iteration_2 = len(failed_guards)
+
+ failed_guards = []
+ ref3 = fn(inp3)
+ res3 = fn_c(inp3)
+ self.assertEqual(ref3, res3)
+ # we might still have the dynamics shapes failure, but offset change shouldn't be guarded on
+ # see Note: [Input Alignment handling in Inductor]
+ self.assertLessEqual(len(failed_guards), failed_guard_count_iteration_2)
+
+ @requires_gpu()
+ @config.patch(assume_aligned_inputs=False)
+ def test_config_option_dont_assume_alignment_cudagraphs(self):
+ def fn(x):
+ return x.cos() * x.sin()
+
+ fn_c = torch.compile(fn, mode="reduce-overhead", dynamic=True)
+
+ for size, stride, offset in (
+ ((32, 32), (32, 1), 4),
+ ((48, 48), (48, 1), 4),
+ ((64, 64), (64, 1), 5),
+ ):
+ torch.manual_seed(42)
+ base = torch.randn(64 * 64 + 64, dtype=torch.float32, device=GPU_TYPE)
+ torch.manual_seed(42)
+ base_ref = torch.randn(64 * 64 + 64, dtype=torch.float32, device=GPU_TYPE)
+
+ inp = torch.as_strided(base, size, stride, offset)
+ inp_ref = torch.as_strided(base_ref, size, stride, offset)
+
+ inp.requires_grad_(True)
+ inp_ref.requires_grad_(True)
+
+ res = fn_c(inp)
+ ref = fn(inp_ref)
+ self.assertEqual(ref, res)
+
+ res.sum().backward()
+ ref.sum().backward()
+ self.assertEqual(base.grad, base_ref.grad)
+
@config.patch(implicit_fallbacks=True)
def test_custom_op_1(self):
import torch.library
@@ -9786,12 +9890,13 @@ if HAS_GPU and RUN_GPU and not TEST_WITH_ASAN:
torch._dynamo.reset()
@config.patch(assume_aligned_inputs=False)
- def test_config_option_dont_assume_alignment(self):
+ def test_codegen_config_option_dont_assume_alignment(self):
def fn(x: torch.Tensor) -> torch.Tensor:
return x.sin() + x.cos()
- for offset in (0, 1, 2):
- base = torch.randn(64 * 64 + 64, device=GPU_TYPE)
+ # We want code that assumes alignment if the initial input is 16-byte aligned
+ for offset in (0, 1, 2, 3, 4):
+ base = torch.randn(64 * 64 + 64, dtype=torch.float32, device=GPU_TYPE)
inps = torch.as_strided(base, (64, 64), (64, 1), offset)
torch._dynamo.reset()
kernels = self.get_kernels(fn, [inps])
@@ -9802,7 +9907,20 @@ if HAS_GPU and RUN_GPU and not TEST_WITH_ASAN:
# NO_ALIGN ALIGN ALIGN
# def triton_(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr)
- self.assertEqual(arguments_that_are_divisible_by_16, (1, 2))
+ if offset % 4 == 0:
+ expected_aligned = (0, 1, 2)
+ else:
+ expected_aligned = (1, 2)
+ self.assertEqual(arguments_that_are_divisible_by_16, expected_aligned)
+
+ # If input isn't a view, storage offset != , inductor will assume alignment.
+ torch._dynamo.reset()
+ inp = torch.randn((64, 64), device=GPU_TYPE)
+ kernels = self.get_kernels(fn, [inp])
+ arguments_that_are_divisible_by_16 = (
+ kernels[0].triton_meta["configs"][0].divisible_by_16
+ )
+ self.assertEqual(arguments_that_are_divisible_by_16, (0, 1, 2))
def test_optimize_indexing_dtype(self):
def fn(x: torch.Tensor) -> torch.Tensor:
diff --git a/torch/_inductor/codegen/triton_utils.py b/torch/_inductor/codegen/triton_utils.py
index 630f55ee94..ea6f25ae2c 100644
--- a/torch/_inductor/codegen/triton_utils.py
+++ b/torch/_inductor/codegen/triton_utils.py
@@ -69,7 +69,8 @@ def signature_to_meta(
def is_unaligned_buffer(arg: TensorArg):
buf_name = arg.buffer
if buf_name in V.graph.graph_inputs:
- return not config.assume_aligned_inputs
+ # See Note: [Input Alignment handling in Inductor]
+ return buf_name not in V.graph.aligned_inputs
if buf_name in V.graph.constants:
# all constants are assumed to be aligned
diff --git a/torch/_inductor/compile_fx.py b/torch/_inductor/compile_fx.py
index c99d15a86f..93c5fa5ca7 100644
--- a/torch/_inductor/compile_fx.py
+++ b/torch/_inductor/compile_fx.py
@@ -32,7 +32,12 @@ from torch._inductor.codecache import code_hash, CompiledFxGraph, FxGraphCache
from torch._inductor.cudagraph_utils import BoxedDeviceIndex, get_placeholders
from torch._inductor.debug import save_args_for_compile_fx_inner
-from torch._inductor.utils import BoxedBool, count_tangents
+from torch._inductor.utils import (
+ BoxedBool,
+ count_tangents,
+ should_assume_input_aligned,
+ tensor_is_aligned,
+)
from torch._logging import trace_structured
from torch._ops import OpOverload
from torch._subclasses.fake_tensor import FakeTensor
@@ -53,8 +58,8 @@ from .graph import GraphLowering
from .ir import ExternKernelNode
from .utils import (
get_cloned_parameter_buffer_name,
- get_dtype_size,
has_incompatible_cudagraph_ops,
+ maybe_get_suppress_shape_guards_ctx,
output_node,
)
from .virtualized import V
@@ -837,20 +842,34 @@ def get_input_idxs_to_check(
inputs: Union[List[torch.Tensor], Sequence[int]],
static_input_idxs: Sequence[int],
) -> Sequence[int]:
- def is_aligned(storage_offset, dtype):
- return (storage_offset * get_dtype_size(dtype)) % ALIGNMENT == 0
-
+ """
+ This function runs at compile time, and generates a list of indices for which we
+ might need to do a copy to preserve alignment requirements.
+ """
ids_to_check = []
+
for i, input in enumerate(inputs):
- if (
- isinstance(input, torch.Tensor)
- and (
- i not in static_input_idxs
- or not is_aligned(input.storage_offset(), input.dtype)
- )
- and input.device.type == "cuda"
- ):
- ids_to_check.append(i)
+ if not isinstance(input, torch.Tensor):
+ # non-tensors don't need alignment
+ continue
+ if input.device.type != "cuda":
+ # right now we only care for cuda tensors
+ continue
+ with maybe_get_suppress_shape_guards_ctx():
+ # suppress guards so that tensor_is_aligned and should_assume_input_aligned
+ # do not add guards on input's storage offset
+ if i in static_input_idxs and tensor_is_aligned(input):
+ continue
+ if not should_assume_input_aligned(input):
+ continue
+
+ # if we get here, then
+ # (a) our triton code assumes that the input is aligned
+ # (b) we can't be sure ahead of time that the input will actually be aligned.
+ # therefore, at runtime, we'll need to check that the input is aligned
+ # (and if not, clone it to make it aligned.)
+ ids_to_check.append(i)
+
return ids_to_check
@@ -872,10 +891,7 @@ def align_inputs(
inputs: List[torch.Tensor],
static_input_idxs: Sequence[int] = (),
):
- if config.assume_aligned_inputs:
- inputs_to_check = get_input_idxs_to_check(inputs, static_input_idxs)
- else:
- inputs_to_check = []
+ inputs_to_check = get_input_idxs_to_check(inputs, static_input_idxs)
return align_inputs_from_check_idxs(model, inputs_to_check)
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index b6e53ba681..c3b5cd3bf8 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -74,6 +74,8 @@ from .utils import (
gather_origins,
get_cloned_parameter_buffer_name,
get_sympy_Expr_dtype,
+ maybe_get_suppress_shape_guards_ctx,
+ should_assume_input_aligned,
)
from .virtualized import V
@@ -397,6 +399,8 @@ class GraphLowering(torch.fx.Interpreter):
self.effectful_ops: Dict[_EffectType, ir.Buffer] = {}
+ self.aligned_inputs: Set[str] = set()
+
@staticmethod
def decide_layout_opt(gm, *, is_inference) -> bool:
"""
@@ -703,6 +707,7 @@ class GraphLowering(torch.fx.Interpreter):
and buffer.get_device() is not None
):
self.add_device_info(buffer.get_device())
+
return name
def register_list(self, buffer_names: List[str]):
@@ -856,6 +861,22 @@ class GraphLowering(torch.fx.Interpreter):
self.graph_inputs[target] = tensor
self.graph_inputs_original[target] = tensor.data.data
self.add_device_info(example.device)
+
+ # Note: [Input Alignment handling in Inductor]
+ # Alignment matters for generating efficient code. Some operations,
+ # e.g. vectorized loads, can only be performed on aligned inputs.
+ #
+ # But if we codegen assuming aligned inputs and then get unaligned
+ # inputs at runtime, then we are forced to clone - which is bad for
+ # both perf and memory usage.
+ #
+ # One option would be to guard on storage_offset%ALIGNMENT, and then
+ # codegen based on this. But storage_offset guards turned out to be
+ # expensive and cause recompiles; Instead, we're generating code
+ # based on the alignment of the example input without guarding.
+ with maybe_get_suppress_shape_guards_ctx():
+ if should_assume_input_aligned(example):
+ self.aligned_inputs.add(target)
return tensor
def call_function(self, target, args, kwargs):
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index 3cf4faa24b..b5b724eb0d 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -56,6 +56,8 @@ log = logging.getLogger(__name__)
_T = TypeVar("_T")
VarRanges = Dict[sympy.Expr, sympy.Expr]
+ALIGNMENT = 16
+
def do_bench_using_profiling(fn: Callable[[], Any], warmup=25, rep=100) -> float:
"""
@@ -1424,3 +1426,39 @@ def dump_node_schedule(node_schedule):
print(dep)
else:
raise RuntimeError(f"Unrecognized node type: {type(node)}")
+
+
+def tensor_is_aligned(tensor: torch.Tensor):
+ # See Note: [Input Alignment handling in Inductor]
+ # Right now, we don't try to guard on the alignment of the storage offset.
+ # When this comment was written, non-symbolic storage_offsets are not guarded on
+ # but symbolic storage_offsets are. For consistency, we suppress guard creation
+ # upon performing this check: that ensures that we don't add recompiles when we
+ # add this logic.
+ return (tensor.storage_offset() * get_dtype_size(tensor.dtype)) % ALIGNMENT == 0
+
+
+def should_assume_input_aligned(example_input: torch.Tensor):
+ # See Note: [Input Alignment handling in Inductor]
+
+ # right now, we only care about alignment for cuda tensors.
+ if example_input.device.type != "cuda":
+ return False
+ return config.assume_aligned_inputs or tensor_is_aligned(example_input)
+
+
+def maybe_get_suppress_shape_guards_ctx():
+ # Try to get TracingContext.try_get().fake_mode.shape_env.suppress_guards()
+ # If it's not available, return a nullcontext.
+
+ # If we're dealing with cudagraphs, we might not have a tracing_context
+ tracing_context = torch._guards.TracingContext.try_get()
+ if not tracing_context:
+ return contextlib.nullcontext()
+
+ # In standalone inductor compile mode, we might not have a shape_env attached to the fake mode
+ shape_env = tracing_context.fake_mode.shape_env
+ if not shape_env:
+ return contextlib.nullcontext()
+
+ return shape_env.suppress_guards()
|
2.41.0
|
bb89bcaa44fb5513c7810d039d0b4dc45346574
|
Thu, 25 Apr 2024 22:44:16 +0000
|
[PATCH 0678/1000] [export] Fix state dict reparametrization in non-strict. (#124847)
|
Summary: There are multiple things implemented incorrectly in non-strict mode when reparametrizing the state dict: 1. The same fake tensor should be generated for duplicated weights. 2. We should snapshot the state dict at the beginning so that the invariant ep.state_dict == mod.state_dict() always holds. 3. We will overwrite real weights with fake weights if we don't restore the weights in LIFO order. 4. We don't turn on strict checking, which could silently fail on corner cases. This diff aims to solve all these issues at once. Test Plan: CI Differential Revision: D56505020 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124847 Approved by: https://github.com/pianpwk
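A minimal sketch of point 1 (illustrative names; the real change memoizes FakeTensors inside make_fake_params_buffers, as the diff below shows): deduplicate by object identity so two state-dict entries that alias the same parameter resolve to a single faked value.
```
from typing import Any, Callable, Dict

def fakeify_params_buffers(
    params_buffers: Dict[str, Any],
    fakeify: Callable[[Any], Any],
) -> Dict[str, Any]:
    memo: Dict[int, Any] = {}   # keyed by id() so a shared tensor is faked once
    out: Dict[str, Any] = {}
    for name, value in params_buffers.items():
        if id(value) not in memo:
            memo[id(value)] = fakeify(value)
        out[name] = memo[id(value)]
    return out

# A shared LayerNorm means "norms.0.weight" and "norms.1.weight" alias one tensor,
# so both names must map to the *same* faked object.
class _T:  # stand-in for a tensor in this sketch
    pass

w = _T()
faked = fakeify_params_buffers({"norms.0.weight": w, "norms.1.weight": w}, lambda v: _T())
assert faked["norms.0.weight"] is faked["norms.1.weight"]
```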
|
diff --git a/test/export/test_export.py b/test/export/test_export.py
index 9d946d0fa0..bb96de9a9a 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -4032,6 +4032,32 @@ def forward(self, b_t, x, y):
][0]
self.assertEqual(op_node.target._name, "aten::add.Tensor")
+ @testing.expectedFailureRetraceability
+ def test_layer_sharing(self):
+ N, C, H, W = 1, 2, 2, 3
+
+ class Module(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ layer = torch.nn.LayerNorm([C, H, W])
+ self.norms = torch.nn.ModuleList(
+ [
+ layer,
+ layer,
+ ]
+ )
+
+ def forward(self, x):
+ for norm in self.norms:
+ x = norm(x)
+ return x
+
+ m = Module()
+ copied_m = copy.deepcopy(m)
+ ep = export(copied_m, (torch.randn(N, C, H, W),))
+ self.assertEqual(copied_m.state_dict(), m.state_dict())
+ self.assertEqual(ep.state_dict, m.state_dict())
+
def test_non_persistent_buffer(self):
class MyModule(torch.nn.Module):
def __init__(self):
diff --git a/torch/_export/non_strict_utils.py b/torch/_export/non_strict_utils.py
index 56812fe191..f102d1bfb0 100644
--- a/torch/_export/non_strict_utils.py
+++ b/torch/_export/non_strict_utils.py
@@ -13,7 +13,7 @@ from torch._dynamo.source import (
from torch._dynamo.variables.builder import TrackedFake
from torch._export.passes.add_runtime_assertions_for_constraints_pass import InputDim
from torch._guards import Source
-from torch._subclasses.fake_tensor import FakeTensorMode
+from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch.export import Constraint
from torch.export.dynamic_shapes import _Dim
from torch.export.graph_signature import CustomObjArgument
@@ -92,9 +92,15 @@ def make_fake_params_buffers(
params_buffers: Dict[str, torch.Tensor],
) -> Dict[str, Union[torch.Tensor, torch.nn.Parameter]]:
faked_params_buffers = {}
+ memo: Dict[int, FakeTensor] = {}
for key, value in params_buffers.items():
- faked_params_buffers[key] = fake_mode.from_tensor(value, static_shapes=True)
- return faked_params_buffers
+ if id(value) in memo:
+ fake_tensor = memo[id(value)]
+ else:
+ fake_tensor = fake_mode.from_tensor(value, static_shapes=True)
+ memo[id(value)] = fake_tensor
+ faked_params_buffers[key] = fake_tensor
+ return faked_params_buffers # type: ignore[return-value]
def make_fake_inputs(nn_module, args, kwargs, dynamic_shapes):
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 918e10cb90..bf77f7c712 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -515,7 +515,11 @@ def _export_non_strict(
# otherwise aot_export_module will error out because it sees a mix of fake_modes.
# And we want aot_export_module to use the fake_tensor mode in dynamo to keep the pipeline easy to reason about.
with torch.nn.utils.stateless._reparametrize_module(
- mod, fake_params_buffers
+ mod,
+ fake_params_buffers,
+ tie_weights=True,
+ strict=True,
+ stack_weights=True,
), grad_safe_guard, _ignore_backend_decomps(), _compiling_state_context(): # type: ignore[attr-defined]
gm, graph_signature = transform(aot_export_module)(
mod,
@@ -907,6 +911,7 @@ def _export(
constant_attrs = _gather_constant_attrs(mod)
flat_args, orig_in_spec = pytree.tree_flatten((args, kwargs))
+ original_state_dict = mod.state_dict(keep_vars=True)
if not strict:
out_spec = None
@@ -1069,7 +1074,7 @@ def _export(
root=gm,
graph=gm.graph,
graph_signature=ep_non_strict.sig,
- state_dict=mod.state_dict(keep_vars=True),
+ state_dict=original_state_dict,
range_constraints=range_constraints,
module_call_graph=_make_module_call_graph(
_EXPORT_MODULE_HIERARCHY, orig_in_spec, out_spec, module_call_signatures
@@ -1280,7 +1285,7 @@ def _export(
root=gm,
graph=gm.graph,
graph_signature=export_graph_signature,
- state_dict=mod.state_dict(keep_vars=True),
+ state_dict=original_state_dict,
range_constraints=range_constraints,
module_call_graph=_make_module_call_graph(
_EXPORT_MODULE_HIERARCHY,
diff --git a/torch/nn/utils/stateless.py b/torch/nn/utils/stateless.py
index ae7ebcdf3d..2cb6c7460d 100644
--- a/torch/nn/utils/stateless.py
+++ b/torch/nn/utils/stateless.py
@@ -93,6 +93,7 @@ def _reparametrize_module(
*,
tie_weights: bool = False,
strict: bool = False,
+ stack_weights: bool = False,
) -> Iterator[None]:
if tie_weights:
untied_parameters_and_buffers = _untie_named_tensors_map(
@@ -127,6 +128,11 @@ def _reparametrize_module(
)
yield
finally:
+ if stack_weights:
+ # When stacking is enabled, we will restore the weights in LIFO order.
+ orig_parameters_and_buffers = dict(
+ reversed(orig_parameters_and_buffers.items())
+ )
new_parameters_and_buffers, _ = accessor.swap_tensors_dict(
orig_parameters_and_buffers, allow_missing=True
)
|
2.41.0
|
24f8dd8c5b78c52c1fbad216f1afbf2a1bccf12
|
Thu, 25 Apr 2024 23:03:27 +0000
|
[PATCH 0679/1000] [export] Serialize empty list based on argument type (#123748)
|
Fixes https://github.com/pytorch/pytorch/issues/123480 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123748 Approved by: https://github.com/zhxchen17
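A small sketch (assuming the current aten::sum.dim_IntList schema, where dim is an optional int list) of how the element type behind an empty argument list can be read off the op schema, mirroring the unwrapping the serializer now performs:
```
import torch

schema = torch.ops.aten.sum.dim_IntList._schema
dim_arg = next(a for a in schema.arguments if a.name == "dim")
arg_type = dim_arg.type
if isinstance(arg_type, torch.OptionalType):   # int[1]? -> int[1]
    arg_type = arg_type.getElementType()
assert isinstance(arg_type, torch.ListType)
elem_type = arg_type.getElementType()
# An IntType element means [] should serialize as `as_ints`, not as a tensor list.
print(type(elem_type).__name__)  # IntType
```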
|
diff --git a/test/export/test_serialize.py b/test/export/test_serialize.py
index bf9705a58d..186517d02d 100644
--- a/test/export/test_serialize.py
+++ b/test/export/test_serialize.py
@@ -331,6 +331,17 @@ class TestSerialize(TestCase):
g.nodes[1].inputs[0].arg.as_tensor.name,
)
+ def test_int_list(self) -> None:
+ class M(torch.nn.Module):
+ def forward(self, x):
+ return torch.ops.aten.sum.dim_IntList(x, [])
+
+ ep = torch.export.export(M(), (torch.randn(3, 2),))
+ serialized = ExportedProgramSerializer().serialize(ep)
+ for node in serialized.exported_program.graph_module.graph.nodes:
+ if "aten.sum.dim_IntList" in node.target:
+ self.assertEqual(node.inputs[1].arg.type, "as_ints")
+
@unittest.skipIf(IS_WINDOWS, "Windows not supported for this test")
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo doesn't support")
diff --git a/torch/_export/serde/serialize.py b/torch/_export/serde/serialize.py
index 86861b9b8e..55f7e505be 100644
--- a/torch/_export/serde/serialize.py
+++ b/torch/_export/serde/serialize.py
@@ -600,14 +600,14 @@ class GraphModuleSerializer(metaclass=Final):
serialized_args.append(
NamedArgument(
name=schema_arg.name,
- arg=self.serialize_input(kwargs[schema_arg.name]),
+ arg=self.serialize_input(kwargs[schema_arg.name], schema_arg.type),
)
)
elif not schema_arg.kwarg_only and i < len(args):
serialized_args.append(
NamedArgument(
name=schema_arg.name,
- arg=self.serialize_input(args[i]),
+ arg=self.serialize_input(args[i], schema_arg.type),
)
)
else:
@@ -648,7 +648,9 @@ class GraphModuleSerializer(metaclass=Final):
and arg.name in self.graph_state.sym_bool_values
)
- def serialize_input(self, arg) -> Argument:
+ def serialize_input(
+ self, arg, arg_type: Optional[torch._C.Argument] = None
+ ) -> Argument:
import torch._inductor.ir as inductor_ir
inductor_tensor_buffers = (
@@ -716,6 +718,39 @@ class GraphModuleSerializer(metaclass=Final):
elif arg is None:
return Argument.create(as_none=())
elif isinstance(arg, (list, tuple)):
+ if len(arg) == 0:
+ if arg_type is not None:
+ if isinstance(arg_type, torch.OptionalType):
+ arg_type = arg_type.getElementType() # type: ignore[assignment]
+ assert isinstance(arg_type, torch.ListType)
+ elem_type = arg_type.getElementType()
+ if isinstance(elem_type, torch.OptionalType):
+ elem_type = elem_type.getElementType()
+
+ if isinstance(elem_type, torch.BoolType):
+ return Argument.create(as_bools=[])
+ elif isinstance(elem_type, torch.IntType):
+ return Argument.create(as_ints=[])
+ elif isinstance(elem_type, torch.FloatType):
+ return Argument.create(as_floats=[])
+ elif isinstance(elem_type, torch.StringType):
+ return Argument.create(as_strings=[])
+ elif isinstance(elem_type, torch.TensorType):
+ return Argument.create(as_tensors=[])
+ else:
+ # I believe empty symint lists default to ints, but
+ # please file an issue if this is not the case
+ raise SerializeError(f"Empty list with type {elem_type} nyi.")
+ else:
+ # We could serialize this by default to a tensor list. This
+ # is needed in the HOO case
+ log.warning(
+ "Unsure how to serialize the given empty list, "
+ "as we don't know what is the type of this argument. "
+ "Serializing it as a tensor list by default."
+ )
+ return Argument.create(as_tensors=[])
+
# Must check bool first, as bool is also treated as int
if all(isinstance(a, bool) for a in arg):
return Argument.create(as_bools=list(arg))
|
2.41.0
|
8835fff9fd498472b0e8f49a3a4670d86f3c5b7
|
Thu, 25 Apr 2024 23:07:33 +0000
|
[PATCH 0680/1000] remove empty partition (#124920)
|
In some rare scenarios, the partitioner will produce an empty partition. It's a waste of time to compile an empty graph. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124920 Approved by: https://github.com/ezyang
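A tiny illustration of the filtering (with hypothetical partition contents): partitions that end up with zero nodes are dropped rather than handed to the compiler as empty subgraphs.
```
# hypothetical partition map: partition id -> node names it holds
partitions_by_id = {0: ["var_mean", "getitem"], 1: []}
non_empty = [nodes for nodes in partitions_by_id.values() if len(nodes) > 0]
assert non_empty == [["var_mean", "getitem"]]
```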
|
diff --git a/test/test_fx_passes.py b/test/test_fx_passes.py
index 21400c41c1..491633f0e4 100644
--- a/test/test_fx_passes.py
+++ b/test/test_fx_passes.py
@@ -234,6 +234,11 @@ class TestPartitionFunctions:
a2 = e + f
return a0, a1, a2
+ @staticmethod
+ def forward18(a, b, c):
+ a0, a1 = torch.ops.aten.var_mean(a)
+ return a0
+
# A mock OperatorSupport class, where only operator.add is supported
class MockOperatorSupport(OperatorSupport):
def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
@@ -277,6 +282,8 @@ class TestFXGraphPasses(JitTestCase):
(TestPartitionFunctions.forward15, [['add_1', 'add', 'permute_1', 'view', 'permute_2', 'permute_3', 'permute']], False),
(TestPartitionFunctions.forward16, [["permute_1", "add_1", "add"]], True),
(TestPartitionFunctions.forward16, [['add_1', 'add', 'permute_1', 'view', 'permute_2', 'permute_3', 'permute']], False),
+ # should be empty partition, not a partiton with empty nodes
+ (TestPartitionFunctions.forward18, [], False),
])
def test_partitioner(self, fn, expected_partition, bookend_non_compute_pass):
traced = symbolic_trace(fn)
diff --git a/torch/fx/passes/infra/partitioner.py b/torch/fx/passes/infra/partitioner.py
index a8a861be0f..5b606c1745 100644
--- a/torch/fx/passes/infra/partitioner.py
+++ b/torch/fx/passes/infra/partitioner.py
@@ -260,7 +260,7 @@ class CapabilityBasedPartitioner:
for id, partition in partitions_by_id.items():
logger.debug("partition #%s: %s", id, [node.name for node in partition.nodes])
- return list(partitions_by_id.values())
+ return [partition for partition in partitions_by_id.values() if partition.size() > 0]
def fuse_partitions(self, partitions: List[Partition]) -> GraphModule:
logger.debug("Fusing partitions...")
|
2.41.0
|
3fae4fcf45cf68a59bdec9c64cb04d6461824c2
|
Thu, 25 Apr 2024 23:08:20 +0000
|
[PATCH 0681/1000] Revert "Use recursive blob for package data (#119257)"
|
This reverts commit f20e3ae0c36146c962a5665018e9ad662a7cf211. Reverted https://github.com/pytorch/pytorch/pull/119257 on behalf of https://github.com/malfet due to This likely caused https://github.com/pytorch/pytorch/issues/124941, not sure why the warning about the recursive glob was ignored ([comment](https://github.com/pytorch/pytorch/pull/119257#issuecomment-2078312309))
|
diff --git a/setup.py b/setup.py
index d35240683c..d774446780 100644
--- a/setup.py
+++ b/setup.py
@@ -1393,9 +1393,15 @@ def main():
]
)
torchgen_package_data = [
- "packaged/**/*.cpp",
- "packaged/**/*.h",
- "packaged/**/*.yaml",
+ # Recursive glob doesn't work in setup.py,
+ # https://github.com/pypa/setuptools/issues/1806
+ # To make this robust we should replace it with some code that
+ # returns a list of everything under packaged/
+ "packaged/ATen/*",
+ "packaged/ATen/native/*",
+ "packaged/ATen/templates/*",
+ "packaged/autograd/*",
+ "packaged/autograd/templates/*",
]
setup(
name=package_name,
|
2.41.0
|
73d724e2196d51853d8e9a3d750fca11e58ff93
|
Thu, 25 Apr 2024 23:21:43 +0000
|
[PATCH 0682/1000] [CUDA] Fix 64-bit indexing in `vol2col` in conv3d (#124650)
|
Similar to #118005, fixes sometimes-silent IMAs (illegal memory accesses) that can occur. CC @atalman @malfet Pull Request resolved: https://github.com/pytorch/pytorch/pull/124650 Approved by: https://github.com/soulitzer
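A quick back-of-the-envelope check, based on the shape used in the new test, of why 32-bit indexing is not enough here:
```
# The test input is (1, 32, 512, 512, 256); a linear index over a volume of this
# size already exceeds the largest signed 32-bit value, so it must be int64_t.
numel = 1 * 32 * 512 * 512 * 256
int32_max = 2**31 - 1
print(numel, int32_max, numel > int32_max)  # 2147483648 2147483647 True
```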
|
diff --git a/aten/src/ATen/native/cuda/vol2col.cuh b/aten/src/ATen/native/cuda/vol2col.cuh
index 51dbe1c744..98ec2c3522 100644
--- a/aten/src/ATen/native/cuda/vol2col.cuh
+++ b/aten/src/ATen/native/cuda/vol2col.cuh
@@ -36,7 +36,7 @@ __global__ void vol2col_kernel(
const int height_col,
const int width_col,
T* data_col) {
- CUDA_KERNEL_LOOP(index, n) {
+ CUDA_KERNEL_LOOP_TYPE(index, n, int64_t) {
auto w_out = index % width_col;
index /= width_col;
auto h_out = index % height_col;
diff --git a/test/nn/test_convolution.py b/test/nn/test_convolution.py
index 0bf6065a18..c0d715019d 100644
--- a/test/nn/test_convolution.py
+++ b/test/nn/test_convolution.py
@@ -3183,6 +3183,17 @@ class TestConvolutionNNDeviceType(NNTestCase):
output_cpu = model(input_tensor.float().cpu())
self.assertEqual(output.cpu().float(), output_cpu, atol=1e-3, rtol=1e-3)
+ @onlyCUDA
+ @skipCUDAIfRocm
+ @largeTensorTest("24GB", "cpu")
+ @largeTensorTest("20GB", "cuda")
+ def test_conv3d_large_batch_1(self, device):
+ x = torch.rand(1, 32, 512, 512, 256)
+ m = torch.nn.Conv3d(32, 1, kernel_size=1, padding=0, stride=1, bias=False)
+ yref = m(x)
+ y = m.to(device=device)(x.to(device=device))
+ self.assertEqual(yref, y.cpu())
+
@onlyCUDA
@skipCUDAIfNoCudnn
def test_contig_wrong_stride_cudnn(self, device):
|
2.41.0
|
b3a2d751c117fe563bdbc4a1b4f8736c184ca68
|
Thu, 25 Apr 2024 23:25:20 +0000
|
[PATCH 0683/1000] [MPS][BE] Error-check linear (#124952)
|
Validate that all arguments are on MPS devices and that their dtypes are as expected. Fixes cryptic messages like
```
% python3 -c "import torch;print(torch.nn.functional.linear(torch.rand(32, 32), torch.rand((32, 32), device='mps')))"
RuntimeError: Placeholder storage has not been allocated on MPS device!
```
and hard crashes like
```
% python3 -c "import torch;print(torch.nn.functional.linear(torch.rand(32, 32, device='mps'), torch.randint(-10, 10, (32, 32), dtype=torch.int8, device='mps')))"
```
Fixes https://github.com/pytorch/pytorch/issues/123995 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124952 Approved by: https://github.com/Skylion007
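A hedged Python-level analogue of the validation pattern the Objective-C++ change adds (the function name and the exact set of accepted dtypes are simplifications, not the real API):
```
import torch

def check_mps_linear_args(input, weight, bias=None):
    # Mirror of the TORCH_CHECK-style guards: every tensor must live on MPS and
    # be floating point, so the failure is a readable error rather than a crash.
    for name, t in (("input", input), ("weight", weight), ("bias", bias)):
        if t is None:
            continue
        if t.device.type != "mps":
            raise RuntimeError(f"Tensor for argument {name} is on {t.device} but expected on mps")
        if not t.dtype.is_floating_point:
            raise RuntimeError(f"MPS device does not support linear for non-float {name}")
```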
|
diff --git a/aten/src/ATen/native/mps/operations/Linear.mm b/aten/src/ATen/native/mps/operations/Linear.mm
index 6ed9853035..450e24c77c 100644
--- a/aten/src/ATen/native/mps/operations/Linear.mm
+++ b/aten/src/ATen/native/mps/operations/Linear.mm
@@ -16,9 +16,16 @@ Tensor _mps_linear(const Tensor& input, const Tensor& weight_arg, const c10::opt
auto weight = (weight_arg.dim() == 1) ? weight_arg.view({1, weight_arg.size(0)}) : weight_arg;
TORCH_CHECK(supportedFloatingType(input), "MPS device does not support linear for non-float inputs");
+ TORCH_CHECK(input.is_mps(), "Tensor for argument input is on ", input.device(), " but expected on mps");
+ TORCH_CHECK(supportedFloatingType(weight_arg), "MPS device does not support linear for non-float weights");
+ TORCH_CHECK(weight_arg.is_mps(), "Tensor for argument weight is on ", weight_arg.device(), " but expected on mps");
const Tensor& bias = *(at::borrow_from_optional_tensor(bias_opt));
- bool is_bias_defined = bias.defined();
+ const bool is_bias_defined = bias.defined();
+ if (is_bias_defined) {
+ TORCH_CHECK(bias.is_mps(), "Tensor for argument bias is on ", bias.device(), " but expected on mps");
+ TORCH_CHECK(supportedFloatingType(bias), "MPS device does not support linear for non-float bias");
+ }
auto input_size = input.sizes();
std::vector<int64_t> output_size(input_size.begin(), input_size.end() - 1);
diff --git a/test/test_mps.py b/test/test_mps.py
index bfac420775..7f87c1ccd4 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -1961,6 +1961,25 @@ class TestMPS(TestCaseMPS):
helper(())
helper((2, 4))
+ def test_linear_errors(self):
+ # Mixed CPU<->MPS tensors
+ size = (3, 3)
+
+ # Unsupported dtypes
+ with self.assertRaisesRegex(RuntimeError, "does not support linear for non-float weights"):
+ torch.nn.functional.linear(torch.rand(size, device='mps'),
+ torch.randint(-10, 10, size, dtype=torch.int8, device='mps'))
+
+ # Weigths on wrong device
+ with self.assertRaisesRegex(RuntimeError, "argument weight is on cpu but expected on mps"):
+ torch.nn.functional.linear(torch.rand(size, device='mps'),
+ torch.rand(size, device='cpu'))
+
+ # Input on wrong device
+ with self.assertRaisesRegex(RuntimeError, "argument input is on cpu but expected on mps"):
+ torch.nn.functional.linear(torch.rand(size, device='cpu'),
+ torch.rand(size, device='mps'))
+
def _linear_helper(self, in_features, out_features, shape, bias=True, backward_pass=False):
cpu_linear = torch.nn.Linear(in_features=in_features, out_features=out_features, device="cpu", bias=bias)
mps_linear = torch.nn.Linear(in_features=in_features, out_features=out_features, device="mps", bias=bias)
|
2.41.0
|
4b6ed460990f43fe4bbf8cedb067b9f42199e9d
|
Thu, 25 Apr 2024 12:12:53 -0700
|
[PATCH 0685/1000] guard_size_oblivious in unbind (#124959)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124959 Approved by: https://github.com/albanD
|
diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index 144419eb5b..a0b00e2c9e 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -3890,12 +3890,14 @@ def unflatten(a: TensorLikeType, dim: int, sizes: ShapeType) -> TensorLikeType:
@register_decomposition(aten.unbind)
def unbind(t: TensorLikeType, dim: int = 0) -> TensorSequenceType:
+ from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
+
dim = utils.canonicalize_dim(t.ndim, dim)
torch._check_index(
len(t.shape) > 0,
lambda: "Dimension specified as 0 but tensor has no dimensions",
)
- if t.shape[dim] == 0:
+ if guard_size_oblivious(t.shape[dim] == 0):
return tuple()
else:
return tuple(
|
2.41.0
|
33f095d0779ecf0ce489ceecff35404abde8581
|
Thu, 25 Apr 2024 14:05:18 -0700
|
[PATCH 0686/1000] Delete erroneous print (#124972)
|
I forgot to remove it before landing Pull Request resolved: https://github.com/pytorch/pytorch/pull/124972 Approved by: https://github.com/albanD
|
diff --git a/torch/library.py b/torch/library.py
index 78857ef75c..455c6d9067 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -123,7 +123,6 @@ class Library:
if has_preexisting_packet:
ns = getattr(torch.ops, self.ns)
packet = getattr(ns, packet_name)
- print("refreshing", ns, packet_name)
torch._ops._refresh_packet(packet)
self._op_defs.add(qualname)
|
2.41.0
|
9379ebbbf1369aad8179cac4a2eb7d72f25739e
|
Fri, 26 Apr 2024 00:16:16 +0000
|
[PATCH 0687/1000] fix Invalid call to aoti_torch_tensor_copy_ #123039 (#124037)
|
Fixes #123039. In ABI-compatible mode, ExternKernelSchedulerNode generates code using `aoti_torch_tensor_copy_`, which requires an `AtenTensorHandle`, but the allocation generates an ArrayRefTensor that allocates memory on the stack. To fix this issue, this PR prevents ExternKernelSchedulerNode from using stack memory allocation in ABI mode and creates an AtenTensorHandle instead. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124037 Approved by: https://github.com/desertfire
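A hedged sketch of the decision this tightens (the surrounding conditions are simplified stand-ins; only the is_extern() exclusion comes from this patch):
```
def can_stack_allocate(buffer, abi_compatible: bool, contiguous: bool) -> bool:
    # Outputs of extern kernels must stay AtenTensorHandles, since
    # aoti_torch_tensor_copy_ operates on handles, so they are excluded from
    # stack-allocated ArrayRefTensor buffers.
    return abi_compatible and contiguous and not buffer.is_extern()
```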
|
diff --git a/test/inductor/test_cuda_cpp_wrapper.py b/test/inductor/test_cuda_cpp_wrapper.py
index b662e2438c..fa717ab835 100644
--- a/test/inductor/test_cuda_cpp_wrapper.py
+++ b/test/inductor/test_cuda_cpp_wrapper.py
@@ -109,9 +109,7 @@ if config.abi_compatible:
test_failures_cuda_wrapper[
f"{test_name}_dynamic_shapes"
] = test_torchinductor.TestFailure(("cuda_wrapper",), is_skip=False)
- skip_list = [
- "test_multi_device_cuda",
- ]
+ skip_list = []
for test_name in skip_list:
test_failures_cuda_wrapper[test_name] = test_torchinductor.TestFailure(
("cuda_wrapper",), is_skip=True
diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py
index 95e4ef3ac7..f50e2582a2 100644
--- a/torch/_inductor/codegen/cpp_wrapper_cpu.py
+++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py
@@ -896,9 +896,11 @@ class CppWrapperCpu(WrapperCodeGen):
@cache_on_self
def get_output_refs(self):
return [
- f"torch::tensor({x.codegen_reference(self.wrapper_call)})"
- if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible
- else x.codegen_reference(self.wrapper_call)
+ (
+ f"torch::tensor({x.codegen_reference(self.wrapper_call)})"
+ if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible
+ else x.codegen_reference(self.wrapper_call)
+ )
for x in V.graph.graph_outputs
]
@@ -1098,9 +1100,11 @@ class CppWrapperCpu(WrapperCodeGen):
outputs_str = "output_tensors"
else:
outputs = [
- f"output_tensors[{i}]"
- if self.output_is_tensor[i]
- else f"output_tensors[{i}].item()"
+ (
+ f"output_tensors[{i}]"
+ if self.output_is_tensor[i]
+ else f"output_tensors[{i}].item()"
+ )
for i in range(len(V.graph.graph_outputs))
]
outputs_str = f"[{', '.join(outputs)}]"
@@ -1426,6 +1430,7 @@ class CppWrapperCpu(WrapperCodeGen):
and ir.is_contiguous_strides_for_shape(
buffer.get_stride(), buffer.get_size()
)
+ and not buffer.is_extern()
)
def make_buffer_free(self, buffer):
|
2.41.0
|
a810bcf91d414b5d86bc25142d66cbc59b8e247
|
Thu, 25 Apr 2024 12:54:46 -0700
|
[PATCH 0688/1000] skip unsupported rocm test (#124968)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124968 Approved by: https://github.com/jithunnair-amd, https://github.com/davidberard98
|
diff --git a/test/inductor/test_fused_attention.py b/test/inductor/test_fused_attention.py
index 63e14b58c0..c6393a9b4d 100644
--- a/test/inductor/test_fused_attention.py
+++ b/test/inductor/test_fused_attention.py
@@ -161,6 +161,7 @@ class TestSDPAPatternRewriterTemplate(TestCase):
check_train=False,
)
+ @skipIfRocm
def _test_insignificant_strides(self):
f32 = torch.float32
|
2.41.0
|
321005dd83c20d50fd1d537a37798178737ee29
|
Thu, 25 Apr 2024 10:35:47 -0700
|
[PATCH 0689/1000] Add support for capturing tensors with score_mod (#124444)
|
```
import torch
from torch import nn
import torch.nn.functional as F
import torch._inductor.config as config

# torch.set_default_device('cuda')
import torch
from torch.nn.attention._templated_attention import _templated_attention as templated_attention
from triton.testing import do_bench
from torch.nn.attention import SDPBackend, sdpa_kernel

index = torch.ops.aten

torch.manual_seed(0)

B = 16
H = 16
S = 2048
D = 64

head_scale = torch.randn(H, device='cuda')

def alibi(score, batch, head, token_q, token_kv):
    return score + torch.ops.aten.index(head_scale, [head]) * (token_q - token_kv)

bias = torch.randn(H, S, S, dtype=torch.float16, device='cuda')
query = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16)
key = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16)
value = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16)

compiled = torch.compile(templated_attention)
out = compiled(query, key, value, score_mod=alibi)
out2 = templated_attention(query, key, value, score_mod=alibi)
print((out - out2).abs().mean())
assert (out - out2).abs().mean() < 1e-3

print("Flash (no mask): ", do_bench(lambda: F.scaled_dot_product_attention(query, key, value)))
print("Flash (mask): ", do_bench(lambda: F.scaled_dot_product_attention(query, key, value, attn_mask=bias)))
print("flexattention: ", do_bench(lambda: compiled(query, key, value, score_mod=alibi)))
```
(benchmark results screenshot omitted) Differential Revision: [D56583900](https://our.internmc.facebook.com/intern/diff/D56583900) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124444 Approved by: https://github.com/jansel, https://github.com/drisspg
|
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index 0382cb4e84..9fcd0fd78a 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -4,7 +4,7 @@ import functools
from collections import namedtuple
from typing import Callable
-from unittest import expectedFailure, skipUnless
+from unittest import skip, skipUnless
from unittest.mock import patch
import torch
@@ -36,6 +36,8 @@ supported_platform = skipUnless(
Tolerances = namedtuple("Tolerances", ["atol", "rtol"])
torch.set_float32_matmul_precision("high")
+index = torch.ops.aten.index
+
def create_attention(score_mod):
return functools.partial(_templated_attention, score_mod=score_mod)
@@ -47,6 +49,8 @@ test_dtypes = (
else [torch.float16, torch.float32]
)
+test_dtypes_fast = [torch.float16]
+
# TODO float16 was causing ERRORs for tests on ROCm
# See https://github.com/pytorch/pytorch/issues/123531
if common_utils.TEST_WITH_ROCM:
@@ -65,13 +69,19 @@ def _causal_mod(score, b, h, token_q, token_kv):
return torch.where(token_q >= token_kv, score, float("-inf"))
+B = 4
+H = 8
+S = 2048
+D = 64
+
+
class TestTemplatedSDPA(InductorTestCase):
def run_test(self, score_mod: Callable, dtype: torch.dtype = torch.float16):
sdpa_partial = create_attention(score_mod)
compiled_sdpa = torch.compile(sdpa_partial)
- q = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
- k = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
- v = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
+ q = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
+ k = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
+ v = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
golden_out = sdpa_partial(
q.to(torch.float64), k.to(torch.float64), v.to(torch.float64)
)
@@ -109,23 +119,116 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(composed_score_mod, dtype)
- # TODO We are currently not capturing free variables in the closure correctly
- @expectedFailure
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
def test_captured_buffers(self, dtype: torch.dtype):
- head_offset = torch.rand(8, device="cuda", dtype=dtype)
+ head_offset = torch.rand(H, device="cuda", dtype=dtype)
def score_mod(score, b, h, m, n):
- return score + head_offset[h]
+ return score + index(head_offset, [h])
self.run_test(score_mod, dtype)
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_seq_masking(self, dtype):
+ seq_idx = torch.zeros(S, device="cuda", dtype=torch.bool)
+ seq_idx[S // 2 :] = 1
+
+ def seq_mask_mod(score, b, h, q, kv):
+ return torch.where(
+ index(seq_idx, [q]) == index(seq_idx, [kv]), score, float("-inf")
+ )
+
+ self.run_test(seq_mask_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_from_bias_seq_only(self, dtype):
+ bias = torch.randn(S, S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(bias, [q, kv])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_from_bias_seq_batch(self, dtype):
+ bias = torch.randn(B, S, S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(bias, [b, q, kv])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_from_bias_head_seq_batch(self, dtype):
+ bias = torch.randn(B, H, S, S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(bias, [b, h, q, kv])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_rel_bias(self, dtype):
+ rel_bias = torch.randn(2 * S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(rel_bias, [(q - kv) + S])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_dependent_causal_bidirectional(self, dtype):
+ num_bidirectional = torch.randint(0, S, (B,), device="cuda", dtype=torch.int32)
+
+ def bias_mod(score, b, h, q, kv):
+ causal_attention = q >= kv
+ cur_num_bidirectional = index(num_bidirectional, (b,))
+ bidirectional_attention_on_video = (q <= cur_num_bidirectional) & (
+ kv <= cur_num_bidirectional
+ )
+ return torch.where(
+ bidirectional_attention_on_video | causal_attention,
+ score,
+ -float("inf"),
+ )
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @skip("Triton bug ") # https://github.com/pytorch/pytorch/issues/124571
+ @common_utils.parametrize("dtype", test_dtypes)
+ def test_njt_causal(self, dtype):
+ offsets = torch.tensor(
+ [0, 1024, 1024 + 512, S], device="cuda", dtype=torch.int32
+ )
+ seq_idx = torch.zeros(S, device="cuda", dtype=torch.int32)
+ for idx in range(len(offsets) - 1):
+ seq_idx[offsets[idx] : offsets[idx + 1]] = idx
+
+ def create_njt_wrapper(orig_score_mod, offsets, seq_idx):
+ def njt_score_mod(qk, b, h, q, kv):
+ q_nested = q - index(offsets, [index(seq_idx, [q])])
+ kv_nested = kv - index(offsets, [index(seq_idx, [kv])])
+ return orig_score_mod(qk, b, h, q_nested, kv_nested)
+
+ return njt_score_mod
+
+ causal_njt = create_njt_wrapper(_causal_mod, offsets, seq_idx)
+
+ self.run_test(causal_njt, dtype)
+
@supported_platform
def test_backwards_fails(self):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=torch.float32,
device="cuda",
requires_grad=True,
@@ -139,9 +242,9 @@ class TestTemplatedSDPA(InductorTestCase):
@supported_platform
def test_mixed_dtypes_fails(self):
- query = torch.randn((1, 1, 2048, 64), dtype=torch.float32, device="cuda")
- key = torch.randn((1, 1, 2048, 64), dtype=torch.float16, device="cuda")
- value = torch.randn((1, 1, 2048, 64), dtype=torch.float16, device="cuda")
+ query = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
+ key = torch.randn((1, 1, 1024, 64), dtype=torch.float16, device="cuda")
+ value = torch.randn((1, 1, 1024, 64), dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(
ValueError, "Expected query, key, and value to have the same dtype"
):
@@ -163,6 +266,21 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(score_mod)
+ @supported_platform
+ @patch.object(torch._inductor.config, "max_autotune", True)
+ def test_max_autotune_with_captured(self):
+ head_scale = torch.randn(H, device="cuda")
+ batch_scale = torch.randn(B, device="cuda")
+ tok_scale = torch.randn(S, device="cuda")
+
+ def bias_mod(score, batch, head, token_q, token_kv):
+ score = score + index(tok_scale, [token_q])
+ score = score + index(batch_scale, [batch])
+ score = score + index(head_scale, [head])
+ return score
+
+ self.run_test(bias_mod)
+
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
@common_utils.parametrize("score_mod", [_identity, _causal])
@@ -173,7 +291,7 @@ class TestTemplatedSDPA(InductorTestCase):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=dtype,
device="cuda",
requires_grad=True,
@@ -215,7 +333,7 @@ class TestTemplatedSDPA(InductorTestCase):
def test_logsumexp_only_return(self):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=torch.float32,
device="cuda",
requires_grad=True,
@@ -236,7 +354,7 @@ class TestTemplatedSDPA(InductorTestCase):
def test_logsumexp_is_not_fused(self):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=torch.float32,
device="cuda",
requires_grad=True,
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index 582b3eb723..a1abcb15fb 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -1536,12 +1536,10 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
) -> "VariableTracker":
from .builder import wrap_fx_proxy
- query, key, value, score_mod, *other_buffers = self.normalize_to_args(
- args, kwargs
- )
+ query, key, value, score_mod = self.normalize_to_args(args, kwargs)
p_args, p_kwargs = self.create_wrapped_node(tx, query, score_mod)
- proxied_args = [query, key, value, *other_buffers]
+ proxied_args = [query, key, value]
# Store the invocation as a call
# Norm_kwargs contains the score_function and we dont want to proxy this because
diff --git a/torch/_higher_order_ops/templated_attention.py b/torch/_higher_order_ops/templated_attention.py
index 09e10754fe..388e741837 100644
--- a/torch/_higher_order_ops/templated_attention.py
+++ b/torch/_higher_order_ops/templated_attention.py
@@ -60,7 +60,7 @@ def math_attention(
"""
assert len(other_buffers) == 0, "Other buffers are not yet supported."
- scores = query @ key.transpose(-2, -1)
+ scores = (query @ key.transpose(-2, -1)).to(dtype=torch.float32)
b = torch.arange(0, scores.size(0), device=scores.device)
h = torch.arange(0, scores.size(1), device=scores.device)
@@ -179,9 +179,11 @@ def templated_attention_functionalize(
assert isinstance(other_buffers_unwrapped, tuple)
assert all(isinstance(item, torch.Tensor) for item in other_buffers_unwrapped)
- example_vals = [torch.zeros((), dtype=query.dtype)] + [
- torch.zeros((), dtype=torch.int) for _ in range(4)
- ]
+ example_vals = (
+ [torch.zeros((), dtype=query.dtype)]
+ + [torch.zeros((), dtype=torch.int) for _ in range(4)]
+ + list(other_buffers_unwrapped)
+ )
with ctx.redispatch_to_next() as m:
functional_score_mod = ctx.functionalize(score_mod)
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index 4950f5e802..152621453c 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -3413,22 +3413,14 @@ class TritonScheduling(BaseScheduling):
buffer_names.update(node.used_buffer_names())
# Get buffers objects
+
def _get_buffer(name: str) -> Union[ir.Buffer, ir.TensorBox]:
- if name in V.graph.name_to_buffer:
- return V.graph.name_to_buffer[name]
- elif name in V.graph.graph_inputs:
- return V.graph.graph_inputs[name]
- elif name in V.graph.constants:
- data = V.graph.constants[name]
- return ir.ConstantBuffer(
- name,
- ir.FixedLayout(
- data.device, data.dtype, *V.graph.static_sizes_strides(data)
- ),
- )
- raise RuntimeError(f"Failed to find buffer matching name {name}")
+ buf = V.graph.get_buffer(name)
+ if buf is None:
+ raise RuntimeError(f"Failed to find buffer matching name {name}")
+ return buf
- buffers = [_get_buffer(name) for name in buffer_names]
+ buffers = [V.graph.get_buffer(name) for name in buffer_names]
# In theory we can separately check xnumel and rnumel are <= int_max
# but some indexers do use the full linear index so we need to be
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index c3b5cd3bf8..c6d363ab4b 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -665,6 +665,14 @@ class GraphLowering(torch.fx.Interpreter):
return self.name_to_buffer[buffer_name]
if buffer_name in self.graph_inputs:
return self.graph_inputs[buffer_name]
+ if buffer_name in self.constants:
+ data = V.graph.constants[buffer_name]
+ return ir.ConstantBuffer(
+ buffer_name,
+ ir.FixedLayout(
+ data.device, data.dtype, *V.graph.static_sizes_strides(data)
+ ),
+ )
return None
def get_dtype(self, buffer_name: str):
diff --git a/torch/_inductor/kernel/templated_attention.py b/torch/_inductor/kernel/templated_attention.py
index 7942a367e2..e0adf25322 100644
--- a/torch/_inductor/kernel/templated_attention.py
+++ b/torch/_inductor/kernel/templated_attention.py
@@ -3,6 +3,7 @@ import logging
from typing import Any, List
import torch
+from .. import config
from ..lowering import empty_strided, lowerings, register_lowering
from ..select_algorithm import autotune_select_algorithm, TritonTemplate
@@ -114,12 +115,14 @@ sdpa_template = TritonTemplate(
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk = tl.dot(q, k.to(MATMUL_PRECISION), acc=qk)
# ~~~~~~~~~~~~~~~~~~~ Apply score modification ~~~~~~~~~~~~~~~~~~~
+ m = offs_m[:, None]
+ n = start_n + offs_n[None, :]
{{ modification(
score="qk",
b="off_hz // H",
h="off_hz % H",
- m="offs_m[:, None]",
- n="start_n + offs_n[None, :]",
+ m="m",
+ n="n",
out="qk"
) | indent_except_first(2) }}
# TODO: In the case that score_mod is linear, this can be LICMed
@@ -170,7 +173,8 @@ sdpa_template = TritonTemplate(
)
-@register_lowering(torch.ops.higher_order.templated_attention)
+# TODO: We probably also need a layout constraint?
+@register_lowering(torch.ops.higher_order.templated_attention, type_promotion_kind=None)
def templated_attention(*args, **kwargs):
from torch._prims_common import make_contiguous_strides_for
from ..ir import (
@@ -182,7 +186,7 @@ def templated_attention(*args, **kwargs):
TensorBox,
)
- query, key, value, subgraph = args
+ query, key, value, subgraph, *other_buffers = args
def create_placeholder(name: str, dtype: torch.dtype) -> InputBuffer:
return TensorBox.create(
@@ -272,17 +276,23 @@ def templated_attention(*args, **kwargs):
configs: List[Any] = []
if query.get_dtype() == torch.float32:
configs.append((64, 64, 4, 3))
- configs += [
- (128, 64, 4, 3),
- (128, 128, 4, 3),
- (128, 128, 8, 2),
- (64, 128, 4, 3),
- ]
-
+ else:
+ configs.append((128, 64, 4, 3))
+ if config.max_autotune:
+ configs += [
+ (128, 64, 4, 3),
+ (128, 128, 4, 3),
+ (128, 128, 8, 2),
+ (64, 128, 4, 3),
+ (64, 64, 4, 3),
+ ]
+ # Note, we don't need to pass in the captured buffers explicitly
+ # because they're implicitly added by the score_mod function
+ # We do need to explicitly pass it in for autotuning though.
for BLOCK_M, BLOCK_N, num_warps, num_stages in configs:
sdpa_template.maybe_append_choice(
choices=choices,
- input_nodes=(query, key, value, logsumexp),
+ input_nodes=[query, key, value, logsumexp],
layout=layout,
subgraphs=subgraph_buffer,
mutated_inputs=[
@@ -298,9 +308,10 @@ def templated_attention(*args, **kwargs):
ROWS_GUARANTEED_SAFE=False,
OUTPUT_LOGSUMEXP=True,
)
+ inputs_for_autotuning = [query, key, value, logsumexp] + list(other_buffers)
return (
autotune_select_algorithm(
- "sdpa", choices, [query, key, value, logsumexp], layout
+ "sdpa", choices, inputs_for_autotuning, layout
),
logsumexp,
)
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index db7ff41573..02e29c431b 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -194,7 +194,6 @@ class CachingAutotuner(KernelInterface):
compiled_binaries = []
if not self.configs:
raise RuntimeError("No triton configs are available")
-
for c in self.configs:
try:
compiled_binary, launcher = self._precompile_config(
@@ -202,11 +201,8 @@ class CachingAutotuner(KernelInterface):
)
except OutOfResources as e:
if len(self.configs) == 1:
- raise RuntimeError(
- f"Failed to compile triton config: {c}. "
- f"Report a fatal compilation error. "
- f"{e}"
- )
+ # There are no valid Triton configs
+ raise e
# Skip the config if we run out of resource
continue
self.launchers.append(launcher)
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index 8ba22061a6..c301c3394f 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -36,7 +36,14 @@ from .codegen.triton_utils import config_of, signature_to_meta
from .exc import CUDACompileError
from .ir import ChoiceCaller, PrimitiveInfoType
from .runtime.runtime_utils import do_bench
-from .utils import get_dtype_size, Placeholder, sympy_dot, sympy_product, unique
+from .utils import (
+ get_dtype_size,
+ Placeholder,
+ sympy_dot,
+ sympy_index_symbol,
+ sympy_product,
+ unique,
+)
from .virtualized import V
log = logging.getLogger(__name__)
@@ -269,20 +276,23 @@ class TritonTemplateKernel(TritonKernel):
potential multiple modifications
"""
+ def add_input(name):
+ return self.args.input(name)
+
class PlaceholderSubstitution(V.WrapperHandler): # type: ignore[name-defined]
self.name = "PlaceholderSubstitution"
def load(self, name: str, index: sympy.Expr):
if name not in fixed_inputs:
- raise AssertionError(
- f"All loads should be coming from fixed inputs - {name}"
- )
+ # If it's not a fixed input, it's a load from a captured
+ # tensor
+ var = add_input(name)
+ return f"tl.load({var} + {index})"
+
return f"({fixed_inputs[name]})"
- # TODO Doesn't work yet
def indirect_indexing(self, index_var, size, check):
- return self._inner.indirect_indexing(index_var, size, False)
- # return sympy_symbol(str(index_var))
+ return sympy_index_symbol(str(index_var))
# if self.modification_cache is None:
with V.set_ops_handler(PlaceholderSubstitution(V.ops)):
@@ -589,16 +599,25 @@ class TritonTemplate(KernelTemplate):
+ "-"
)
mod = PyCodeCache.load(code, extra)
- _, call_args, _ = kernel.args.python_argdefs()
- expected_args = list(unique(x.get_name() for x in input_nodes))
- expected_args.extend([fake_out.get_name()])
- assert list(call_args)[: len(expected_args)] == expected_args, (
- call_args,
- expected_args,
+ input_call_args = tuple(kernel.args.input_buffers.keys())
+ output_call_args = tuple(kernel.args.output_buffers.keys())
+
+ # We expect the input_buffer order to be [*input_nodes, *captured_buffers]
+ expected_input_args = tuple(unique(x.get_name() for x in input_nodes))
+ expected_output_args = (fake_out.get_name(),)
+ assert input_call_args[: len(expected_input_args)] == expected_input_args, (
+ input_call_args,
+ expected_input_args,
)
+ assert output_call_args == expected_output_args, (
+ output_call_args,
+ expected_output_args,
+ )
+
+ full_input_nodes = tuple([V.graph.get_buffer(k) for k in input_call_args])
extra_args = V.graph.sizevars.size_hints(
- map(sympy.expand, call_args[len(expected_args) :]),
+ map(sympy.expand, tuple(kernel.args.sizevars.keys())),
fallback=config.unbacked_symint_fallback,
)
@@ -636,13 +655,13 @@ class TritonTemplate(KernelTemplate):
num_stages=num_stages,
num_warps=num_warps,
matrix_instr_nonkdim=kwargs.get("matrix_instr_nonkdim", 0),
- input_tensor_meta=TensorMeta.from_irnodes(input_nodes),
+ input_tensor_meta=TensorMeta.from_irnodes(full_input_nodes),
output_tensor_meta=TensorMeta.from_irnodes(layout),
)
return TritonTemplateCaller(
kernel_hash_name,
- input_nodes,
+ full_input_nodes,
layout,
make_kernel_render,
extra.strip("-").replace("-", ", "),
@@ -994,6 +1013,7 @@ class AlgorithmSelectorCache(PersistentCache):
[c for c in choices if hasattr(c, "precompile")],
timeout=precompilation_timeout_seconds,
)
+ from triton.runtime.autotuner import OutOfResources
@functools.lru_cache(None)
def wait_on_futures():
@@ -1013,6 +1033,9 @@ class AlgorithmSelectorCache(PersistentCache):
)
except StopIteration:
pass
+ except OutOfResources:
+ # This config is invalid due to requiring too many resources
+ pass
executor.shutdown(wait=True)
|
2.41.0
|
2f521f3769a9545b7c9df57569b1cba6116745b
|
Fri, 26 Apr 2024 02:03:01 +0000
|
[PATCH 0691/1000] Revert "remove empty partition (#124920)"
|
This reverts commit 98835fff9fd498472b0e8f49a3a4670d86f3c5b7. Reverted https://github.com/pytorch/pytorch/pull/124920 on behalf of https://github.com/clee2000 due to I think Dr CI is wrong, the xla failure looks real https://hud.pytorch.org/pytorch/pytorch/commit/98835fff9fd498472b0e8f49a3a4670d86f3c5b7 https://github.com/pytorch/pytorch/actions/runs/8840540357/job/24278180954 ([comment](https://github.com/pytorch/pytorch/pull/124920#issuecomment-2078495051))
|
diff --git a/test/test_fx_passes.py b/test/test_fx_passes.py
index 491633f0e4..21400c41c1 100644
--- a/test/test_fx_passes.py
+++ b/test/test_fx_passes.py
@@ -234,11 +234,6 @@ class TestPartitionFunctions:
a2 = e + f
return a0, a1, a2
- @staticmethod
- def forward18(a, b, c):
- a0, a1 = torch.ops.aten.var_mean(a)
- return a0
-
# A mock OperatorSupport class, where only operator.add is supported
class MockOperatorSupport(OperatorSupport):
def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
@@ -282,8 +277,6 @@ class TestFXGraphPasses(JitTestCase):
(TestPartitionFunctions.forward15, [['add_1', 'add', 'permute_1', 'view', 'permute_2', 'permute_3', 'permute']], False),
(TestPartitionFunctions.forward16, [["permute_1", "add_1", "add"]], True),
(TestPartitionFunctions.forward16, [['add_1', 'add', 'permute_1', 'view', 'permute_2', 'permute_3', 'permute']], False),
- # should be empty partition, not a partiton with empty nodes
- (TestPartitionFunctions.forward18, [], False),
])
def test_partitioner(self, fn, expected_partition, bookend_non_compute_pass):
traced = symbolic_trace(fn)
diff --git a/torch/fx/passes/infra/partitioner.py b/torch/fx/passes/infra/partitioner.py
index 5b606c1745..a8a861be0f 100644
--- a/torch/fx/passes/infra/partitioner.py
+++ b/torch/fx/passes/infra/partitioner.py
@@ -260,7 +260,7 @@ class CapabilityBasedPartitioner:
for id, partition in partitions_by_id.items():
logger.debug("partition #%s: %s", id, [node.name for node in partition.nodes])
- return [partition for partition in partitions_by_id.values() if partition.size() > 0]
+ return list(partitions_by_id.values())
def fuse_partitions(self, partitions: List[Partition]) -> GraphModule:
logger.debug("Fusing partitions...")
|
2.41.0
|
913f77c60b8c86434da3b8d88e6e6b6b2319e0b
|
Fri, 26 Apr 2024 02:35:14 +0000
|
[PATCH 0692/1000] Revert "Made FlexAttention rewrite getitem calls to use aten.index in score_mod (#124799)"
|
This reverts commit 9bccafc31c9d489b727155e95633efd19adbceaa. Reverted https://github.com/pytorch/pytorch/pull/124799 on behalf of https://github.com/clee2000 due to it broke tests, but only on crossref https://github.com/pytorch/pytorch/actions/runs/8841521519/job/24279075171; added the no-td label so it'll actually run this time ([comment](https://github.com/pytorch/pytorch/pull/124799#issuecomment-2078530797))
|
diff --git a/c10/cuda/CUDAMiscFunctions.cpp b/c10/cuda/CUDAMiscFunctions.cpp
index f55bba13e9..11ea775366 100644
--- a/c10/cuda/CUDAMiscFunctions.cpp
+++ b/c10/cuda/CUDAMiscFunctions.cpp
@@ -12,7 +12,7 @@ const char* get_cuda_check_suffix() noexcept {
} else {
return "\nCUDA kernel errors might be asynchronously reported at some"
" other API call, so the stacktrace below might be incorrect."
- "\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1";
+ "\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.";
}
}
std::mutex* getFreeMutex() {
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index 9c665d5b88..9fcd0fd78a 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -4,7 +4,7 @@ import functools
from collections import namedtuple
from typing import Callable
-from unittest import expectedFailure, skip, skipUnless
+from unittest import skip, skipUnless
from unittest.mock import patch
import torch
@@ -125,7 +125,7 @@ class TestTemplatedSDPA(InductorTestCase):
head_offset = torch.rand(H, device="cuda", dtype=dtype)
def score_mod(score, b, h, m, n):
- return score + head_offset[h]
+ return score + index(head_offset, [h])
self.run_test(score_mod, dtype)
@@ -136,7 +136,9 @@ class TestTemplatedSDPA(InductorTestCase):
seq_idx[S // 2 :] = 1
def seq_mask_mod(score, b, h, q, kv):
- return torch.where(seq_idx[q] == seq_idx[kv], score, float("-inf"))
+ return torch.where(
+ index(seq_idx, [q]) == index(seq_idx, [kv]), score, float("-inf")
+ )
self.run_test(seq_mask_mod, dtype)
@@ -146,7 +148,7 @@ class TestTemplatedSDPA(InductorTestCase):
bias = torch.randn(S, S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + bias[q, kv]
+ return score + index(bias, [q, kv])
self.run_test(bias_mod, dtype)
@@ -156,7 +158,7 @@ class TestTemplatedSDPA(InductorTestCase):
bias = torch.randn(B, S, S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + bias[b, q, kv]
+ return score + index(bias, [b, q, kv])
self.run_test(bias_mod, dtype)
@@ -166,7 +168,7 @@ class TestTemplatedSDPA(InductorTestCase):
bias = torch.randn(B, H, S, S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + bias[b, h, q, kv]
+ return score + index(bias, [b, h, q, kv])
self.run_test(bias_mod, dtype)
@@ -176,7 +178,7 @@ class TestTemplatedSDPA(InductorTestCase):
rel_bias = torch.randn(2 * S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + rel_bias[(q - kv) + S]
+ return score + index(rel_bias, [(q - kv) + S])
self.run_test(bias_mod, dtype)
@@ -187,7 +189,7 @@ class TestTemplatedSDPA(InductorTestCase):
def bias_mod(score, b, h, q, kv):
causal_attention = q >= kv
- cur_num_bidirectional = num_bidirectional[b]
+ cur_num_bidirectional = index(num_bidirectional, (b,))
bidirectional_attention_on_video = (q <= cur_num_bidirectional) & (
kv <= cur_num_bidirectional
)
@@ -199,38 +201,6 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(bias_mod, dtype)
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes_fast)
- def test_natten_2d(self, dtype):
- H = 32
- W = S // H
- WINDOW = 3
- assert W * H == S
-
- def get_x_y(idx):
- # This should be a floor divide, but we don't support that properly
- return idx / W, idx % W
-
- def natten_mask(score, b, h, q, kv):
- q_x, q_y = get_x_y(q)
- kv_x, kv_y = get_x_y(kv)
- return torch.where(
- ((q_x - kv_x).abs() <= WINDOW) | ((q_y - kv_y).abs() <= WINDOW),
- score,
- float("-inf"),
- )
-
- self.run_test(natten_mask, dtype)
-
- @supported_platform
- @expectedFailure
- @common_utils.parametrize("dtype", test_dtypes_fast)
- def test_silu_on_score(self, dtype):
- def silu_score(score, b, h, q, kv):
- return torch.nn.functional.silu(score)
-
- self.run_test(silu_score, dtype)
-
@supported_platform
@skip("Triton bug ") # https://github.com/pytorch/pytorch/issues/124571
@common_utils.parametrize("dtype", test_dtypes)
@@ -244,8 +214,8 @@ class TestTemplatedSDPA(InductorTestCase):
def create_njt_wrapper(orig_score_mod, offsets, seq_idx):
def njt_score_mod(qk, b, h, q, kv):
- q_nested = q - offsets[seq_idx[q]]
- kv_nested = kv - offsets[seq_idx[kv]]
+ q_nested = q - index(offsets, [index(seq_idx, [q])])
+ kv_nested = kv - index(offsets, [index(seq_idx, [kv])])
return orig_score_mod(qk, b, h, q_nested, kv_nested)
return njt_score_mod
@@ -304,9 +274,9 @@ class TestTemplatedSDPA(InductorTestCase):
tok_scale = torch.randn(S, device="cuda")
def bias_mod(score, batch, head, token_q, token_kv):
- score = score + tok_scale[token_q]
- score = score + batch_scale[batch]
- score = score + head_scale[head]
+ score = score + index(tok_scale, [token_q])
+ score = score + index(batch_scale, [batch])
+ score = score + index(head_scale, [head])
return score
self.run_test(bias_mod)
diff --git a/test/test_overrides.py b/test/test_overrides.py
index cb46ca6ed8..d79753f78a 100644
--- a/test/test_overrides.py
+++ b/test/test_overrides.py
@@ -1387,28 +1387,6 @@ class TestTorchFunctionMode(TestCase):
self.assertTrue(called)
- def test_getitem_call(self):
- # This failed because the parser thinks the function is called to()
- # but it's actually called _parse_to()
-
- called = False
-
- class A(TorchFunctionMode):
- def __torch_function__(self, func, types, args=(), kwargs=None):
- nonlocal called
- if kwargs is None:
- kwargs = {}
- called = True
- return func(*args, **kwargs)
-
- a = torch.zeros(5)
- b = torch.tensor(0)
- with A():
- a[b]
-
- self.assertTrue(called)
-
-
def test_distributions_bernoulli(self):
# This failed because improper use of has_torch_function when
# is_tensor_like should have been used instead, inside the
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index 26f1eeb91c..a1abcb15fb 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -1475,7 +1475,6 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
self, tx, query: "VariableTracker", score_function: "VariableTracker"
):
from torch._dynamo.symbolic_convert import InstructionTranslator
- from torch._higher_order_ops.templated_attention import TransformGetItemToIndex
from .builder import SourcelessBuilder
tx: InstructionTranslator = tx
@@ -1500,21 +1499,19 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
bhmn = [create_scalar() for _ in range(4)]
new_args = [score, *bhmn]
-
- with TransformGetItemToIndex():
- (
- (body_output, body_treespec),
- body_graph,
- body_lifted_freevars,
- ) = speculate_subgraph(
- tx,
- score_function,
- new_args,
- {}, # expect only args no kwargs for now
- description="templated_attention",
- source_target=self.value,
- set_subgraph_inputs="flatten_manual",
- )
+ (
+ (body_output, body_treespec),
+ body_graph,
+ body_lifted_freevars,
+ ) = speculate_subgraph(
+ tx,
+ score_function,
+ new_args,
+ {}, # expect only args no kwargs for now
+ description="templated_attention",
+ source_target=self.value,
+ set_subgraph_inputs="flatten_manual",
+ )
body_name = add_subgraph(
tx,
diff --git a/torch/_functorch/vmap.py b/torch/_functorch/vmap.py
index 054a40123e..5d05148faf 100644
--- a/torch/_functorch/vmap.py
+++ b/torch/_functorch/vmap.py
@@ -178,7 +178,7 @@ def _maybe_remove_batch_dim(name, batched_output, vmap_level, batch_size, out_di
raise ValueError(
f"vmap({name}, ...): `{name}` must only return "
f"Tensors, got type {type(batched_output)}. "
- "Did you mean to set out_dims= to None for output?"
+ "Did you mean to set out_dim= to None for output?"
)
return _remove_batch_dim(batched_output, vmap_level, batch_size, out_dim)
diff --git a/torch/_higher_order_ops/templated_attention.py b/torch/_higher_order_ops/templated_attention.py
index 52a9156820..388e741837 100644
--- a/torch/_higher_order_ops/templated_attention.py
+++ b/torch/_higher_order_ops/templated_attention.py
@@ -1,4 +1,4 @@
-from typing import Any, Callable, Tuple
+from typing import Callable, Tuple
import torch
import torch.utils._pytree as pytree
@@ -16,29 +16,6 @@ from torch.fx.experimental.proxy_tensor import (
track_tensor_tree,
)
-from torch.overrides import TorchFunctionMode
-
-
-def transform_getitem_args(x: torch.Tensor, index_args) -> Tuple[Any, ...]:
- if isinstance(index_args, tuple):
- return (x, list(index_args))
- elif not isinstance(index_args, (list, tuple)):
- return (x, [index_args])
- return (x, index_args)
-
-
-class TransformGetItemToIndex(TorchFunctionMode):
- # This is needed since we want to support calling
- # A[q_idx], where q_idx is a scalar tensor in score_mod.
- # Today, when q_idx is a scalar tensor, we implicitly convert it to a python
- # scalar and create a view. We do not want that behavior in this case, so we
- # use this torchfunctionmode to override that behavior for score_mod
- # wherever we're running it.
- def __torch_function__(self, func, types, args, kwargs=None):
- if func == torch.Tensor.__getitem__:
- return torch.ops.aten.index(*transform_getitem_args(*args))
- return func(*args, **(kwargs or {}))
-
class TemplatedAttentionHOP(HigherOrderOperator):
def __init__(self):
@@ -96,10 +73,7 @@ def math_attention(
score_mod = torch.vmap(score_mod, in_dims=(0, None, 0, None, None) + in_dim_buffers)
score_mod = torch.vmap(score_mod, in_dims=(0, 0, None, None, None) + in_dim_buffers)
- # todo: We wouldn't need these overrides in this file if Dynamo always did the
- # rewriting.
- with TransformGetItemToIndex():
- scores = score_mod(scores, b, h, m, n, *other_buffers).to(torch.float32)
+ scores = score_mod(scores, b, h, m, n, *other_buffers).to(torch.float32)
# TODO Unconditionally return logsumexp for backwards
# if any(t.requires_grad for t in (query, key, value)):
@@ -148,8 +122,7 @@ def trace_templated_attention(
example_vals = [
torch.zeros((), dtype=query.dtype, requires_grad=query.requires_grad)
] + [torch.zeros((), dtype=torch.int) for _ in range(4)]
- with TransformGetItemToIndex():
- score_graph = make_fx(score_mod)(*example_vals, *other_buffers)
+ score_graph = make_fx(score_mod)(*example_vals, *other_buffers)
proxy_mode.tracer.root.register_module("sdpa_score", score_graph)
node_args = (query, key, value, score_graph, *other_buffers)
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
@@ -214,10 +187,9 @@ def templated_attention_functionalize(
with ctx.redispatch_to_next() as m:
functional_score_mod = ctx.functionalize(score_mod)
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
- with TransformGetItemToIndex():
- mutates = _has_potential_branch_input_mutation(
- functional_score_mod, example_vals, pre_dispatch
- )
+ mutates = _has_potential_branch_input_mutation(
+ functional_score_mod, example_vals, pre_dispatch
+ )
# The only care about mutations of existing buffers since we can't replay these.
# However, we can just error if anything is detected
if mutates:
diff --git a/torch/_prims_common/__init__.py b/torch/_prims_common/__init__.py
index 377fc36830..61d602bd36 100644
--- a/torch/_prims_common/__init__.py
+++ b/torch/_prims_common/__init__.py
@@ -85,7 +85,6 @@ torch_function_passthrough = {
torch.Tensor.__format__,
torch.Tensor.__repr__,
torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
- torch.Tensor.__getitem__,
}
diff --git a/torch/csrc/autograd/python_variable_indexing.cpp b/torch/csrc/autograd/python_variable_indexing.cpp
index e3cdd04f09..87b0e32293 100644
--- a/torch/csrc/autograd/python_variable_indexing.cpp
+++ b/torch/csrc/autograd/python_variable_indexing.cpp
@@ -32,7 +32,8 @@
using namespace at;
using namespace torch::autograd::utils;
-namespace torch::autograd {
+namespace torch {
+namespace autograd {
Py_ssize_t THPVariable_length(PyObject* self) {
HANDLE_TH_ERRORS
@@ -68,7 +69,7 @@ static inline int64_t count_specified_dimensions(PyObject* index) {
for (Py_ssize_t i = 0; i < size; i++) {
PyObject* obj = PyTuple_GET_ITEM(
index, i); // NOLINT(cppcoreguidelines-pro-type-cstyle-cast)
- if (check_has_torch_function(obj))
+ if (!THPVariable_CheckExact(obj) && check_has_torch_function(obj))
return -1;
if (THPVariable_Check(obj)) {
const auto& var = THPVariable_Unpack(obj);
@@ -340,7 +341,7 @@ static inline THPObjectPtr wrapTuple(PyObject* index) {
// indexing is needed, it calls C++ `at::indexing::dispatch_index`.
PyObject* THPVariable_getitem(PyObject* self, PyObject* index) {
HANDLE_TH_ERRORS
- if (check_has_torch_function(self)) {
+ if (!THPVariable_CheckExact(self) && check_has_torch_function(self)) {
return handle_torch_function_indexing(self, index);
}
const auto& self_ = THPVariable_Unpack(self);
@@ -437,8 +438,9 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
if (py_value == nullptr) {
throw TypeError("Tensor does not support deleting items");
}
- if ((check_has_torch_function(self)) ||
- (check_has_torch_function(py_value))) {
+ if ((!THPVariable_CheckExact(self) && check_has_torch_function(self)) ||
+ (!THPVariable_CheckExact(py_value) &&
+ check_has_torch_function(py_value))) {
py::object ret = py::reinterpret_steal<py::object>(
handle_torch_function_indexing(self, index, py_value));
return 0;
@@ -551,4 +553,5 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
END_HANDLE_TH_ERRORS_RET(-1)
}
-} // namespace torch::autograd
+} // namespace autograd
+} // namespace torch
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 3d77cc1252..46148424e5 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -22133,9 +22133,9 @@ python_ref_db = [
torch_opinfo_name="roll",
validate_view_consistency=False,
skips=(
- # # RuntimeError: no _refs support for torch.Tensor.__getitem__
- # # Leaving it as a ref because fftshift uses it
- # DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
+ # RuntimeError: no _refs support for torch.Tensor.__getitem__
+ # Leaving it as a ref because fftshift uses it
+ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
),
),
PythonRefInfo(
diff --git a/torch/testing/_internal/opinfo/definitions/fft.py b/torch/testing/_internal/opinfo/definitions/fft.py
index 0601af24bb..3f1d43ee9f 100644
--- a/torch/testing/_internal/opinfo/definitions/fft.py
+++ b/torch/testing/_internal/opinfo/definitions/fft.py
@@ -767,10 +767,18 @@ python_ref_db: List[OpInfo] = [
"_refs.fft.fftshift",
op_db=op_db,
torch_opinfo_name="fft.fftshift",
+ skips=(
+ # TODO Move fftshift to decomps
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
+ ),
),
PythonRefInfo(
"_refs.fft.ifftshift",
op_db=op_db,
torch_opinfo_name="fft.ifftshift",
+ skips=(
+ # TODO Move ifftshift to decomps
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
+ ),
),
]
diff --git a/torch/testing/_internal/opinfo/definitions/linalg.py b/torch/testing/_internal/opinfo/definitions/linalg.py
index 288aaa34f2..a1b6531b15 100644
--- a/torch/testing/_internal/opinfo/definitions/linalg.py
+++ b/torch/testing/_internal/opinfo/definitions/linalg.py
@@ -2389,6 +2389,8 @@ python_ref_db: List[OpInfo] = [
supports_out=True,
op_db=op_db,
skips=(
+ # no _refs support for Tensor.__getitem__
+ DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
# TODO: is this really needed?
DecorateInfo(
unittest.expectedFailure, "TestCommon", "test_python_ref_errors"
|
2.41.0
|
3f4e71daa6dce6014d30da046d28f14cf30d5a4
|
Fri, 26 Apr 2024 02:45:42 +0000
|
[PATCH 0693/1000] Making _MeshEnv subclassing thread local (#124555)
|
With _mesh_resources being a global variable, when thread-based process-group testing is used (aka spawn_threads_and_init_comms()), the last rank using the same key would overwrite the earlier entries. This isn't an issue in the regular process-based runtime, since logically each key is unique there. Example failure: https://github.com/pytorch/pytorch/actions/runs/8779134353/job/24087295785 ``` RuntimeError: Could not resolve the process group registered under the name 8 ``` or a "not None" assertion error. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124555 Approved by: https://github.com/xunnanxu, https://github.com/wanchaol
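For context, a minimal, hypothetical sketch (not part of the patch; the _Env and worker names are made up) of why deriving from threading.local isolates this bookkeeping per thread:

```python
import threading


class _Env(threading.local):
    # Stand-in for _MeshEnv: because the class derives from threading.local,
    # __init__ runs again in every thread that first touches the instance,
    # so each thread gets its own freshly initialized mapping.
    def __init__(self) -> None:
        self.child_to_parent_mapping = {}


_env = _Env()


def worker(rank: int) -> None:
    # Each simulated "rank" runs on its own thread; with a plain global dict
    # these writes would clobber each other, but here every thread only ever
    # sees the entry it wrote itself.
    _env.child_to_parent_mapping[rank] = f"mesh-{rank}"
    assert list(_env.child_to_parent_mapping) == [rank]


threads = [threading.Thread(target=worker, args=(r,)) for r in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
```

Under that model, the only change needed is the base class of _MeshEnv, which is exactly what the diff below does.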
|
diff --git a/torch/distributed/device_mesh.py b/torch/distributed/device_mesh.py
index 79e5c508a6..2b176583de 100644
--- a/torch/distributed/device_mesh.py
+++ b/torch/distributed/device_mesh.py
@@ -1,6 +1,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates
import logging
import math
+import threading
from typing import Dict, List, Optional, Tuple, TYPE_CHECKING, Union
import torch
@@ -57,7 +58,7 @@ else:
"DeviceMesh requires numpy >= 1.21 to be installed for type checking"
)
- class _MeshEnv:
+ class _MeshEnv(threading.local):
def __init__(self) -> None:
self.mesh_stack: List[DeviceMesh] = []
self.child_to_parent_mapping: Dict[DeviceMesh, DeviceMesh] = {}
|
2.41.0
|
9a611a3ce0ba04147ab3a4e8310aa0734b52f46
|
Fri, 26 Apr 2024 02:57:51 +0000
|
[PATCH 0694/1000] Update Jinja to 3.1.3 (#124976)
|
To fix CVE-2024-22195. Also, delete the unused docs/cpp/requirements.txt and functorch/docs/requirements.txt. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124976 Approved by: https://github.com/kit1980
|
diff --git a/.github/requirements-gha-cache.txt b/.github/requirements-gha-cache.txt
index 8b708df2eb..312133cd8b 100644
--- a/.github/requirements-gha-cache.txt
+++ b/.github/requirements-gha-cache.txt
@@ -5,7 +5,7 @@
# functorch/docs/requirements.txt
# .ci/docker/requirements-ci.txt
boto3==1.19.12
-jinja2==3.0.1
+jinja2==3.1.3
lintrunner==0.10.7
ninja==1.10.0.post1
nvidia-ml-py==11.525.84
diff --git a/docs/cpp/requirements.txt b/docs/cpp/requirements.txt
deleted file mode 100644
index da401f2883..0000000000
--- a/docs/cpp/requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-sphinx==3.1.2
-Jinja2==3.0.*
-breathe==4.25.0
-exhale==0.2.3
-docutils==0.16
--e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
-bs4
-lxml
diff --git a/functorch/docs/requirements.txt b/functorch/docs/requirements.txt
deleted file mode 100644
index 706eff6d47..0000000000
--- a/functorch/docs/requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-sphinx==3.5.4
-docutils==0.16
--e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
-sphinxcontrib.katex==0.8.6
-sphinx_copybutton>=0.3.1
-IPython==8.12.0
-myst-nb==0.13.2
-# Fixing upper version due to https://github.com/sphinx-doc/sphinx/issues/10306
-Jinja2<3.1.0
|
2.41.0
|
69b1e6cdca56ee91a61ee6b1502a9faa1819106
|
Fri, 26 Apr 2024 03:19:21 +0000
|
[PATCH 0695/1000] [profiler] Split up profiler test file (#124856)
|
To help with test timeout issues, split the profiler test file into 4 files: - profiler - record_function - execution_trace - torch_tidy. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124856 Approved by: https://github.com/shengfukevin, https://github.com/aaronenyeshi
|
diff --git a/test/profiler/test_execution_trace.py b/test/profiler/test_execution_trace.py
new file mode 100644
index 0000000000..3d31ee20a1
--- /dev/null
+++ b/test/profiler/test_execution_trace.py
@@ -0,0 +1,383 @@
+# Owner(s): ["oncall: profiler"]
+
+# if tqdm is not shutdown properly, it will leave the monitor thread alive.
+# This causes an issue in the multithreading test because we check all events
+# in that test with their tids. The events that correspond to these lingering
+# threads all have TID of (uint64_t)(-1) which is invalid.
+# The workaround is turning off the monitoring thread when tqdm is loaded.
+# Since these are unit tests, it is safe to turn off monitor thread.
+try:
+ import tqdm
+
+ tqdm.tqdm.monitor_interval = 0
+except ImportError:
+ pass
+
+import json
+import sys
+import tempfile
+import unittest
+from typing import Any, Dict, List
+
+import torch
+import torch.nn as nn
+from torch.autograd import (
+ _record_function_with_args_enter,
+ _record_function_with_args_exit,
+)
+from torch.profiler import (
+ ExecutionTraceObserver,
+ kineto_available,
+ profile,
+ record_function,
+ supported_activities,
+)
+
+from torch.testing._internal.common_cuda import TEST_CUDA
+from torch.testing._internal.common_utils import (
+ IS_WINDOWS,
+ run_tests,
+ skipIfTorchDynamo,
+ TestCase,
+)
+
+from torch.utils._triton import has_triton
+
+Json = Dict[str, Any]
+
+
+class TestExecutionTrace(TestCase):
+ def payload(self, use_cuda=False):
+ u = torch.randn(3, 4, 5, requires_grad=True)
+ with record_function("## TEST 1 ##", "1, 2, 3"):
+ inf_val = float("inf")
+ neg_inf_val = float("-inf")
+ nan_val = float("nan")
+ rf_handle = _record_function_with_args_enter(
+ "## TEST 2 ##",
+ 1,
+ False,
+ 2.5,
+ [u, u],
+ (u, u),
+ "hello",
+ u,
+ inf_val,
+ neg_inf_val,
+ nan_val,
+ )
+ x = torch.randn(10, 10, requires_grad=True)
+ if use_cuda:
+ x = x.cuda()
+ y = torch.randn(10, 10, requires_grad=True)
+ if use_cuda:
+ y = y.cuda()
+ z = x + y + x * y + x * y
+ z.backward(z)
+ gelu = nn.GELU()
+ m = torch.randn(2)
+ _ = gelu(m)
+ if use_cuda:
+ z = z.cpu()
+ _record_function_with_args_exit(rf_handle)
+
+ def get_execution_trace_root(self, output_file_name) -> Json:
+ nodes = []
+ with open(output_file_name) as f:
+ et_graph = json.load(f)
+ assert "nodes" in et_graph
+ nodes = et_graph["nodes"]
+ return nodes
+
+ def get_execution_trace_rf_ids(self, nodes: List[Json]) -> List[int]:
+ """Returns a sorted list of rf_id (record function ids) in execution trace"""
+
+ def get_rf_id(node):
+ attrs = node["attrs"]
+ for a in attrs:
+ if a["name"] == "rf_id":
+ return a["value"]
+ return None
+
+ rf_ids_ = (
+ get_rf_id(n)
+ for n in nodes
+ if n["name"] != "[pytorch|profiler|execution_trace|process]"
+ and n["name"] != "[pytorch|profiler|execution_trace|thread]"
+ )
+ return sorted(rf_id for rf_id in rf_ids_ if rf_id is not None)
+
+ def get_kineto_rf_ids(self, events: List[Json]) -> List[int]:
+ """Returns a sorted list of Record function IDs for CPU operators and user annotations"""
+ ops_and_annotations = (
+ e for e in events if e.get("cat", "") in ["cpu_op", "user_annotation"]
+ )
+ return sorted(
+ e.get("args", {}).get("Record function id", -1) for e in ops_and_annotations
+ )
+
+ @unittest.skipIf(not kineto_available(), "Kineto is required")
+ def test_execution_trace_with_kineto(self):
+ trace_called_num = 0
+
+ def trace_handler(p):
+ nonlocal trace_called_num
+ trace_called_num += 1
+
+ use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
+ # Create a temp file to save execution trace and kineto data.
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
+ fp.close()
+ kt = tempfile.NamedTemporaryFile(
+ mode="w+t", suffix=".kineto.json", delete=False
+ )
+ kt.close()
+
+ with profile(
+ activities=supported_activities(),
+ schedule=torch.profiler.schedule(
+ skip_first=3, wait=1, warmup=1, active=2, repeat=1
+ ),
+ on_trace_ready=trace_handler,
+ execution_trace_observer=(
+ ExecutionTraceObserver().register_callback(fp.name)
+ ),
+ ) as p:
+ for idx in range(10):
+ with record_function(f"## LOOP {idx} ##"):
+ self.payload(use_cuda=use_cuda)
+ p.step()
+ self.assertEqual(fp.name, p.execution_trace_observer.get_output_file_path())
+
+ # Uncomment for debugging
+ # print("Output kineto = ", kt.name)
+ # print("Output ET = ", fp.name)
+
+ p.export_chrome_trace(kt.name)
+ self.assertEqual(trace_called_num, 1)
+
+ nodes = self.get_execution_trace_root(fp.name)
+ loop_count = 0
+ found_root_node = False
+ for n in nodes:
+ assert "name" in n
+ if "[pytorch|profiler|execution_trace|process]" in n["name"]:
+ found_root_node = True
+ if n["name"].startswith("## LOOP "):
+ loop_count += 1
+ self.assertTrue(found_root_node)
+ # Since profiler trace is active for 2 iterations
+ self.assertEqual(loop_count, 2)
+
+ # Compare the collected Execution Trace and Kineto Trace
+ # in terms of record func ID (rf_id) and External IDs
+ # both of these should match for the same trace window.
+
+ with open(kt.name) as f:
+ kineto = json.load(f)
+ events = kineto["traceEvents"]
+
+ # Look up rf_ids in both Execution and Kineto trace as two lists.
+ rf_ids_et = self.get_execution_trace_rf_ids(nodes)
+ rf_ids_kineto = self.get_kineto_rf_ids(events)
+
+ self.assertCountEqual(rf_ids_et, rf_ids_kineto)
+ self.assertListEqual(
+ rf_ids_et,
+ rf_ids_kineto,
+ msg=f"ET and kineto rf_id should exactly match\n"
+ f" rf_ids_et = {rf_ids_et}\n"
+ f" rf_ids_kineto = {rf_ids_kineto}\n",
+ )
+
+ def test_execution_trace_alone(self):
+ use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
+ # Create a temp file to save execution trace data.
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
+ fp.close()
+ expected_loop_events = 0
+
+ et = ExecutionTraceObserver().register_callback(fp.name)
+ et.start()
+ for idx in range(5):
+ expected_loop_events += 1
+ with record_function(f"## LOOP {idx} ##"):
+ self.payload(use_cuda=use_cuda)
+ et.stop()
+
+ assert fp.name == et.get_output_file_path()
+ et.unregister_callback()
+ nodes = self.get_execution_trace_root(fp.name)
+ loop_count = 0
+ # Expected tensor object tuple size, in th form of:
+ # [tensor_id, storage_id, offset, numel, itemsize, device_str]
+ tensor_tuple_size = 6
+ found_root_node = False
+ for n in nodes:
+ assert "name" in n
+ if "[pytorch|profiler|execution_trace|process]" in n["name"]:
+ found_root_node = True
+ if n["name"].startswith("## LOOP "):
+ loop_count += 1
+ # Check if tensor tuple representation size is correct.
+ if n["name"] == "## TEST 2 ##":
+ assert len(n["inputs"]["values"][3][0]) == tensor_tuple_size
+ assert found_root_node
+ assert loop_count == expected_loop_events
+
+ @unittest.skipIf(IS_WINDOWS, "torch.compile does not support WINDOWS")
+ @unittest.skipIf(
+ sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
+ )
+ @unittest.skipIf(not TEST_CUDA or not has_triton(), "need CUDA and triton to run")
+ def test_execution_trace_with_pt2(self):
+ class ConvAndRelu(nn.Module):
+ def __init__(self) -> None:
+ super().__init__()
+ self.linear = nn.Linear(4096, 4096)
+ self.relu = nn.ReLU(inplace=True)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.linear(x)
+ x = self.relu(x)
+ return x
+
+ # Create a temp file to save execution trace data.
+ fp = tempfile.NamedTemporaryFile("w+t", suffix="_et.json", delete=False)
+ fp.close()
+
+ with torch._inductor.config.patch(compile_threads=1):
+ test_module = torch.compile(ConvAndRelu())
+
+ x = torch.rand(128, 4096)
+ et = ExecutionTraceObserver().register_callback(fp.name)
+ et.start()
+ test_module.forward(x)
+ et.stop()
+
+ assert fp.name == et.get_output_file_path()
+ et.unregister_callback()
+ nodes = self.get_execution_trace_root(fp.name)
+
+ found_root_node = False
+ for n in nodes:
+ assert "name" in n
+ if "[pytorch|profiler|execution_trace|process]" in n["name"]:
+ found_root_node = True
+
+ assert found_root_node
+
+ def test_execution_trace_start_stop(self):
+ use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
+ # Create a temp file to save execution trace data.
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
+ fp.close()
+ expected_loop_events = 0
+ et = ExecutionTraceObserver()
+ et.register_callback(fp.name)
+ for idx in range(10):
+ if idx == 3:
+ et.start()
+ elif idx == 5:
+ et.stop()
+ elif idx == 8:
+ et.start()
+ elif idx == 9:
+ et.stop()
+ if et._execution_trace_running:
+ expected_loop_events += 1
+ with record_function(f"## LOOP {idx} ##"):
+ self.payload(use_cuda=use_cuda)
+
+ assert fp.name == et.get_output_file_path()
+ et.unregister_callback()
+ nodes = self.get_execution_trace_root(fp.name)
+ loop_count = 0
+ found_root_node = False
+ for n in nodes:
+ assert "name" in n
+ if "[pytorch|profiler|execution_trace|process]" in n["name"]:
+ found_root_node = True
+ if n["name"].startswith("## LOOP "):
+ loop_count += 1
+ assert found_root_node
+ assert loop_count == expected_loop_events
+
+ def test_execution_trace_repeat_in_loop(self):
+ use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
+ iter_list = {3, 4, 6, 8}
+ expected_loop_events = len(iter_list)
+ output_files = []
+ for idx in range(10):
+ if idx in iter_list:
+ # Create a temp file to save execution trace data.
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
+ fp.close()
+ output_files.append(fp.name)
+ et = ExecutionTraceObserver()
+ et.register_callback(fp.name)
+ et.start()
+ with record_function(f"## LOOP {idx} ##"):
+ self.payload(use_cuda=use_cuda)
+ if idx in iter_list:
+ et.stop()
+ et.unregister_callback()
+
+ event_count = 0
+ for et_file in output_files:
+ nodes = self.get_execution_trace_root(et_file)
+ found_root_node = False
+ for n in nodes:
+ assert "name" in n
+ if "[pytorch|profiler|execution_trace|process]" in n["name"]:
+ assert n["id"] == 1
+ found_root_node = True
+ if n["name"].startswith("## LOOP "):
+ event_count += 1
+ assert found_root_node
+ assert event_count == expected_loop_events
+
+ def test_execution_trace_no_capture(self):
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
+ fp.close()
+ et = ExecutionTraceObserver()
+ et.register_callback(fp.name)
+
+ assert fp.name == et.get_output_file_path()
+ et.unregister_callback()
+ nodes = self.get_execution_trace_root(fp.name)
+ for n in nodes:
+ assert "name" in n
+ if "[pytorch|profiler|execution_trace|process]" in n["name"]:
+ found_root_node = True
+ assert found_root_node
+
+ @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/124500")
+ def test_execution_trace_nested_tensor(self):
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
+ fp.close()
+
+ et = ExecutionTraceObserver()
+ observer = et.register_callback(fp.name)
+
+ def fn(nt):
+ return nt.sin().cos()
+
+ with torch.profiler.profile(execution_trace_observer=observer) as prof:
+ for i in range(3):
+ values = torch.rand((8 + i, 4 + i))
+ offsets = torch.tensor([0, 2, 4, 6, 8 + i])
+ nt = torch.nested.nested_tensor_from_jagged(values, offsets)
+ fn(nt)
+
+ nodes = self.get_execution_trace_root(fp.name)
+ found_cos = False
+ for n in nodes:
+ assert "name" in n
+ if "cos" in n["name"]:
+ found_cos = True
+ assert found_cos
+
+
+if __name__ == "__main__":
+ run_tests()
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py
index 61f2ca314a..2ec04c447f 100644
--- a/test/profiler/test_profiler.py
+++ b/test/profiler/test_profiler.py
@@ -17,16 +17,14 @@ import collections
import gc
import json
import os
+import pickle
import re
import subprocess
import sys
-import tempfile
-import textwrap
import threading
import unittest
-import weakref
from dataclasses import dataclass, field
-from typing import Any, Dict, List, Optional
+from typing import List, Optional
from unittest.mock import patch
import expecttest
@@ -34,18 +32,11 @@ import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
-import torch.utils.data.datapipes as dp
-from torch._C._profiler import _TensorMetadata
-from torch.autograd import (
- _record_function_with_args_enter,
- _record_function_with_args_exit,
-)
from torch.autograd.profiler import KinetoStepTracker, profile as _profile
from torch.autograd.profiler_legacy import profile as _profile_legacy
from torch.profiler import (
_utils,
DeviceType,
- ExecutionTraceObserver,
kineto_available,
profile,
ProfilerAction,
@@ -66,7 +57,9 @@ from torch.profiler._pattern_matcher import (
report_all_anti_patterns,
SynchronizedDataLoaderPattern,
)
+
from torch.testing._internal.common_cuda import TEST_MULTIGPU
+
from torch.testing._internal.common_device_type import skipCUDAVersionIn
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
@@ -84,15 +77,13 @@ from torch.testing._internal.common_utils import (
TestCase,
)
-Json = Dict[str, Any]
-
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
-import pickle
+
from torch._C._profiler import _ExperimentalConfig, _ExtraFields_PyCall
@@ -226,456 +217,6 @@ class TestProfilerITT(TestCase):
q.backward()
-class TestRecordFunction(TestCase):
- def _record_function_with_param(self):
- u = torch.randn(3, 4, 5, requires_grad=True)
- with _profile(
- with_stack=True, use_kineto=kineto_available(), record_shapes=True
- ) as prof:
- with record_function("## TEST 1 ##", "1, 2, 3"):
- rf_handle = _record_function_with_args_enter(
- "## TEST 2 ##", 1, False, 2.5, [u, u], "hello", u
- )
- _record_function_with_args_exit(rf_handle)
- with record_function("## TEST 3 ##"):
- rf_handle = _record_function_with_args_enter("## TEST 4 ##")
- _record_function_with_args_exit(rf_handle)
- return prof
-
- def test_record_function(self):
- prof_result = self._record_function_with_param()
- found_test_1 = False
- found_test_2 = False
- found_test_3 = False
- found_test_4 = False
- for e in prof_result.function_events:
- if "## TEST 1 ##" == e.name:
- found_test_1 = True
- self.assertTrue(e.input_shapes == [[]])
- elif "## TEST 2 ##" == e.name:
- found_test_2 = True
- self.assertTrue(e.input_shapes == [[], [], [], [], [], [3, 4, 5]])
- elif "## TEST 3 ##" == e.name:
- found_test_3 = True
- self.assertTrue(e.input_shapes == [])
- elif "## TEST 4 ##" == e.name:
- found_test_4 = True
- self.assertTrue(e.input_shapes == [])
- self.assertTrue(found_test_1)
- self.assertTrue(found_test_2)
- self.assertTrue(found_test_3)
- self.assertTrue(found_test_4)
-
- def test_datapipe_with_record_function(self):
- with _profile(
- with_stack=True, use_kineto=kineto_available(), record_shapes=True
- ) as prof:
- input_dp1 = dp.iter.IterableWrapper(range(4))
- input_dp2 = dp.iter.IterableWrapper(range(4, 8))
- input_dp3 = dp.iter.IterableWrapper(range(8, 12))
- output_dp = input_dp1.mux(input_dp2, input_dp3)
- output = list(output_dp)
-
- has_iter = False
- has_mux = False
- for e in prof.function_events:
- if has_iter and has_mux:
- break
-
- if not has_iter and "IterableWrapper" in e.name:
- has_iter = True
- if not has_mux and "Multiplexer" in e.name:
- has_mux = True
- self.assertTrue(has_iter)
- self.assertTrue(has_mux)
-
- def test_datapipe_delegation_with_profiler(self):
- class IDPIterator(torch.utils.data.IterDataPipe):
- def __init__(self):
- self.data = list(range(10))
- self._idx = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._idx >= 10:
- self._idx = 0
- raise StopIteration
- self._idx += 1
- return self.data[self._idx - 1]
-
- def get_value(self, idx):
- return self.data[idx]
-
- dp1 = IDPIterator() # The object itself is an iterator
- self.assertEqual(5, dp1.get_value(5))
- it_dp1 = iter(dp1) # This creates the 1st iterator
- self.assertEqual(5, it_dp1.get_value(5)) # type: ignore[attr-defined]
- self.assertEqual(list(range(10)), list(it_dp1))
-
- class IDPDelegator(torch.utils.data.IterDataPipe):
- def __init__(self, datapipe):
- self.datapipe = datapipe
-
- def __iter__(self):
- return iter(self.datapipe)
-
- dp2 = IDPDelegator(dp1)
- it_dp2 = iter(dp2)
- self.assertEqual(5, it_dp2.get_value(5))
- self.assertEqual(list(range(10)), list(it_dp2))
-
- def test_datapipe_with_record_function_fork(self):
- with _profile(
- with_stack=True, use_kineto=kineto_available(), record_shapes=True
- ) as prof:
- input_dp = dp.iter.IterableWrapper(range(10))
- dp1, dp2, dp3 = input_dp.fork(num_instances=3)
- output1 = list(dp1)
- has_iter = False
- has_child = False
- for e in prof.function_events:
- if has_iter and has_child:
- break
-
- if not has_iter and "IterableWrapper" in e.name:
- has_iter = True
- if not has_child and "_ChildDataPipe" in e.name:
- has_child = True
- self.assertTrue(has_iter)
- self.assertTrue(has_child)
-
-
-class TestExecutionTrace(TestCase):
- def payload(self, use_cuda=False):
- u = torch.randn(3, 4, 5, requires_grad=True)
- with record_function("## TEST 1 ##", "1, 2, 3"):
- inf_val = float("inf")
- neg_inf_val = float("-inf")
- nan_val = float("nan")
- rf_handle = _record_function_with_args_enter(
- "## TEST 2 ##",
- 1,
- False,
- 2.5,
- [u, u],
- (u, u),
- "hello",
- u,
- inf_val,
- neg_inf_val,
- nan_val,
- )
- x = torch.randn(10, 10, requires_grad=True)
- if use_cuda:
- x = x.cuda()
- y = torch.randn(10, 10, requires_grad=True)
- if use_cuda:
- y = y.cuda()
- z = x + y + x * y + x * y
- z.backward(z)
- gelu = nn.GELU()
- m = torch.randn(2)
- _ = gelu(m)
- if use_cuda:
- z = z.cpu()
- _record_function_with_args_exit(rf_handle)
-
- def get_execution_trace_root(self, output_file_name) -> Json:
- nodes = []
- with open(output_file_name) as f:
- et_graph = json.load(f)
- assert "nodes" in et_graph
- nodes = et_graph["nodes"]
- return nodes
-
- def get_execution_trace_rf_ids(self, nodes: List[Json]) -> List[int]:
- """Returns a sorted list of rf_id (record function ids) in execution trace"""
-
- def get_rf_id(node):
- attrs = node["attrs"]
- for a in attrs:
- if a["name"] == "rf_id":
- return a["value"]
- return None
-
- rf_ids_ = (
- get_rf_id(n)
- for n in nodes
- if n["name"] != "[pytorch|profiler|execution_trace|process]"
- and n["name"] != "[pytorch|profiler|execution_trace|thread]"
- )
- return sorted(rf_id for rf_id in rf_ids_ if rf_id is not None)
-
- def get_kineto_rf_ids(self, events: List[Json]) -> List[int]:
- """Returns a sorted list of Record function IDs for CPU operators and user annotations"""
- ops_and_annotations = (
- e for e in events if e.get("cat", "") in ["cpu_op", "user_annotation"]
- )
- return sorted(
- e.get("args", {}).get("Record function id", -1) for e in ops_and_annotations
- )
-
- @unittest.skipIf(not kineto_available(), "Kineto is required")
- def test_execution_trace_with_kineto(self):
- trace_called_num = 0
-
- def trace_handler(p):
- nonlocal trace_called_num
- trace_called_num += 1
-
- use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
- # Create a temp file to save execution trace and kineto data.
- fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
- fp.close()
- kt = tempfile.NamedTemporaryFile(
- mode="w+t", suffix=".kineto.json", delete=False
- )
- kt.close()
-
- with profile(
- activities=supported_activities(),
- schedule=torch.profiler.schedule(
- skip_first=3, wait=1, warmup=1, active=2, repeat=1
- ),
- on_trace_ready=trace_handler,
- execution_trace_observer=(
- ExecutionTraceObserver().register_callback(fp.name)
- ),
- ) as p:
- for idx in range(10):
- with record_function(f"## LOOP {idx} ##"):
- self.payload(use_cuda=use_cuda)
- p.step()
- self.assertEqual(fp.name, p.execution_trace_observer.get_output_file_path())
-
- # Uncomment for debugging
- # print("Output kineto = ", kt.name)
- # print("Output ET = ", fp.name)
-
- p.export_chrome_trace(kt.name)
- self.assertEqual(trace_called_num, 1)
-
- nodes = self.get_execution_trace_root(fp.name)
- loop_count = 0
- found_root_node = False
- for n in nodes:
- assert "name" in n
- if "[pytorch|profiler|execution_trace|process]" in n["name"]:
- found_root_node = True
- if n["name"].startswith("## LOOP "):
- loop_count += 1
- self.assertTrue(found_root_node)
- # Since profiler trace is active for 2 iterations
- self.assertEqual(loop_count, 2)
-
- # Compare the collected Execution Trace and Kineto Trace
- # in terms of record func ID (rf_id) and External IDs
- # both of these should match for the same trace window.
-
- with open(kt.name) as f:
- kineto = json.load(f)
- events = kineto["traceEvents"]
-
- # Look up rf_ids in both Execution and Kineto trace as two lists.
- rf_ids_et = self.get_execution_trace_rf_ids(nodes)
- rf_ids_kineto = self.get_kineto_rf_ids(events)
-
- self.assertCountEqual(rf_ids_et, rf_ids_kineto)
- self.assertListEqual(
- rf_ids_et,
- rf_ids_kineto,
- msg=f"ET and kineto rf_id should exactly match\n"
- f" rf_ids_et = {rf_ids_et}\n"
- f" rf_ids_kineto = {rf_ids_kineto}\n",
- )
-
- def test_execution_trace_alone(self):
- use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
- # Create a temp file to save execution trace data.
- fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
- fp.close()
- expected_loop_events = 0
-
- et = ExecutionTraceObserver().register_callback(fp.name)
- et.start()
- for idx in range(5):
- expected_loop_events += 1
- with record_function(f"## LOOP {idx} ##"):
- self.payload(use_cuda=use_cuda)
- et.stop()
-
- assert fp.name == et.get_output_file_path()
- et.unregister_callback()
- nodes = self.get_execution_trace_root(fp.name)
- loop_count = 0
- # Expected tensor object tuple size, in th form of:
- # [tensor_id, storage_id, offset, numel, itemsize, device_str]
- tensor_tuple_size = 6
- found_root_node = False
- for n in nodes:
- assert "name" in n
- if "[pytorch|profiler|execution_trace|process]" in n["name"]:
- found_root_node = True
- if n["name"].startswith("## LOOP "):
- loop_count += 1
- # Check if tensor tuple representation size is correct.
- if n["name"] == "## TEST 2 ##":
- assert len(n["inputs"]["values"][3][0]) == tensor_tuple_size
- assert found_root_node
- assert loop_count == expected_loop_events
-
- @unittest.skipIf(IS_WINDOWS, "torch.compile does not support WINDOWS")
- def test_execution_trace_with_pt2(self):
- class ConvAndRelu(nn.Module):
- def __init__(self) -> None:
- super().__init__()
- self.linear = nn.Linear(4096, 4096)
- self.relu = nn.ReLU(inplace=True)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.linear(x)
- x = self.relu(x)
- return x
-
- # Create a temp file to save execution trace data.
- fp = tempfile.NamedTemporaryFile("w+t", suffix="_et.json", delete=False)
- fp.close()
-
- with torch._inductor.config.patch(compile_threads=1):
- test_module = torch.compile(ConvAndRelu())
-
- x = torch.rand(128, 4096)
- et = ExecutionTraceObserver().register_callback(fp.name)
- et.start()
- test_module.forward(x)
- et.stop()
-
- assert fp.name == et.get_output_file_path()
- et.unregister_callback()
- nodes = self.get_execution_trace_root(fp.name)
-
- found_root_node = False
- for n in nodes:
- assert "name" in n
- if "[pytorch|profiler|execution_trace|process]" in n["name"]:
- found_root_node = True
-
- assert found_root_node
-
- def test_execution_trace_start_stop(self):
- use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
- # Create a temp file to save execution trace data.
- fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
- fp.close()
- expected_loop_events = 0
- et = ExecutionTraceObserver()
- et.register_callback(fp.name)
- for idx in range(10):
- if idx == 3:
- et.start()
- elif idx == 5:
- et.stop()
- elif idx == 8:
- et.start()
- elif idx == 9:
- et.stop()
- if et._execution_trace_running:
- expected_loop_events += 1
- with record_function(f"## LOOP {idx} ##"):
- self.payload(use_cuda=use_cuda)
-
- assert fp.name == et.get_output_file_path()
- et.unregister_callback()
- nodes = self.get_execution_trace_root(fp.name)
- loop_count = 0
- found_root_node = False
- for n in nodes:
- assert "name" in n
- if "[pytorch|profiler|execution_trace|process]" in n["name"]:
- found_root_node = True
- if n["name"].startswith("## LOOP "):
- loop_count += 1
- assert found_root_node
- assert loop_count == expected_loop_events
-
- def test_execution_trace_repeat_in_loop(self):
- use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
- iter_list = {3, 4, 6, 8}
- expected_loop_events = len(iter_list)
- output_files = []
- for idx in range(10):
- if idx in iter_list:
- # Create a temp file to save execution trace data.
- fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
- fp.close()
- output_files.append(fp.name)
- et = ExecutionTraceObserver()
- et.register_callback(fp.name)
- et.start()
- with record_function(f"## LOOP {idx} ##"):
- self.payload(use_cuda=use_cuda)
- if idx in iter_list:
- et.stop()
- et.unregister_callback()
-
- event_count = 0
- for et_file in output_files:
- nodes = self.get_execution_trace_root(et_file)
- found_root_node = False
- for n in nodes:
- assert "name" in n
- if "[pytorch|profiler|execution_trace|process]" in n["name"]:
- assert n["id"] == 1
- found_root_node = True
- if n["name"].startswith("## LOOP "):
- event_count += 1
- assert found_root_node
- assert event_count == expected_loop_events
-
- def test_execution_trace_no_capture(self):
- fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
- fp.close()
- et = ExecutionTraceObserver()
- et.register_callback(fp.name)
-
- assert fp.name == et.get_output_file_path()
- et.unregister_callback()
- nodes = self.get_execution_trace_root(fp.name)
- for n in nodes:
- assert "name" in n
- if "[pytorch|profiler|execution_trace|process]" in n["name"]:
- found_root_node = True
- assert found_root_node
-
- @skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/124500")
- def test_execution_trace_nested_tensor(self):
- fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
- fp.close()
-
- et = ExecutionTraceObserver()
- observer = et.register_callback(fp.name)
-
- def fn(nt):
- return nt.sin().cos()
-
- with torch.profiler.profile(execution_trace_observer=observer) as prof:
- for i in range(3):
- values = torch.rand((8 + i, 4 + i))
- offsets = torch.tensor([0, 2, 4, 6, 8 + i])
- nt = torch.nested.nested_tensor_from_jagged(values, offsets)
- fn(nt)
-
- nodes = self.get_execution_trace_root(fp.name)
- found_cos = False
- for n in nodes:
- assert "name" in n
- if "cos" in n["name"]:
- found_cos = True
- assert found_cos
-
-
@instantiate_parametrized_tests
class TestProfiler(TestCase):
@unittest.skipIf(
@@ -1016,7 +557,9 @@ class TestProfiler(TestCase):
def run_profiler(tensor_creation_fn):
# collecting allocs / deallocs
with _profile(
- profile_memory=True, record_shapes=True, use_kineto=kineto_available()
+ profile_memory=True,
+ record_shapes=True,
+ use_kineto=kineto_available(),
) as prof:
x = None
with record_function("test_user_scope_alloc"):
@@ -1027,16 +570,22 @@ class TestProfiler(TestCase):
def check_metrics(stats, metric, allocs=None, deallocs=None):
stat_metrics = {}
+ # print(stats)
for stat in stats:
stat_metrics[stat.key] = getattr(stat, metric)
+ # print(stat_metrics)
if allocs is not None:
for alloc_fn in allocs:
self.assertTrue(alloc_fn in stat_metrics)
- self.assertTrue(stat_metrics[alloc_fn] > 0)
+ self.assertGreater(
+ stat_metrics[alloc_fn], 0, f"alloc_fn = {alloc_fn}"
+ )
if deallocs is not None:
for dealloc_fn in deallocs:
self.assertTrue(dealloc_fn in stat_metrics)
- self.assertTrue(stat_metrics[dealloc_fn] < 0)
+ self.assertLess(
+                        stat_metrics[dealloc_fn], 0, f"dealloc_fn = {dealloc_fn}"
+ )
def create_cpu_tensor():
return torch.rand(10, 10)
@@ -2093,18 +1642,6 @@ assert KinetoStepTracker.current_step() == initial_step + 2 * niters
event_list.table()
-def find_node_with_name(nodes, name):
- for node in _utils.traverse_dfs(nodes):
- if node.name == name:
- return node
-
-
-def find_node_with_regex(nodes, pattern):
- for node in _utils.traverse_dfs(nodes):
- if re.search(pattern, node.name):
- return node
-
-
class SimpleNet(nn.Module):
def __init__(self):
super().__init__()
@@ -2115,859 +1652,6 @@ class SimpleNet(nn.Module):
return self.fc2(self.fc1(x))
-class TestTorchTidyProfiler(TestCase):
- def _get_tensor_fields(self, node, index):
- self.assertIsNotNone(node)
- self.assertIsInstance(
- node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
- )
- tensor_info = node.extra_fields.inputs[index]
- self.assertIsInstance(tensor_info, _TensorMetadata)
- self.assertIsNotNone(tensor_info.impl_ptr)
- self.assertIsNotNone(tensor_info.storage_data_ptr)
- self.assertIsNotNone(tensor_info.id)
- return tensor_info.impl_ptr, tensor_info.storage_data_ptr, tensor_info.id
-
- def test_pointers_and_ids(self):
- a = torch.randn(4, 3)
- a_initial_storage_data = a.storage().data_ptr()
-
- # Views of tensors can share the same storage, but have different TensorImpls
- b = a.view((1, 12))
- c = torch.randn(4, 1)
- c_initial_storage_data = c.storage().data_ptr()
- d = torch.randn(4, 3)
-
- with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
- _ = a + c
- _ = b * c
-
- # Resize should create a new data_ptr but keep the TensorImpl the same.
- f = a.resize_(128, 129)
- _ = torch.relu(f)
-
- # `.set_` points a Tensor at an existing storage.
- _ = d.sin()
- c.set_(d.storage())
- _ = c.cos()
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
-
- def get_fields(op_name, index):
- return self._get_tensor_fields(find_node_with_name(nodes, op_name), index)
-
- a_impl, a_storage_data, a_id = get_fields("aten::add", 0)
- b_impl, b_storage_data, b_id = get_fields("aten::mul", 0)
-
- # Profiler matches ground truth from Python API.
- self.assertEqual(a_storage_data, a_initial_storage_data)
-
- # Views are handled correctly.
- self.assertEqual(a_storage_data, b_storage_data)
- self.assertNotEqual(a_impl, b_impl)
-
- # The same Tensor used in multiple calls gives identical results.
- c_impl, c_storage_data, c_id = get_fields("aten::add", 1)
- self.assertEqual((c_impl, c_storage_data, c_id), get_fields("aten::mul", 1))
- self.assertEqual(c_storage_data, c_initial_storage_data)
-
- # Mutations to the underlying storage are reflected. (But ID is shared.)
- f_impl, f_storage_data, f_id = get_fields("aten::relu", 0)
- self.assertEqual(a_impl, f_impl)
- self.assertNotEqual(a_storage_data, f_storage_data)
- self.assertEqual(a_id, f_id)
-
- # Calling `set_` with an existing Tensor makes them share an ID.
- d_impl, d_storage_data, d_id = get_fields("aten::sin", 0)
- c_impl_new, c_storage_data_new, c_id_new = get_fields("aten::cos", 0)
- self.assertNotEqual(d_impl, c_impl_new)
- self.assertEqual(d_storage_data, c_storage_data_new)
- self.assertEqual(c_id, c_id_new)
- self.assertEqual(d_id, c_id_new)
-
- @staticmethod
- def _format_allocations(profiled_code):
- gc.collect()
- with profile(profile_memory=True, record_shapes=True) as prof:
- profiled_code()
- gc.collect()
-
- root_events = prof.profiler.kineto_results.experimental_event_tree()
- events = sorted(_utils.traverse_dfs(root_events), key=lambda x: x.start_time_ns)
- allocations = tuple(
- event.extra_fields
- for event in events
- if isinstance(
- event.extra_fields, torch._C._profiler._ExtraFields_Allocation
- )
- )
-
- return textwrap.indent(
- "\n".join(
- f"{repr(i.id):>5}{' ' * 6}"
- f"{repr(i.allocation_id):>5}{' ' * 6}"
- f"{'Allocation' if i.alloc_size > 0 else 'Free'}"
- for i in allocations
- ),
- " " * 12,
- )
-
- def test_tensorimpl_invalidation_set(self) -> None:
- def profiled_code(add_empty_set: bool):
- x = torch.ones((1,))
-
- # Determines if new storage is created before or after the old one
- # is destroyed.
- if add_empty_set:
- x.set_()
-
- x.set_(torch.ones((1,)).storage())
- x.view_as(x)
-
- self.assertExpectedInline(
- self._format_allocations(lambda: profiled_code(add_empty_set=False)),
- """\
- 0 1 Allocation
- 0 2 Allocation
- 0 1 Free
- 0 2 Free""",
- )
-
- self.assertExpectedInline(
- self._format_allocations(lambda: profiled_code(add_empty_set=True)),
- """\
- 0 1 Allocation
- 0 1 Free
- 0 2 Allocation
- 0 2 Free""",
- )
-
- def test_tensorimpl_invalidation_keep_alive(self) -> None:
- def profiled_code(add_empty_set: bool):
- x = torch.ones((1,))
- x_storages = [x.storage()]
- for _ in range(3):
- x.set_()
- x.set_(torch.ones((1,)).storage())
-
- # This keeps the StorageImpls alive and preserves the chain.
- # (Despite the `set_()` call.)
- x_storages.append(x.storage())
- x.view_as(x)
-
- # Free storage in a deterministic fashion.
- while x_storages:
- x_storages.pop()
- gc.collect()
-
- # Determines if new storage is created before or after the old one
- # is destroyed.
- if add_empty_set:
- x.set_()
-
- for _ in range(3):
- x.set_(torch.ones((1,)).storage())
- x.view_as(x)
-
- del x
- gc.collect()
-
- self.assertExpectedInline(
- self._format_allocations(lambda: profiled_code(add_empty_set=False)),
- """\
- 0 1 Allocation
- 0 2 Allocation
- 0 4 Allocation
- 0 5 Allocation
- 0 4 Free
- 0 2 Free
- 0 1 Free
- 0 6 Allocation
- 0 5 Free
- 0 7 Allocation
- 0 6 Free
- 0 8 Allocation
- 0 7 Free
- 0 8 Free""",
- )
-
- self.assertExpectedInline(
- self._format_allocations(lambda: profiled_code(add_empty_set=True)),
- """\
- 0 1 Allocation
- 0 2 Allocation
- 0 4 Allocation
- 0 5 Allocation
- 0 4 Free
- 0 2 Free
- 0 1 Free
- 0 5 Free
- 0 6 Allocation
- 0 7 Allocation
- 0 6 Free
- 0 8 Allocation
- 0 7 Free
- 0 8 Free""",
- )
-
- def test_tensorimpl_invalidation_full(self) -> None:
- def profiled_code():
- x = torch.ones((1,))
- x_storages = [x.storage()]
- for _ in range(3):
- x.set_()
- x.set_(torch.ones((1,)).storage())
- x_storages.append(x.storage())
- x.view_as(x)
-
- # Free storage in a deterministic fashion.
- while x_storages:
- x_storages.pop()
- gc.collect()
-
- for _ in range(3):
- x.set_(torch.ones((1,)).storage())
-
- for _ in range(3):
- x.set_()
- x.set_(torch.ones((1,)).storage())
-
- for i in range(4):
- x.resize_((1 + i,))
- x.view_as(x)
-
- self.assertExpectedInline(
- self._format_allocations(profiled_code),
- """\
- 0 1 Allocation
- 0 2 Allocation
- 0 4 Allocation
- 0 5 Allocation
- 0 4 Free
- 0 2 Free
- 0 1 Free
- 0 6 Allocation
- 0 5 Free
- 0 7 Allocation
- 0 6 Free
- 0 8 Allocation
- 0 7 Free
- 0 8 Free
- 0 9 Allocation
- 0 9 Free
- 0 10 Allocation
- 0 10 Free
- 0 11 Allocation
- 0 12 Allocation
- 0 11 Free
- 0 13 Allocation
- 0 12 Free
- 0 14 Allocation
- 0 13 Free
- 0 14 Free""",
- )
-
- def test_tensorimpl_invalidation_scalar_args(self) -> None:
- def profiled_code():
- with torch.no_grad():
- x = torch.ones((1,))
- for _ in range(10):
- x.add_(2)
-
- self.assertExpectedInline(
- self._format_allocations(profiled_code),
- """\
- 0 1 Allocation
- 1 2 Allocation
- 2 3 Allocation
- 2 3 Free
- 1 2 Free
- 3 4 Allocation
- 4 5 Allocation
- 4 5 Free
- 3 4 Free
- 5 6 Allocation
- 6 7 Allocation
- 6 7 Free
- 5 6 Free
- 7 8 Allocation
- 8 9 Allocation
- 8 9 Free
- 7 8 Free
- 9 10 Allocation
- 10 11 Allocation
- 10 11 Free
- 9 10 Free
- 11 12 Allocation
- 12 13 Allocation
- 12 13 Free
- 11 12 Free
- 13 14 Allocation
- 14 15 Allocation
- 14 15 Free
- 13 14 Free
- 15 16 Allocation
- 16 17 Allocation
- 16 17 Free
- 15 16 Free
- 17 18 Allocation
- 18 19 Allocation
- 18 19 Free
- 17 18 Free
- 19 20 Allocation
- 20 21 Allocation
- 20 21 Free
- 19 20 Free
- 0 1 Free""",
- )
-
- def test_module_and_optimizer_ids(self) -> None:
- model = torch.nn.Linear(2, 1, bias=True)
- optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
-
- def check(cold_start: bool) -> None:
- with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
- x = torch.ones((1, 2))
- _ = x.sin() # Mark `x`
- model(x).backward()
- optimizer.step()
- _ = optimizer.state[model.weight][
- "momentum_buffer"
- ].cos() # Mark weight momentum
- _ = model.weight.grad.tan() # Mark weight gradient
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
-
- def get_fields(op_name, index):
- return self._get_tensor_fields(
- find_node_with_name(nodes, op_name), index
- )
-
- # Marked Tensors act as ground truth for python tracer IDs.
- _, _, x_id = get_fields("aten::sin", 0)
- _, _, weight_momenumtum_id = get_fields("aten::cos", 0)
- _, _, weight_grad_id = get_fields("aten::tan", 0)
- self.assertNotEqual(x_id, weight_momenumtum_id)
- self.assertNotEqual(x_id, weight_grad_id)
- self.assertNotEqual(weight_momenumtum_id, weight_grad_id)
-
- # Use linear op to identify weight ground truth.
- linear_op_node = find_node_with_name(nodes, "aten::linear")
- self.assertIsNotNone(linear_op_node)
- x_metadata, weight_metadata, _ = linear_op_node.extra_fields.inputs
- self.assertEqual(x_id, x_metadata.id)
-
- # Module
- linear_module_node = find_node_with_name(nodes, "nn.Module: Linear_0")
- self.assertIsNotNone(linear_module_node)
- self.assertIsNotNone(linear_module_node.extra_fields.module)
- self.assertIsNone(linear_module_node.extra_fields.optimizer)
-
- linear_parameters = linear_module_node.extra_fields.module.parameters
- name, weight, weight_grad = linear_parameters[0]
- self.assertEqual(name, "weight")
- self.assertEqual(weight.id, weight_metadata.id)
-
- self.assertEqual(weight_grad is None, cold_start)
- if not cold_start:
- self.assertEqual(weight_grad.id, weight_grad_id)
-
- # Optimizer
- step_node = find_node_with_regex(nodes, "_optimizer_step_code")
- self.assertIsNotNone(step_node)
- self.assertIsNone(step_node.extra_fields.module)
- self.assertIsNotNone(step_node.extra_fields.optimizer)
- optimizer_parameters = step_node.extra_fields.optimizer.parameters
- self.assertEqual(len(optimizer_parameters), 2) # Weight and bias
- weight, weight_grad, state = optimizer_parameters[0]
- self.assertEqual(weight.id, weight_metadata.id)
- self.assertEqual(weight_grad.id, weight_grad_id)
- self.assertEqual(len(state), 1)
- self.assertEqual(state[0][0], "momentum_buffer")
- self.assertEqual(state[0][1].id, weight_momenumtum_id)
-
- # Check that we handle first step (lazy initalization) and steady state.
- check(cold_start=True)
- check(cold_start=False)
-
- def _test_allocation_ids(self, before_fn, after_fn) -> None:
- with profile(profile_memory=True, record_shapes=True) as p:
- # Introduce other operations and allocations to check robustness
- _ = before_fn()
-
- x = torch.rand(4, 3)
- x.resize_(4, 4)
-
- # We need to use `x` post resize for profiler to determine its ID.
- x.sin()
-
- # Introduce other operations and allocations to check robustness
- _ = after_fn()
-
- # Ensure `x` is the last variable collected to make it easier to
- # find the deallocation event.
- gc.collect()
- del x
- gc.collect()
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
-
- def find_chain(names: List[str]):
- out = []
- for name in names:
- root = [out[-1]] if out else nodes
- out.append(find_node_with_name(root, name))
- self.assertIsNotNone(out[-1], name)
- return out
-
- allocation = find_chain(["aten::rand", "aten::empty", "[memory]"])[
- -1
- ].extra_fields
- _, uniform_node = find_chain(["aten::rand", "aten::uniform_"])
- x_impl, x_storage_data, x_id = self._get_tensor_fields(uniform_node, 0)
-
- # Make sure IDs are consistent between allocations and op inputs
- self.assertEqual(allocation.ptr, x_storage_data)
- self.assertEqual(allocation.id, x_id)
-
- resize_node = find_node_with_name(nodes, "aten::resize_")
- self.assertIsNotNone(resize_node)
- self.assertEqual(len(resize_node.children), 2)
- allocate_new = resize_node.children[0].extra_fields
- free_old = resize_node.children[1].extra_fields
-
- # Destruction of the old storage for x.
- self.assertEqual(free_old.id, allocation.id)
- self.assertEqual(free_old.ptr, allocation.ptr)
-
- # Make sure ID is retained through change in storage.
- self.assertEqual(allocate_new.id, allocation.id)
- self.assertNotEqual(allocate_new.ptr, allocation.ptr)
-
- # Deletion when `x` goes out of scope.
- free_new = [
- i for i in nodes if i.tag == torch._C._profiler._EventType.Allocation
- ][-1].extra_fields
- self.assertIsInstance(free_new, torch._C._profiler._ExtraFields_Allocation)
- self.assertEqual(free_new.id, allocate_new.id)
- self.assertEqual(free_new.ptr, allocate_new.ptr)
-
- def test_allocation_ids(self) -> None:
- self._test_allocation_ids(lambda: None, lambda: None)
-
- def test_allocation_ids_with_other_ops(self) -> None:
- x = torch.ones((1,))
- self._test_allocation_ids(
- lambda: (x + 1).relu_(), lambda: torch.zeros((1,)).cos()
- )
-
- def test_impl_reuse(self) -> None:
- repeats = 1_000
- with profile(profile_memory=True, record_shapes=True) as p:
- for _ in range(repeats):
- torch.ones((1,))
- gc.collect()
-
- roots = p.profiler.kineto_results.experimental_event_tree()
- tensor_impls = tuple(
- e.extra_fields.inputs[0].impl_ptr
- for e in _utils.traverse_dfs(roots)
- if e.name == "aten::fill_"
- )
-
- self.assertEqual(len(tensor_impls), repeats)
- self.assertEqual(len(set(tensor_impls)), repeats)
-
- def test_allocation_id_uniqueness(self) -> None:
- repeats = 1_000
- with profile(profile_memory=True, record_shapes=True) as p:
- for _ in range(repeats):
- torch.ones((1,))
- gc.collect()
-
- roots = p.profiler.kineto_results.experimental_event_tree()
- id_set = set()
- for e in _utils.traverse_dfs(roots):
- fields = e.extra_fields
- if isinstance(fields, torch._C._profiler._ExtraFields_TorchOp):
- id_set |= {
- t.allocation_id
- for t in fields.inputs
- if isinstance(t, _TensorMetadata)
- }
-
- elif isinstance(fields, torch._C._profiler._ExtraFields_Allocation):
- id_set.add(fields.allocation_id)
-
- id_set.difference_update([None])
- self.assertEqual(repeats, len(id_set))
-
- def test_extra_fields(self):
- with profile(with_stack=True, profile_memory=True) as p:
- _ = torch.ones((1,))
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
- node = find_node_with_name(nodes, "aten::ones")
- self.assertIsNotNone(node)
-
- self.assertIsInstance(
- node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
- )
-
- self.assertIsInstance(
- node.parent.extra_fields, torch._C._profiler._ExtraFields_PyCCall
- )
-
- self.assertEqual(node.children[0].name, "aten::empty")
- self.assertEqual(node.children[0].children[0].name, "[memory]")
- self.assertIsInstance(
- node.children[0].children[0].extra_fields,
- torch._C._profiler._ExtraFields_Allocation,
- )
-
- def test_tensor_properties(self):
- x = torch.ones(10, 10).as_strided([4, 4], [12, 3])
- y = torch.ones(4, 1, requires_grad=True)
-
- with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
- _ = x + y
- _ = x * y
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
- node = find_node_with_name(nodes, "aten::add")
- self.assertIsNotNone(node)
-
- self.assertIsInstance(
- node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
- )
-
- def getattr_inputs(name, default):
- return [getattr(i, name, default) for i in node.extra_fields.inputs]
-
- self.assertEqual(getattr_inputs("sizes", []), [[4, 4], [4, 1], []])
- self.assertEqual(getattr_inputs("strides", []), [[12, 3], [1, 1], []])
- self.assertEqual(
- getattr_inputs("layout", None), [torch.strided, torch.strided, None]
- )
- self.assertEqual(
- getattr_inputs("device", None),
- [torch.device("cpu"), torch.device("cpu"), None],
- )
- self.assertEqual(
- getattr_inputs("dtype", None), [torch.float32, torch.float32, None]
- )
- self.assertEqual(node.extra_fields.scope, torch.profiler.RecordScope.FUNCTION)
-
- mul_node = find_node_with_name(nodes, "aten::mul")
- self.assertIsNotNone(mul_node)
- self.assertEqual(
- node.extra_fields.sequence_number + 1, mul_node.extra_fields.sequence_number
- )
-
- def test_sparse_tensors(self):
- i = [[0, 1, 1], [2, 0, 2]]
- v = [3, 4, 5]
- s = torch.sparse_coo_tensor(i, v, (2, 3))
-
- with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
- _ = s + s
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
- node = find_node_with_name(nodes, "aten::add")
- self.assertIsNotNone(node)
-
- self.assertIsInstance(
- node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
- )
-
- def getattr_inputs(name, default):
- return [getattr(i, name, default) for i in node.extra_fields.inputs]
-
- self.assertEqual(getattr_inputs("sizes", []), [[2, 3], [2, 3], []])
- self.assertEqual(getattr_inputs("strides", []), [[], [], []])
- self.assertEqual(
- getattr_inputs("layout", None), [torch.sparse_coo, torch.sparse_coo, None]
- )
- self.assertEqual(
- getattr_inputs("device", None),
- [torch.device("cpu"), torch.device("cpu"), None],
- )
-
- @unittest.skipIf(
- not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
- )
- def test_mkldnn_tensors(self):
- x = torch.ones(4, 3).to_mkldnn()
-
- with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
- _ = x + x
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
- node = find_node_with_name(nodes, "aten::add")
- self.assertIsNotNone(node)
-
- self.assertIsInstance(
- node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
- )
-
- def getattr_inputs(name, default):
- return [getattr(i, name, default) for i in node.extra_fields.inputs]
-
- self.assertEqual(getattr_inputs("sizes", []), [[4, 3], [4, 3], []])
- self.assertEqual(getattr_inputs("strides", []), [[], [], []])
- self.assertEqual(
- getattr_inputs("layout", None), [torch._mkldnn, torch._mkldnn, None]
- )
- self.assertEqual(
- getattr_inputs("device", None),
- [torch.device("cpu"), torch.device("cpu"), None],
- )
-
- def test_scalar_ins(self):
- x = torch.ones(5, 5)
- alpha = 0.9
-
- with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
- _ = torch.add(x, 9.1, alpha=alpha)
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
- node = find_node_with_name(nodes, "aten::add")
- self.assertIsNotNone(node)
-
- def getattr_inputs(name, default):
- return [getattr(i, name, default) for i in node.extra_fields.inputs]
-
- # The second argument to the add gets promotoed to a zerodim Tensor
- self.assertEqual(
- getattr_inputs("dtype", None), [torch.float32, torch.float64, None]
- )
- self.assertEqual(getattr_inputs("sizes", []), [[5, 5], [], []])
- self.assertEqual(node.extra_fields.inputs[2], alpha)
-
- def test_tensor_lists(self):
- x = torch.ones((1,))
- y = torch.ones((1,))
- with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
- _ = torch.stack((x, y))
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
- node = find_node_with_name(nodes, "aten::stack")
- inputs = node.extra_fields.inputs
- self.assertEqual(len(inputs), 2)
- self.assertIsInstance(inputs[0], list)
- self.assertEqual(len(inputs[0]), 2)
- self.assertEqual(x.storage().data_ptr(), inputs[0][0].storage_data_ptr)
- self.assertEqual(y.storage().data_ptr(), inputs[0][1].storage_data_ptr)
-
- def test_nnmodule_params(self):
- def flat_out_extrafields(nodes, out=None):
- if out is None:
- out = []
- for node in nodes:
- if (
- isinstance(node.extra_fields, _ExtraFields_PyCall)
- and node.extra_fields.module
- ):
- if node.extra_fields.module.parameters:
- out.append(node.extra_fields.module)
- flat_out_extrafields(node.children, out)
- return out
-
- inputs = torch.rand(10)
- net = SimpleNet()
- out = net(inputs)
- torch.nn.functional.cross_entropy(out, torch.rand(2)).backward()
- with torch.profiler.profile(with_stack=True, profile_memory=True) as p:
- _ = net(inputs)
-
- modules = flat_out_extrafields(
- p.profiler.kineto_results.experimental_event_tree()
- )
- self.assertEqual(
- len(modules), 2, f"Expected two parameter list, but got {len(modules)}"
- )
-
- params = [
- (n, p.storage_data_ptr, g.storage_data_ptr)
- for module in modules
- for (n, p, g) in module.parameters
- ]
- expected = [
- (name, val.storage().data_ptr(), val.grad.storage().data_ptr())
- for name, val in net.fc1._parameters.items()
- ]
- expected += [
- (name, val.storage().data_ptr(), val.grad.storage().data_ptr())
- for name, val in net.fc2._parameters.items()
- ]
- self.assertEqual(expected, params, f"{expected} vs. {params}")
-
- def _flat_out_extrafields(self, nodes, out=None):
- if out is None:
- out = []
- for node in nodes:
- if (
- isinstance(node.extra_fields, _ExtraFields_PyCall)
- and node.extra_fields.optimizer
- and node.extra_fields.optimizer.parameters
- ):
- # avoiding OptInfo duplicates from iterations
- addr = node.extra_fields.optimizer.parameters[0][0].storage_data_ptr
- if not [o for o in out if addr == o.parameters[0][0].storage_data_ptr]:
- out.append(node.extra_fields.optimizer)
- self._flat_out_extrafields(node.children, out)
- return out
-
- def _check_results(self, opt, opts, check_items=False):
- self.assertEqual(len(opts), 1, f"Expected 1 optimizer: len(opts): {len(opts)}")
- self.assertEqual(
- id(opt),
- opts[0].self_ptr,
- f"Optimizer addr ({id(opt)}) vs. profiled addr ({opts[0].self_ptr})",
- )
- if check_items:
- self.assertEqual(len(opt.param_groups), len(opts))
- for group, opt_ in zip(opt.param_groups, opts):
- self.assertEqual(
- [(v.storage().data_ptr()) for v in group.get("params", [])],
- [(o.storage_data_ptr) for (o, _, _) in opt_.parameters],
- )
- for opt_ in opts:
- observed_state = {
- p.storage_data_ptr: {name: s.storage_data_ptr for name, s in state}
- for (p, _, state) in opt_.parameters
- }
-
- # Make sure the profiler collected all optimizer state and check
- # that the address recorded by the profiler is correct.
- for parameter, parameter_state in opt.state.items():
- self.assertEqual(
- {
- name: value.storage().data_ptr()
- for name, value in parameter_state.items()
- },
- observed_state.get(parameter.storage().data_ptr(), []),
- )
-
- def test_optimizer(self):
- inputs = torch.rand(10)
- with torch.profiler.profile(with_stack=True, profile_memory=True) as p:
- net = SimpleNet()
- opt = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
-
- opt.zero_grad()
- out = net(inputs)
- loss = torch.nn.functional.cross_entropy(out, torch.rand(2))
- loss.backward()
- opt.step()
- self._check_results(
- opt,
- self._flat_out_extrafields(
- p.profiler.kineto_results.experimental_event_tree()
- ),
- False,
- )
-
- def _test_optimizer_parameters(self, optimizer_factory):
- inputs = torch.rand(10)
- with torch.profiler.profile(with_stack=True, profile_memory=True) as p:
- net = SimpleNet()
- opt = optimizer_factory(net.parameters())
- for _ in range(2):
- opt.zero_grad()
- out = net(inputs)
- loss = torch.nn.functional.cross_entropy(out, torch.rand(2))
- loss.backward()
- opt.step()
- self._check_results(
- opt,
- self._flat_out_extrafields(
- p.profiler.kineto_results.experimental_event_tree()
- ),
- True,
- )
-
- def test_optimizer_parameters_sgd(self):
- self._test_optimizer_parameters(
- lambda params: torch.optim.SGD(params, lr=0.01, momentum=0.9)
- )
-
- def test_optimizer_parameters_adam(self):
- self._test_optimizer_parameters(
- lambda params: torch.optim.Adam(params, foreach=True)
- )
-
- def test_allocations(self):
- gc.collect()
- with profile(profile_memory=True) as p:
- x = torch.empty((3, 4))
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
- node = find_node_with_name(nodes, "[memory]")
- self.assertIsNotNone(node)
-
- alloc_size = 3 * 4 * 4 # fp32 -> 4 bytes
- ptr = node.extra_fields.ptr
- self.assertGreater(ptr, 0)
- self.assertEqual(node.extra_fields.alloc_size, alloc_size)
- self.assertEqual(node.extra_fields.device, torch.device("cpu"))
- total_allocated = node.extra_fields.total_allocated
-
- # total_reserved is only for CUDACachingAllocator
- self.assertEqual(node.extra_fields.total_reserved, 0)
-
- with profile(profile_memory=True) as p:
- del x
- gc.collect()
-
- nodes = p.profiler.kineto_results.experimental_event_tree()
- node = find_node_with_name(nodes, "[memory]")
- self.assertIsNotNone(node)
-
- self.assertEqual(node.extra_fields.ptr, ptr)
- self.assertEqual(node.extra_fields.alloc_size, -alloc_size)
- self.assertEqual(node.extra_fields.device, torch.device("cpu"))
- self.assertEqual(
- node.extra_fields.total_allocated, total_allocated - alloc_size
- )
-
- def test_refcounts(self):
- class Sentinel:
- pass
-
- def make():
- outer_sentinel = Sentinel()
-
- def outer():
- # Python will only close over variables used in the function.
- _ = outer_sentinel
- inner_sentinel = Sentinel()
-
- def inner():
- _ = inner_sentinel
-
- with profile(with_stack=True):
- inner()
-
- return weakref.ref(inner_sentinel)
-
- return outer, weakref.ref(outer_sentinel)
-
- # Use a factory function to ensure the test scope never sees strong
- # references. `del` has strange semantics that interact with closures
- # at an AST level, so this is simpler.
- outer, outer_sentinel_ref = make()
- inner_sentinel_ref = outer()
-
- self.assertIsNone(inner_sentinel_ref())
-
- # `outer` holds the last reference via closure.
- self.assertIsNotNone(outer_sentinel_ref())
-
- del outer
- self.assertIsNone(outer_sentinel_ref())
-
-
@dataclass(frozen=True)
class MockKinetoEvent:
_name: str
diff --git a/test/profiler/test_record_function.py b/test/profiler/test_record_function.py
new file mode 100644
index 0000000000..9c0945454c
--- /dev/null
+++ b/test/profiler/test_record_function.py
@@ -0,0 +1,155 @@
+# Owner(s): ["oncall: profiler"]
+
+# If tqdm is not shut down properly, it will leave the monitor thread alive.
+# This causes an issue in the multithreading test because we check all events
+# in that test with their tids. The events that correspond to these lingering
+# threads all have TID of (uint64_t)(-1), which is invalid.
+# The workaround is turning off the monitoring thread when tqdm is loaded.
+# Since these are unit tests, it is safe to turn off the monitor thread.
+try:
+ import tqdm
+
+ tqdm.tqdm.monitor_interval = 0
+except ImportError:
+ None
+
+from typing import Any, Dict
+
+import torch
+import torch.optim
+import torch.utils.data
+import torch.utils.data.datapipes as dp
+from torch.autograd import (
+ _record_function_with_args_enter,
+ _record_function_with_args_exit,
+)
+from torch.autograd.profiler import profile as _profile
+from torch.profiler import kineto_available, record_function
+from torch.testing._internal.common_utils import run_tests, TestCase
+
+Json = Dict[str, Any]
+
+
+class TestRecordFunction(TestCase):
+ def _record_function_with_param(self):
+ u = torch.randn(3, 4, 5, requires_grad=True)
+ with _profile(
+ with_stack=True, use_kineto=kineto_available(), record_shapes=True
+ ) as prof:
+ with record_function("## TEST 1 ##", "1, 2, 3"):
+ rf_handle = _record_function_with_args_enter(
+ "## TEST 2 ##", 1, False, 2.5, [u, u], "hello", u
+ )
+ _record_function_with_args_exit(rf_handle)
+ with record_function("## TEST 3 ##"):
+ rf_handle = _record_function_with_args_enter("## TEST 4 ##")
+ _record_function_with_args_exit(rf_handle)
+ return prof
+
+ def test_record_function(self):
+ prof_result = self._record_function_with_param()
+ found_test_1 = False
+ found_test_2 = False
+ found_test_3 = False
+ found_test_4 = False
+ for e in prof_result.function_events:
+ if "## TEST 1 ##" == e.name:
+ found_test_1 = True
+ self.assertTrue(e.input_shapes == [[]])
+ elif "## TEST 2 ##" == e.name:
+ found_test_2 = True
+ self.assertTrue(e.input_shapes == [[], [], [], [], [], [3, 4, 5]])
+ elif "## TEST 3 ##" == e.name:
+ found_test_3 = True
+ self.assertTrue(e.input_shapes == [])
+ elif "## TEST 4 ##" == e.name:
+ found_test_4 = True
+ self.assertTrue(e.input_shapes == [])
+ self.assertTrue(found_test_1)
+ self.assertTrue(found_test_2)
+ self.assertTrue(found_test_3)
+ self.assertTrue(found_test_4)
+
+ def test_datapipe_with_record_function(self):
+ with _profile(
+ with_stack=True, use_kineto=kineto_available(), record_shapes=True
+ ) as prof:
+ input_dp1 = dp.iter.IterableWrapper(range(4))
+ input_dp2 = dp.iter.IterableWrapper(range(4, 8))
+ input_dp3 = dp.iter.IterableWrapper(range(8, 12))
+ output_dp = input_dp1.mux(input_dp2, input_dp3)
+ output = list(output_dp)
+
+ has_iter = False
+ has_mux = False
+ for e in prof.function_events:
+ if has_iter and has_mux:
+ break
+
+ if not has_iter and "IterableWrapper" in e.name:
+ has_iter = True
+ if not has_mux and "Multiplexer" in e.name:
+ has_mux = True
+ self.assertTrue(has_iter)
+ self.assertTrue(has_mux)
+
+ def test_datapipe_delegation_with_profiler(self):
+ class IDPIterator(torch.utils.data.IterDataPipe):
+ def __init__(self):
+ self.data = list(range(10))
+ self._idx = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self._idx >= 10:
+ self._idx = 0
+ raise StopIteration
+ self._idx += 1
+ return self.data[self._idx - 1]
+
+ def get_value(self, idx):
+ return self.data[idx]
+
+ dp1 = IDPIterator() # The object itself is an iterator
+ self.assertEqual(5, dp1.get_value(5))
+ it_dp1 = iter(dp1) # This creates the 1st iterator
+ self.assertEqual(5, it_dp1.get_value(5)) # type: ignore[attr-defined]
+ self.assertEqual(list(range(10)), list(it_dp1))
+
+ class IDPDelegator(torch.utils.data.IterDataPipe):
+ def __init__(self, datapipe):
+ self.datapipe = datapipe
+
+ def __iter__(self):
+ return iter(self.datapipe)
+
+ dp2 = IDPDelegator(dp1)
+ it_dp2 = iter(dp2)
+ self.assertEqual(5, it_dp2.get_value(5))
+ self.assertEqual(list(range(10)), list(it_dp2))
+
+ def test_datapipe_with_record_function_fork(self):
+ with _profile(
+ with_stack=True, use_kineto=kineto_available(), record_shapes=True
+ ) as prof:
+ input_dp = dp.iter.IterableWrapper(range(10))
+ dp1, dp2, dp3 = input_dp.fork(num_instances=3)
+ output1 = list(dp1)
+ has_iter = False
+ has_child = False
+ for e in prof.function_events:
+ if has_iter and has_child:
+ break
+
+ if not has_iter and "IterableWrapper" in e.name:
+ has_iter = True
+ if not has_child and "_ChildDataPipe" in e.name:
+ has_child = True
+ self.assertTrue(has_iter)
+ self.assertTrue(has_child)
+
+
+if __name__ == "__main__":
+ run_tests()
diff --git a/test/profiler/test_torch_tidy.py b/test/profiler/test_torch_tidy.py
new file mode 100644
index 0000000000..a891d90458
--- /dev/null
+++ b/test/profiler/test_torch_tidy.py
@@ -0,0 +1,912 @@
+# Owner(s): ["oncall: profiler"]
+
+# If tqdm is not shut down properly, it will leave the monitor thread alive.
+# This causes an issue in the multithreading test because we check all events
+# in that test with their tids. The events that correspond to these lingering
+# threads all have TID of (uint64_t)(-1), which is invalid.
+# The workaround is turning off the monitoring thread when tqdm is loaded.
+# Since these are unit tests, it is safe to turn off the monitor thread.
+try:
+ import tqdm
+
+ tqdm.tqdm.monitor_interval = 0
+except ImportError:
+ None
+
+import gc
+import re
+import textwrap
+import unittest
+import weakref
+from typing import Any, Dict, List
+
+import torch
+import torch.nn as nn
+import torch.optim
+import torch.utils.data
+from torch._C._profiler import _TensorMetadata
+from torch.profiler import _utils, profile
+from torch.testing._internal.common_utils import run_tests, TestCase
+
+Json = Dict[str, Any]
+
+from torch._C._profiler import _ExtraFields_PyCall
+
+
+def find_node_with_name(nodes, name):
+ for node in _utils.traverse_dfs(nodes):
+ if node.name == name:
+ return node
+
+
+def find_node_with_regex(nodes, pattern):
+ for node in _utils.traverse_dfs(nodes):
+ if re.search(pattern, node.name):
+ return node
+
+
+class SimpleNet(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.fc1 = nn.Linear(10, 5)
+ self.fc2 = nn.Linear(5, 2)
+
+ def forward(self, x):
+ return self.fc2(self.fc1(x))
+
+
+class TestTorchTidyProfiler(TestCase):
+ def _get_tensor_fields(self, node, index):
+ self.assertIsNotNone(node)
+ self.assertIsInstance(
+ node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
+ )
+ tensor_info = node.extra_fields.inputs[index]
+ self.assertIsInstance(tensor_info, _TensorMetadata)
+ self.assertIsNotNone(tensor_info.impl_ptr)
+ self.assertIsNotNone(tensor_info.storage_data_ptr)
+ self.assertIsNotNone(tensor_info.id)
+ return tensor_info.impl_ptr, tensor_info.storage_data_ptr, tensor_info.id
+
+ def test_pointers_and_ids(self):
+ a = torch.randn(4, 3)
+ a_initial_storage_data = a.storage().data_ptr()
+
+ # Views of tensors can share the same storage, but have different TensorImpls
+ b = a.view((1, 12))
+ c = torch.randn(4, 1)
+ c_initial_storage_data = c.storage().data_ptr()
+ d = torch.randn(4, 3)
+
+ with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
+ _ = a + c
+ _ = b * c
+
+ # Resize should create a new data_ptr but keep the TensorImpl the same.
+ f = a.resize_(128, 129)
+ _ = torch.relu(f)
+
+ # `.set_` points a Tensor at an existing storage.
+ _ = d.sin()
+ c.set_(d.storage())
+ _ = c.cos()
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+
+ def get_fields(op_name, index):
+ return self._get_tensor_fields(find_node_with_name(nodes, op_name), index)
+
+ a_impl, a_storage_data, a_id = get_fields("aten::add", 0)
+ b_impl, b_storage_data, b_id = get_fields("aten::mul", 0)
+
+ # Profiler matches ground truth from Python API.
+ self.assertEqual(a_storage_data, a_initial_storage_data)
+
+ # Views are handled correctly.
+ self.assertEqual(a_storage_data, b_storage_data)
+ self.assertNotEqual(a_impl, b_impl)
+
+ # The same Tensor used in multiple calls gives identical results.
+ c_impl, c_storage_data, c_id = get_fields("aten::add", 1)
+ self.assertEqual((c_impl, c_storage_data, c_id), get_fields("aten::mul", 1))
+ self.assertEqual(c_storage_data, c_initial_storage_data)
+
+ # Mutations to the underlying storage are reflected. (But ID is shared.)
+ f_impl, f_storage_data, f_id = get_fields("aten::relu", 0)
+ self.assertEqual(a_impl, f_impl)
+ self.assertNotEqual(a_storage_data, f_storage_data)
+ self.assertEqual(a_id, f_id)
+
+ # Calling `set_` with an existing Tensor makes them share an ID.
+ d_impl, d_storage_data, d_id = get_fields("aten::sin", 0)
+ c_impl_new, c_storage_data_new, c_id_new = get_fields("aten::cos", 0)
+ self.assertNotEqual(d_impl, c_impl_new)
+ self.assertEqual(d_storage_data, c_storage_data_new)
+ self.assertEqual(c_id, c_id_new)
+ self.assertEqual(d_id, c_id_new)
+
+ @staticmethod
+ def _format_allocations(profiled_code):
+ gc.collect()
+ with profile(profile_memory=True, record_shapes=True) as prof:
+ profiled_code()
+ gc.collect()
+
+ root_events = prof.profiler.kineto_results.experimental_event_tree()
+ events = sorted(_utils.traverse_dfs(root_events), key=lambda x: x.start_time_ns)
+ allocations = tuple(
+ event.extra_fields
+ for event in events
+ if isinstance(
+ event.extra_fields, torch._C._profiler._ExtraFields_Allocation
+ )
+ )
+
+ return textwrap.indent(
+ "\n".join(
+ f"{repr(i.id):>5}{' ' * 6}"
+ f"{repr(i.allocation_id):>5}{' ' * 6}"
+ f"{'Allocation' if i.alloc_size > 0 else 'Free'}"
+ for i in allocations
+ ),
+ " " * 12,
+ )
+
+ def test_tensorimpl_invalidation_set(self) -> None:
+ def profiled_code(add_empty_set: bool):
+ x = torch.ones((1,))
+
+ # Determines if new storage is created before or after the old one
+ # is destroyed.
+ if add_empty_set:
+ x.set_()
+
+ x.set_(torch.ones((1,)).storage())
+ x.view_as(x)
+
+ self.assertExpectedInline(
+ self._format_allocations(lambda: profiled_code(add_empty_set=False)),
+ """\
+ 0 1 Allocation
+ 0 2 Allocation
+ 0 1 Free
+ 0 2 Free""",
+ )
+
+ self.assertExpectedInline(
+ self._format_allocations(lambda: profiled_code(add_empty_set=True)),
+ """\
+ 0 1 Allocation
+ 0 1 Free
+ 0 2 Allocation
+ 0 2 Free""",
+ )
+
+ def test_tensorimpl_invalidation_keep_alive(self) -> None:
+ def profiled_code(add_empty_set: bool):
+ x = torch.ones((1,))
+ x_storages = [x.storage()]
+ for _ in range(3):
+ x.set_()
+ x.set_(torch.ones((1,)).storage())
+
+ # This keeps the StorageImpls alive and preserves the chain.
+ # (Despite the `set_()` call.)
+ x_storages.append(x.storage())
+ x.view_as(x)
+
+ # Free storage in a deterministic fashion.
+ while x_storages:
+ x_storages.pop()
+ gc.collect()
+
+ # Determines if new storage is created before or after the old one
+ # is destroyed.
+ if add_empty_set:
+ x.set_()
+
+ for _ in range(3):
+ x.set_(torch.ones((1,)).storage())
+ x.view_as(x)
+
+ del x
+ gc.collect()
+
+ self.assertExpectedInline(
+ self._format_allocations(lambda: profiled_code(add_empty_set=False)),
+ """\
+ 0 1 Allocation
+ 0 2 Allocation
+ 0 4 Allocation
+ 0 5 Allocation
+ 0 4 Free
+ 0 2 Free
+ 0 1 Free
+ 0 6 Allocation
+ 0 5 Free
+ 0 7 Allocation
+ 0 6 Free
+ 0 8 Allocation
+ 0 7 Free
+ 0 8 Free""",
+ )
+
+ self.assertExpectedInline(
+ self._format_allocations(lambda: profiled_code(add_empty_set=True)),
+ """\
+ 0 1 Allocation
+ 0 2 Allocation
+ 0 4 Allocation
+ 0 5 Allocation
+ 0 4 Free
+ 0 2 Free
+ 0 1 Free
+ 0 5 Free
+ 0 6 Allocation
+ 0 7 Allocation
+ 0 6 Free
+ 0 8 Allocation
+ 0 7 Free
+ 0 8 Free""",
+ )
+
+ def test_tensorimpl_invalidation_full(self) -> None:
+ def profiled_code():
+ x = torch.ones((1,))
+ x_storages = [x.storage()]
+ for _ in range(3):
+ x.set_()
+ x.set_(torch.ones((1,)).storage())
+ x_storages.append(x.storage())
+ x.view_as(x)
+
+ # Free storage in a deterministic fashion.
+ while x_storages:
+ x_storages.pop()
+ gc.collect()
+
+ for _ in range(3):
+ x.set_(torch.ones((1,)).storage())
+
+ for _ in range(3):
+ x.set_()
+ x.set_(torch.ones((1,)).storage())
+
+ for i in range(4):
+ x.resize_((1 + i,))
+ x.view_as(x)
+
+ self.assertExpectedInline(
+ self._format_allocations(profiled_code),
+ """\
+ 0 1 Allocation
+ 0 2 Allocation
+ 0 4 Allocation
+ 0 5 Allocation
+ 0 4 Free
+ 0 2 Free
+ 0 1 Free
+ 0 6 Allocation
+ 0 5 Free
+ 0 7 Allocation
+ 0 6 Free
+ 0 8 Allocation
+ 0 7 Free
+ 0 8 Free
+ 0 9 Allocation
+ 0 9 Free
+ 0 10 Allocation
+ 0 10 Free
+ 0 11 Allocation
+ 0 12 Allocation
+ 0 11 Free
+ 0 13 Allocation
+ 0 12 Free
+ 0 14 Allocation
+ 0 13 Free
+ 0 14 Free""",
+ )
+
+ def test_tensorimpl_invalidation_scalar_args(self) -> None:
+ def profiled_code():
+ with torch.no_grad():
+ x = torch.ones((1,))
+ for _ in range(10):
+ x.add_(2)
+
+ self.assertExpectedInline(
+ self._format_allocations(profiled_code),
+ """\
+ 0 1 Allocation
+ 1 2 Allocation
+ 2 3 Allocation
+ 2 3 Free
+ 1 2 Free
+ 3 4 Allocation
+ 4 5 Allocation
+ 4 5 Free
+ 3 4 Free
+ 5 6 Allocation
+ 6 7 Allocation
+ 6 7 Free
+ 5 6 Free
+ 7 8 Allocation
+ 8 9 Allocation
+ 8 9 Free
+ 7 8 Free
+ 9 10 Allocation
+ 10 11 Allocation
+ 10 11 Free
+ 9 10 Free
+ 11 12 Allocation
+ 12 13 Allocation
+ 12 13 Free
+ 11 12 Free
+ 13 14 Allocation
+ 14 15 Allocation
+ 14 15 Free
+ 13 14 Free
+ 15 16 Allocation
+ 16 17 Allocation
+ 16 17 Free
+ 15 16 Free
+ 17 18 Allocation
+ 18 19 Allocation
+ 18 19 Free
+ 17 18 Free
+ 19 20 Allocation
+ 20 21 Allocation
+ 20 21 Free
+ 19 20 Free
+ 0 1 Free""",
+ )
+
+ def test_module_and_optimizer_ids(self) -> None:
+ model = torch.nn.Linear(2, 1, bias=True)
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
+
+ def check(cold_start: bool) -> None:
+ with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
+ x = torch.ones((1, 2))
+ _ = x.sin() # Mark `x`
+ model(x).backward()
+ optimizer.step()
+ _ = optimizer.state[model.weight][
+ "momentum_buffer"
+ ].cos() # Mark weight momentum
+ _ = model.weight.grad.tan() # Mark weight gradient
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+
+ def get_fields(op_name, index):
+ return self._get_tensor_fields(
+ find_node_with_name(nodes, op_name), index
+ )
+
+ # Marked Tensors act as ground truth for python tracer IDs.
+ _, _, x_id = get_fields("aten::sin", 0)
+ _, _, weight_momenumtum_id = get_fields("aten::cos", 0)
+ _, _, weight_grad_id = get_fields("aten::tan", 0)
+ self.assertNotEqual(x_id, weight_momenumtum_id)
+ self.assertNotEqual(x_id, weight_grad_id)
+ self.assertNotEqual(weight_momenumtum_id, weight_grad_id)
+
+ # Use linear op to identify weight ground truth.
+ linear_op_node = find_node_with_name(nodes, "aten::linear")
+ self.assertIsNotNone(linear_op_node)
+ x_metadata, weight_metadata, _ = linear_op_node.extra_fields.inputs
+ self.assertEqual(x_id, x_metadata.id)
+
+ # Module
+ linear_module_node = find_node_with_name(nodes, "nn.Module: Linear_0")
+ self.assertIsNotNone(linear_module_node)
+ self.assertIsNotNone(linear_module_node.extra_fields.module)
+ self.assertIsNone(linear_module_node.extra_fields.optimizer)
+
+ linear_parameters = linear_module_node.extra_fields.module.parameters
+ name, weight, weight_grad = linear_parameters[0]
+ self.assertEqual(name, "weight")
+ self.assertEqual(weight.id, weight_metadata.id)
+
+ self.assertEqual(weight_grad is None, cold_start)
+ if not cold_start:
+ self.assertEqual(weight_grad.id, weight_grad_id)
+
+ # Optimizer
+ step_node = find_node_with_regex(nodes, "_optimizer_step_code")
+ self.assertIsNotNone(step_node)
+ self.assertIsNone(step_node.extra_fields.module)
+ self.assertIsNotNone(step_node.extra_fields.optimizer)
+ optimizer_parameters = step_node.extra_fields.optimizer.parameters
+ self.assertEqual(len(optimizer_parameters), 2) # Weight and bias
+ weight, weight_grad, state = optimizer_parameters[0]
+ self.assertEqual(weight.id, weight_metadata.id)
+ self.assertEqual(weight_grad.id, weight_grad_id)
+ self.assertEqual(len(state), 1)
+ self.assertEqual(state[0][0], "momentum_buffer")
+ self.assertEqual(state[0][1].id, weight_momenumtum_id)
+
+ # Check that we handle the first step (lazy initialization) and steady state.
+ check(cold_start=True)
+ check(cold_start=False)
+
+ def _test_allocation_ids(self, before_fn, after_fn) -> None:
+ with profile(profile_memory=True, record_shapes=True) as p:
+ # Introduce other operations and allocations to check robustness
+ _ = before_fn()
+
+ x = torch.rand(4, 3)
+ x.resize_(4, 4)
+
+ # We need to use `x` post resize for profiler to determine its ID.
+ x.sin()
+
+ # Introduce other operations and allocations to check robustness
+ _ = after_fn()
+
+ # Ensure `x` is the last variable collected to make it easier to
+ # find the deallocation event.
+ gc.collect()
+ del x
+ gc.collect()
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+
+ def find_chain(names: List[str]):
+ out = []
+ for name in names:
+ root = [out[-1]] if out else nodes
+ out.append(find_node_with_name(root, name))
+ self.assertIsNotNone(out[-1], name)
+ return out
+
+ allocation = find_chain(["aten::rand", "aten::empty", "[memory]"])[
+ -1
+ ].extra_fields
+ _, uniform_node = find_chain(["aten::rand", "aten::uniform_"])
+ x_impl, x_storage_data, x_id = self._get_tensor_fields(uniform_node, 0)
+
+ # Make sure IDs are consistent between allocations and op inputs
+ self.assertEqual(allocation.ptr, x_storage_data)
+ self.assertEqual(allocation.id, x_id)
+
+ resize_node = find_node_with_name(nodes, "aten::resize_")
+ self.assertIsNotNone(resize_node)
+ self.assertEqual(len(resize_node.children), 2)
+ allocate_new = resize_node.children[0].extra_fields
+ free_old = resize_node.children[1].extra_fields
+
+ # Destruction of the old storage for x.
+ self.assertEqual(free_old.id, allocation.id)
+ self.assertEqual(free_old.ptr, allocation.ptr)
+
+ # Make sure ID is retained through change in storage.
+ self.assertEqual(allocate_new.id, allocation.id)
+ self.assertNotEqual(allocate_new.ptr, allocation.ptr)
+
+ # Deletion when `x` goes out of scope.
+ free_new = [
+ i for i in nodes if i.tag == torch._C._profiler._EventType.Allocation
+ ][-1].extra_fields
+ self.assertIsInstance(free_new, torch._C._profiler._ExtraFields_Allocation)
+ self.assertEqual(free_new.id, allocate_new.id)
+ self.assertEqual(free_new.ptr, allocate_new.ptr)
+
+ def test_allocation_ids(self) -> None:
+ self._test_allocation_ids(lambda: None, lambda: None)
+
+ def test_allocation_ids_with_other_ops(self) -> None:
+ x = torch.ones((1,))
+ self._test_allocation_ids(
+ lambda: (x + 1).relu_(), lambda: torch.zeros((1,)).cos()
+ )
+
+ def test_impl_reuse(self) -> None:
+ repeats = 1_000
+ with profile(profile_memory=True, record_shapes=True) as p:
+ for _ in range(repeats):
+ torch.ones((1,))
+ gc.collect()
+
+ roots = p.profiler.kineto_results.experimental_event_tree()
+ tensor_impls = tuple(
+ e.extra_fields.inputs[0].impl_ptr
+ for e in _utils.traverse_dfs(roots)
+ if e.name == "aten::fill_"
+ )
+
+ self.assertEqual(len(tensor_impls), repeats)
+ self.assertEqual(len(set(tensor_impls)), repeats)
+
+ def test_allocation_id_uniqueness(self) -> None:
+ repeats = 1_000
+ with profile(profile_memory=True, record_shapes=True) as p:
+ for _ in range(repeats):
+ torch.ones((1,))
+ gc.collect()
+
+ roots = p.profiler.kineto_results.experimental_event_tree()
+ id_set = set()
+ for e in _utils.traverse_dfs(roots):
+ fields = e.extra_fields
+ if isinstance(fields, torch._C._profiler._ExtraFields_TorchOp):
+ id_set |= {
+ t.allocation_id
+ for t in fields.inputs
+ if isinstance(t, _TensorMetadata)
+ }
+
+ elif isinstance(fields, torch._C._profiler._ExtraFields_Allocation):
+ id_set.add(fields.allocation_id)
+
+ id_set.difference_update([None])
+ self.assertEqual(repeats, len(id_set))
+
+ def test_extra_fields(self):
+ with profile(with_stack=True, profile_memory=True) as p:
+ _ = torch.ones((1,))
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+ node = find_node_with_name(nodes, "aten::ones")
+ self.assertIsNotNone(node)
+
+ self.assertIsInstance(
+ node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
+ )
+
+ self.assertIsInstance(
+ node.parent.extra_fields, torch._C._profiler._ExtraFields_PyCCall
+ )
+
+ self.assertEqual(node.children[0].name, "aten::empty")
+ self.assertEqual(node.children[0].children[0].name, "[memory]")
+ self.assertIsInstance(
+ node.children[0].children[0].extra_fields,
+ torch._C._profiler._ExtraFields_Allocation,
+ )
+
+ def test_tensor_properties(self):
+ x = torch.ones(10, 10).as_strided([4, 4], [12, 3])
+ y = torch.ones(4, 1, requires_grad=True)
+
+ with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
+ _ = x + y
+ _ = x * y
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+ node = find_node_with_name(nodes, "aten::add")
+ self.assertIsNotNone(node)
+
+ self.assertIsInstance(
+ node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
+ )
+
+ def getattr_inputs(name, default):
+ return [getattr(i, name, default) for i in node.extra_fields.inputs]
+
+ self.assertEqual(getattr_inputs("sizes", []), [[4, 4], [4, 1], []])
+ self.assertEqual(getattr_inputs("strides", []), [[12, 3], [1, 1], []])
+ self.assertEqual(
+ getattr_inputs("layout", None), [torch.strided, torch.strided, None]
+ )
+ self.assertEqual(
+ getattr_inputs("device", None),
+ [torch.device("cpu"), torch.device("cpu"), None],
+ )
+ self.assertEqual(
+ getattr_inputs("dtype", None), [torch.float32, torch.float32, None]
+ )
+ self.assertEqual(node.extra_fields.scope, torch.profiler.RecordScope.FUNCTION)
+
+ mul_node = find_node_with_name(nodes, "aten::mul")
+ self.assertIsNotNone(mul_node)
+ self.assertEqual(
+ node.extra_fields.sequence_number + 1, mul_node.extra_fields.sequence_number
+ )
+
+ def test_sparse_tensors(self):
+ i = [[0, 1, 1], [2, 0, 2]]
+ v = [3, 4, 5]
+ s = torch.sparse_coo_tensor(i, v, (2, 3))
+
+ with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
+ _ = s + s
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+ node = find_node_with_name(nodes, "aten::add")
+ self.assertIsNotNone(node)
+
+ self.assertIsInstance(
+ node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
+ )
+
+ def getattr_inputs(name, default):
+ return [getattr(i, name, default) for i in node.extra_fields.inputs]
+
+ self.assertEqual(getattr_inputs("sizes", []), [[2, 3], [2, 3], []])
+ self.assertEqual(getattr_inputs("strides", []), [[], [], []])
+ self.assertEqual(
+ getattr_inputs("layout", None), [torch.sparse_coo, torch.sparse_coo, None]
+ )
+ self.assertEqual(
+ getattr_inputs("device", None),
+ [torch.device("cpu"), torch.device("cpu"), None],
+ )
+
+ @unittest.skipIf(
+ not torch.backends.mkldnn.is_available(), "MKL-DNN build is disabled"
+ )
+ def test_mkldnn_tensors(self):
+ x = torch.ones(4, 3).to_mkldnn()
+
+ with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
+ _ = x + x
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+ node = find_node_with_name(nodes, "aten::add")
+ self.assertIsNotNone(node)
+
+ self.assertIsInstance(
+ node.extra_fields, torch._C._profiler._ExtraFields_TorchOp
+ )
+
+ def getattr_inputs(name, default):
+ return [getattr(i, name, default) for i in node.extra_fields.inputs]
+
+ self.assertEqual(getattr_inputs("sizes", []), [[4, 3], [4, 3], []])
+ self.assertEqual(getattr_inputs("strides", []), [[], [], []])
+ self.assertEqual(
+ getattr_inputs("layout", None), [torch._mkldnn, torch._mkldnn, None]
+ )
+ self.assertEqual(
+ getattr_inputs("device", None),
+ [torch.device("cpu"), torch.device("cpu"), None],
+ )
+
+ def test_scalar_ins(self):
+ x = torch.ones(5, 5)
+ alpha = 0.9
+
+ with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
+ _ = torch.add(x, 9.1, alpha=alpha)
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+ node = find_node_with_name(nodes, "aten::add")
+ self.assertIsNotNone(node)
+
+ def getattr_inputs(name, default):
+ return [getattr(i, name, default) for i in node.extra_fields.inputs]
+
+ # The second argument to the add gets promoted to a zero-dim Tensor
+ self.assertEqual(
+ getattr_inputs("dtype", None), [torch.float32, torch.float64, None]
+ )
+ self.assertEqual(getattr_inputs("sizes", []), [[5, 5], [], []])
+ self.assertEqual(node.extra_fields.inputs[2], alpha)
+
+ def test_tensor_lists(self):
+ x = torch.ones((1,))
+ y = torch.ones((1,))
+ with profile(with_stack=True, profile_memory=True, record_shapes=True) as p:
+ _ = torch.stack((x, y))
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+ node = find_node_with_name(nodes, "aten::stack")
+ inputs = node.extra_fields.inputs
+ self.assertEqual(len(inputs), 2)
+ self.assertIsInstance(inputs[0], list)
+ self.assertEqual(len(inputs[0]), 2)
+ self.assertEqual(x.storage().data_ptr(), inputs[0][0].storage_data_ptr)
+ self.assertEqual(y.storage().data_ptr(), inputs[0][1].storage_data_ptr)
+
+ def test_nnmodule_params(self):
+ def flat_out_extrafields(nodes, out=None):
+ if out is None:
+ out = []
+ for node in nodes:
+ if (
+ isinstance(node.extra_fields, _ExtraFields_PyCall)
+ and node.extra_fields.module
+ ):
+ if node.extra_fields.module.parameters:
+ out.append(node.extra_fields.module)
+ flat_out_extrafields(node.children, out)
+ return out
+
+ inputs = torch.rand(10)
+ net = SimpleNet()
+ out = net(inputs)
+ torch.nn.functional.cross_entropy(out, torch.rand(2)).backward()
+ with torch.profiler.profile(with_stack=True, profile_memory=True) as p:
+ _ = net(inputs)
+
+ modules = flat_out_extrafields(
+ p.profiler.kineto_results.experimental_event_tree()
+ )
+ self.assertEqual(
+ len(modules), 2, f"Expected two parameter list, but got {len(modules)}"
+ )
+
+ params = [
+ (n, p.storage_data_ptr, g.storage_data_ptr)
+ for module in modules
+ for (n, p, g) in module.parameters
+ ]
+ expected = [
+ (name, val.storage().data_ptr(), val.grad.storage().data_ptr())
+ for name, val in net.fc1._parameters.items()
+ ]
+ expected += [
+ (name, val.storage().data_ptr(), val.grad.storage().data_ptr())
+ for name, val in net.fc2._parameters.items()
+ ]
+ self.assertEqual(expected, params, f"{expected} vs. {params}")
+
+ def _flat_out_extrafields(self, nodes, out=None):
+ if out is None:
+ out = []
+ for node in nodes:
+ if (
+ isinstance(node.extra_fields, _ExtraFields_PyCall)
+ and node.extra_fields.optimizer
+ and node.extra_fields.optimizer.parameters
+ ):
+ # avoiding OptInfo duplicates from iterations
+ addr = node.extra_fields.optimizer.parameters[0][0].storage_data_ptr
+ if not [o for o in out if addr == o.parameters[0][0].storage_data_ptr]:
+ out.append(node.extra_fields.optimizer)
+ self._flat_out_extrafields(node.children, out)
+ return out
+
+ def _check_results(self, opt, opts, check_items=False):
+ self.assertEqual(len(opts), 1, f"Expected 1 optimizer: len(opts): {len(opts)}")
+ self.assertEqual(
+ id(opt),
+ opts[0].self_ptr,
+ f"Optimizer addr ({id(opt)}) vs. profiled addr ({opts[0].self_ptr})",
+ )
+ if check_items:
+ self.assertEqual(len(opt.param_groups), len(opts))
+ for group, opt_ in zip(opt.param_groups, opts):
+ self.assertEqual(
+ [(v.storage().data_ptr()) for v in group.get("params", [])],
+ [(o.storage_data_ptr) for (o, _, _) in opt_.parameters],
+ )
+ for opt_ in opts:
+ observed_state = {
+ p.storage_data_ptr: {name: s.storage_data_ptr for name, s in state}
+ for (p, _, state) in opt_.parameters
+ }
+
+ # Make sure the profiler collected all optimizer state and check
+ # that the address recorded by the profiler is correct.
+ for parameter, parameter_state in opt.state.items():
+ self.assertEqual(
+ {
+ name: value.storage().data_ptr()
+ for name, value in parameter_state.items()
+ },
+ observed_state.get(parameter.storage().data_ptr(), []),
+ )
+
+ def test_optimizer(self):
+ inputs = torch.rand(10)
+ with torch.profiler.profile(with_stack=True, profile_memory=True) as p:
+ net = SimpleNet()
+ opt = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
+
+ opt.zero_grad()
+ out = net(inputs)
+ loss = torch.nn.functional.cross_entropy(out, torch.rand(2))
+ loss.backward()
+ opt.step()
+ self._check_results(
+ opt,
+ self._flat_out_extrafields(
+ p.profiler.kineto_results.experimental_event_tree()
+ ),
+ False,
+ )
+
+ def _test_optimizer_parameters(self, optimizer_factory):
+ inputs = torch.rand(10)
+ with torch.profiler.profile(with_stack=True, profile_memory=True) as p:
+ net = SimpleNet()
+ opt = optimizer_factory(net.parameters())
+ for _ in range(2):
+ opt.zero_grad()
+ out = net(inputs)
+ loss = torch.nn.functional.cross_entropy(out, torch.rand(2))
+ loss.backward()
+ opt.step()
+ self._check_results(
+ opt,
+ self._flat_out_extrafields(
+ p.profiler.kineto_results.experimental_event_tree()
+ ),
+ True,
+ )
+
+ def test_optimizer_parameters_sgd(self):
+ self._test_optimizer_parameters(
+ lambda params: torch.optim.SGD(params, lr=0.01, momentum=0.9)
+ )
+
+ def test_optimizer_parameters_adam(self):
+ self._test_optimizer_parameters(
+ lambda params: torch.optim.Adam(params, foreach=True)
+ )
+
+ def test_allocations(self):
+ gc.collect()
+ with profile(profile_memory=True) as p:
+ x = torch.empty((3, 4))
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+ node = find_node_with_name(nodes, "[memory]")
+ self.assertIsNotNone(node)
+
+ alloc_size = 3 * 4 * 4 # fp32 -> 4 bytes
+ ptr = node.extra_fields.ptr
+ self.assertGreater(ptr, 0)
+ self.assertEqual(node.extra_fields.alloc_size, alloc_size)
+ self.assertEqual(node.extra_fields.device, torch.device("cpu"))
+ total_allocated = node.extra_fields.total_allocated
+
+ # total_reserved is only for CUDACachingAllocator
+ self.assertEqual(node.extra_fields.total_reserved, 0)
+
+ with profile(profile_memory=True) as p:
+ del x
+ gc.collect()
+
+ nodes = p.profiler.kineto_results.experimental_event_tree()
+ node = find_node_with_name(nodes, "[memory]")
+ self.assertIsNotNone(node)
+
+ self.assertEqual(node.extra_fields.ptr, ptr)
+ self.assertEqual(node.extra_fields.alloc_size, -alloc_size)
+ self.assertEqual(node.extra_fields.device, torch.device("cpu"))
+ self.assertEqual(
+ node.extra_fields.total_allocated, total_allocated - alloc_size
+ )
+
+ def test_refcounts(self):
+ class Sentinel:
+ pass
+
+ def make():
+ outer_sentinel = Sentinel()
+
+ def outer():
+ # Python will only close over variables used in the function.
+ _ = outer_sentinel
+ inner_sentinel = Sentinel()
+
+ def inner():
+ _ = inner_sentinel
+
+ with profile(with_stack=True):
+ inner()
+
+ return weakref.ref(inner_sentinel)
+
+ return outer, weakref.ref(outer_sentinel)
+
+ # Use a factory function to ensure the test scope never sees strong
+ # references. `del` has strange semantics that interact with closures
+ # at an AST level, so this is simpler.
+ outer, outer_sentinel_ref = make()
+ inner_sentinel_ref = outer()
+
+ self.assertIsNone(inner_sentinel_ref())
+
+ # `outer` holds the last reference via closure.
+ self.assertIsNotNone(outer_sentinel_ref())
+
+ del outer
+ self.assertIsNone(outer_sentinel_ref())
+
+
+if __name__ == "__main__":
+ run_tests()
|
2.41.0
|
2b5738a8bf325d79468b839b8412b87cb9951c1
|
Wed, 24 Apr 2024 11:58:01 -0700
|
[PATCH 0696/1000] [benchmark][cudagraph] Explicitly call aten.div with CUDA denominator for cudagraphs (#119729)
|
aten.div's output device will be its numerator's device, so it is acceptable to do CUDA / CPU divisions. Post-grad passes operate only on graphs and can't handle runtime graph inputs, so we change user code to move inputs to CUDA for cudagraphs. This affects any graph that has CPU tensors as graph inputs. Pull Request resolved: https://github.com/pytorch/pytorch/pull/119729 Approved by: https://github.com/eellison
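A hedged illustration of the behavior described above (not part of the patch; assumes a CUDA device is available):

import torch

out = torch.randn(10, 10, device="cuda")

# aten.div takes its output device from the numerator, so a CUDA / CPU
# division runs fine eagerly, but a CPU-side denominator can force
# cudagraphs to skip the compiled graph.
loss_before = out.sum() / out.numel()

# The change to reduce_to_scalar_loss below builds the denominator on the
# numerator's device so the compiled graph only sees CUDA inputs.
loss_after = out.sum() / torch.tensor(out.numel(), device=out.device)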
|
diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index 5f9dd9b84d..7c9c84c894 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -1,5 +1,6 @@
# Owner(s): ["module: inductor"]
import functools
+import io
import re
import sys
import unittest
@@ -1342,6 +1343,24 @@ TORCH_LIBRARY(test_autograd_cpp_node_data_dependent, m) {
out = compiled_fn(activations)
self.assertTrue(len(activations) == 0)
+ @unittest.skipIf(not HAS_CUDA, "requires cuda")
+ def test_cudagraphs_cpu_division(self):
+ from torch._dynamo.testing import reduce_to_scalar_loss
+
+ model = torch.nn.Linear(10, 10, dtype=torch.float16).cuda()
+ inputs = torch.randn(10, 10, dtype=torch.float16).cuda()
+ out = model(inputs)
+ loss = reduce_to_scalar_loss(out)
+ torch._inductor.config.triton.cudagraphs = True
+
+ stderr_msgs = io.StringIO()
+ with mock.patch("sys.stderr", stderr_msgs), compiled_autograd.enable(
+ compiler_fn
+ ):
+ loss.backward()
+
+ self.assertFalse("skipping cudagraphs" in stderr_msgs.getvalue())
+
def load_test_module(name):
testdir = Path(__file__).absolute().parent.parent
diff --git a/torch/_dynamo/testing.py b/torch/_dynamo/testing.py
index c115e1cc09..2dd384f4d8 100644
--- a/torch/_dynamo/testing.py
+++ b/torch/_dynamo/testing.py
@@ -103,7 +103,7 @@ def reduce_to_scalar_loss(out):
"""Reduce the output of a model to get scalar loss"""
if isinstance(out, torch.Tensor):
# Mean does not work on integer tensors
- return out.sum() / out.numel()
+ return out.sum() / torch.tensor(out.numel(), device=out.device)
elif isinstance(out, (list, tuple)):
return sum(reduce_to_scalar_loss(x) for x in out) / len(out)
elif type(out).__name__ in (
|
2.41.0
|
55939904b08cb8d0e9d045e5eb7e03f6548b823
|
Thu, 25 Apr 2024 10:47:25 -0700
|
[PATCH 0697/1000] [cudagraphs] add more info to skip messages (#124700)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124700 Approved by: https://github.com/eellison ghstack dependencies: #119729
|
diff --git a/test/inductor/test_cudagraph_trees.py b/test/inductor/test_cudagraph_trees.py
index f80c610829..f1973b50ea 100644
--- a/test/inductor/test_cudagraph_trees.py
+++ b/test/inductor/test_cudagraph_trees.py
@@ -253,7 +253,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
opt = torch.compile(model.forward, mode="reduce-overhead")(x, y, z)
FileCheck().check(
- "skipping cudagraphs due to mutation on input. Found from"
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check("torch.logical_xor").run(captured_output[0])
@requires_multigpu()
@@ -266,9 +266,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
with capture_stderr() as captured_output:
foo(torch.ones([10], device="cuda"), torch.ones([20]))
- FileCheck().check("skipping cudagraphs due to cpu device.").check(
- "y + 2"
- ).run(captured_output[0])
+ FileCheck().check(
+ "skipping cudagraphs due to cpu device (arg1_1). Found from"
+ ).check("y + 2").run(captured_output[0])
with capture_stderr() as captured_output:
foo(
@@ -309,9 +309,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
with capture_stderr() as captured_output:
foo(inp())
- FileCheck().check("skipping cudagraphs due to mutation on input.").check(
- ".add_(2)"
- ).run(captured_output[0])
+ FileCheck().check(
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from"
+ ).check(".add_(2)").run(captured_output[0])
# mutation on inp doesnt hit cudagraphs
self.assertEqual(len(self.get_manager().roots), 0)
@@ -366,7 +366,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
mut_out = mut(tmp)
self.assertEqual(mut_out, non_mut(foo(inp)))
FileCheck().check_count(
- "skipping cudagraphs due to mutation on input.", 1, exactly=True
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from",
+ 1,
+ exactly=True,
).run(captured_output[0])
@parametrize("backend", ("inductor", "cudagraphs"))
@@ -396,7 +398,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
mut_out = mut(tmp)
self.assertEqual(mut_out, non_mut(foo(inp)))
FileCheck().check_count(
- "skipping cudagraphs due to mutation on input.", 0, exactly=True
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from",
+ 0,
+ exactly=True,
).run(captured_output[0])
torch.compiler.cudagraph_mark_step_begin()
@@ -407,9 +411,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
# now its an input from eager we should fallback to inductor without cudagraphs
with capture_stderr() as captured_output:
mut(mut_inp)
- FileCheck().check("skipping cudagraphs due to mutation on input.").check(
- "x.add_(2)"
- ).run(captured_output[0])
+ FileCheck().check(
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from"
+ ).check("x.add_(2)").run(captured_output[0])
self.assertEqual(mut_inp, non_mut(foo(inp)))
@parametrize("backend", ("inductor", "cudagraphs"))
@@ -434,7 +438,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
torch.compiler.cudagraph_mark_step_begin()
fee(inp(), foo(inp()))
FileCheck().check_count(
- "skipping cudagraphs due to mutation on input.", 1, exactly=True
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from",
+ 1,
+ exactly=True,
).run(captured_output[0])
@parametrize("backend", ("inductor", "cudagraphs"))
@@ -468,7 +474,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
mut(mut_inp) # should not warn since mut has warned
FileCheck().check_count(
- "skipping cudagraphs due to mutation on input.", 1, exactly=True
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from",
+ 1,
+ exactly=True,
).run(captured_output[0])
def test_function_compiled_multiple_times(self):
@@ -1611,8 +1619,15 @@ if HAS_CUDA and not TEST_WITH_ASAN:
def foo(x):
return x.item()
- self.assertEqual(foo(torch.tensor(3.0, device="cuda")), 3.0)
- self.assertEqual(foo(torch.tensor(6.0, device="cuda")), 6.0)
+ with capture_stderr() as captured_output:
+ self.assertEqual(foo(torch.tensor(3.0, device="cuda")), 3.0)
+ self.assertEqual(foo(torch.tensor(6.0, device="cuda")), 6.0)
+
+ # NOTE: this test is named after incompatible ops, but is not skipping due to incompatible ops.
+ # This should get fixed.
+ FileCheck().check(
+ "skipping cudagraphs due to cpu device (_local_scalar_dense)"
+ ).run(captured_output[0])
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero(self):
@@ -1620,13 +1635,38 @@ if HAS_CUDA and not TEST_WITH_ASAN:
def foo(x):
return x.nonzero()
- self.assertEqual(
- foo(torch.tensor([1, 0, 2], device="cuda")), torch.tensor([[0], [2]])
- )
- self.assertEqual(
- foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
+ with capture_stderr() as captured_output:
+ self.assertEqual(
+ foo(torch.tensor([1, 0, 2], device="cuda")),
+ torch.tensor([[0], [2]]),
+ )
+ self.assertEqual(
+ foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
+ )
+
+ FileCheck().check("skipping cudagraphs due to ['incompatible ops']").run(
+ captured_output[0]
)
+ @torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
+ def test_incompatible_cudagraph_ops_nonzero_backend(self):
+ @torch.compile(backend="cudagraphs")
+ def foo(x):
+ return x.nonzero()
+
+ with capture_stderr() as captured_output:
+ self.assertEqual(
+ foo(torch.tensor([1, 0, 2], device="cuda")),
+ torch.tensor([[0], [2]]),
+ )
+ self.assertEqual(
+ foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
+ )
+
+ FileCheck().check(
+ "skipping cudagraphs due to incompatible op (nonzero)"
+ ).run(captured_output[0])
+
def test_storage_access_error(self):
x = torch.rand([4], device="cuda")
torch._C._set_storage_access_error_msg(x, "custom error msg")
diff --git a/torch/_dynamo/backends/cudagraphs.py b/torch/_dynamo/backends/cudagraphs.py
index ee89b79690..742e141c71 100644
--- a/torch/_dynamo/backends/cudagraphs.py
+++ b/torch/_dynamo/backends/cudagraphs.py
@@ -12,13 +12,14 @@ from torch._dynamo.backends.debugging import boxed_nop
from torch._inductor.cudagraph_utils import (
BoxedDeviceIndex,
check_multiple_devices_or_any_cpu_nodes,
+ format_default_skip_message,
get_mutation_stack_trace,
get_placeholders,
)
from torch._inductor.utils import (
BoxedBool,
count_tangents,
- has_incompatible_cudagraph_ops,
+ get_first_incompatible_cudagraph_node,
num_fw_fixed_arguments,
output_node,
)
@@ -99,8 +100,8 @@ def check_for_skip(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]:
):
return skip
- if has_incompatible_cudagraph_ops(aot_model):
- return "skipping cudagraphs due to incompatible op"
+ if node := get_first_incompatible_cudagraph_node(aot_model):
+ return format_default_skip_message(f"incompatible op ({node.name})")
return None
diff --git a/torch/_inductor/cudagraph_utils.py b/torch/_inductor/cudagraph_utils.py
index e897096f4e..dd551fad03 100644
--- a/torch/_inductor/cudagraph_utils.py
+++ b/torch/_inductor/cudagraph_utils.py
@@ -1,5 +1,5 @@
import dataclasses
-from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
+from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
@@ -48,7 +48,7 @@ def format_default_skip_message(reason: str) -> str:
def get_mutation_stack_trace(
- placeholders: List[torch.fx.Node], mutation_indices: Iterable[int]
+ placeholders: List[torch.fx.Node], mutation_indices: List[int]
) -> str:
stack_trace: Optional[str] = ""
@@ -57,11 +57,13 @@ def get_mutation_stack_trace(
if stack_trace := get_mutating_use_stack_trace(placeholder):
break
+ msg = format_default_skip_message(
+ f"mutated inputs ({len(mutation_indices)} instances)"
+ )
if stack_trace:
- msg = f"skipping cudagraphs due to mutation on input. Found from : \n {stack_trace}"
- return msg
+ return f"{msg}. Found from : \n {stack_trace}"
- return format_default_skip_message("mutated inputs")
+ return msg
def check_for_mutation(
@@ -69,8 +71,6 @@ def check_for_mutation(
inputs: List[torch.Tensor],
is_cuda_graph_recorded_tensor: Callable[[torch.Tensor], bool],
) -> Optional[str]:
- default_msg = format_default_skip_message("mutated inputs")
-
# doesnt work for non-trees because the warmup run would apply mutation twice
if torch._inductor.config.triton.cudagraph_trees:
# checking if mutation is only on parameters/static inputs
@@ -82,15 +82,14 @@ def check_for_mutation(
or is_cuda_graph_recorded_tensor(inputs[idx])
)
]
- has_mutation = len(mutation_indices) != 0
- if not has_mutation:
- return None
-
- return get_mutation_stack_trace(func.placeholders, mutation_indices)
-
else:
- has_mutation = len(func.mutated_input_idxs) != 0
- return None if not has_mutation else default_msg
+ mutation_indices = func.mutated_input_idxs
+
+ return (
+ get_mutation_stack_trace(func.placeholders, mutation_indices)
+ if mutation_indices
+ else None
+ )
def get_use_stack_trace(node) -> Optional[str]:
@@ -104,12 +103,11 @@ def check_multiple_devices_or_any_cpu_nodes(
device_node_mapping: Dict[torch.device, torch.fx.Node]
) -> Optional[str]:
if cpu_node := device_node_mapping.get(torch.device("cpu")):
+ msg = f"cpu device ({cpu_node.name})"
if stack_trace := get_use_stack_trace(cpu_node):
- return format_default_skip_message(
- f"cpu device. Found from : \n {stack_trace}"
- )
+ return format_default_skip_message(f"{msg}. Found from : \n {stack_trace}")
- return format_default_skip_message("cpu device")
+ return format_default_skip_message(msg)
if (
len(device_node_mapping) == 1
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index b5b724eb0d..14478b0cb7 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -592,7 +592,7 @@ def any_is_symbolic(*args: Any) -> bool:
return any(is_symbolic(a) for a in args)
-def has_incompatible_cudagraph_ops(gm):
+def get_first_incompatible_cudagraph_node(gm):
from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
forbidden_set = {
@@ -628,10 +628,14 @@ def has_incompatible_cudagraph_ops(gm):
)
for node in gm.graph.nodes:
if str(node.target) in forbidden_set:
- return True
+ return node
if (val := node.meta.get("val")) is not None and free_unbacked_symbols(val):
- return True
- return False
+ return node
+ return None
+
+
+def has_incompatible_cudagraph_ops(gm):
+ return get_first_incompatible_cudagraph_node(gm) is not None
def output_node(gm: torch.fx.GraphModule):
|
2.41.0
|
4430564ce2b9b22676e2529b74fa1576e981b27
|
Thu, 25 Apr 2024 10:47:25 -0700
|
[PATCH 0698/1000] [cudagraphs] add cudagraph_skips counter (#124804)
|
Used in tests and in the benchmark CSV. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124804 Approved by: https://github.com/eellison ghstack dependencies: #119729, #124700
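A minimal usage sketch (illustrative only; assumes a CUDA build and mirrors the tests below): the counter lives in torch._dynamo.utils.counters and is bumped once per skipped graph, so tests can assert on it and the benchmark harness can dump it into its CSV.

import torch
from torch._dynamo.utils import counters

@torch.compile(mode="reduce-overhead")
def foo(x, y):
    # y stays on CPU, so cudagraphs is expected to skip this graph.
    return x + 1, y + 2

foo(torch.ones(10, device="cuda"), torch.ones(20))
print(counters["inductor"]["cudagraph_skips"])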
|
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index d610d7dd13..99fbd7b86d 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -1956,6 +1956,9 @@ def get_dynamo_stats():
"autograd_compiles": torch._dynamo.utils.counters["compiled_autograd"][
"compiles"
],
+ "cudagraph_skips": torch._dynamo.utils.counters["inductor"][
+ "cudagraph_skips"
+ ],
}
)
diff --git a/test/inductor/test_cudagraph_trees.py b/test/inductor/test_cudagraph_trees.py
index f1973b50ea..33ee5247bd 100644
--- a/test/inductor/test_cudagraph_trees.py
+++ b/test/inductor/test_cudagraph_trees.py
@@ -11,6 +11,7 @@ import torch
import torch._dynamo.config as dynamo_config
import torch.nn as nn
+from torch._dynamo.utils import counters
from torch._inductor import config
from torch._inductor.compile_fx import compile_fx_inner
from torch._inductor.cudagraph_trees import cudagraphify_impl as tree_cudagraphify_impl
@@ -255,6 +256,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check("torch.logical_xor").run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@requires_multigpu()
@parametrize("backend", ("inductor", "cudagraphs"))
@@ -269,6 +271,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to cpu device (arg1_1). Found from"
).check("y + 2").run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
with capture_stderr() as captured_output:
foo(
@@ -278,6 +281,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check("skipping cudagraphs due to multiple devices").run(
captured_output[0]
)
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 2)
@torch._inductor.config.patch("triton.cudagraph_skip_dynamic_graphs", True)
def test_skip_symbolic(self):
@@ -291,6 +295,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to graph with symbolic shapes inputs"
).check("x + y").run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@@ -312,6 +317,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check(".add_(2)").run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
# mutation on inp doesnt hit cudagraphs
self.assertEqual(len(self.get_manager().roots), 0)
@@ -402,6 +408,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
0,
exactly=True,
).run(captured_output[0])
+ self.assertTrue("cudagraph_skips" not in counters["inductor"])
torch.compiler.cudagraph_mark_step_begin()
inp = torch.rand([4], device="cuda")
@@ -415,6 +422,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check("x.add_(2)").run(captured_output[0])
self.assertEqual(mut_inp, non_mut(foo(inp)))
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@@ -442,6 +450,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
1,
exactly=True,
).run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@@ -478,6 +487,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
1,
exactly=True,
).run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
def test_function_compiled_multiple_times(self):
def foo(x):
@@ -1628,6 +1638,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to cpu device (_local_scalar_dense)"
).run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero(self):
@@ -1647,6 +1658,20 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check("skipping cudagraphs due to ['incompatible ops']").run(
captured_output[0]
)
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
+
+ @torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
+ def test_incompatible_cudagraph_ops_nonzero_graph_breaks(self):
+ @torch.compile(mode="reduce-overhead")
+ def foo(x):
+ y = x.nonzero() # skip
+ torch._dynamo.graph_break()
+ return y.nonzero() # skip 2 times (due to recompile)
+
+ foo(torch.tensor([1, 0, 2], device="cuda"))
+ foo(torch.tensor([1, 0, 0], device="cuda"))
+
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 3)
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero_backend(self):
@@ -1666,6 +1691,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to incompatible op (nonzero)"
).run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
def test_storage_access_error(self):
x = torch.rand([4], device="cuda")
diff --git a/torch/_dynamo/backends/cudagraphs.py b/torch/_dynamo/backends/cudagraphs.py
index 742e141c71..4cef3a68fd 100644
--- a/torch/_dynamo/backends/cudagraphs.py
+++ b/torch/_dynamo/backends/cudagraphs.py
@@ -15,6 +15,7 @@ from torch._inductor.cudagraph_utils import (
format_default_skip_message,
get_mutation_stack_trace,
get_placeholders,
+ log_cudagraph_skip_and_bump_counter,
)
from torch._inductor.utils import (
BoxedBool,
@@ -27,8 +28,6 @@ from torch._inductor.utils import (
from torch.multiprocessing.reductions import StorageWeakRef
from .registry import register_backend
-perf_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
-
def find_input_mutations(g):
def meta_fk(meta):
@@ -132,7 +131,9 @@ def cudagraphs(dynamo_model, dynamo_inputs):
fixed = num_fw_fixed_arguments(len(dynamo_inputs), len(aot_inputs))
if skip_msg := check_for_skip(aot_model, fixed):
BoxedBool.disable(do_cudagraphs)
- perf_log.warning("skipping cudagraphs due to %s", skip_msg)
+ log_cudagraph_skip_and_bump_counter(
+ f"skipping cudagraphs due to {skip_msg}"
+ )
return interp
boxed_device_index.set(get_device_index(aot_model))
@@ -157,7 +158,9 @@ def cudagraphs(dynamo_model, dynamo_inputs):
fixed = count_tangents(aot_model)
if skip_msg := check_for_skip(aot_model, fixed):
- perf_log.warning("skipping cudagraphs due to %s", skip_msg)
+ log_cudagraph_skip_and_bump_counter(
+ "skipping cudagraphs due to %s", skip_msg
+ )
# See [Backward Generation Handling]
manager = torch._inductor.cudagraph_trees.get_manager(
diff --git a/torch/_inductor/compile_fx.py b/torch/_inductor/compile_fx.py
index 93c5fa5ca7..13303acd4f 100644
--- a/torch/_inductor/compile_fx.py
+++ b/torch/_inductor/compile_fx.py
@@ -29,7 +29,11 @@ from torch._dynamo.utils import (
from torch._functorch import config as functorch_config
from torch._functorch.aot_autograd import aot_export_module, make_boxed_func
from torch._inductor.codecache import code_hash, CompiledFxGraph, FxGraphCache
-from torch._inductor.cudagraph_utils import BoxedDeviceIndex, get_placeholders
+from torch._inductor.cudagraph_utils import (
+ BoxedDeviceIndex,
+ get_placeholders,
+ log_cudagraph_skip_and_bump_counter,
+)
from torch._inductor.debug import save_args_for_compile_fx_inner
from torch._inductor.utils import (
@@ -488,9 +492,8 @@ def compile_fx_inner(
# check cudagraph disabling reasons from inductor lowering
if cudagraphs and compiled_graph.disabled_cudagraphs_reason:
if "cuda" in compiled_graph.device_types:
- perf_hint_log.warning(
- "skipping cudagraphs due to %s",
- compiled_graph.disabled_cudagraphs_reason,
+ log_cudagraph_skip_and_bump_counter(
+ f"skipping cudagraphs due to {compiled_graph.disabled_cudagraphs_reason}"
)
BoxedBool.disable(cudagraphs)
@@ -601,10 +604,12 @@ def compile_fx_inner(
# prefer better disable_cudagraphs_reason bc stack trace
# TODO: migrate all disable reasons to stack trace, refactor
if compiled_graph.disabled_cudagraphs_reason:
- perf_hint_log.warning(compiled_graph.disabled_cudagraphs_reason)
+ log_cudagraph_skip_and_bump_counter(
+ compiled_graph.disabled_cudagraphs_reason
+ )
else:
- perf_hint_log.warning(
- "skipping cudagraphs due to %s", cudagraph_fail_reasons
+ log_cudagraph_skip_and_bump_counter(
+ f"skipping cudagraphs due to {cudagraph_fail_reasons}"
)
# cudagraphs does its own aligning of inputs
diff --git a/torch/_inductor/cudagraph_trees.py b/torch/_inductor/cudagraph_trees.py
index 141354d43a..f1ca0950b9 100644
--- a/torch/_inductor/cudagraph_trees.py
+++ b/torch/_inductor/cudagraph_trees.py
@@ -79,6 +79,7 @@ from torch._inductor.compile_fx import (
from torch._inductor.cudagraph_utils import (
check_for_mutation,
FunctionID,
+ log_cudagraph_skip_and_bump_counter,
WrappedFunction,
)
from torch.multiprocessing.reductions import StorageWeakRef
@@ -111,9 +112,6 @@ log = torch._logging.getArtifactLogger(__name__, "cudagraphs")
from . import config
-perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
-
-
@dataclasses.dataclass(frozen=True)
class GraphID:
"Unique counter of a cuda graph recording"
@@ -1816,7 +1814,7 @@ class CUDAGraphTreeManager:
self, function_id: FunctionID, inputs: List[Tensor]
):
node_id = self._get_node_id()
- if has_mutation_str := check_for_mutation(
+ if maybe_mutation_str := check_for_mutation(
self.ids_to_funcs[function_id],
inputs,
self._get_cuda_graph_recorded_tensor_checker(),
@@ -1826,7 +1824,7 @@ class CUDAGraphTreeManager:
if function_id in self.warned_mutation:
return
self.warned_mutation.add(function_id)
- perf_hint_log.warning(has_mutation_str)
+ log_cudagraph_skip_and_bump_counter(maybe_mutation_str)
else:
self.non_cudagraph_managed_mutation_hint[node_id][function_id] = False
diff --git a/torch/_inductor/cudagraph_utils.py b/torch/_inductor/cudagraph_utils.py
index dd551fad03..c87022fcb7 100644
--- a/torch/_inductor/cudagraph_utils.py
+++ b/torch/_inductor/cudagraph_utils.py
@@ -2,6 +2,9 @@ import dataclasses
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
+from torch._dynamo.utils import counters
+
+perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
@dataclasses.dataclass(frozen=True)
@@ -125,6 +128,11 @@ def check_lowering_disable_cudagraph(
return check_multiple_devices_or_any_cpu_nodes(device_node_mapping)
+def log_cudagraph_skip_and_bump_counter(msg):
+ perf_hint_log.warning(msg)
+ counters["inductor"]["cudagraph_skips"] += 1
+
+
@dataclasses.dataclass
class BoxedDeviceIndex:
value: Optional[int]
|
2.41.0
|
3cf36cb7cb41e7d4f2390fb85381eafaf31e863
|
Fri, 26 Apr 2024 01:10:07 +0000
|
[PATCH 0699/1000] Implement deepcopy / clone for SymNode, NestedIntSymNode (#121361)
|
**Motivation**: There's a Meta-internal use case that deepcopies a bunch of metadata, which includes shapes. When we try to use NestedTensor with this tool, it errors out when we try to deepcopy the metadata, because SymNodes cannot be deepcopied. The change here is to add an implementation of `__deepcopy__`. **Implementation**: 1. `__deepcopy__` on SymNode calls clone() 2. Implement `clone()` in NestedIntSymNode, which previously didn't have this implemented **Potential Issues**: Right now, this works. But, regarding (2): Eventually we'll have some mapping between the NestedSymIntNode and its corresponding offsets/lengths tensor (cc @soulitzer who is working on this). How should this work with `__deepcopy__`? Should the offsets/lengths tensor also be cloned, or should the new symint reference the same offsets as the old symint? On one hand, we already have this issue with NestedIntSymNodeImpl::mul(): mul() creates a new NestedIntSymNodeImpl. On the other hand, `__deepcopy__` might imply different semantics. Pull Request resolved: https://github.com/pytorch/pytorch/pull/121361 Approved by: https://github.com/soulitzer
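A minimal sketch of the new behavior (mirrors the test added below; _get_nested_int is an internal helper, shown here for illustration only):

import copy
import torch

j = torch._C._get_nested_int(1, 1)

# Before this patch, deepcopy raised because clone() was unsupported on
# NestedIntSymNode; now __deepcopy__ dispatches to clone().
j_copy = copy.deepcopy(j)
assert j == j_copy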
|
diff --git a/aten/src/ATen/core/NestedIntSymNodeImpl.cpp b/aten/src/ATen/core/NestedIntSymNodeImpl.cpp
index 5bc89369f1..b703f76773 100644
--- a/aten/src/ATen/core/NestedIntSymNodeImpl.cpp
+++ b/aten/src/ATen/core/NestedIntSymNodeImpl.cpp
@@ -73,4 +73,8 @@ c10::SymNode NestedIntSymNodeImpl::mul(const c10::SymNode& other) {
return SymNode(c10::make_intrusive<NestedIntSymNodeImpl>(val_, coeff_ * *c));
}
+c10::SymNode NestedIntSymNodeImpl::clone() {
+ return SymNode(c10::make_intrusive<NestedIntSymNodeImpl>(val_, coeff_));
+}
+
} // namespace c10
diff --git a/aten/src/ATen/core/NestedIntSymNodeImpl.h b/aten/src/ATen/core/NestedIntSymNodeImpl.h
index b24b66aa95..228f4310a3 100644
--- a/aten/src/ATen/core/NestedIntSymNodeImpl.h
+++ b/aten/src/ATen/core/NestedIntSymNodeImpl.h
@@ -146,6 +146,8 @@ class TORCH_API NestedIntSymNodeImpl : public SymNodeImpl {
return false;
}
+ c10::SymNode clone() override;
+
#define DEFINE_BINARY_NOT_SUPPORTED(name) \
c10::SymNode name(const c10::SymNode& other) override { \
TORCH_CHECK(false, #name " not supported by NestedIntSymNode"); \
@@ -173,7 +175,6 @@ class TORCH_API NestedIntSymNodeImpl : public SymNodeImpl {
DEFINE_NOT_SUPPORTED(ceil)
DEFINE_NOT_SUPPORTED(floor)
DEFINE_NOT_SUPPORTED(neg)
- DEFINE_NOT_SUPPORTED(clone)
DEFINE_NOT_SUPPORTED(sym_float)
#undef DEFINE_NOT_SUPPORTED
diff --git a/test/test_dynamic_shapes.py b/test/test_dynamic_shapes.py
index 6e6525e7e9..284cf85d01 100644
--- a/test/test_dynamic_shapes.py
+++ b/test/test_dynamic_shapes.py
@@ -1055,6 +1055,13 @@ class TestSymNumberMagicMethods(TestCase):
hash(n)
hash(m)
+ def test_symint_deepcopy(self):
+ shape_env = ShapeEnv()
+
+ symnodes = (torch._C._get_nested_int(1, 1),)
+ deepcopied_symnodes = copy.deepcopy(symnodes)
+ self.assertEqual(symnodes, deepcopied_symnodes)
+
def test_non_symbolic_symnode(self):
j1 = torch._C._get_nested_int(1, 1)
j2 = torch._C._get_nested_int(1, 1)
diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp
index 2023ec27ba..f26dad973d 100644
--- a/torch/csrc/jit/python/init.cpp
+++ b/torch/csrc/jit/python/init.cpp
@@ -1303,6 +1303,11 @@ void initJITBindings(PyObject* module) {
"nested_int_coeff",
[](const c10::SymNode& node) {
return node->nested_int_coeff();
+ })
+ .def(
+ "__deepcopy__",
+ [](const c10::SymNode& node, py::handle memo) {
+ return node->clone();
});
// clang-format on
|
2.41.0
|
323c681ad7bfe1b1f501e4225e2dcf9d9bc9c93
|
Fri, 26 Apr 2024 05:10:50 +0000
|
[PATCH 0700/1000] Update trymerge to honor the list of unstable failures from Dr.CI (#124965)
|
After https://github.com/pytorch/test-infra/pull/5131, we want trymerge to honor the list of unstable failures from Dr.CI, because having the unstable keyword in the job name no longer covers all unstable jobs. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124965 Approved by: https://github.com/clee2000
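
For reference, a self-contained sketch of the new matching rule (the helper name and the example job id here are made up for illustration; the rule itself and the `name`/`id` payload keys mirror the `is_unstable` helper added below):

```python
from typing import Any, Dict, List, Optional


def matches_unstable(
    name: str,
    job_id: Optional[int],
    drci_classifications: Dict[str, List[Dict[str, Any]]],
) -> bool:
    # Original path: the job name itself carries the "unstable" keyword.
    if "unstable" in name:
        return True
    # New path: Dr.CI's "UNSTABLE" list names the job, by name or by job id.
    return any(
        name == unstable["name"] or (job_id and job_id == unstable["id"])
        for unstable in drci_classifications.get("UNSTABLE", [])
    )


# Hypothetical Dr.CI payload: the job id 123 is invented for illustration; the
# job name matches the case exercised in the new test below.
drci = {
    "UNSTABLE": [
        {"name": "Android / test-llama-app / mobile-job (android)", "id": 123}
    ]
}
print(matches_unstable("Android / test-llama-app / mobile-job (android)", 123, drci))  # True
```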
|
diff --git a/.github/scripts/drci_mocks.json.gz b/.github/scripts/drci_mocks.json.gz
index f64c0f5e3c0a573b2d4e08f310edad81a14e6598..a4c1db752cb093a419e2f88149ee71eef18e49b0 100644
GIT binary patch
(two binary deltas, "delta 6574" and "delta 6333", base85-encoded gzip data omitted; not human-readable)
diff --git a/.github/scripts/gql_mocks.json.gz b/.github/scripts/gql_mocks.json.gz
index aaf415d4acb980730103bca1ee744f2e3396a2d1..31a5230dbae9aa2c5d3d842c4640f40d0aa5312a 100644
GIT binary patch
(two binary deltas, "delta 16098" and "delta 13076", base85-encoded gzip data omitted; not human-readable)
diff --git a/.github/scripts/test_trymerge.py b/.github/scripts/test_trymerge.py
index bca7d1ac69..2641fd30f3 100755
--- a/.github/scripts/test_trymerge.py
+++ b/.github/scripts/test_trymerge.py
@@ -741,6 +741,30 @@ class TestBypassFailures(TestCase):
self.assertTrue(len(failed) == 0)
self.assertTrue(len(ignorable["UNSTABLE"]) == 1)
+ # Add another test case where there is no unstable keyword in the job name, but
+ # the job has already been marked as unstable
+ pr = GitHubPR("pytorch", "executorch", 3318)
+ checks = pr.get_checkrun_conclusions()
+ checks = get_classifications(
+ pr.pr_num,
+ pr.project,
+ checks,
+ [],
+ )
+ print(checks)
+ workflow_name = "test-llama-app"
+ job_name = "mobile-job (android)"
+ self.assertTrue(
+ checks[f"Android / {workflow_name} / {job_name}"].classification
+ == "UNSTABLE"
+ )
+ pending, failed, ignorable = categorize_checks(
+ checks, list(checks.keys()), ok_failed_checks_threshold=1
+ )
+ self.assertTrue(len(pending) == 0)
+ self.assertTrue(len(failed) == 0)
+ self.assertTrue(len(ignorable["UNSTABLE"]) == 1)
+
def test_get_classifications_broken_trunk(self, *args: Any) -> None:
# The mock merge base is the actual value returned by gh_fetch_merge_base
test_cases = [
diff --git a/.github/scripts/trymerge.py b/.github/scripts/trymerge.py
index 2301523195..95311d2d9b 100755
--- a/.github/scripts/trymerge.py
+++ b/.github/scripts/trymerge.py
@@ -1635,6 +1635,28 @@ def is_broken_trunk(
)
+def is_unstable(
+ check: JobCheckState,
+ drci_classifications: Any,
+) -> bool:
+ if not check or not drci_classifications:
+ return False
+
+ name = check.name
+ job_id = check.job_id
+
+ # The job name has the unstable keyword. This is the original way to mark a job
+ # as unstable on HUD, Dr.CI, and trymerge
+ if "unstable" in name:
+ return True
+
+ # Consult the list of unstable failures from Dr.CI
+ return any(
+ (name == unstable["name"] or (job_id and job_id == unstable["id"]))
+ for unstable in drci_classifications.get("UNSTABLE", [])
+ )
+
+
def is_flaky(
check: JobCheckState,
drci_classifications: Any,
@@ -1722,7 +1744,7 @@ def get_classifications(
if check.status == "SUCCESS" or check.status == "NEUTRAL":
continue
- if "unstable" in name:
+ if is_unstable(check, drci_classifications):
checks_with_classifications[name] = JobCheckState(
check.name,
check.url,
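For context, here is a minimal standalone sketch of the matching rule the `is_unstable` helper above applies: a job is treated as unstable either because its name contains the "unstable" keyword or because Dr.CI lists it under "UNSTABLE" by name or job id. The check object and Dr.CI payload below are hypothetical stand-ins, not the real trymerge data structures (the id value 123 is made up).

from typing import Any, NamedTuple, Optional

class Check(NamedTuple):
    name: str
    job_id: Optional[int]

def is_unstable(check: Check, drci: Any) -> bool:
    if not check or not drci:
        return False
    if "unstable" in check.name:           # original keyword-based rule
        return True
    return any(                            # or Dr.CI marks the job unstable
        check.name == u["name"] or (check.job_id and check.job_id == u["id"])
        for u in drci.get("UNSTABLE", [])
    )

drci = {"UNSTABLE": [{"name": "Android / test-llama-app / mobile-job (android)", "id": 123}]}
assert is_unstable(Check("Android / test-llama-app / mobile-job (android)", None), drci)
assert is_unstable(Check("linux-job / unstable-build", None), drci)
assert not is_unstable(Check("linux-job / build", 999), drci)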
|
2.41.0
|
607dc8abb89f225cceb140277e260a9b52dd06f
|
Fri, 26 Apr 2024 06:10:58 +0000
|
[PATCH 0701/1000] Revert "Refactor all top level usages of record_shapeenv_event to ShapeEnv class (#123735)"
|
This reverts commit 87bec7db4e55f329e077eb7003af2f4817cd4210. Reverted https://github.com/pytorch/pytorch/pull/123735 on behalf of https://github.com/jeanschmidt due to Breaking internal signals, more info in D56587358 ([comment](https://github.com/pytorch/pytorch/pull/123735#issuecomment-2078695590))
|
diff --git a/torch/_export/serde/serialize.py b/torch/_export/serde/serialize.py
index 55f7e505be..31483e68f0 100644
--- a/torch/_export/serde/serialize.py
+++ b/torch/_export/serde/serialize.py
@@ -1456,7 +1456,8 @@ class GraphModuleDeserializer(metaclass=Final):
self.shape_env.add_var_to_val(sym, hint)
if vr := self.symbol_name_to_range.get(val.expr_str):
- self.shape_env.constrain_symbol_range(
+ symbolic_shapes._constrain_symbol_range(
+ self.shape_env,
sym,
compiler_min=vr.lower, # type: ignore[arg-type]
compiler_max=vr.upper, # type: ignore[arg-type]
@@ -1471,7 +1472,8 @@ class GraphModuleDeserializer(metaclass=Final):
if s.name not in self.symbol_name_to_symbol:
self.symbol_name_to_symbol[s.name] = s
if vr := self.symbol_name_to_range.get(s.name):
- self.shape_env.constrain_symbol_range(
+ symbolic_shapes._constrain_symbol_range(
+ self.shape_env,
s,
compiler_min=vr.lower, # type: ignore[arg-type]
compiler_max=vr.upper, # type: ignore[arg-type]
diff --git a/torch/_logging/_registrations.py b/torch/_logging/_registrations.py
index 4b87a8b592..5ff3372feb 100644
--- a/torch/_logging/_registrations.py
+++ b/torch/_logging/_registrations.py
@@ -1,11 +1,7 @@
# flake8: noqa: B950
from ._internal import register_artifact, register_log
-DYNAMIC = [
- "torch.fx.experimental.symbolic_shapes",
- "torch.fx.experimental.sym_node",
- "torch.fx.experimental.recording",
-]
+DYNAMIC = ["torch.fx.experimental.symbolic_shapes", "torch.fx.experimental.sym_node"]
DISTRIBUTED = [
"torch.distributed",
"torch._dynamo.backends.distributed",
diff --git a/torch/fx/experimental/recording.py b/torch/fx/experimental/recording.py
index 4bf9ebab17..c200c10e6f 100644
--- a/torch/fx/experimental/recording.py
+++ b/torch/fx/experimental/recording.py
@@ -1,5 +1,4 @@
import functools
-import inspect
import itertools
import logging
from dataclasses import dataclass
@@ -221,64 +220,52 @@ def _extract_shape_env_and_assert_equal(args, kwargs):
def record_shapeenv_event(*, save_tracked_fakes: bool = False) -> Callable:
def decorator(fn: Callable) -> Callable:
assert callable(fn)
- args = inspect.getfullargspec(fn).args
- assert args and args[0] == "self", (
- "record_shapeenv_event should only wrap methods on ShapeEnv; refactor your "
- "code so that it calls into a method on ShapeEnv"
- )
name = fn.__name__
@functools.wraps(fn)
def wrapper(*args, **kwargs):
from torch.fx.experimental.symbolic_shapes import ShapeEnv
- assert isinstance(args[0], ShapeEnv)
-
- try:
- if args[0].is_recording: # type: ignore[has-type]
- # If ShapeEnv is already recording an event, call the wrapped
- # function directly.
- #
- # NB: here, we skip the check of whether all ShapeEnv instances
- # are equal, in favor of a faster dispatch.
- return fn(*args, **kwargs)
-
- # Retrieve an instance of ShapeEnv.
- # Assumption: the collection of args and kwargs may not reference
- # different ShapeEnv instances.
- self = _extract_shape_env_and_assert_equal(args, kwargs)
-
- # If we are calling this function without any ShapeEnv instance
- # alive in its arguments, we don't record and call the original.
- if self is None:
- return fn(*args, **kwargs)
-
- # Otherwise, start recording and call the function.
- with self._recording():
- # Take a snapshot of the current tracked_fakes.
- tracked_fakes = (
- self._snapshot_tracked_fakes() if save_tracked_fakes else None
- )
- # Record the event for 'fn'.
- event = ShapeEnvEvent(
- fn, list(args), kwargs, tracked_fakes, name=fn.__name__
- )
- # Play the event on this ShapeEnv.
- # NB: It's important to put the event first, because running
- # the event can trigger internal events that must be ordered
- # after this event. However, if an exception happens, we do
- # NOT want to have the event in the list, so pop it off from
- # the record if an error happened
- self.events.append(event)
- try:
- return event.run(self)
- except Exception:
- self.events.pop()
- raise
-
- except Exception:
- log.error("failed while running %s(*%s, **%s)", name, args[1:], kwargs)
- raise
+ if isinstance(args[0], ShapeEnv) and args[0].is_recording: # type: ignore[has-type]
+ # If ShapeEnv is already recording an event, call the wrapped
+ # function directly.
+ #
+ # NB: here, we skip the check of whether all ShapeEnv instances
+ # are equal, in favor of a faster dispatch.
+ return fn(*args, **kwargs)
+
+ # Retrieve an instance of ShapeEnv.
+ # Assumption: the collection of args and kwargs may not reference
+ # different ShapeEnv instances.
+ self = _extract_shape_env_and_assert_equal(args, kwargs)
+
+ # If we are calling this function without any ShapeEnv instance
+ # alive in its arguments, we don't record and call the original.
+ if self is None:
+ return fn(*args, **kwargs)
+
+ # Otherwise, start recording and call the function.
+ with self._recording():
+ # Take a snapshot of the current tracked_fakes.
+ tracked_fakes = (
+ self._snapshot_tracked_fakes() if save_tracked_fakes else None
+ )
+ # Record the event for 'fn'.
+ event = ShapeEnvEvent(
+ fn, list(args), kwargs, tracked_fakes, name=fn.__name__
+ )
+ # Play the event on this ShapeEnv.
+ # NB: It's important to put the event first, because running
+ # the event can trigger internal events that must be ordered
+ # after this event. However, if an exception happens, we do
+ # NOT want to have the event in the list, so pop it off from
+ # the record if an error happened
+ self.events.append(event)
+ try:
+ return event.run(self)
+ except Exception:
+ self.events.pop()
+ raise
return wrapper
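The wrapper restored above follows a record-then-replay pattern: skip recording on re-entrant calls, append the event before running it, and pop it again if the call raises. Below is a self-contained toy sketch of that shape using a generic Recorder, not ShapeEnv; all names are illustrative only.

import functools

class Recorder:
    def __init__(self):
        self.events = []          # (fn, args, kwargs) tuples, in call order
        self.is_recording = False

def record_event(fn):
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        if self.is_recording:
            # Re-entrant call: run directly instead of recording a nested event.
            return fn(self, *args, **kwargs)
        self.is_recording = True
        try:
            event = (fn, args, kwargs)
            self.events.append(event)   # append first so nested events stay ordered after it
            try:
                return fn(self, *args, **kwargs)
            except Exception:
                self.events.pop()       # drop the event if the call failed
                raise
        finally:
            self.is_recording = False
    return wrapper

class Counter(Recorder):
    def __init__(self):
        super().__init__()
        self.value = 0

    @record_event
    def add(self, n):
        self.value += n

c = Counter()
c.add(2)
c.add(3)
replayed = Counter()
for fn, args, kwargs in c.events:       # replay the recorded events on a fresh object
    fn(replayed, *args, **kwargs)
assert replayed.value == c.value == 5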
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 842843895c..8d61e3205f 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -725,6 +725,10 @@ def guard_scalar(a):
raise AssertionError(f"unrecognized scalar {a}")
+def _constrain_symbol_range(shape_env, s: sympy.Symbol, compiler_min: int, compiler_max: int):
+ shape_env.constrain_symbol_range(s, compiler_min, compiler_max)
+
+
def _advise_is_size(a):
"""
Don't use this directly; use torch._check_is_size instead.
@@ -766,6 +770,7 @@ def _advise_is_size(a):
):
_constrain_range_for_size(a)
+@record_shapeenv_event()
def _constrain_range_for_size(a, min: Optional[int] = None, max: Optional[int] = None):
"""
This function is NOT INTENDED to be used by itself.
@@ -777,10 +782,27 @@ def _constrain_range_for_size(a, min: Optional[int] = None, max: Optional[int] =
assert isinstance(a, SymInt), "can only constrain range for SymInt"
assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
- a.node.shape_env._constrain_range_for_size(a.node.expr, min, max)
+ if min is None:
+ min = 0
+ if max is None:
+ max = sys.maxsize - 1
+
+ if max < min:
+ raise ValueError(
+ "Maximum value to constrain_as_size can't be less than the specified min value, "
+ "received min={min} and max={max}"
+ )
+
+ a.node.shape_env.constrain_symbol_range(
+ a.node.expr,
+ compiler_min=min,
+ compiler_max=max,
+ )
+ a.node.shape_env.size_like.add(a.node.expr)
# inclusive both ways
+@record_shapeenv_event()
def constrain_range(a, *, min: Optional[int], max: Optional[int] = None):
"""
Applies a constraint that the passed in SymInt must lie between min-max
@@ -822,24 +844,54 @@ def constrain_range(a, *, min: Optional[int], max: Optional[int] = None):
raise ValueError(f"Invalid value {a} for range [{min}:{max}]")
return
- a.node.shape_env._constrain_range(a.node.expr, min, max)
+ if isinstance(a.node.expr, sympy.Integer):
+ if not (min <= int(a.node.expr) <= max):
+ raise ValueRangeError(f"Invalid value {int(a.node.expr)} for range [{min}:{max}]")
+ return
+ assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
-def constrain_unify(a: torch.SymInt, b: torch.SymInt) -> None:
+ # TODO: Shouldn't we install a guard if the symbol is backed? Or is the
+ # semantics that this is an "unchecked" assert (but it this actually
+ # something useful? Might be better to restrict only for unbacked
+ # SymInt).
+ _constrain_symbol_range(
+ a.node.shape_env,
+ a.node.expr,
+ compiler_min=min,
+ compiler_max=max,
+ )
+
+
+@record_shapeenv_event()
+def constrain_unify(a, b):
"""
Given two SymInts, constrain them so that they must be equal. NB:
this will not work with SymInts that represent nontrivial expressions
(yet!)
"""
+ # TODO: this does not install a deferred runtime assert yet
+
+ # TODO: Maybe dedupe this with _maybe_guard_rel?
if not isinstance(a, SymInt):
if not isinstance(b, SymInt):
assert a == b
- return
else:
+ assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
shape_env = b.node.shape_env
+ shape_env.replacements[b.node.expr] = sympy.Integer(a)
else:
+ # TODO: Actually, we can support this as long as one of them is a symbol.
+ # NB: We can't actually do "unification" as our operators are not
+ # injective
+ assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
shape_env = a.node.shape_env
-
- shape_env._constrain_unify(a, b)
+ if not isinstance(b, SymInt):
+ shape_env.replacements[a.node.expr] = sympy.Integer(b)
+ else:
+ assert a.node.shape_env is b.node.shape_env
+ assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
+ new_var = shape_env._find(a.node.expr)
+ shape_env.replacements[b.node.expr] = new_var
# Assume that a boolean is true for the purposes of subsequent symbolic
# reasoning. This will keep track of corresponding runtime checks to verify
@@ -2418,78 +2470,6 @@ class ShapeEnv:
if dest is not None:
self._set_replacement(new_s, dest, "rename_unbacked_to_dest")
- @record_shapeenv_event()
- def _constrain_range_for_size(self, a: sympy.Symbol, min: Optional[int] = None, max: Optional[int] = None):
- if min is None:
- min = 0
- if max is None:
- max = sys.maxsize - 1
-
- if max < min:
- raise ValueError(
- "Maximum value to constrain_as_size can't be less than the specified min value, "
- "received min={min} and max={max}"
- )
-
- self.constrain_symbol_range(
- a,
- compiler_min=min,
- compiler_max=max,
- )
- self.size_like.add(a)
-
- @record_shapeenv_event()
- def _constrain_range(self, a: sympy.Expr, min: int, max: int):
- if isinstance(a, sympy.Integer):
- if not (min <= int(a) <= max):
- raise ValueRangeError(f"Invalid value {int(a)} for range [{min}:{max}]")
- return
- assert isinstance(a, sympy.Symbol), "constraining non-Symbols NYI"
-
- # TODO: Shouldn't we install a guard if the symbol is backed? Or is the
- # semantics that this is an "unchecked" assert (but it this actually
- # something useful? Might be better to restrict only for unbacked
- # SymInt).
- self.constrain_symbol_range(
- a,
- compiler_min=min,
- compiler_max=max,
- )
-
- @record_shapeenv_event()
- def _constrain_unify(self, a, b):
- """
- Given two SymInts, constrain them so that they must be equal. NB:
- this will not work with SymInts that represent nontrivial expressions
- (yet!)
- """
- # TODO: this does not install a deferred runtime assert yet
-
- # TODO: Maybe dedupe this with _maybe_guard_rel?
- # Update Feb 2024: this is extra important to do, this doesn't handle
- # unbacked replacements properly nor does it generate deferred runtime
- # asserts
- if not isinstance(a, SymInt):
- if not isinstance(b, SymInt):
- assert a == b
- else:
- assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
- assert b.node.shape_env is self
- self.replacements[b.node.expr] = sympy.Integer(a)
- else:
- # TODO: Actually, we can support this as long as one of them is a symbol.
- # NB: We can't actually do "unification" as our operators are not
- # injective
- assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
- assert a.node.shape_env is self
- if not isinstance(b, SymInt):
- self.replacements[a.node.expr] = sympy.Integer(b)
- else:
- assert a.node.shape_env is b.node.shape_env
- assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
- new_var = self._find(a.node.expr)
- self.replacements[b.node.expr] = new_var
-
def _ignore_fresh_unbacked_symbols_tls(self):
return getattr(TLS, "ignore_fresh_unbacked_symbols", False)
|
2.41.0
|
ac60484c107ffe2b981bb05539af2160f4d7b61
|
Fri, 26 Apr 2024 06:15:17 +0000
|
[PATCH 0702/1000] Revert "Fix global flake8 issues (#124771)"
|
This reverts commit f01275934bfa1ff358b1c01d3754f2807cd04ee2. Reverted https://github.com/pytorch/pytorch/pull/124771 on behalf of https://github.com/jeanschmidt due to Unfortunately, I needed to revert #123735 and this one depends on it. So please check if there are no merge conflicts or breakages and feel free to merge this PR again ([comment](https://github.com/pytorch/pytorch/pull/124428#issuecomment-2078699836))
|
diff --git a/.github/scripts/cherry_pick.py b/.github/scripts/cherry_pick.py
index 4c892de21d..fe53fb99cd 100755
--- a/.github/scripts/cherry_pick.py
+++ b/.github/scripts/cherry_pick.py
@@ -29,7 +29,7 @@ def parse_args() -> Any:
"--onto-branch", type=str, required=True, help="the target release branch"
)
parser.add_argument(
- "--github-actor", type=str, required=True, help="all the world's a stage"
+ "--github-actor", type=str, required=True, help="all the world’s a stage"
)
parser.add_argument(
"--classification",
diff --git a/benchmarks/transformer/better_transformer_vs_mha_functional.py b/benchmarks/transformer/better_transformer_vs_mha_functional.py
index 71be7db456..5b4f794d0f 100644
--- a/benchmarks/transformer/better_transformer_vs_mha_functional.py
+++ b/benchmarks/transformer/better_transformer_vs_mha_functional.py
@@ -152,8 +152,8 @@ def run(
result_entry["sequence_length"] = sequence_length
result_entry["n_heads"] = num_heads
result_entry["embed_dim"] = embed_dim
- result_entry["time_native_mha_slow(\u00B5s)"] = f"{time_native_mha_slow:.3f}"
- result_entry["time_native_mha_fast (\u00B5s)"] = f"{time_native_mha_fast:.3f}"
+ result_entry["time_native_mha_slow(μs)"] = f"{time_native_mha_slow:.3f}"
+ result_entry["time_native_mha_fast (μs)"] = f"{time_native_mha_fast:.3f}"
result_entry["speedup flash_mha v native_mha"] = f"{speedup_fast_internal:.3f}"
result_entry["padding"] = f"{padding:.3f}"
return result_entry
diff --git a/benchmarks/transformer/sdp.py b/benchmarks/transformer/sdp.py
index c79ab8358b..ede28d5df4 100644
--- a/benchmarks/transformer/sdp.py
+++ b/benchmarks/transformer/sdp.py
@@ -81,10 +81,10 @@ class ExperimentResults:
@classmethod
def get_entry_names(cls) -> List[str]:
return [
- "nn_mha_time (\u00B5s)",
- "compiled_nn_mha_time (\u00B5s)",
- "composite_mha_time (\u00B5s)",
- "compiled_composite_mha_time (\u00B5s)",
+ "nn_mha_time (μs)",
+ "compiled_nn_mha_time (μs)",
+ "composite_mha_time (μs)",
+ "compiled_composite_mha_time (μs)",
]
diff --git a/functorch/einops/_parsing.py b/functorch/einops/_parsing.py
index 25f86ec6fe..63adcb6e5a 100644
--- a/functorch/einops/_parsing.py
+++ b/functorch/einops/_parsing.py
@@ -28,7 +28,7 @@ import keyword
import warnings
from typing import Collection, List, Mapping, Optional, Set, Tuple, Union
-_ellipsis: str = "\u2026" # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated
+_ellipsis: str = "…" # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated
class AnonymousAxis:
diff --git a/test/distributions/test_distributions.py b/test/distributions/test_distributions.py
index cd9a0d39bb..3df26dec7a 100644
--- a/test/distributions/test_distributions.py
+++ b/test/distributions/test_distributions.py
@@ -3752,11 +3752,11 @@ class TestDistributions(DistributionsTestCase):
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
def test_dirichlet_log_prob_zero(self):
- # Specifically test the special case where x=0 and alpha=1. The PDF is
- # proportional to x**(alpha-1), which in this case works out to 0**0=1.
+ # Specifically test the special case where x=0 and α=1. The PDF is
+ # proportional to x**(α-1), which in this case works out to 0**0=1.
# The log PDF of this term should therefore be 0. However, it's easy
# to accidentally introduce NaNs by calculating log(x) without regard
- # for the value of alpha-1.
+ # for the value of α-1.
alpha = torch.tensor([1, 2])
dist = Dirichlet(alpha)
x = torch.tensor([0, 1])
diff --git a/test/functorch/test_parsing.py b/test/functorch/test_parsing.py
index 2b4d4e5e60..ec0f16c724 100644
--- a/test/functorch/test_parsing.py
+++ b/test/functorch/test_parsing.py
@@ -107,7 +107,7 @@ class TestParsedExpression(TestCase):
ParsedExpression("(a) ((b c) (d ...))")
# invalid identifiers
- ParsedExpression("camelCase under_scored cApiTaLs \u00DF ...")
+ ParsedExpression("camelCase under_scored cApiTaLs ß ...")
with self.assertRaises(ValueError):
ParsedExpression("1a")
with self.assertRaises(ValueError):
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index 9fcd0fd78a..c2cf3b295e 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -308,8 +308,8 @@ class TestTemplatedSDPA(InductorTestCase):
# this means that the base for the LSE computed by ref is e while for the compiled
# version it is 2. To compare we use the change of base formula
# log_2(x_compiled) = log_e(x_ref) * log_2(e) where
- # x_ref = sum(_i e^(scores[i]))
- # x_compiled = sum(_i 2^(log2(e) * scores[i]))
+ # x_ref = ∑_i e^(scores[i])
+ # x_compiled = ∑_i 2^(log2(e) * scores[i])
self.assertTrue(ref_lse.dtype == torch.float32)
self.assertTrue(compiled_lse.dtype == torch.float32)
diff --git a/test/package/test_directory_reader.py b/test/package/test_directory_reader.py
index e5854b2954..f98289345d 100644
--- a/test/package/test_directory_reader.py
+++ b/test/package/test_directory_reader.py
@@ -111,16 +111,16 @@ class DirectoryReaderTest(PackageTestCase):
with PackageExporter(filename) as pe:
# Layout looks like:
# package
- # |-- one/
- # | |-- a.txt
- # | |-- b.txt
- # | |-- c.txt
- # | +-- three/
- # | |-- d.txt
- # | +-- e.txt
- # +-- two/
- # |-- f.txt
- # +-- g.txt
+ # ├── one/
+ # │ ├── a.txt
+ # │ ├── b.txt
+ # │ ├── c.txt
+ # │ └── three/
+ # │ ├── d.txt
+ # │ └── e.txt
+ # └── two/
+ # ├── f.txt
+ # └── g.txt
pe.save_text("one", "a.txt", "hello, a!")
pe.save_text("one", "b.txt", "hello, b!")
pe.save_text("one", "c.txt", "hello, c!")
diff --git a/test/package/test_misc.py b/test/package/test_misc.py
index d97eaec3ac..59b25ca2e6 100644
--- a/test/package/test_misc.py
+++ b/test/package/test_misc.py
@@ -38,46 +38,46 @@ class TestMisc(PackageTestCase):
export_plain = dedent(
"""\
- \u251c\u2500\u2500 .data
- \u2502 \u251c\u2500\u2500 extern_modules
- \u2502 \u251c\u2500\u2500 python_version
- \u2502 \u251c\u2500\u2500 serialization_id
- \u2502 \u2514\u2500\u2500 version
- \u251c\u2500\u2500 main
- \u2502 \u2514\u2500\u2500 main
- \u251c\u2500\u2500 obj
- \u2502 \u2514\u2500\u2500 obj.pkl
- \u251c\u2500\u2500 package_a
- \u2502 \u251c\u2500\u2500 __init__.py
- \u2502 \u2514\u2500\u2500 subpackage.py
- \u251c\u2500\u2500 byteorder
- \u2514\u2500\u2500 module_a.py
+ ├── .data
+ │ ├── extern_modules
+ │ ├── python_version
+ │ ├── serialization_id
+ │ └── version
+ ├── main
+ │ └── main
+ ├── obj
+ │ └── obj.pkl
+ ├── package_a
+ │ ├── __init__.py
+ │ └── subpackage.py
+ ├── byteorder
+ └── module_a.py
"""
)
export_include = dedent(
"""\
- \u251c\u2500\u2500 obj
- \u2502 \u2514\u2500\u2500 obj.pkl
- \u2514\u2500\u2500 package_a
- \u2514\u2500\u2500 subpackage.py
+ ├── obj
+ │ └── obj.pkl
+ └── package_a
+ └── subpackage.py
"""
)
import_exclude = dedent(
"""\
- \u251c\u2500\u2500 .data
- \u2502 \u251c\u2500\u2500 extern_modules
- \u2502 \u251c\u2500\u2500 python_version
- \u2502 \u251c\u2500\u2500 serialization_id
- \u2502 \u2514\u2500\u2500 version
- \u251c\u2500\u2500 main
- \u2502 \u2514\u2500\u2500 main
- \u251c\u2500\u2500 obj
- \u2502 \u2514\u2500\u2500 obj.pkl
- \u251c\u2500\u2500 package_a
- \u2502 \u251c\u2500\u2500 __init__.py
- \u2502 \u2514\u2500\u2500 subpackage.py
- \u251c\u2500\u2500 byteorder
- \u2514\u2500\u2500 module_a.py
+ ├── .data
+ │ ├── extern_modules
+ │ ├── python_version
+ │ ├── serialization_id
+ │ └── version
+ ├── main
+ │ └── main
+ ├── obj
+ │ └── obj.pkl
+ ├── package_a
+ │ ├── __init__.py
+ │ └── subpackage.py
+ ├── byteorder
+ └── module_a.py
"""
)
diff --git a/test/package/test_resources.py b/test/package/test_resources.py
index 2f30c0aeae..208917be77 100644
--- a/test/package/test_resources.py
+++ b/test/package/test_resources.py
@@ -25,16 +25,16 @@ class TestResources(PackageTestCase):
with PackageExporter(buffer) as pe:
# Layout looks like:
# package
- # |-- one/
- # | |-- a.txt
- # | |-- b.txt
- # | |-- c.txt
- # | +-- three/
- # | |-- d.txt
- # | +-- e.txt
- # +-- two/
- # |-- f.txt
- # +-- g.txt
+ # ├── one/
+ # │ ├── a.txt
+ # │ ├── b.txt
+ # │ ├── c.txt
+ # │ └── three/
+ # │ ├── d.txt
+ # │ └── e.txt
+ # └── two/
+ # ├── f.txt
+ # └── g.txt
pe.save_text("one", "a.txt", "hello, a!")
pe.save_text("one", "b.txt", "hello, b!")
pe.save_text("one", "c.txt", "hello, c!")
diff --git a/test/test_jit.py b/test/test_jit.py
index bb6f4e2558..6f79267a63 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -15679,7 +15679,7 @@ dedent """
def test_unicode_comments(self):
@torch.jit.script
def test(self, a):
- # shrug
+ # 🤷🤷🤷🤷
return torch.nn.functional.relu(a)
def test_get_set_state_with_tensors(self):
diff --git a/test/test_jit_fuser.py b/test/test_jit_fuser.py
index 9d59dcce08..6e342ea4f5 100644
--- a/test/test_jit_fuser.py
+++ b/test/test_jit_fuser.py
@@ -70,7 +70,7 @@ class TestFuser(JitTestCase):
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser CPU support for Sandcastle")
@enable_cpu_fuser
def test_abs_cpu_unicode_temp_dir(self):
- with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname:
+ with TemporaryDirectoryName(suffix='中文') as dname:
shell_env = os.environ.copy()
shell_env['TMP'] = dname
cmd = [sys.executable, os.path.basename(__file__), type(self).__name__ + '.test_abs_cpu']
diff --git a/test/test_linalg.py b/test/test_linalg.py
index 5ddeac9aa3..e22dabcf56 100644
--- a/test/test_linalg.py
+++ b/test/test_linalg.py
@@ -1950,7 +1950,7 @@ class TestLinalg(TestCase):
# if out tensor with floating dtype is passed for complex output an error is thrown
if not dtype.is_complex:
- # The characteristic equation is p(lambda) = lambda^2 - 2lambda + 5 = 0, with roots lambda = 1[+-]2i
+ # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
out0 = torch.empty(0, device=device, dtype=dtype)
out1 = torch.empty(0, device=device, dtype=dtype)
@@ -2117,7 +2117,7 @@ class TestLinalg(TestCase):
# if out tensor with floating dtype is passed for complex output an error is thrown
if not dtype.is_complex:
- # The characteristic equation is p(lambda) = lambda^2 - 2lambda + 5 = 0, with roots lambda = 1[+-]2i
+ # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
out = torch.empty(0, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
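For reference, the characteristic polynomial quoted in the restored comment follows directly from the 2x2 matrix used in the test, A = [[3, -2], [4, -1]]:

\[
\det(A - \lambda I) = (3 - \lambda)(-1 - \lambda) - (-2)(4) = \lambda^2 - 2\lambda + 5,
\qquad
\lambda = \frac{2 \pm \sqrt{4 - 20}}{2} = 1 \pm 2i .
\]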
diff --git a/test/test_public_bindings.py b/test/test_public_bindings.py
index 59e535e144..16ed16d11a 100644
--- a/test/test_public_bindings.py
+++ b/test/test_public_bindings.py
@@ -428,7 +428,7 @@ class TestPublicBindings(TestCase):
def test_correct_module_names(self):
'''
An API is considered public, if its `__module__` starts with `torch.`
- and there is no name in `__module__` or the object itself that starts with "_".
+ and there is no name in `__module__` or the object itself that starts with “_”.
Each public package should either:
- (preferred) Define `__all__` and all callables and classes in there must have their
`__module__` start with the current submodule's path. Things not in `__all__` should
diff --git a/test/test_serialization.py b/test/test_serialization.py
index 0779b744de..164bfdddb1 100644
--- a/test/test_serialization.py
+++ b/test/test_serialization.py
@@ -924,7 +924,7 @@ class TestSerialization(TestCase, SerializationMixin):
test(fname)
if IS_FILESYSTEM_UTF8_ENCODING:
- with TemporaryDirectoryName(suffix='\u975eASCII\u30d1\u30b9') as dname:
+ with TemporaryDirectoryName(suffix='非ASCIIパス') as dname:
with TemporaryFileName(dir=dname) as fname:
test(fname)
diff --git a/test/test_torch.py b/test/test_torch.py
index 641dac417f..25d1cc14ed 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -8046,7 +8046,7 @@ class TestTorch(TestCase):
assert_with_filename(fname)
if IS_FILESYSTEM_UTF8_ENCODING:
- with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname, TemporaryFileName(dir=dname) as fname:
+ with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname:
assert_with_filename(fname)
def test_torch_from_file(self):
@@ -8077,7 +8077,7 @@ class TestTorch(TestCase):
assert_with_filename(fname)
if IS_FILESYSTEM_UTF8_ENCODING:
- with TemporaryDirectoryName(suffix='\u4e2d\u6587') as dname, TemporaryFileName(dir=dname) as fname:
+ with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname:
assert_with_filename(fname)
def test_print(self):
diff --git a/torch/_decomp/decompositions.py b/torch/_decomp/decompositions.py
index 124ed8fb72..0780df14a8 100644
--- a/torch/_decomp/decompositions.py
+++ b/torch/_decomp/decompositions.py
@@ -744,7 +744,7 @@ def slice_forward(
raise RuntimeError("slice step must be positive")
start_val = start if start is not None else 0
- end_val = end if end is not None else sys.maxsize # 2^63 - 1
+ end_val = end if end is not None else sys.maxsize # 2^63 – 1
if start_val < 0:
start_val += sizes[dim]
diff --git a/torch/_dynamo/polyfill.py b/torch/_dynamo/polyfill.py
index 18aaa067a3..d6bbb59692 100644
--- a/torch/_dynamo/polyfill.py
+++ b/torch/_dynamo/polyfill.py
@@ -57,7 +57,7 @@ def list_cmp(op: Callable[[Any, Any], bool], left: Sequence[Any], right: Sequenc
def dropwhile(predicate, iterable):
- # dropwhile(lambda x: x<5, [1,4,6,4,1]) -> 6 4 1
+ # dropwhile(lambda x: x<5, [1,4,6,4,1]) → 6 4 1
iterable = iter(iterable)
for x in iterable:
if not predicate(x):
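A quick check of the behavior documented in the polyfill comment above, using the stdlib itertools.dropwhile as the reference:

from itertools import dropwhile
assert list(dropwhile(lambda x: x < 5, [1, 4, 6, 4, 1])) == [6, 4, 1]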
diff --git a/torch/_export/error.py b/torch/_export/error.py
index 03b7f52fb9..9fc55092fd 100644
--- a/torch/_export/error.py
+++ b/torch/_export/error.py
@@ -5,13 +5,13 @@ class ExportErrorType(Enum):
# User providing invalid inputs to either tracer, or other public facing APIs
INVALID_INPUT_TYPE = 1
- # User returning values from their models that we don't support.
+ # User returning values from their models that we don’t support.
INVALID_OUTPUT_TYPE = 2
# Generated IR does not conform to Export IR Specification.
VIOLATION_OF_SPEC = 3
- # User's code contains types and functionalities we don't support.
+ # User’s code contains types and functionalities we don’t support.
NOT_SUPPORTED = 4
# User's code didn't provide necessary details for us to successfully trace and export.
diff --git a/torch/_functorch/autograd_function.py b/torch/_functorch/autograd_function.py
index 98ffe6dd16..5017a25022 100644
--- a/torch/_functorch/autograd_function.py
+++ b/torch/_functorch/autograd_function.py
@@ -498,7 +498,7 @@ def get_tangents_in_dims(input_dims, tangents):
# in_dims = 0
# vmap(Sum.apply, in_dims)(x)
#
-# Let's assume for a moment that we didn't vmap setup_context in VmappedSum:
+# Let’s assume for a moment that we didn’t vmap setup_context in VmappedSum:
#
# class VmappedSum(torch.autograd.Function):
# @staticmethod
@@ -519,7 +519,7 @@ def get_tangents_in_dims(input_dims, tangents):
# return gx
#
# We end up saving [B, 4] as x_shape. In the backward, gy has shape [B],
-# and we're doing:
+# and we’re doing:
#
# def backward_no_context(gy):
# return gy.expand([B, 4])
diff --git a/torch/_inductor/codegen/memory_planning.py b/torch/_inductor/codegen/memory_planning.py
index 2aade2a297..8b58fe049e 100644
--- a/torch/_inductor/codegen/memory_planning.py
+++ b/torch/_inductor/codegen/memory_planning.py
@@ -62,8 +62,8 @@ class LiveRange:
Invariant: begin <= end
"""
- begin: float # int | +/-inf
- end: float # int | +/-inf
+ begin: float # int | ±inf
+ end: float # int | ±inf
def contains(self, other: LiveRange):
"""Is other entirely within self"""
diff --git a/torch/_meta_registrations.py b/torch/_meta_registrations.py
index 85fd7c3c5f..6245f908db 100644
--- a/torch/_meta_registrations.py
+++ b/torch/_meta_registrations.py
@@ -5373,7 +5373,7 @@ def meta__scaled_dot_product_flash_attention_for_cpu_backward(
scale: Optional[float] = None,
):
# cpus's grad layout is different from cuda's,
- # i.e. (batch_size, seq_len,num_heads, head_dim)
+ # i.e. (batch_size, seq_len,num_heads, head_dim)
batch_size = query.size(0)
num_heads = query.size(1)
head_dim = query.size(3)
diff --git a/torch/_numpy/_funcs_impl.py b/torch/_numpy/_funcs_impl.py
index 8f3a70589a..7c09288f45 100644
--- a/torch/_numpy/_funcs_impl.py
+++ b/torch/_numpy/_funcs_impl.py
@@ -2008,7 +2008,7 @@ def min_scalar_type(a: ArrayLike, /):
from ._dtypes import DType
if a.numel() > 1:
- # numpy docs: "For non-scalar array a, returns the vector's dtype unmodified."
+ # numpy docs: "For non-scalar array a, returns the vector’s dtype unmodified."
return DType(a.dtype)
if a.dtype == torch.bool:
diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index a0b00e2c9e..1f277ec932 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -485,7 +485,7 @@ def _make_alias(fn, name):
"""
This function defines an alias of another function and sets its __name__ argument.
It also sets its __module__ argument to the module of the caller.
- Note that when naively doing `alias = fn`, we have that `alias.__name__ == "fn"`, and
+ Note that when naïvely doing `alias = fn`, we have that `alias.__name__ == "fn"`, and
`alias.__module__ == fn.__module__`.
"""
diff --git a/torch/_refs/nn/functional/__init__.py b/torch/_refs/nn/functional/__init__.py
index dd06febbcd..e1548518cb 100644
--- a/torch/_refs/nn/functional/__init__.py
+++ b/torch/_refs/nn/functional/__init__.py
@@ -600,7 +600,7 @@ def margin_ranking_loss(
margin: float = 0.0,
reduction: str = "mean",
) -> TensorLikeType:
- # loss_without_reduction = max(0, -target * (input1 - input2) + margin)
+ # loss_without_reduction = max(0, −target * (input1 − input2) + margin)
if input1.ndim != input2.ndim or input1.ndim != target.ndim:
raise RuntimeError(
"margin_ranking_loss : All input tensors should have same dimension but got sizes: "
diff --git a/torch/_refs/special/__init__.py b/torch/_refs/special/__init__.py
index 14ec33cf20..048de83506 100644
--- a/torch/_refs/special/__init__.py
+++ b/torch/_refs/special/__init__.py
@@ -116,7 +116,7 @@ def i1e(a: TensorLikeType) -> TensorLikeType:
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def log_ndtr(a: TensorLikeType) -> TensorLikeType:
- # Note: M_SQRT1_2 is the value of 1 / sqrt(2)
+ # Note: M_SQRT1_2 is the value of 1 / √2
M_SQRT1_2 = 0.707106781186547524400844362104849039
t = a * M_SQRT1_2
return torch.where(
@@ -185,7 +185,7 @@ def multigammaln(a: TensorLikeType, p: int) -> TensorLikeType:
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def ndtr(a: TensorLikeType) -> TensorLikeType:
- # Note: M_SQRT1_2 is the value of 1 / sqrt(2)
+ # Note: M_SQRT1_2 is the value of 1 / √2
M_SQRT1_2 = 0.707106781186547524400844362104849039
a_sqrt_2 = a * M_SQRT1_2
return (1 + torch.erf(a_sqrt_2)) * 0.5
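A pure-stdlib sanity check of the identity used in ndtr above, i.e. the standard normal CDF Phi(x) = (1 + erf(x / sqrt(2))) / 2, with no torch dependency:

import math

M_SQRT1_2 = 0.707106781186547524400844362104849039   # 1 / sqrt(2), as in the ref above

def ndtr(x):
    return 0.5 * (1.0 + math.erf(x * M_SQRT1_2))

assert abs(ndtr(0.0) - 0.5) < 1e-12
assert abs(ndtr(1.96) - 0.9750021) < 1e-6   # the familiar ~97.5% point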
diff --git a/torch/_torch_docs.py b/torch/_torch_docs.py
index f429fee683..d8b864118a 100644
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ -2305,8 +2305,8 @@ Keyword Args:
times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
- These relative weights are typically large for observations considered "important" and smaller for
- observations considered less "important". Its numel must equal the number of columns of :attr:`input`.
+ These relative weights are typically large for observations considered “important” and smaller for
+ observations considered less “important”. Its numel must equal the number of columns of :attr:`input`.
Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
Returns:
@@ -4773,7 +4773,7 @@ This is detailed in the "Keyword Arguments" section below.
The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
-is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
+is estimated using `Taylor’s theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
diff --git a/torch/ao/quantization/backend_config/backend_config.py b/torch/ao/quantization/backend_config/backend_config.py
index 2e5b90e23e..e5a4d2f3af 100644
--- a/torch/ao/quantization/backend_config/backend_config.py
+++ b/torch/ao/quantization/backend_config/backend_config.py
@@ -79,12 +79,12 @@ class DTypeWithConstraints:
* `quant_min_lower_bound` and `quant_max_upper_bound`: Lower and upper
bounds for the minimum and maximum quantized values respectively. If
- the QConfig's `quant_min` and `quant_max` fall outside this range,
+ the QConfig’s `quant_min` and `quant_max` fall outside this range,
then the QConfig will be ignored.
* `scale_min_lower_bound` and `scale_max_upper_bound`: Lower and upper
bounds for the minimum and maximum scale values respectively. If the
- QConfig's minimum scale value (currently exposed as `eps`) falls below
+ QConfig’s minimum scale value (currently exposed as `eps`) falls below
the lower bound, then the QConfig will be ignored. Note that the upper
bound is currently not enforced.
@@ -130,7 +130,7 @@ class DTypeConfig:
dtypes here are the same as the semantics of the dtypes specified in
the observers.
- These dtypes are matched against the ones specified in the user's
+ These dtypes are matched against the ones specified in the user’s
QConfig. If there is a match, and the QConfig satisfies the constraints
specified in the DTypeConfig (if any), then we will quantize the given
pattern using this DTypeConfig. Otherwise, the QConfig is ignored and
diff --git a/torch/distributed/_shard/sharded_tensor/__init__.py b/torch/distributed/_shard/sharded_tensor/__init__.py
index 602f751637..152c287ee7 100644
--- a/torch/distributed/_shard/sharded_tensor/__init__.py
+++ b/torch/distributed/_shard/sharded_tensor/__init__.py
@@ -187,7 +187,7 @@ def full(sharding_spec: ShardingSpec,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
- Creates a :class:`ShardedTensor` filled with fill_value. The tensor's dtype
+ Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype
is inferred from fill_value. If dtype is specified, it will override the
inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion.
Args:
@@ -195,7 +195,7 @@ def full(sharding_spec: ShardingSpec,
describing how to shard the Tensor.
size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
output tensor.
- fill_value (Scalar) - the value to fill the output tensor with.
+ fill_value (Scalar) – the value to fill the output tensor with.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py
index c869b71d69..c421fa327d 100644
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py
@@ -117,7 +117,7 @@ def _handle_col_wise_sharding_base(
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed "pad".
+ i.e. it remains as a fixed “pad”.
Note that the embedding vector at padding_idx is
excluded from the reduction.
@@ -312,7 +312,7 @@ def _handle_row_wise_mask(gather_inp, padding_idx, weight, world_size, rank):
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed "pad".
+ i.e. it remains as a fixed “pad”.
Note that the embedding vector at padding_idx is
excluded from the reduction.
weight: weight tensor of Embedding look-up table.
diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py
index c9cfcba1fe..e1c1cb6380 100644
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py
@@ -202,7 +202,7 @@ def _handle_col_wise_sharding(
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed "pad".
+ i.e. it remains as a fixed “pad”.
pg: process group.
Returns: final result of lookup.
@@ -250,7 +250,7 @@ def _handle_row_wise_sharding(
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed "pad".
+ i.e. it remains as a fixed “pad”.
rank: # of cuda process.
pg: process group.
diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py
index 2f954398f9..2d6ea1d705 100644
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py
@@ -268,7 +268,7 @@ def _handle_col_wise_sharding(
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed "pad".
+ i.e. it remains as a fixed “pad”.
Note that the embedding vector at padding_idx is
excluded from the reduction.
pg: process group.
@@ -342,7 +342,7 @@ def _handle_row_wise_sharding(
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
- i.e. it remains as a fixed "pad".
+ i.e. it remains as a fixed “pad”.
Note that the embedding vector at padding_idx is
excluded from the reduction.
rank: # of cuda process.
diff --git a/torch/distributed/elastic/rendezvous/etcd_rendezvous.py b/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
index 8997c592f5..4ece7819c9 100644
--- a/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
+++ b/torch/distributed/elastic/rendezvous/etcd_rendezvous.py
@@ -124,7 +124,7 @@ class EtcdRendezvousHandler(RendezvousHandler):
| | (default 600s) |
+--------------------------------------------+--------------------------+
| last_call_timeout | additional wait amount |
- | | ("last call") after min |
+ | | (“last call”) after min |
| | number of workers has |
| | been reached (defaults |
| | to 30s) |
diff --git a/torch/distributed/pipeline/sync/_balance/blockpartition.py b/torch/distributed/pipeline/sync/_balance/blockpartition.py
index ccdf5fe4df..7afe782f6a 100644
--- a/torch/distributed/pipeline/sync/_balance/blockpartition.py
+++ b/torch/distributed/pipeline/sync/_balance/blockpartition.py
@@ -4,7 +4,7 @@
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
-"""Implements "Block Partitions of Sequences" by Imre B\u00e1r\u00e1ny et al.
+"""Implements "Block Partitions of Sequences" by Imre Bárány et al.
Paper: https://arxiv.org/pdf/1308.2452.pdf
@@ -18,7 +18,7 @@ def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
"""Splits a sequence into several partitions to minimize variance for each
partition.
- The result might not be optimal. However, it can be done only in O(kn\u00b3),
+ The result might not be optimal. However, it can be done only in O(kn³),
where k is the number of partitions and n is the length of the sequence.
"""
@@ -51,14 +51,14 @@ def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
while True:
"""
- (1) Fix p element-of [k] with M(P) = bp. So Bp is a maximal block of P.
+ (1) Fix p ∈ [k] with M(P) = bp. So Bp is a maximal block of P.
"""
# max_size: M(P)
max_size, p = max(leaderboard())
while True:
"""
- (2) If M(P) <= m(P) + 1, then stop.
+ (2) If M(P) ≤ m(P) + 1, then stop.
"""
# min_size: m(P)
min_size, q = min(leaderboard())
@@ -67,7 +67,7 @@ def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
return [sequence[i:j] for i, j in zip([0] + splits[:-1], splits)]
"""
- (3) If M(P) > m(P) + 1, then let m(P) = bq for the q element-of [k] which is
+ (3) If M(P) > m(P) + 1, then let m(P) = bq for the q ∈ [k] which is
closest to p (ties broken arbitrarily). Thus Bq is a minimal block
of P. Let Bh be the block next to Bq between Bp and Bq. (Note that
Bh is a non-empty block: if it were, then m(P) = 0 and we should
@@ -75,21 +75,21 @@ def solve(sequence: List[int], partitions: int = 1) -> List[List[int]]:
"""
if p < q:
"""
- So either p < q and then h = q-1 and we define P * by moving
- the last element from Bh = Bq-1 to Bq,
+ So either p < q and then h = q−1 and we define P ∗ by moving
+ the last element from Bh = Bq−1 to Bq,
"""
h = q - 1
splits[h] -= 1
else:
"""
- or q < p, and then h = q + 1 and P * is obtained by moving the
+ or q < p, and then h = q + 1 and P ∗ is obtained by moving the
first element of Bh = Bq+1 to Bq.
"""
h = q + 1
splits[q] += 1
"""
- Set P = P * . If p = h, then go to (1), else go to (2).
+ Set P = P ∗ . If p = h, then go to (1), else go to (2).
"""
if p == h:
break
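For intuition about what the block-partition routine above is balancing, here is an illustrative brute-force reference, not the paper's O(kn^3) algorithm: enumerate every way to cut the sequence into k contiguous non-empty blocks and keep the split whose block sums are most balanced (smallest max minus min).

from itertools import combinations

def balanced_blocks(seq, k):
    best, best_spread = None, float("inf")
    for cuts in combinations(range(1, len(seq)), k - 1):
        bounds = (0, *cuts, len(seq))
        blocks = [seq[i:j] for i, j in zip(bounds, bounds[1:])]
        sums = [sum(b) for b in blocks]
        spread = max(sums) - min(sums)
        if spread < best_spread:
            best, best_spread = blocks, spread
    return best

print(balanced_blocks([1, 4, 6, 4, 1], 2))   # [[1, 4], [6, 4, 1]] (block sums 5 and 11)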
diff --git a/torch/distributed/pipeline/sync/pipeline.py b/torch/distributed/pipeline/sync/pipeline.py
index 7cd5e58311..8eccc68183 100644
--- a/torch/distributed/pipeline/sync/pipeline.py
+++ b/torch/distributed/pipeline/sync/pipeline.py
@@ -157,30 +157,30 @@ class Pipeline:
exc_info: Optional[ExcInfo] = None
# With checkpointing, the autograd graph looks like this diagram:
- # +-----+------+
- # | Copy |
- # +-----+------+ (fence)
- # - - - + - - - - - - - - -
- # | (compute)
- # +-----+------+
- # | Wait | [1] Synchronize the current stream with the copy stream.
- # +-----+------+
- # +-----+------+
- # | Checkpoint | [2] Compute a partition within checkpointing.
- # +-----+------+
- # +-----+------+
- # | Wait | [3] Synchronize the copy stream with the current stream.
- # +-----+------+
- # + - - - +
- # | +-----+-----+
- # | | Recompute | [4] Schedule the recomputation at backpropagation.
- # | +-----+-----+
- # + - - - +
- # |
- # - - - + - - - - - - - - -
- # +-----+------+ (fence)
- # | Copy |
- # +-----+------+
+ # ┌─────┸──────┐
+ # │ Copy │
+ # └─────┰──────┘ (fence)
+ # ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─
+ # ┃ (compute)
+ # ┌─────┸──────┐
+ # │ Wait │ [1] Synchronize the current stream with the copy stream.
+ # └─────┰──────┘
+ # ┌─────┸──────┐
+ # │ Checkpoint │ [2] Compute a partition within checkpointing.
+ # └─────┰──────┘
+ # ┌─────┸──────┐
+ # │ Wait │ [3] Synchronize the copy stream with the current stream.
+ # └─────┰──────┘
+ # ┠ ─ ─ ─ ┐
+ # ┃ ┌─────┴─────┐
+ # ┃ │ Recompute │ [4] Schedule the recomputation at backpropagation.
+ # ┃ └─────┬─────┘
+ # ┠ ─ ─ ─ ┘
+ # ┃
+ # ─ ─ ─ ╂ ─ ─ ─ ─ ─ ─ ─ ─ ─
+ # ┌─────┸──────┐ (fence)
+ # │ Copy │
+ # └─────┰──────┘
for i, j in schedule:
batch = batches[i]
partition = partitions[j]
diff --git a/torch/distributed/pipeline/sync/skip/portal.py b/torch/distributed/pipeline/sync/skip/portal.py
index 335793f4cc..f3484a1b69 100644
--- a/torch/distributed/pipeline/sync/skip/portal.py
+++ b/torch/distributed/pipeline/sync/skip/portal.py
@@ -9,7 +9,7 @@ autograd engine. The shared context of three functions (:class:`PortalBlue`,
:class:`PortalOrange`, and :class:`PortalCopy`) out of the computation graph is
one of the most important feature of :mod:`torchpipe.skip`.
-The metaphor is inspired by Portal(tm) from Valve.
+The metaphor is inspired by Portal™ from Valve.
"""
from typing import List, Optional, Tuple
diff --git a/torch/distributed/pipeline/sync/skip/skippable.py b/torch/distributed/pipeline/sync/skip/skippable.py
index aa20792c84..0c01a198f8 100644
--- a/torch/distributed/pipeline/sync/skip/skippable.py
+++ b/torch/distributed/pipeline/sync/skip/skippable.py
@@ -362,16 +362,16 @@ def verify_skippables(module: nn.Sequential) -> None:
# Layer3 pops "1to3".
nn.Sequential(Layer1(), Layer2())
- # +---- ?
+ # └──── ?
nn.Sequential(Layer2(), Layer3())
- # ? ----+
+ # ? ────┘
nn.Sequential(Layer1(), Layer2(), Layer3(), Layer3())
- # +-------------------+ ^^^^^^
+ # └───────────────────┘ ^^^^^^
nn.Sequential(Layer1(), Layer1(), Layer2(), Layer3())
- # ^^^^^^ +-------------------+
+ # ^^^^^^ └───────────────────┘
To use the same name for multiple skip tensors, they must be isolated by
different namespaces. See :meth:`isolate()
diff --git a/torch/fx/experimental/migrate_gradual_types/constraint.py b/torch/fx/experimental/migrate_gradual_types/constraint.py
index 3c1f724d26..0f0d23d018 100644
--- a/torch/fx/experimental/migrate_gradual_types/constraint.py
+++ b/torch/fx/experimental/migrate_gradual_types/constraint.py
@@ -152,7 +152,7 @@ class TGreatestUpperBound(Constraint):
self.rhs2 = rhs2
def __repr__(self):
- return f'{self.res} = {self.rhs1}\u2294*{self.rhs2}'
+ return f'{self.res} = {self.rhs1}⊔*{self.rhs2}'
def __eq__(self, other):
if isinstance(other, TGreatestUpperBound):
@@ -180,7 +180,7 @@ class DGreatestUpperBound(Constraint):
self.rhs2 = rhs2
def __repr__(self):
- return f'{self.res} = {self.rhs1}\u2294{self.rhs2}'
+ return f'{self.res} = {self.rhs1}⊔{self.rhs2}'
def __eq__(self, other):
if isinstance(other, DGreatestUpperBound):
diff --git a/torch/fx/experimental/migrate_gradual_types/operation.py b/torch/fx/experimental/migrate_gradual_types/operation.py
index 432cd570be..ec2cb91bbc 100644
--- a/torch/fx/experimental/migrate_gradual_types/operation.py
+++ b/torch/fx/experimental/migrate_gradual_types/operation.py
@@ -5,10 +5,10 @@ op_div = '/'
op_eq = '='
op_neq = '!='
op_imp = '=>'
-op_matching = '\u22b3' # (contains)
+op_matching = '⊳'
op_consistency = '~'
-op_precision = '\u2291' # (square image of or equal to)
-op_leq = '\u2264' # less-than or equal to
+op_precision = '⊑'
+op_leq = '≤'
op_lt = '<'
op_gt = '>'
op_mod = '%'
diff --git a/torch/linalg/__init__.py b/torch/linalg/__init__.py
index 29df838bab..e47bb55ef7 100644
--- a/torch/linalg/__init__.py
+++ b/torch/linalg/__init__.py
@@ -1450,7 +1450,7 @@ Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
dtype (:class:`torch.dtype`, optional): type used to perform the accumulation and the return.
If specified, :attr:`x` is cast to :attr:`dtype` before performing the operation,
- and the returned tensor's type will be :attr:`dtype` if real and of its real counterpart if complex.
+ and the returned tensor’s type will be :attr:`dtype` if real and of its real counterpart if complex.
:attr:`dtype` may be complex if :attr:`x` is complex, otherwise it must be real.
:attr:`x` should be convertible without narrowing to :attr:`dtype`. Default: None
diff --git a/torch/masked/_docs.py b/torch/masked/_docs.py
index fa130bbefb..bf96b49e3e 100644
--- a/torch/masked/_docs.py
+++ b/torch/masked/_docs.py
@@ -1012,7 +1012,7 @@ Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
- unbiased (bool): when True, use Bessel's correction, otherwise, compute
+ unbiased (bool): when True, use Bessel’s correction, otherwise, compute
the uncorrected sample variance.
Keyword args:
@@ -1148,7 +1148,7 @@ Args:
input (Tensor): the input tensor
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
Default: None that is equivalent to ``tuple(range(input.ndim))``.
- unbiased (bool): when True, use Bessel's correction, otherwise, compute
+ unbiased (bool): when True, use Bessel’s correction, otherwise, compute
the uncorrected sample variance.
Keyword args:
diff --git a/torch/masked/_ops.py b/torch/masked/_ops.py
index b7872a6d4c..2a2ff3fd6f 100644
--- a/torch/masked/_ops.py
+++ b/torch/masked/_ops.py
@@ -210,7 +210,7 @@ ord (int, float, optional): the order of vector norm. Default: 2.
ord (int, float): the order of vector norm. Default: 2.
See :func:`torch.linalg.vector_norm` for a list of supported norms.""",
unbiased="""\
-unbiased (bool): when True, use Bessel's correction, otherwise, compute
+unbiased (bool): when True, use Bessel’s correction, otherwise, compute
the uncorrected sample variance.""",
eps="""\
eps (float, optional): small value to avoid division by zero. Default: {default}.""",
diff --git a/torch/nested/__init__.py b/torch/nested/__init__.py
index ea1cce5950..e990510ed0 100644
--- a/torch/nested/__init__.py
+++ b/torch/nested/__init__.py
@@ -186,7 +186,7 @@ Example::
def nested_tensor(tensor_list, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor:
r"""
-Constructs a nested tensor with no autograd history (also known as a "leaf tensor", see
+Constructs a nested tensor with no autograd history (also known as a “leaf tensor”, see
:ref:`Autograd mechanics <autograd-mechanics>`) from :attr:`tensor_list` a list of tensors.
Args:
diff --git a/torch/nn/modules/adaptive.py b/torch/nn/modules/adaptive.py
index 83b37696c8..3d61e9d8f5 100644
--- a/torch/nn/modules/adaptive.py
+++ b/torch/nn/modules/adaptive.py
@@ -20,7 +20,7 @@ class AdaptiveLogSoftmaxWithLoss(Module):
As described in
`Efficient softmax approximation for GPUs by Edouard Grave, Armand Joulin,
- Moustapha Ciss\u00e9, David Grangier, and Herv\u00e9 J\u00e9gou
+ Moustapha Cissé, David Grangier, and Hervé Jégou
<https://arxiv.org/abs/1609.04309>`__.
Adaptive softmax is an approximate strategy for training models with large
diff --git a/torch/nn/modules/conv.py b/torch/nn/modules/conv.py
index 075d5e9865..b3d5770e7b 100644
--- a/torch/nn/modules/conv.py
+++ b/torch/nn/modules/conv.py
@@ -204,7 +204,7 @@ class Conv1d(_ConvNd):
amount of implicit padding applied on both sides.
* :attr:`dilation` controls the spacing between the kernel points; also
- known as the \u00e0 trous algorithm. It is harder to describe, but this `link`_
+ known as the à trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
{groups_note}
@@ -341,7 +341,7 @@ class Conv2d(_ConvNd):
amount of implicit padding applied on both sides.
* :attr:`dilation` controls the spacing between the kernel points; also
- known as the \u00e0 trous algorithm. It is harder to describe, but this `link`_
+ known as the à trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
{groups_note}
@@ -483,7 +483,7 @@ class Conv3d(_ConvNd):
can be either a string {{'valid', 'same'}} or a tuple of ints giving the
amount of implicit padding applied on both sides.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
@@ -690,7 +690,7 @@ class ConvTranspose1d(_ConvTransposeNd):
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
@@ -821,7 +821,7 @@ class ConvTranspose2d(_ConvTransposeNd):
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
@@ -978,7 +978,7 @@ class ConvTranspose3d(_ConvTransposeNd):
* :attr:`output_padding` controls the additional size added to one side
of the output shape. See note below for details.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.
{groups_note}
diff --git a/torch/nn/modules/fold.py b/torch/nn/modules/fold.py
index f8cb083623..8ae911252f 100644
--- a/torch/nn/modules/fold.py
+++ b/torch/nn/modules/fold.py
@@ -41,7 +41,7 @@ class Fold(Module):
sides for :attr:`padding` number of points for each dimension before
reshaping.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Args:
@@ -186,7 +186,7 @@ class Unfold(Module):
sides for :attr:`padding` number of points for each dimension before
reshaping.
- * :attr:`dilation` controls the spacing between the kernel points; also known as the \u00e0 trous algorithm.
+ * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
Args:
diff --git a/torch/onnx/_internal/diagnostics/infra/_infra.py b/torch/onnx/_internal/diagnostics/infra/_infra.py
index c118f3e5ae..2a522b61f8 100644
--- a/torch/onnx/_internal/diagnostics/infra/_infra.py
+++ b/torch/onnx/_internal/diagnostics/infra/_infra.py
@@ -49,7 +49,7 @@ class Tag(enum.Enum):
class PatchedPropertyBag(sarif.PropertyBag):
"""Key/value pairs that provide additional information about the object.
- The definition of PropertyBag via SARIF spec is "A property bag is an object (section 3.6)
+ The definition of PropertyBag via SARIF spec is "A property bag is an object (§3.6)
containing an unordered set of properties with arbitrary names." However it is not
reflected in the json file, and therefore not captured by the python representation.
This patch adds additional **kwargs to the `__init__` method to allow recording
diff --git a/torch/onnx/_internal/onnx_proto_utils.py b/torch/onnx/_internal/onnx_proto_utils.py
index b33b4a77f4..7fb79e5b20 100644
--- a/torch/onnx/_internal/onnx_proto_utils.py
+++ b/torch/onnx/_internal/onnx_proto_utils.py
@@ -26,13 +26,13 @@ def export_as_test_case(
is as follows:
dir
- \u251c\u2500\u2500 test_<name>
- \u2502 \u251c\u2500\u2500 model.onnx
- \u2502 \u2514\u2500\u2500 test_data_set_0
- \u2502 \u251c\u2500\u2500 input_0.pb
- \u2502 \u251c\u2500\u2500 input_1.pb
- \u2502 \u251c\u2500\u2500 output_0.pb
- \u2502 \u2514\u2500\u2500 output_1.pb
+ ├── test_<name>
+ │ ├── model.onnx
+ │ └── test_data_set_0
+ │ ├── input_0.pb
+ │ ├── input_1.pb
+ │ ├── output_0.pb
+ │ └── output_1.pb
Args:
model_bytes: The ONNX model in bytes.
@@ -80,13 +80,13 @@ def load_test_case(dir: str) -> Tuple[bytes, Any, Any]:
should be as follows:
dir
- \u251c\u2500\u2500 test_<name>
- \u2502 \u251c\u2500\u2500 model.onnx
- \u2502 \u2514\u2500\u2500 test_data_set_0
- \u2502 \u251c\u2500\u2500 input_0.pb
- \u2502 \u251c\u2500\u2500 input_1.pb
- \u2502 \u251c\u2500\u2500 output_0.pb
- \u2502 \u2514\u2500\u2500 output_1.pb
+ ├── test_<name>
+ │ ├── model.onnx
+ │ └── test_data_set_0
+ │ ├── input_0.pb
+ │ ├── input_1.pb
+ │ ├── output_0.pb
+ │ └── output_1.pb
Args:
dir: The directory containing the test case.
diff --git a/torch/onnx/symbolic_opset10.py b/torch/onnx/symbolic_opset10.py
index 6fd576822e..d35c9e9845 100644
--- a/torch/onnx/symbolic_opset10.py
+++ b/torch/onnx/symbolic_opset10.py
@@ -785,7 +785,7 @@ def nan_to_num(g: jit_utils.GraphContext, input, nan, posinf, neginf):
)
# For None values of posinf, neginf we use the greatest/lowest finite
- # value representable by input's dtype.
+ # value representable by input’s dtype.
finfo = torch.finfo(input_dtype)
if posinf is None:
posinf = finfo.max
diff --git a/torch/onnx/symbolic_opset11.py b/torch/onnx/symbolic_opset11.py
index 0282c38f73..b03918c6cc 100644
--- a/torch/onnx/symbolic_opset11.py
+++ b/torch/onnx/symbolic_opset11.py
@@ -1379,10 +1379,10 @@ def normal(
pin_memory=None,
):
# If you can sample from a given distribution with mean 0 and variance 1, then you can easily sample from a
- # scale-location transformation of that distribution, which has mean mu and variance sigma's square. If x is a sample
+ # scale-location transformation of that distribution, which has mean μ and variance σ's square. If x is a sample
# from a mean 0 and variance 1 distribution then
- # sigma x+mu
- # is a sample with mean mu and variance sigma's square.
+ # σx+μ
+ # is a sample with mean μ and variance σ's square.
if sizes is not None and not symbolic_helper._is_none(sizes):
mean = opset9.expand(g, mean, sizes, None)
result = opset9.mul(g, std, g.op("RandomNormalLike", mean))
diff --git a/torch/onnx/verification.py b/torch/onnx/verification.py
index 6b49e7fc72..b60dfe8e1c 100644
--- a/torch/onnx/verification.py
+++ b/torch/onnx/verification.py
@@ -1020,7 +1020,7 @@ class GraphInfoPrettyPrinter:
else ""
)
- return f"{node_count} {'X' if has_mismatch else chr(0x2713)} {error_node_kind}"
+ return f"{node_count} {'X' if has_mismatch else '✓'} {error_node_kind}"
@_beartype.beartype
def _graph_id_segment_str(self) -> str:
@@ -1148,13 +1148,13 @@ class OnnxTestCaseRepro:
structure is as follows:
dir
- \u251c\u2500\u2500 test_<name>
- \u2502 \u251c\u2500\u2500 model.onnx
- \u2502 \u2514\u2500\u2500 test_data_set_0
- \u2502 \u251c\u2500\u2500 input_0.pb
- \u2502 \u251c\u2500\u2500 input_1.pb
- \u2502 \u251c\u2500\u2500 output_0.pb
- \u2502 \u2514\u2500\u2500 output_1.pb
+ ├── test_<name>
+ │ ├── model.onnx
+ │ └── test_data_set_0
+ │ ├── input_0.pb
+ │ ├── input_1.pb
+ │ ├── output_0.pb
+ │ └── output_1.pb
Args:
proto: ONNX model proto.
@@ -1244,19 +1244,19 @@ class GraphInfo:
Example::
==================================== Tree: =====================================
- 5 X __2 X __1 \u2713
+ 5 X __2 X __1 ✓
id: | id: 0 | id: 00
| |
| |__1 X (aten::relu)
| id: 01
|
- |__3 X __1 \u2713
+ |__3 X __1 ✓
id: 1 | id: 10
|
|__2 X __1 X (aten::relu)
id: 11 | id: 110
|
- |__1 \u2713
+ |__1 ✓
id: 111
=========================== Mismatch leaf subgraphs: ===========================
['01', '110']
@@ -1354,13 +1354,13 @@ class GraphInfo:
The repro directory will contain the following files::
dir
- \u251c\u2500\u2500 test_<name>
- \u2502 \u251c\u2500\u2500 model.onnx
- \u2502 \u2514\u2500\u2500 test_data_set_0
- \u2502 \u251c\u2500\u2500 input_0.pb
- \u2502 \u251c\u2500\u2500 input_1.pb
- \u2502 \u251c\u2500\u2500 output_0.pb
- \u2502 \u2514\u2500\u2500 output_1.pb
+ ├── test_<name>
+ │ ├── model.onnx
+ │ └── test_data_set_0
+ │ ├── input_0.pb
+ │ ├── input_1.pb
+ │ ├── output_0.pb
+ │ └── output_1.pb
Args:
repro_dir: The directory to export the repro files to. Defaults to current
@@ -1825,19 +1825,19 @@ def find_mismatch(
Greatest absolute difference: 0.2328854203224182 at index (1, 2) (up to 1e-07 allowed)
Greatest relative difference: 0.699536174352349 at index (1, 3) (up to 0.001 allowed)
==================================== Tree: =====================================
- 5 X __2 X __1 \u2713
+ 5 X __2 X __1 ✓
id: | id: 0 | id: 00
| |
| |__1 X (aten::relu)
| id: 01
|
- |__3 X __1 \u2713
+ |__3 X __1 ✓
id: 1 | id: 10
|
|__2 X __1 X (aten::relu)
id: 11 | id: 110
|
- |__1 \u2713
+ |__1 ✓
id: 111
=========================== Mismatch leaf subgraphs: ===========================
['01', '110']
diff --git a/torch/package/file_structure_representation.py b/torch/package/file_structure_representation.py
index 1453ad3a5d..cc5f055c1a 100644
--- a/torch/package/file_structure_representation.py
+++ b/torch/package/file_structure_representation.py
@@ -67,16 +67,13 @@ class Directory:
return "".join(str_list)
def _stringify_tree(
- self,
- str_list: List[str],
- preamble: str = "",
- dir_ptr: str = "\u2500\u2500\u2500 ",
+ self, str_list: List[str], preamble: str = "", dir_ptr: str = "─── "
):
"""Recursive method to generate print-friendly version of a Directory."""
space = " "
- branch = "\u2502 "
- tee = "\u251c\u2500\u2500 "
- last = "\u2514\u2500\u2500 "
+ branch = "│ "
+ tee = "├── "
+ last = "└── "
# add this directory's representation
str_list.append(f"{preamble}{dir_ptr}{self.name}\n")
diff --git a/torch/signal/windows/windows.py b/torch/signal/windows/windows.py
index d86a1245dc..f2cbe3247c 100644
--- a/torch/signal/windows/windows.py
+++ b/torch/signal/windows/windows.py
@@ -748,7 +748,7 @@ Computes the minimum 4-term Blackman-Harris window according to Nuttall.
.. math::
w_n = 1 - 0.36358 \cos{(z_n)} + 0.48917 \cos{(2z_n)} - 0.13659 \cos{(3z_n)} + 0.01064 \cos{(4z_n)}
-where ``z_n = 2 \u03c0 n/ M``.
+where ``z_n = 2 π n/ M``.
""",
"""
@@ -766,12 +766,12 @@ Keyword args:
References::
- - A. Nuttall, "Some windows with very good sidelobe behavior,"
+ - A. Nuttall, “Some windows with very good sidelobe behavior,”
IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 29, no. 1, pp. 84-91,
Feb 1981. https://doi.org/10.1109/TASSP.1981.1163506
- - Heinzel G. et al., "Spectrum and spectral density estimation by the Discrete Fourier transform (DFT),
- including a comprehensive list of window functions and some new flat-top windows",
+ - Heinzel G. et al., “Spectrum and spectral density estimation by the Discrete Fourier transform (DFT),
+ including a comprehensive list of window functions and some new flat-top windows”,
February 15, 2002 https://holometer.fnal.gov/GH_FFT.pdf
Examples::
diff --git a/torch/special/__init__.py b/torch/special/__init__.py
index 07e104c409..a25f0f7c03 100644
--- a/torch/special/__init__.py
+++ b/torch/special/__init__.py
@@ -1036,7 +1036,7 @@ hermite_polynomial_h = _add_docstr(_special.special_hermite_polynomial_h,
r"""
hermite_polynomial_h(input, n, *, out=None) -> Tensor
-Physicist's Hermite polynomial :math:`H_{n}(\text{input})`.
+Physicist’s Hermite polynomial :math:`H_{n}(\text{input})`.
If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
is returned. Otherwise, the recursion:
@@ -1059,7 +1059,7 @@ hermite_polynomial_he = _add_docstr(_special.special_hermite_polynomial_he,
r"""
hermite_polynomial_he(input, n, *, out=None) -> Tensor
-Probabilist's Hermite polynomial :math:`He_{n}(\text{input})`.
+Probabilist’s Hermite polynomial :math:`He_{n}(\text{input})`.
If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}`
is returned. Otherwise, the recursion:
diff --git a/torch/utils/jit/__init__.py b/torch/utils/jit/__init__.py
index e69de29bb2..8b13789179 100644
--- a/torch/utils/jit/__init__.py
+++ b/torch/utils/jit/__init__.py
@@ -0,0 +1 @@
+
|
2.41.0
|
131c2c199a8f35c81d9823512a05e1657723f0f
|
Fri, 26 Apr 2024 06:15:17 +0000
|
[PATCH 0703/1000] Revert "Fix mypy issues in fake_tensor.py (#124428)"
|
This reverts commit 25c0d3f3f0b19b7ca88bc92e9dc56e391d18e010. Reverted https://github.com/pytorch/pytorch/pull/124428 on behalf of https://github.com/jeanschmidt due to Unfortunately, I needed to revert #123735 and this one depends on it. So please check if there are no merge conflicts or breakages and feel free to merge this PR again ([comment](https://github.com/pytorch/pytorch/pull/124428#issuecomment-2078699836))
|
diff --git a/c10/core/SafePyObject.h b/c10/core/SafePyObject.h
index 19f8f62c71..4e56384b2f 100644
--- a/c10/core/SafePyObject.h
+++ b/c10/core/SafePyObject.h
@@ -55,22 +55,6 @@ struct C10_API SafePyObject {
c10::impl::PyInterpreter* pyinterpreter_;
};
-// A newtype wrapper around SafePyObject for type safety when a python object
-// represents a specific type. Note that `T` is only used as a tag and isn't
-// actually used for any true purpose.
-template <typename T>
-struct SafePyObjectT : private SafePyObject {
- SafePyObjectT(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
- : SafePyObject(data, pyinterpreter) {}
- SafePyObjectT(SafePyObjectT&& other) noexcept : SafePyObject(other) {}
- SafePyObjectT(SafePyObjectT const&) = delete;
- SafePyObjectT& operator=(SafePyObjectT const&) = delete;
-
- using SafePyObject::ptr;
- using SafePyObject::pyinterpreter;
- using SafePyObject::release;
-};
-
// Like SafePyObject, but non-owning. Good for references to global PyObjects
// that will be leaked on interpreter exit. You get a copy constructor/assign
// this way.
diff --git a/c10/core/impl/TorchDispatchModeTLS.cpp b/c10/core/impl/TorchDispatchModeTLS.cpp
index e558a70522..37c75003e2 100644
--- a/c10/core/impl/TorchDispatchModeTLS.cpp
+++ b/c10/core/impl/TorchDispatchModeTLS.cpp
@@ -25,7 +25,7 @@ bool TorchDispatchModeTLS::any_modes_set(bool skip_infra_modes) {
}
void TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
- std::shared_ptr<PyObject_TorchDispatchMode> mode) {
+ std::shared_ptr<SafePyObject> mode) {
if (!any_modes_set()) {
c10::impl::tls_set_dispatch_key_included(DispatchKey::Python, true);
c10::impl::tls_set_dispatch_key_included(
@@ -34,9 +34,8 @@ void TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
torchDispatchModeState.stack_.push_back(std::move(mode));
}
-const std::shared_ptr<PyObject_TorchDispatchMode> TorchDispatchModeTLS::
- pop_stack() {
- std::shared_ptr<PyObject_TorchDispatchMode> out;
+const std::shared_ptr<SafePyObject> TorchDispatchModeTLS::pop_stack() {
+ std::shared_ptr<SafePyObject> out;
if (!torchDispatchModeState.stack_.empty()) {
out = torchDispatchModeState.stack_.back();
torchDispatchModeState.stack_.pop_back();
@@ -61,9 +60,8 @@ const std::shared_ptr<PyObject_TorchDispatchMode> TorchDispatchModeTLS::
}
return out;
}
-const std::
- tuple<std::shared_ptr<PyObject_TorchDispatchMode>, TorchDispatchModeKey>
- TorchDispatchModeTLS::pop_highest_infra_mode() {
+const std::tuple<std::shared_ptr<SafePyObject>, TorchDispatchModeKey>
+TorchDispatchModeTLS::pop_highest_infra_mode() {
for (int64_t i = static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS) - 1;
i >= 0;
--i) {
@@ -84,8 +82,8 @@ const std::
false, "Called pop_highest_infra_mode, but no infra modes were active.")
}
-const std::shared_ptr<PyObject_TorchDispatchMode>& TorchDispatchModeTLS::
- get_stack_at(int64_t idx) {
+const std::shared_ptr<SafePyObject>& TorchDispatchModeTLS::get_stack_at(
+ int64_t idx) {
TORCH_CHECK(idx < stack_len(), "Tried to get stack at idx that's too big");
// Our "logical" stack includes both:
// - any user modes (the entire torchDispatchModeState.stack_)
@@ -121,13 +119,13 @@ int64_t TorchDispatchModeTLS::stack_len() {
return stack_len + infra_modes_len;
}
-const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
-TorchDispatchModeTLS::get_mode(TorchDispatchModeKey mode_key) {
+const c10::optional<std::shared_ptr<SafePyObject>> TorchDispatchModeTLS::
+ get_mode(TorchDispatchModeKey mode_key) {
return torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)];
}
void TorchDispatchModeTLS::set_mode(
- const std::shared_ptr<PyObject_TorchDispatchMode>& mode,
+ const std::shared_ptr<SafePyObject>& mode,
TorchDispatchModeKey mode_key) {
TORCH_CHECK(
torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] ==
@@ -145,8 +143,8 @@ void TorchDispatchModeTLS::set_mode(
torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] = mode;
}
-const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
-TorchDispatchModeTLS::unset_mode(TorchDispatchModeKey mode_key) {
+const c10::optional<std::shared_ptr<SafePyObject>> TorchDispatchModeTLS::
+ unset_mode(TorchDispatchModeKey mode_key) {
auto out = torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)];
torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] =
c10::nullopt;
diff --git a/c10/core/impl/TorchDispatchModeTLS.h b/c10/core/impl/TorchDispatchModeTLS.h
index d9ac8d8449..50a92459e6 100644
--- a/c10/core/impl/TorchDispatchModeTLS.h
+++ b/c10/core/impl/TorchDispatchModeTLS.h
@@ -12,35 +12,31 @@ enum class TorchDispatchModeKey : int8_t {
NUM_MODE_KEYS
};
-using PyObject_TorchDispatchMode = SafePyObjectT<TorchDispatchModeKey>;
-
struct C10_API TorchDispatchModeTLS {
// This API is NOT invariant safe.
// It must not take in an infra mode that uses TorchDispatchModeKey
// If you're pushing an infra mode onto the stack, we expect
// you to use set_mode
static void push_non_infra_mode_onto_stack(
- std::shared_ptr<PyObject_TorchDispatchMode> mode);
+ std::shared_ptr<SafePyObject> mode);
// Pops the top mode of the stack,
// giving precedence to user modes before attempting to pop
// any infra modes
- static const std::shared_ptr<PyObject_TorchDispatchMode> pop_stack();
+ static const std::shared_ptr<SafePyObject> pop_stack();
// Returns the highest-priority infra mode on the stack,
// along with its mode key.
- static const std::
- tuple<std::shared_ptr<PyObject_TorchDispatchMode>, TorchDispatchModeKey>
- pop_highest_infra_mode();
+ static const std::tuple<std::shared_ptr<SafePyObject>, TorchDispatchModeKey>
+ pop_highest_infra_mode();
- static const std::shared_ptr<PyObject_TorchDispatchMode>& get_stack_at(
- int64_t idx);
+ static const std::shared_ptr<SafePyObject>& get_stack_at(int64_t idx);
static int64_t stack_len();
- static const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
- get_mode(TorchDispatchModeKey mode_key);
- static const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
- unset_mode(TorchDispatchModeKey mode_key);
+ static const c10::optional<std::shared_ptr<SafePyObject>> get_mode(
+ TorchDispatchModeKey mode_key);
+ static const c10::optional<std::shared_ptr<SafePyObject>> unset_mode(
+ TorchDispatchModeKey mode_key);
static void set_mode(
- const std::shared_ptr<PyObject_TorchDispatchMode>& mode,
+ const std::shared_ptr<SafePyObject>& mode,
TorchDispatchModeKey mode_key);
static const TorchDispatchModeTLS& get_state();
@@ -49,13 +45,13 @@ struct C10_API TorchDispatchModeTLS {
static bool any_modes_set(bool skip_infra_modes = false);
private:
- std::vector<std::shared_ptr<PyObject_TorchDispatchMode>> stack_;
+ std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
// Users are allowed to push multiple ProxyTorchDispatchMode objects onto the
// stack
// However, we only allow a single FakeTensorMode onto the stack at a time
// (Pushing additional FakeTensorModes onto the stack is a no-op)
std::array<
- c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>,
+ c10::optional<std::shared_ptr<c10::SafePyObject>>,
static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS)>
infra_modes_;
};
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index 8b23117704..aec4a28af9 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -55,7 +55,6 @@ from torch.types import (
)
from torch._prims_common import DeviceLikeType
-from torch.utils._python_dispatch import TorchDispatchMode
# This module is defined in torch/csrc/Module.cpp
@@ -1333,11 +1332,11 @@ def _pop_torch_function_stack() -> Any: ...
def _get_function_stack_at(idx: _int) -> Any: ...
def _len_torch_function_stack() -> _int: ...
def _set_torch_dispatch_mode(cls: Any) -> None: ...
-def _push_on_torch_dispatch_stack(cls: TorchDispatchMode) -> None: ...
+def _push_on_torch_dispatch_stack(cls: Any) -> None: ...
def _pop_torch_dispatch_stack(mode_key: Optional[torch._C._TorchDispatchModeKey] = None) -> Any: ...
def _get_dispatch_mode(mode_key: Optional[torch._C._TorchDispatchModeKey]) -> Any: ...
-def _unset_dispatch_mode(mode: torch._C._TorchDispatchModeKey) -> Optional[TorchDispatchMode]: ...
-def _set_dispatch_mode(mode: TorchDispatchMode) -> None: ...
+def _unset_dispatch_mode(mode: torch._C._TorchDispatchModeKey) -> Any: ...
+def _set_dispatch_mode(mode: Any) -> None: ...
def _get_dispatch_stack_at(idx: _int) -> Any: ...
def _len_torch_dispatch_stack() -> _int: ...
def _activate_gpu_trace() -> None: ...
@@ -1550,8 +1549,6 @@ def _dispatch_pystub(name: str, overload: str) -> Optional[Tuple[str, str]]: ...
def _dispatch_is_alias_key(dispatch: _dispatchkey) -> _bool: ...
def _functionality_to_backend_keys(dispatch: _dispatchkey) -> List[DispatchKey]: ...
def _functionalization_reapply_views_tls() -> _bool: ...
-def _only_lift_cpu_tensors() -> _bool: ...
-def _set_only_lift_cpu_tensors(value: _bool) -> None: ...
def _set_throw_on_mutable_data_ptr(tensor: Tensor) -> None: ...
def _set_warn_deprecated_on_mutable_data_ptr(tensor: Tensor) -> None: ...
@@ -2263,7 +2260,6 @@ def _register_py_class_for_device(device: str, cls: Any) -> None: ...
# Defined in torch/csrc/Module.cpp
def _current_graph_task_id() -> _int: ...
def _current_autograd_node() -> _Node: ...
-def _dispatch_key_set(Tensor) -> str: ...
# Defined in torch/csrc/Exceptions.cpp
class OutOfMemoryError(RuntimeError): ...
diff --git a/torch/_ops.py b/torch/_ops.py
index 1b230b929c..774b6753c9 100644
--- a/torch/_ops.py
+++ b/torch/_ops.py
@@ -1254,4 +1254,4 @@ class _Ops(types.ModuleType):
# The ops "namespace"
-ops: _Ops = _Ops()
+ops = _Ops()
diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py
index d291605d58..8174f0658f 100644
--- a/torch/_subclasses/fake_tensor.py
+++ b/torch/_subclasses/fake_tensor.py
@@ -1,3 +1,5 @@
+# mypy: ignore-errors
+
import contextlib
import functools
import logging
@@ -6,18 +8,7 @@ import traceback
import weakref
from collections import defaultdict
from dataclasses import dataclass
-from typing import (
- Any,
- cast,
- Dict,
- List,
- Optional,
- Tuple,
- Type,
- TYPE_CHECKING,
- TypeVar,
- Union,
-)
+from typing import Any, Dict, List, Optional, Tuple, Type, TYPE_CHECKING, TypeVar
from weakref import ReferenceType
import torch
@@ -39,7 +30,6 @@ from torch._utils import render_call
from torch.fx.operator_schemas import normalize_function
from torch.multiprocessing.reductions import StorageWeakRef
from torch.overrides import TorchFunctionMode
-from torch.types import _bool
from torch.utils._mode_utils import no_dispatch
from torch.utils._python_dispatch import (
is_traceable_wrapper_subclass,
@@ -52,13 +42,6 @@ from torch.utils._traceback import CapturedTraceback
if TYPE_CHECKING:
from torch.fx.experimental.symbolic_shapes import ShapeEnv
-
-class _Unassigned:
- pass
-
-
-_UNASSIGNED = _Unassigned()
-
DimList = List
log = logging.getLogger(__name__)
@@ -735,7 +718,7 @@ def extract_tensor_metadata(t: torch.Tensor) -> "TensorMetadata":
"""
Extract the TensorMetadata of a tensor.
"""
- memory_format: Optional[torch.memory_format] = suggest_memory_format(t)
+ memory_format = suggest_memory_format(t)
if is_sparse_any(t) or not t.is_contiguous(memory_format=memory_format):
memory_format = None
@@ -823,11 +806,10 @@ class FakeTensorMode(TorchDispatchMode):
cache: Dict[_DispatchCacheKey, _DispatchCacheEntry] = {}
cache_hits: int = 0
cache_misses: int = 0
- cache_bypasses: Dict[str, int] = defaultdict(int)
+ cache_bypasses = defaultdict(int)
# Every time you retrace using the same fake tensor mode, you should
# advance the epoch so we don't reuse unbacked memos
epoch: int = 0
- in_kernel_invocation: bool = False
def __init__(
self,
@@ -878,9 +860,7 @@ class FakeTensorMode(TorchDispatchMode):
# in_kernel_invocation
# If another fake mode was already active when we enter, we also stash it here.
# That way when we exit, we know to re-enable the previous fake mode.
- self.enter_stack: List[
- Tuple[bool, Optional[TorchDispatchMode], Optional[_bool]]
- ] = []
+ self.enter_stack: List[Tuple[bool, Optional[FakeTensorMode]]] = []
self.shape_env: ShapeEnv = shape_env
@@ -992,7 +972,7 @@ class FakeTensorMode(TorchDispatchMode):
Lookup a cache entry for the given arguments. If none exists, dispatch
and cache the result (if the result is eligible for caching).
"""
- output: Union[FakeTensor, _Unassigned] = _UNASSIGNED
+ output = unassigned = object()
try:
key = self._cache_key(func, args, kwargs)
entry = FakeTensorMode.cache.get(key, None)
@@ -1011,7 +991,7 @@ class FakeTensorMode(TorchDispatchMode):
except _BypassDispatchCache as e:
FakeTensorMode.cache_bypasses[e.reason] += 1
- if output is _UNASSIGNED:
+ if output is unassigned:
output = self._dispatch_impl(func, types, args, kwargs)
return output
@@ -1086,7 +1066,7 @@ class FakeTensorMode(TorchDispatchMode):
if isinstance(args, dict):
args = list(args.keys()) + list(args.values())
- result: List[Any] = []
+ result = []
for arg in args:
if isinstance(arg, FakeTensor):
if not self.is_our_fake(arg):
@@ -1197,7 +1177,7 @@ class FakeTensorMode(TorchDispatchMode):
# Synthesize a new FakeTensor with the cached metadata.
metadata = entry.metadata
- assert metadata and not metadata.is_sparse
+ assert not metadata.is_sparse
empty = torch.empty_strided(
metadata.shape,
@@ -1215,7 +1195,7 @@ class FakeTensorMode(TorchDispatchMode):
if func.is_view:
# For view ops, the storage should be the same as the tensor input.
- storage = args[cast(int, entry.view_idx)].untyped_storage()
+ storage = args[entry.view_idx].untyped_storage()
with in_kernel_invocation_manager(self):
empty.set_(
storage, metadata.storage_offset, metadata.shape, metadata.stride
@@ -1283,7 +1263,7 @@ class FakeTensorMode(TorchDispatchMode):
else:
return self._dispatch_impl(func, types, args, kwargs)
- def _dispatch_impl(self, func, types, args, kwargs) -> FakeTensor:
+ def _dispatch_impl(self, func, types, args, kwargs):
flat_args, args_spec = pytree.tree_flatten((args, kwargs))
flat_arg_fake_tensors = [
@@ -1577,7 +1557,7 @@ class FakeTensorMode(TorchDispatchMode):
If not, try to convert them to fake tensors.
Returns the original args, kwargs, and a flattened list of (args, kwargs) that are fake tensors.
"""
- flat_arg_fake_tensors: List[Any] = []
+ flat_arg_fake_tensors = []
def validate(x):
if not isinstance(x, torch.Tensor):
@@ -1704,7 +1684,7 @@ class FakeTensorMode(TorchDispatchMode):
source: Optional[Source] = None,
symbolic_context=None,
):
- shape_env: Optional[ShapeEnv] = self.shape_env
+ shape_env = self.shape_env
if static_shapes is None:
static_shapes = self.static_shapes
if static_shapes:
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index 6c9870a5c4..aaaa95a9d2 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -1097,13 +1097,11 @@ static PyObject* push_on_torch_dispatch_stack(
if (maybe_mode_key_obj) {
mode_key = py::cast<c10::impl::TorchDispatchModeKey>(maybe_mode_key_obj);
c10::impl::TorchDispatchModeTLS::set_mode(
- std::make_shared<c10::impl::PyObject_TorchDispatchMode>(
- arg, getPyInterpreter()),
+ std::make_shared<c10::SafePyObject>(arg, getPyInterpreter()),
mode_key.value());
} else {
c10::impl::TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
- std::make_shared<c10::impl::PyObject_TorchDispatchMode>(
- arg, getPyInterpreter()));
+ std::make_shared<c10::SafePyObject>(arg, getPyInterpreter()));
}
Py_INCREF(arg);
}
@@ -1167,9 +1165,7 @@ static PyObject* set_dispatch_mode(PyObject* _unused, PyObject* mode) {
Py_INCREF(mode);
c10::impl::TorchDispatchModeTLS::set_mode(
- std::make_shared<c10::impl::PyObject_TorchDispatchMode>(
- mode, getPyInterpreter()),
- mode_key);
+ std::make_shared<c10::SafePyObject>(mode, getPyInterpreter()), mode_key);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
diff --git a/torch/csrc/utils/torch_dispatch_mode.h b/torch/csrc/utils/torch_dispatch_mode.h
index 79173aeb3e..81729f27df 100644
--- a/torch/csrc/utils/torch_dispatch_mode.h
+++ b/torch/csrc/utils/torch_dispatch_mode.h
@@ -29,12 +29,12 @@ struct StashTorchDispatchModeGuard {
}
}
- const std::shared_ptr<c10::impl::PyObject_TorchDispatchMode>& get_cur_mode() {
+ const std::shared_ptr<c10::SafePyObject>& get_cur_mode() {
return saved_mode_;
}
private:
- std::shared_ptr<c10::impl::PyObject_TorchDispatchMode> saved_mode_;
+ std::shared_ptr<at::SafePyObject> saved_mode_;
c10::optional<c10::impl::TorchDispatchModeKey> saved_mode_key_;
};
diff --git a/torch/utils/_python_dispatch.py b/torch/utils/_python_dispatch.py
index ec24f006a7..f5f830c2f1 100644
--- a/torch/utils/_python_dispatch.py
+++ b/torch/utils/_python_dispatch.py
@@ -159,7 +159,7 @@ def _get_current_dispatch_mode_stack():
return [_get_dispatch_stack_at(i) for i in range(stack_len)]
-def _push_mode(mode: TorchDispatchMode):
+def _push_mode(mode):
k = mode._dispatch_key if hasattr(mode, "_dispatch_key") else None
assert k is None or k == torch._C.DispatchKey.PreDispatch
if k is None:
|
2.41.0
|
ad291d07f646771241aed015ed38bbec2241961
|
Fri, 26 Apr 2024 06:40:36 +0000
|
[PATCH 0704/1000] [DeviceMesh] Removing mapping child_to_parent_mapping from `_MeshEnv` (#124890)
|
Summary: The mapping is no longer needed after https://github.com/pytorch/pytorch/pull/124780, as we are not going to re-create the pgs during mesh slicing. Test Plan: CI Differential Revision: D56499001 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124890 Approved by: https://github.com/awgu
|
diff --git a/torch/distributed/device_mesh.py b/torch/distributed/device_mesh.py
index 2b176583de..03fbd8b146 100644
--- a/torch/distributed/device_mesh.py
+++ b/torch/distributed/device_mesh.py
@@ -62,7 +62,6 @@ else:
def __init__(self) -> None:
self.mesh_stack: List[DeviceMesh] = []
self.child_to_parent_mapping: Dict[DeviceMesh, DeviceMesh] = {}
- self.parent_to_child_mapping: Dict[DeviceMesh, Dict[str, DeviceMesh]] = {}
def get_current_mesh(self) -> "DeviceMesh":
if len(self.mesh_stack) == 0:
@@ -72,13 +71,6 @@ else:
def create_child_mesh(
self, device_mesh: "DeviceMesh", mesh_dim: int, mesh_dim_name: str
) -> "DeviceMesh":
- # Directly return the child mesh if it is already created.
- child_mesh_mappings = self.parent_to_child_mapping.get(device_mesh)
- if child_mesh_mappings:
- sub_mesh = child_mesh_mappings.get(mesh_dim_name)
- if sub_mesh:
- return sub_mesh
-
# swap the current dim to the last dim then reshape to flatten out other
# dims, so we can just extract the list of ranks which contains cur_rank.
cur_rank = device_mesh.get_rank()
@@ -99,9 +91,6 @@ else:
res_sub_mesh._dim_group_infos = [device_mesh._dim_group_infos[mesh_dim]] # type: ignore[possibly-undefined]
# Assign the current DeviceMesh as the parent of the child DeviceMesh.
self.child_to_parent_mapping[res_sub_mesh] = device_mesh
- self.parent_to_child_mapping.setdefault(device_mesh, {})[
- mesh_dim_name
- ] = res_sub_mesh
return res_sub_mesh
def get_parent_mesh(self, device_mesh: "DeviceMesh") -> Optional["DeviceMesh"]:
|
2.41.0
|
9a83eacb51faf9706fd14f1df60f1220fa4008d
|
Fri, 26 Apr 2024 13:04:14 +0000
|
[PATCH 0708/1000] add new API torch.amp.is_autocast_available (#124938)
|
# Motivation: expose `torch._is_autocast_available` as `torch.amp.is_autocast_available`, a public API. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124938 Approved by: https://github.com/albanD
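For illustration only (not part of the patch), a minimal usage sketch of the new public API; the device strings are examples and the CUDA branch only runs when CUDA is actually present:

    import torch

    # Query autocast support through the new public entry point instead of
    # the private torch._C._is_autocast_available.
    print(torch.amp.is_autocast_available("cpu"))   # True on standard builds

    if torch.amp.is_autocast_available("cuda"):
        with torch.autocast(device_type="cuda", dtype=torch.float16):
            x = torch.randn(8, 8, device="cuda")
            y = x @ x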
|
diff --git a/docs/source/amp.rst b/docs/source/amp.rst
index d0ef865b37..a9d98a0aa0 100644
--- a/docs/source/amp.rst
+++ b/docs/source/amp.rst
@@ -40,6 +40,10 @@ For CUDA and CPU, APIs are also provided separately:
Autocasting
^^^^^^^^^^^
+.. currentmodule:: torch.amp.autocast_mode
+
+.. autofunction:: is_autocast_available
+
.. currentmodule:: torch
.. autoclass:: autocast
diff --git a/test/test_autocast.py b/test/test_autocast.py
index 5054944932..5b1e38eff0 100644
--- a/test/test_autocast.py
+++ b/test/test_autocast.py
@@ -340,6 +340,8 @@ class TestTorchAutocast(TestCase):
with self.assertRaisesRegex(RuntimeError, msg):
with torch.autocast(device_type=dev):
_ = torch.tensor(1)
+ with self.assertRaisesRegex(RuntimeError, msg):
+ assert torch.amp.is_autocast_available(device_type=dev)
if __name__ == "__main__":
diff --git a/torch/amp/__init__.py b/torch/amp/__init__.py
index e0be696975..2884dfeefe 100644
--- a/torch/amp/__init__.py
+++ b/torch/amp/__init__.py
@@ -1,2 +1,7 @@
-from .autocast_mode import _enter_autocast, _exit_autocast, autocast
+from .autocast_mode import (
+ _enter_autocast,
+ _exit_autocast,
+ autocast,
+ is_autocast_available,
+)
from .grad_scaler import GradScaler
diff --git a/torch/amp/autocast_mode.py b/torch/amp/autocast_mode.py
index 87ff709fcf..523d8dc34d 100644
--- a/torch/amp/autocast_mode.py
+++ b/torch/amp/autocast_mode.py
@@ -6,7 +6,19 @@ from typing import Any, Optional
import torch
from torch.types import _dtype
-__all__ = ["autocast_decorator", "autocast"]
+__all__ = ["autocast_decorator", "autocast", "is_autocast_available"]
+
+
+def is_autocast_available(device_type: str) -> bool:
+ r"""
+ Return a bool indicating if autocast is available on :attr:`device_type`.
+
+ Args:
+ device_type(str): Device type to use. Possible values are: 'cuda', 'cpu', 'xpu' and so on.
+ The type is the same as the `type` attribute of a :class:`torch.device`.
+ Thus, you may obtain the device type of a tensor using `Tensor.device.type`.
+ """
+ return torch._C._is_autocast_available(device_type)
def autocast_decorator(autocast_instance, func):
@@ -199,7 +211,7 @@ class autocast:
assert dtype is not None
return
self.device = device_type
- if not torch._C._is_autocast_available(self.device):
+ if not is_autocast_available(self.device):
raise RuntimeError(
f"User specified an unsupported autocast device_type '{self.device}'"
)
diff --git a/torch/utils/checkpoint.py b/torch/utils/checkpoint.py
index ca0e39d537..7c74b4c6be 100644
--- a/torch/utils/checkpoint.py
+++ b/torch/utils/checkpoint.py
@@ -194,7 +194,7 @@ def set_device_states(devices, states) -> None:
def _get_autocast_kwargs(device="cuda"):
- if torch._C._is_autocast_available(device):
+ if torch.amp.is_autocast_available(device):
device_autocast_kwargs = {
"enabled": torch.is_autocast_enabled(device),
"dtype": torch.get_autocast_dtype(device),
@@ -289,7 +289,7 @@ class CheckpointFunction(torch.autograd.Function):
device_autocast_ctx = device_module.amp.autocast(
**ctx.device_autocast_kwargs
- ) if torch._C._is_autocast_available(ctx.device) else contextlib.nullcontext()
+ ) if torch.amp.is_autocast_available(ctx.device) else contextlib.nullcontext()
with torch.enable_grad(), device_autocast_ctx, \
torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs):
outputs = ctx.run_function(*detached_inputs)
@@ -1396,7 +1396,7 @@ def _checkpoint_without_reentrant_generator(
device_autocast_ctx = device_module.amp.autocast(
**device_autocast_kwargs
- ) if torch._C._is_autocast_available(device) else contextlib.nullcontext()
+ ) if torch.amp.is_autocast_available(device) else contextlib.nullcontext()
with device_autocast_ctx, torch.cpu.amp.autocast(**cpu_autocast_kwargs), \
recompute_context:
fn(*args, **kwargs)
|
2.41.0
|
324ddd80c9cefbe750d06c04c21dac0a9100ee2
|
Fri, 26 Apr 2024 08:52:27 +0000
|
[PATCH 0709/1000] Revert "Delete erroneous print (#124972)"
|
This reverts commit 333f095d0779ecf0ce489ceecff35404abde8581. Reverted https://github.com/pytorch/pytorch/pull/124972 on behalf of https://github.com/jeanschmidt due to Need to revert #124654 but this PR depends on it :( ([comment](https://github.com/pytorch/pytorch/pull/124972#issuecomment-2078936303))
|
diff --git a/torch/library.py b/torch/library.py
index 455c6d9067..78857ef75c 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -123,6 +123,7 @@ class Library:
if has_preexisting_packet:
ns = getattr(torch.ops, self.ns)
packet = getattr(ns, packet_name)
+ print("refreshing", ns, packet_name)
torch._ops._refresh_packet(packet)
self._op_defs.add(qualname)
|
2.41.0
|
59ff49bf471f2436739b3629db24f0ec73f5257
|
Fri, 26 Apr 2024 09:30:34 +0000
|
[PATCH 0711/1000] Revert "[dtensor] move pad/unpad_tensor to separate utils (#124871)"
|
This reverts commit 0b0eea222978e6b377e2c67f89902d5eb1aa7da3. Reverted https://github.com/pytorch/pytorch/pull/124871 on behalf of https://github.com/jeanschmidt due to Broke internal tests, see D56587991 for more details ([comment](https://github.com/pytorch/pytorch/pull/124871#issuecomment-2079001103))
|
diff --git a/test/distributed/_tensor/test_dtensor.py b/test/distributed/_tensor/test_dtensor.py
index 224ca8c673..653dfcbb58 100644
--- a/test/distributed/_tensor/test_dtensor.py
+++ b/test/distributed/_tensor/test_dtensor.py
@@ -809,10 +809,8 @@ class TestDTensorPlacementTypes(DTensorTestBase):
]
assert_array_equal(expected_pad_sizes, pad_sizes)
- from torch.distributed._tensor._collective_utils import unpad_tensor
-
unpadded_list = [
- unpad_tensor(tensor, shard_placement.dim, pad_sizes[i])
+ shard_placement._unpad_tensor(tensor, pad_sizes[i])
if pad_sizes[i] > 0
else tensor
for i, tensor in enumerate(splitted_tensor_list)
diff --git a/torch/distributed/_tensor/_collective_utils.py b/torch/distributed/_tensor/_collective_utils.py
index 603ac09f4a..9cf8376bd2 100644
--- a/torch/distributed/_tensor/_collective_utils.py
+++ b/torch/distributed/_tensor/_collective_utils.py
@@ -164,24 +164,6 @@ def mesh_all_to_all(
return work
-def pad_tensor(tensor: torch.Tensor, pad_dim: int, pad_size: int) -> torch.Tensor:
- if pad_size == 0:
- return tensor
- pad = [0, 0] * (tensor.ndim - pad_dim)
- pad[-1] = pad_size
- return torch.nn.functional.pad(tensor, pad)
-
-
-def unpad_tensor(tensor: torch.Tensor, pad_dim: int, pad_size: int) -> torch.Tensor:
- if pad_size == 0:
- return tensor
- return tensor.narrow(
- pad_dim,
- start=0,
- length=tensor.size(pad_dim) - pad_size,
- )
-
-
def spec_to_bytes(spec: "placement_types.DTensorSpec") -> int:
assert spec.tensor_meta is not None, "spec should have tensor meta defined!"
return spec.tensor_meta.dtype.itemsize * math.prod(spec.shape)
diff --git a/torch/distributed/_tensor/placement_types.py b/torch/distributed/_tensor/placement_types.py
index d06c317c16..8d88d064e8 100644
--- a/torch/distributed/_tensor/placement_types.py
+++ b/torch/distributed/_tensor/placement_types.py
@@ -7,12 +7,7 @@ import torch
import torch.distributed._functional_collectives as funcol
import torch.distributed.distributed_c10d as c10d
-from torch.distributed._tensor._collective_utils import (
- mesh_broadcast,
- mesh_scatter,
- pad_tensor,
- unpad_tensor,
-)
+from torch.distributed._tensor._collective_utils import mesh_broadcast, mesh_scatter
from torch.distributed.device_mesh import DeviceMesh
@@ -88,13 +83,37 @@ class Shard(Placement):
for shard, pad_size in zip(tensor_list, pad_sizes):
# Fill the empty tensor with zeroes with padding.
if with_padding and pad_size > 0:
- shard = pad_tensor(shard, self.dim, pad_size)
+ shard = self._pad_tensor(shard, pad_size)
shard = shard.contiguous() if contiguous else shard
shard_list.append(shard)
return shard_list, pad_sizes
else:
return tensor_list, pad_sizes
+ def _pad_tensor(
+ self,
+ tensor: torch.Tensor,
+ pad_size: int,
+ ) -> torch.Tensor:
+ if pad_size == 0:
+ return tensor
+ pad = [0, 0] * (tensor.ndim - self.dim)
+ pad[-1] = pad_size
+ return torch.nn.functional.pad(tensor, pad)
+
+ def _unpad_tensor(
+ self,
+ tensor: torch.Tensor,
+ pad_size: int,
+ ) -> torch.Tensor:
+ if pad_size == 0:
+ return tensor
+ return tensor.narrow(
+ self.dim,
+ start=0,
+ length=tensor.size(self.dim) - pad_size,
+ )
+
@staticmethod
def _local_shard_size_on_dim(
size_on_dim: int,
@@ -147,7 +166,7 @@ class Shard(Placement):
# Only unpad if the local_tensor was padded on the dimension.
pad_size = pad_sizes[my_coordinate[mesh_dim]]
if pad_size > 0:
- output = unpad_tensor(output, self.dim, pad_size)
+ output = self._unpad_tensor(output, pad_size)
return output
def _reduce_shard_tensor(
@@ -182,7 +201,7 @@ class Shard(Placement):
)
if is_padded:
- output = unpad_tensor(output, self.dim, pad_sizes[my_coordinate[mesh_dim]]) # type: ignore[possibly-undefined]
+ output = self._unpad_tensor(output, pad_sizes[my_coordinate[mesh_dim]]) # type: ignore[possibly-undefined]
return output
def _to_replicate_tensor(
@@ -206,7 +225,7 @@ class Shard(Placement):
if is_padded:
full_chunk_size = (logical_dim_size + num_chunks - 1) // num_chunks
pad_size = full_chunk_size - local_shape[self.dim]
- local_tensor = pad_tensor(local_tensor, self.dim, pad_size)
+ local_tensor = self._pad_tensor(local_tensor, pad_size)
if not local_tensor.is_contiguous():
local_tensor = local_tensor.contiguous()
@@ -218,7 +237,7 @@ class Shard(Placement):
)
if is_padded:
unpad_size = full_chunk_size * num_chunks - logical_dim_size # type: ignore[possibly-undefined]
- result = unpad_tensor(result, self.dim, unpad_size)
+ result = self._unpad_tensor(result, unpad_size)
return result
def _replicate_to_shard(
|
2.41.0
|
a5ea29863f9f2eeac0a751378e1586f71655378
|
Thu, 25 Apr 2024 18:32:53 +0000
|
[PATCH 0712/1000] Apply guard knowledge to all simplifications (#123342)
|
This was an oversight in a previous PR: we were only applying this knowledge when the expression had an unbacked int. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123342 Approved by: https://github.com/ezyang
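As a hypothetical sketch (plain sympy, not PyTorch code) of the pattern-matching idea the diff below generalizes: previously issued guards become a table of boolean facts, and an expression that exactly matches one of those facts can be evaluated statically without emitting a new guard:

    import sympy

    s0 = sympy.Symbol("s0", integer=True, positive=True)

    # Facts implied by guards that were already issued ("axioms").
    known_facts = {
        sympy.Le(s0, 5): sympy.true,
        sympy.Gt(s0, 5): sympy.false,
    }

    # A guard matching a known fact simplifies to a constant ...
    print(sympy.Le(s0, 5).xreplace(known_facts))    # True
    # ... while an unrelated guard stays symbolic and still has to be issued.
    print(sympy.Lt(s0, 100).xreplace(known_facts))  # s0 < 100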
|
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 8d61e3205f..03b8271679 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -3636,9 +3636,11 @@ class ShapeEnv:
self.log.warning("Failing guard allocated at: \n%s", ''.join(guard.stack.format()))
raise
- # First, issue all the non-trivial guards.
+ # First, issue all guards.
+ # This removes all the checks that follow from bounds
+ # We could simply emit those and also the bounds 2 <= size when necessary
for guard in self.guards:
- if self._maybe_evaluate_static(guard.expr) is not None:
+ if self._maybe_evaluate_static(guard.expr, axioms=()) is not None:
continue
issue_guard(guard)
@@ -3843,7 +3845,7 @@ class ShapeEnv:
def get_nontrivial_guards(self):
"""Returns a list of guard expressions that aren't statically known (i.e. not trivial)"""
- return [self.simplify(guard.expr) for guard in self.guards if self._maybe_evaluate_static(guard.expr) is None]
+ return [self.simplify(guard.expr) for guard in self.guards if self._maybe_evaluate_static(guard.expr, axioms=()) is None]
def format_guards(self, verbose=False):
"""Format this shape env's guard expressions with optional traceback info if verbose"""
@@ -3864,10 +3866,57 @@ class ShapeEnv:
var_to_range[x] = ValueRanges(2, sympy.oo)
return bound_sympy(expr, var_to_range)
+ @_lru_cache
+ def get_axioms(self, symbols: Optional[Tuple["sympy.Symbol"]] = None) -> Tuple["sympy.Expr"]:
+ """
+ Given the symbols in an expression, it returns all the runtime asserts that have those symbols
+ concatenated with all the guards.
+ If symbols is None, it returns all the runtime asserts (and all the guards)
+ """
+ if symbols is None:
+ runtime_asserts = (r.expr
+ for rs in self.deferred_runtime_asserts.values()
+ for r in rs)
+ else:
+ runtime_asserts = (r.expr
+ for s in symbols if s not in self.var_to_val
+ for r in self.deferred_runtime_asserts.get(s, ()))
+ guards = (g.expr for g in self.guards)
+ return tuple(itertools.chain(guards, runtime_asserts))
+
+ @_lru_cache
+ def get_implications(self,
+ e: "sympy.Expr",
+ compute_hint: bool) -> Tuple[Tuple["sympy.Expr", 'sympy.logic.boolalg.BooleanAtom']]:
+ """ Given a expression, it returns a list of predicates that follow from it """
+ equiv = {}
+
+ def add_expr(expr):
+ # Expr and negation
+ equiv[canonicalize_bool_expr(expr)] = sympy.true
+ equiv[canonicalize_bool_expr(sympy.Not(expr))] = sympy.false
+ if isinstance(expr, sympy.Rel):
+ # multiplying by -1 changes the direction of the inequality
+ dual = type(expr)(-expr.rhs, -expr.lhs)
+ equiv[canonicalize_bool_expr(dual)] = sympy.true
+ equiv[canonicalize_bool_expr(sympy.Not(dual))] = sympy.false
+
+ if compute_hint:
+ e = canonicalize_bool_expr(e.xreplace(self.var_to_val))
+ add_expr(e)
+ # Other relational expressions this expression implies
+ if isinstance(e, sympy.Eq):
+ add_expr(sympy.Le(e.lhs, e.rhs))
+ add_expr(sympy.Ge(e.lhs, e.rhs))
+ elif isinstance(e, sympy.Lt):
+ add_expr(sympy.Le(e.lhs, e.rhs))
+ add_expr(sympy.Ne(e.lhs, e.rhs))
+ return tuple(equiv.items())
+
@_lru_cache
def _maybe_evaluate_static(
self, expr: "sympy.Expr", *, unbacked_only: bool = False, compute_hint: bool = False,
- expect_rational=True, size_oblivious: bool = False
+ expect_rational=True, size_oblivious: bool = False, axioms: Optional[Tuple[sympy.Expr]] = None
) -> "Optional[sympy.Expr]":
"""
Tries to evaluate expr without introducing guards
@@ -3881,6 +3930,9 @@ class ShapeEnv:
hint for the particular hint values of backed SymInts, e.g., if
s0 happens to be 3 this run, compute_hint will subsitute s0 with 3.
"""
+ # axioms with compute hint NYE
+ assert not compute_hint or not axioms
+
expr = self.simplify(expr)
if compute_hint:
@@ -3888,53 +3940,14 @@ class ShapeEnv:
expr = canonicalize_bool_expr(expr)
- symbols = list(expr.free_symbols)
-
- # Apply known runtime asserts
- guards_exprs = []
- for g in self.guards:
- e = self.simplify(g.expr)
- if compute_hint:
- e = canonicalize_bool_expr(e.xreplace(self.var_to_val))
- guards_exprs.append(e)
-
- symbols_unbacked = symbols - self.var_to_val.keys()
- defra_exprs = {}
- for s in symbols_unbacked:
- defras = self.deferred_runtime_asserts.get(s, ())
- l = []
- for defra in defras:
- e = self.simplify(defra.expr)
- if compute_hint:
- e = canonicalize_bool_expr(e.xreplace(self.var_to_val))
- l.append(e)
- defra_exprs[s] = l
-
-
+ # Pattern matching
+ symbols = tuple(expr.free_symbols)
+ if axioms is None:
+ axioms = self.get_axioms(symbols)
subst = {}
- for s in symbols_unbacked:
-
- def add_expr(expr):
- # Expr and negation
- subst[canonicalize_bool_expr(expr)] = sympy.true
- subst[canonicalize_bool_expr(sympy.Not(expr))] = sympy.false
- if isinstance(expr, sympy.Rel):
- # multiplying by -1 changes the direction of the inequality
- dual = type(expr)(-expr.rhs, -expr.lhs)
- subst[canonicalize_bool_expr(dual)] = sympy.true
- subst[canonicalize_bool_expr(sympy.Not(dual))] = sympy.false
-
- for e in itertools.chain(guards_exprs, defra_exprs[s]):
- add_expr(e)
- # Other relational expressions this expression implies
- if isinstance(e, sympy.Eq):
- add_expr(sympy.Le(e.lhs, e.rhs))
- add_expr(sympy.Ge(e.lhs, e.rhs))
- elif isinstance(e, sympy.Lt):
- add_expr(sympy.Le(e.lhs, e.rhs))
- add_expr(sympy.Ne(e.lhs, e.rhs))
-
- # NB: this helps us deal with And/Or connectives
+ for e in axioms:
+ subst.update(dict(self.get_implications(e, compute_hint=compute_hint)))
+
expr = expr.xreplace(subst)
# Simplify making use of value range lower bound
@@ -4648,7 +4661,6 @@ class ShapeEnv:
if not self._suppress_guards_tls():
stack = CapturedTraceback.extract(skip=1)
guard = ShapeGuard(g, stack)
- # TODO: deal with duplicate guards somehow
self.guards.append(guard)
except Exception:
if fresh:
|
2.41.0
|
ba59b718bd73485e8887683ec701cdc2ae838cf
|
Thu, 25 Apr 2024 18:32:53 +0000
|
[PATCH 0713/1000] Teach ShapeEnv that a <= b => a < b + 1 (#123436)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123436 Approved by: https://github.com/ezyang ghstack dependencies: #123342
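A small illustrative check (not from the PR) of the integer fact this change teaches ShapeEnv, namely that for integers a <= b and a < b + 1 are interchangeable:

    import sympy

    a, b = sympy.symbols("a b", integer=True)
    le = sympy.Le(a, b)
    lt = sympy.Lt(a, b + 1)

    # For every concrete integer assignment the two forms agree.
    for va in range(-2, 3):
        for vb in range(-2, 3):
            assert le.subs({a: va, b: vb}) == lt.subs({a: va, b: vb})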
|
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 03b8271679..d1c434a8dc 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -3911,6 +3911,10 @@ class ShapeEnv:
elif isinstance(e, sympy.Lt):
add_expr(sympy.Le(e.lhs, e.rhs))
add_expr(sympy.Ne(e.lhs, e.rhs))
+ if e.lhs.is_integer and e.rhs.is_integer:
+ add_expr(sympy.Le(e.lhs, e.rhs - 1))
+ elif isinstance(e, sympy.Le):
+ add_expr(sympy.Lt(e.lhs, e.rhs + 1))
return tuple(equiv.items())
@_lru_cache
|
2.41.0
|
1b25596d5f035188d8b07e5734306a4a0c21416
|
Fri, 26 Apr 2024 10:18:09 +0000
|
[PATCH 0714/1000] Revert "Add common used score_mod functions for templated attention (#124670)"
|
This reverts commit ed120b08c4828c39f116cfe1fb39195c844be485. Reverted https://github.com/pytorch/pytorch/pull/124670 on behalf of https://github.com/jeanschmidt due to Breaking internal CI, more info can be found in D56571389 ([comment](https://github.com/pytorch/pytorch/pull/124670#issuecomment-2079084881))
|
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index c2cf3b295e..4c8043d9bf 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -13,15 +13,7 @@ from torch._higher_order_ops.templated_attention import (
)
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_code
-from torch.nn.attention._templated_attention import (
- _causal,
- _compose,
- _generate_alibi_bias,
- _identity,
- _rel_bias,
- _rel_causal,
- _templated_attention,
-)
+from torch.nn.attention._templated_attention import _compose, _templated_attention
from torch.testing import FileCheck
from torch.testing._internal import common_utils
from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_BF16
@@ -56,13 +48,9 @@ test_dtypes_fast = [torch.float16]
if common_utils.TEST_WITH_ROCM:
test_dtypes = [torch.float32]
-test_score_mods = [
- _identity,
- _causal,
- _rel_bias,
- _rel_causal,
- _generate_alibi_bias(8),
-]
+
+def _identity_mod(score, b, h, m, n):
+ return score
def _causal_mod(score, b, h, token_q, token_kv):
@@ -102,8 +90,58 @@ class TestTemplatedSDPA(InductorTestCase):
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
- @common_utils.parametrize("score_mod", test_score_mods)
- def test_builtin_score_mods(self, dtype: torch.dtype, score_mod: Callable):
+ def test_identity(self, dtype: torch.dtype):
+ def score_mod(score, b, h, m, n):
+ return score
+
+ self.run_test(score_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes)
+ def test_causal_mask(self, dtype: torch.dtype):
+ def score_mod(score, b, h, token_q, token_kv):
+ return torch.where(token_q >= token_kv, score, float("-inf"))
+
+ self.run_test(score_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes)
+ def test_rel_bias(self, dtype: torch.dtype):
+ def score_mod(score, b, h, m, n):
+ return score + (m - n)
+
+ self.run_test(score_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes)
+ def test_alibi_bias(self, dtype: torch.dtype):
+ def score_mod(score, b, h, m, n):
+ return score + (m - n) * h
+
+ self.run_test(score_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes)
+ def test_rel_causal(self, dtype: torch.dtype):
+ def score_mod(score, b, h, m, n):
+ return torch.where(m <= n, score + (m - n), float("-inf"))
+
+ self.run_test(score_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes)
+ def test_skip_odd_keys(self, dtype: torch.dtype):
+ def score_mod(score, b, h, q, kv):
+ return torch.where(kv % 2 == 0, score, float("-inf"))
+
+ self.run_test(score_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes)
+ def test_alibi_causal(self, dtype: torch.dtype):
+ def score_mod(score, b, h, m, n):
+ return torch.where(m <= n, score + (m - n) * h, float("-inf"))
+
self.run_test(score_mod, dtype)
@supported_platform
@@ -234,7 +272,7 @@ class TestTemplatedSDPA(InductorTestCase):
requires_grad=True,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
- out = _templated_attention(q, k, v, _identity)
+ out = _templated_attention(q, k, v, _identity_mod)
with self.assertRaisesRegex(
RuntimeError, "Autograd not implemented for templated_attention"
):
@@ -248,7 +286,7 @@ class TestTemplatedSDPA(InductorTestCase):
with self.assertRaisesRegex(
ValueError, "Expected query, key, and value to have the same dtype"
):
- _templated_attention(query, key, value, _identity)
+ _templated_attention(query, key, value, _identity_mod)
@supported_platform
def test_different_sequence_length_fails(self):
@@ -256,7 +294,7 @@ class TestTemplatedSDPA(InductorTestCase):
key = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
value = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
with self.assertRaisesRegex(ValueError, "NYI: The target sequence length"):
- _templated_attention(query, key, value, _identity)
+ _templated_attention(query, key, value, _identity_mod)
@supported_platform
@patch.object(torch._inductor.config, "max_autotune", True)
@@ -283,7 +321,7 @@ class TestTemplatedSDPA(InductorTestCase):
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
- @common_utils.parametrize("score_mod", [_identity, _causal])
+ @common_utils.parametrize("score_mod", [_identity_mod, _causal_mod])
def test_logsumexp_correctness(self, dtype, score_mod):
@torch.compile
def sdpa_hop(q, k, v, score_mod):
@@ -346,7 +384,7 @@ class TestTemplatedSDPA(InductorTestCase):
lse_2 = lse * 2
return lse_2
- _, code = run_and_get_code(func, q, k, v, _identity)
+ _, code = run_and_get_code(func, q, k, v, _identity_mod)
# Ensure that two kernels are generated
FileCheck().check_count(".run(", 2, True).run(code[0])
@@ -367,7 +405,7 @@ class TestTemplatedSDPA(InductorTestCase):
lse_2 = lse * 2
return out, lse_2
- _, code = run_and_get_code(func, q, k, v, _identity)
+ _, code = run_and_get_code(func, q, k, v, _identity_mod)
# Ensure that two kernels are generated
FileCheck().check_count(".run(", 2, True).run(code[0])
diff --git a/torch/nn/attention/_templated_attention.py b/torch/nn/attention/_templated_attention.py
index 00183a2e31..0e614c8a4e 100644
--- a/torch/nn/attention/_templated_attention.py
+++ b/torch/nn/attention/_templated_attention.py
@@ -90,60 +90,3 @@ def _templated_attention(
# Drop the logsumexp value since this is only needed for backwards
return out
-
-
-"""Some common used score_mod functions for templated attention in PyTorch."""
-
-
-def _identity(
- score: torch.Tensor,
- batch: torch.Tensor,
- head: torch.Tensor,
- token_q: torch.Tensor,
- token_kv: torch.Tensor,
-) -> torch.Tensor:
- return score
-
-
-def _causal(
- score: torch.Tensor,
- batch: torch.Tensor,
- head: torch.Tensor,
- token_q: torch.Tensor,
- token_kv: torch.Tensor,
-) -> torch.Tensor:
- return torch.where(token_q >= token_kv, score, float("-inf"))
-
-
-def _rel_bias(
- score: torch.Tensor,
- batch: torch.Tensor,
- head: torch.Tensor,
- token_q: torch.Tensor,
- token_kv: torch.Tensor,
-) -> torch.Tensor:
- return score + (token_q - token_kv)
-
-
-def _rel_causal(
- score: torch.Tensor,
- batch: torch.Tensor,
- head: torch.Tensor,
- token_q: torch.Tensor,
- token_kv: torch.Tensor,
-) -> torch.Tensor:
- return torch.where(token_q <= token_kv, score + (token_q - token_kv), float("-inf"))
-
-
-def _generate_alibi_bias(num_heads: int):
- def _alibi_bias(
- score: torch.Tensor,
- batch: torch.Tensor,
- head: torch.Tensor,
- token_q: torch.Tensor,
- token_kv: torch.Tensor,
- ) -> torch.Tensor:
- scale = torch.exp2(-((head + 1) * 8.0 / num_heads))
- return score + (token_kv - token_q) * scale
-
- return _alibi_bias
|
2.41.0
|
4afccdd80c0b7e4a28420384dbf117c55d80627
|
Thu, 25 Apr 2024 20:09:07 -0700
|
[PATCH 0715/1000] [parametrization] fix `requires_grad` propagation (#124888)
|
Summary: Previously, `requires_grad` was not propagated from the original Tensor to the decomposed tensors. Test Plan: python test/test_parametrization.py -k test_register_parametrization_no_grad Reviewers: Subscribers: Tasks: Tags: Pull Request resolved: https://github.com/pytorch/pytorch/pull/124888 Approved by: https://github.com/lezcano
|
diff --git a/test/nn/test_parametrization.py b/test/nn/test_parametrization.py
index 1f7b569e86..d547d8abb0 100644
--- a/test/nn/test_parametrization.py
+++ b/test/nn/test_parametrization.py
@@ -1414,6 +1414,26 @@ class TestNNParametrization(NNTestCase):
gradcheck(fn, (m.parametrizations.weight.original,))
+ def test_register_parametrization_no_grad(self):
+ r"""Test that it is possible to register a parametrization without gradient"""
+
+ class SplitAndCat(nn.Module):
+ def right_inverse(self, x):
+ # split the tensor in two halfs
+ return torch.split(x, x.shape[1] // 2)
+
+ def forward(self, x0, x1):
+ return torch.cat([x0, x1])
+
+ model = nn.Linear(8, 8)
+
+ model.weight.requires_grad = False
+ parametrize.register_parametrization(model, "weight", SplitAndCat())
+ # making sure the parameterized and decomposed Tensors both have requires_grad == False
+ self.assertFalse(model.weight.requires_grad)
+ self.assertFalse(model.parametrizations.weight.original0.requires_grad)
+ self.assertFalse(model.parametrizations.weight.original1.requires_grad)
+
@swap([True, False])
def test_new_spectral_norm_load_state_dict(self):
for activate_times in (0, 3):
diff --git a/torch/nn/utils/parametrize.py b/torch/nn/utils/parametrize.py
index aa4f9656d5..f512b7c3b2 100644
--- a/torch/nn/utils/parametrize.py
+++ b/torch/nn/utils/parametrize.py
@@ -180,7 +180,7 @@ class ParametrizationList(ModuleList):
# add the new parameters to the optimizer after registering the parametrization
# (this is documented)
if isinstance(original, Parameter):
- originali = Parameter(originali)
+ originali = Parameter(originali, original.requires_grad)
originali.requires_grad_(original.requires_grad)
_register_parameter_or_buffer(self, f"original{i}", originali)
|
2.41.0
|
d8585e501bcb2543eb61e742b501970c916df7c
|
Fri, 26 Apr 2024 12:32:12 +0000
|
[PATCH 0716/1000] [XPU] Add manual_seed and synchronize method (#124709)
|
This PR adds the following device-specific settings for xpu (Intel GPU): 1. Set the manual seed for xpu. 2. Set the synchronization method for xpu. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124709 Approved by: https://github.com/EikanWang, https://github.com/desertfire
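As a rough sketch of the pattern this enables in the benchmark harness (hedged: which backend is available depends on the local build), seeding and synchronization are dispatched to whichever accelerator is present:

import torch

seed = 1337
torch.manual_seed(seed)

# Seed and pick a synchronize() for whichever backend was built in.
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)
    synchronize = torch.cuda.synchronize
elif hasattr(torch, "xpu") and torch.xpu.is_available():
    torch.xpu.manual_seed_all(seed)
    synchronize = torch.xpu.synchronize
else:
    # CPU-only run: nothing to synchronize
    synchronize = lambda: None

synchronize()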
|
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index 99fbd7b86d..1c3dcd9348 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -53,6 +53,7 @@ import torch._export
import torch.distributed
import torch.multiprocessing as mp
from scipy.stats import gmean, ttest_ind
+from torch._C import _has_cuda as HAS_CUDA, _has_xpu as HAS_XPU
from torch._dynamo.profiler import fx_insert_profiling, Profiler
from torch._dynamo.testing import (
dummy_fx_compile,
@@ -333,10 +334,16 @@ def patch_torch_manual_seed():
from torch._C import default_generator
seed = 1337
- import torch.cuda
+ if HAS_CUDA:
+ import torch.cuda
- if not torch.cuda._is_in_bad_fork():
- torch.cuda.manual_seed_all(seed)
+ if not torch.cuda._is_in_bad_fork():
+ torch.cuda.manual_seed_all(seed)
+ if HAS_XPU:
+ import torch.xpu
+
+ if not torch.xpu._is_in_bad_fork():
+ torch.xpu.manual_seed_all(seed)
return default_generator.manual_seed(seed)
torch.manual_seed = deterministic_torch_manual_seed
@@ -3690,9 +3697,9 @@ def run(runner, args, original_dir=None):
log.warning("torch.cuda.is_available() == False, using CPU")
args.devices = ["cpu"]
- if args.devices != ["cpu"] and torch.cuda.is_available():
+ if args.devices != ["cpu"] and (HAS_CUDA or HAS_XPU):
global synchronize
- synchronize = torch.cuda.synchronize
+ synchronize = torch.cuda.synchronize if HAS_CUDA else torch.xpu.synchronize
if (
args.devices == ["cuda"]
|
2.41.0
|
c13c1c85032c7717a649fa18738d274cca79b4c
|
Fri, 26 Apr 2024 13:15:35 +0000
|
[PATCH 0717/1000] [aot_inductor] Enable test_aot_inductor tests for ROCm (#123393)
|
Fixes #ISSUE_NUMBER Pull Request resolved: https://github.com/pytorch/pytorch/pull/123393 Approved by: https://github.com/jithunnair-amd, https://github.com/malfet
|
diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py
index e48f4ed2ef..293def3b96 100644
--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py
@@ -225,7 +225,6 @@ class AOTInductorTestsTemplate:
with config.patch({"aot_inductor.use_runtime_constant_folding": True}):
self.check_model(Model(self.device), example_inputs)
- @skipIfRocm
@requires_cuda
def test_duplicate_constant_folding(self):
class Model(torch.nn.Module):
@@ -844,7 +843,6 @@ class AOTInductorTestsTemplate:
)
self.check_model(Repro(), example_inputs)
- @skipIfRocm
def test_cond_simple(self):
inputs = (
torch.randn((10, 20), device=self.device),
@@ -862,7 +860,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_cond_nested(self):
inputs = (
torch.randn((10, 20), device=self.device),
@@ -884,7 +881,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_cond_with_parameters(self):
inputs = (torch.randn((10, 20), device=self.device),)
dim0_abc = Dim("s0", min=2, max=1024)
@@ -898,7 +894,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_cond_with_reinterpret_view_inputs_outputs(self):
inputs = (
torch.randn((10, 20), device=self.device),
@@ -916,7 +911,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_cond_with_multiple_outputs(self):
inputs = (
torch.randn((10, 20), device=self.device),
@@ -937,7 +931,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_cond_with_outer_code_before_after(self):
inputs = (
torch.randn((10, 20), device=self.device),
@@ -955,7 +948,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_cond_use_buffers_from_outer_scope(self):
inputs = (
torch.randn((10, 20), device=self.device),
@@ -975,7 +967,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
@common_utils.parametrize("dynamic", [False, True])
def test_cond_non_tensor_predicates(self, dynamic):
inputs1 = (
@@ -1002,7 +993,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_while_loop_simple(self):
inputs = (
torch.randn((10, 20), device=self.device),
@@ -1020,7 +1010,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_while_loop_nested(self):
inputs = (
torch.randn((10, 20), device=self.device),
@@ -1039,7 +1028,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_while_loop_with_outer_code(self):
inputs = (
torch.randn((10, 20), device=self.device),
@@ -1057,7 +1045,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_while_loop_with_parameters(self):
inputs = (torch.randn((10, 20), device=self.device),)
dim0_a = Dim("s0", min=2, max=1024)
@@ -1071,7 +1058,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
def test_while_loop_with_outer_buffers(self):
inputs = (
torch.randn((10, 20), device=self.device),
@@ -1233,7 +1219,6 @@ class AOTInductorTestsTemplate:
)
self.check_model(Model(self.device), example_inputs)
- @skipIfRocm
@requires_multigpu()
def test_replicate_on_devices(self):
if self.device != "cuda":
@@ -1286,7 +1271,6 @@ class AOTInductorTestsTemplate:
self.check_model(M(), ({"x": torch.ones(5), "y": torch.ones(5)},))
- @skipIfRocm
@requires_multigpu()
def test_non_default_cuda_device(self):
if self.device != "cuda":
@@ -1660,7 +1644,6 @@ class AOTInductorTestsTemplate:
)
self.check_model(model, example_inputs)
- @skipIfRocm
@common_utils.parametrize("grid_type", [1, 2, 3])
@common_utils.parametrize("num_dims", [1, 2])
@common_utils.parametrize("dynamic", [False, True])
@@ -1735,7 +1718,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes = {"x": {0: dim0_x}, "y": {0: dim0_y}}
self.check_model(Model(), (x, y), dynamic_shapes=dynamic_shapes)
- @skipIfRocm
def test_triton_kernel_dynamic_shape_with_div(self):
if self.device != "cuda":
raise unittest.SkipTest("requires CUDA")
@@ -1760,7 +1742,6 @@ class AOTInductorTestsTemplate:
dynamic_shapes = {"x": {0: dim0_x}}
self.check_model(Model(), (x,), dynamic_shapes=dynamic_shapes)
- @skipIfRocm
def test_triton_kernel_reinterpret_view(self):
if self.device != "cuda":
raise unittest.SkipTest("requires CUDA")
@@ -1789,7 +1770,6 @@ class AOTInductorTestsTemplate:
example_inputs = (torch.randn(10, 20, device=self.device),)
self.check_model(Model(), example_inputs)
- @skipIfRocm
def test_triton_kernel_with_none_input(self):
if self.device != "cuda":
raise unittest.SkipTest("requires CUDA")
@@ -1831,7 +1811,6 @@ class AOTInductorTestsTemplate:
self.check_model(Model(), example_inputs)
- @skipIfRocm
def test_triton_kernel_equal_to_1_arg(self):
if self.device != "cuda":
raise unittest.SkipTest("requires CUDA")
@@ -1850,7 +1829,6 @@ class AOTInductorTestsTemplate:
self.check_model(Model(), example_inputs)
- @skipIfRocm
@common_utils.parametrize("dynamic", [False, True])
def test_triton_kernel_equal_to_1_float_arg(self, dynamic):
if self.device != "cuda":
@@ -2178,7 +2156,6 @@ class AOTInductorTestsTemplate:
model.weight += 1
self.check_model(model, example_inputs)
- @skipIfRocm
def test_triton_kernel_extern_kernel_arg(self):
if self.device != "cuda":
raise unittest.SkipTest("requires CUDA")
@@ -2197,7 +2174,6 @@ class AOTInductorTestsTemplate:
self.check_model(Model(), example_inputs)
- @skipIfRocm
def test_triton_kernel_multi_output_arg(self):
if self.device != "cuda":
raise unittest.SkipTest("requires CUDA")
@@ -2216,7 +2192,6 @@ class AOTInductorTestsTemplate:
self.check_model(Model(), example_inputs)
- @skipIfRocm
@config.patch({"abi_compatible": True})
def test_triton_kernel_reinterpret_view_mem_leak(self):
# Check for memory leak when using user-defined Triton Kernel + AOTI.
@@ -2257,7 +2232,6 @@ class AOTInductorTestsTemplate:
expected = Model()(*example_inputs)
torch.testing.assert_close(actual, expected)
- @skipIfRocm
@torch._dynamo.config.patch(capture_scalar_outputs=True)
@common_utils.parametrize("dynamic", [False, True])
@common_utils.parametrize("autotuning", [False, True])
@@ -2313,7 +2287,7 @@ class AOTInductorTestsTemplate:
dynamic_shapes=dynamic_shapes,
)
- @skipIfRocm
+ @skipIfRocm # USE_MEM_EFF_ATTENTION was not enabled for build.
def test_scaled_dot_product_efficient_attention(self):
if self.device != "cuda":
raise unittest.SkipTest("requires CUDA")
@@ -2332,7 +2306,6 @@ class AOTInductorTestsTemplate:
)
self.check_model(Model(), example_inputs)
- @skipIfRocm
def test_index_put_with_none_index(self):
# index_put falls back in the deterministic mode
with DeterministicGuard(True):
@@ -2873,6 +2846,15 @@ if TEST_WITH_ROCM:
"test_zero_grid_with_unbacked_symbols": fail_cuda(is_skip=True),
"test_zero_grid_with_backed_symbols": fail_cuda(is_skip=True),
"test_reuse_kernel_dynamic": fail_cuda(is_skip=True),
+ "test_duplicate_constant_folding": fail_cuda(is_skip=True),
+ "test_cond_simple": fail_cuda(is_skip=True),
+ "test_cond_nested": fail_cuda(is_skip=True),
+ "test_cond_with_parameters": fail_cuda(is_skip=True),
+ "test_cond_with_reinterpret_view_inputs_outputs": fail_cuda(is_skip=True),
+ "test_cond_with_multiple_outputs": fail_cuda(is_skip=True),
+ "test_cond_with_outer_code_before_after": fail_cuda(is_skip=True),
+ "test_cond_use_buffers_from_outer_scope": fail_cuda(is_skip=True),
+ "test_index_put_with_none_index": fail_cuda(is_skip=True),
}
)
|
2.41.0
|
c2aa23c1ebf093ae7abfd7f6307e865845f18d6
|
Fri, 26 Apr 2024 13:16:24 +0000
|
[PATCH 0718/1000] Test reland "AOTAutograd: gate view-replay behind config, not the def… (#124948)
|
A parallel attempt at landing https://github.com/pytorch/pytorch/pull/124945, this one landing through fbcode first. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124948 Approved by: https://github.com/albanD
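A small sketch of how the new flag is exercised (the default is computed at import time and differs between fbcode and OSS builds, as the config.py hunk below shows):

import torch._functorch.config as functorch_config
from unittest.mock import patch

# When True, AOTAutograd regenerates aliased graph outputs by replaying the
# ViewMeta sequence recorded during functionalization, falling back to
# as_strided only when replay fails.
print(functorch_config.view_replay_for_aliased_outputs)

# The updated tests opt in explicitly, regardless of the build default:
with patch("torch._functorch.config.view_replay_for_aliased_outputs", True):
    assert functorch_config.view_replay_for_aliased_outputs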
|
diff --git a/test/functorch/test_aotdispatch.py b/test/functorch/test_aotdispatch.py
index 73b32ee0f0..161c9dbbef 100644
--- a/test/functorch/test_aotdispatch.py
+++ b/test/functorch/test_aotdispatch.py
@@ -3261,6 +3261,7 @@ def forward(self, tangents_1):
return lambda f: aot_function(f, fw_compiler=lambda g, _: partial(wrapper, g))
+ @patch("functorch.compile.config.view_replay_for_aliased_outputs", True)
def test_output_aliases_input_view_meta_replay(self):
@self._compile_and_erase_bases(0)
def f(a):
@@ -3274,6 +3275,7 @@ def forward(self, tangents_1):
str(out.grad_fn.__class__), """<class 'ViewBackward0'>"""
)
+ @patch("functorch.compile.config.view_replay_for_aliased_outputs", True)
def test_output_aliases_intermediate_view_meta_replay(self):
@self._compile_and_erase_bases(0, 1)
def f(a):
@@ -3293,6 +3295,7 @@ def forward(self, tangents_1):
str(out2.grad_fn.__class__), """<class 'ViewBackward0'>"""
)
+ @patch("functorch.compile.config.view_replay_for_aliased_outputs", True)
def test_output_aliases_output_view_meta_replay(self):
@self._compile_and_erase_bases(1)
def f(a):
diff --git a/test/inductor/test_torchinductor_opinfo.py b/test/inductor/test_torchinductor_opinfo.py
index 7319656905..fdb9a8c37a 100644
--- a/test/inductor/test_torchinductor_opinfo.py
+++ b/test/inductor/test_torchinductor_opinfo.py
@@ -257,6 +257,19 @@ intentionally_not_handled = {
"resize_": {b8, f16, f32, f64, i32, i64},
"resize_as_": {b8, f16, f32, f64, i32, i64},
}
+# This is only fixed when this config is set
+# We should eventually always turn it on
+import torch._functorch.config as functorch_config
+
+if not functorch_config.view_replay_for_aliased_outputs:
+ intentionally_not_handled['("as_strided", "partial_views")'] = {
+ b8,
+ f16,
+ f32,
+ f64,
+ i32,
+ i64,
+ }
inductor_expected_failures_single_sample["cuda"].update(intentionally_not_handled)
diff --git a/torch/_functorch/_aot_autograd/functional_utils.py b/torch/_functorch/_aot_autograd/functional_utils.py
index b863f40efa..25197e9eea 100644
--- a/torch/_functorch/_aot_autograd/functional_utils.py
+++ b/torch/_functorch/_aot_autograd/functional_utils.py
@@ -18,6 +18,7 @@ from torch.utils._python_dispatch import (
is_traceable_wrapper_subclass,
transform_subclass,
)
+from .. import config
aot_joint_log = getArtifactLogger(__name__, "aot_joint_graph")
@@ -219,7 +220,7 @@ def gen_alias_from_base(
# In summary, we use the fact that FunctionalTensorWrapper saves the view
# functions applied to itself (collected during functionalization) so as
# to replay them (view functions) on the aliased_base_tensor.
- if target_functional_tensor is not None:
+ if config.view_replay_for_aliased_outputs and target_functional_tensor is not None:
from .schemas import FunctionalTensorMetadataEq
assert isinstance(target_functional_tensor, FunctionalTensorMetadataEq)
@@ -237,11 +238,10 @@ def gen_alias_from_base(
#
# In order for this to work, we should have a way to replace those
# symbolic shapes with concrete numbers.
- aot_joint_log.warning(
+ aot_joint_log.info(
"could not reconstruct view by re-applying a ViewMeta sequence. "
- "This error is possibly caused by dynamic shapes. "
"Fallbacking to reconstruction using as_strided. "
- "Error message: %s",
+ "Reason: %s",
str(e),
)
else:
diff --git a/torch/_functorch/config.py b/torch/_functorch/config.py
index c3f34fa273..aa7235034e 100644
--- a/torch/_functorch/config.py
+++ b/torch/_functorch/config.py
@@ -41,6 +41,26 @@ static_weight_shapes = True
# Applies CSE to the graph before partitioning
cse = True
+# When AOTAutograd regenerates aliased graph outputs,
+# attempt to use functionalization's view-replay logic
+# before falling back to the autograd engine's view replay or as_strided.
+# This can have some perf implications
+# (although for many models this will not matter).
+# (1) If you have many view ops chained together, replaying all of them
+# at runtime can have more overhead compared to a single as_strided call
+# (2) If you are doing training, AsStridedBackward is quite slow,
+# and the individual view op backward formulas will likely be faster.
+# (3) Some backends like XLA do not support as_strided
+
+# Temporary hack: disable this flag for internal
+# (needed to fix an internal issue while avoiding bumping XLA pin)
+# eventually: either default this config to false completely
+# once XLA pin update works,
+# or default config to true and fix relevant bugs
+from torch._inductor.config import is_fbcode
+
+view_replay_for_aliased_outputs = not is_fbcode()
+
# Restricts the amount of computation AOTAutograd can do.
# NB: We have essentially disabled this heuristic now. However, this is kept
# here for now in case it's useful. Setting it low can artificially reduce the
|
2.41.0
|
f3b0befedaf66fb2b54bf3fbd815fbd274932eb
|
Fri, 26 Apr 2024 14:34:52 +0000
|
[PATCH 0719/1000] [BE]: Apply ruff FURB 118. (#124743)
|
Replaces various lambdas with operator.itemgetter, which is more efficient since it is a builtin. This is particularly useful when lambdas are used as 'key' functions. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124743 Approved by: https://github.com/albanD, https://github.com/malfet
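A quick sketch of the pattern applied throughout the diff: replacing a lambda key function with the equivalent operator.itemgetter call.

import operator

pairs = [("conv", 3), ("matmul", 7), ("relu", 5)]

# Before: a lambda that indexes into each item
by_count_lambda = sorted(pairs, key=lambda x: x[1], reverse=True)

# After: operator.itemgetter(1) performs the same lookup via a builtin,
# avoiding the Python-level call overhead of the lambda
by_count_getter = sorted(pairs, key=operator.itemgetter(1), reverse=True)
assert by_count_lambda == by_count_getter

# itemgetter also accepts mapping keys, e.g. sorting job dicts by a field:
jobs = [{"started_at": "2024-04-26T12:00"}, {"started_at": "2024-04-26T13:00"}]
jobs.sort(key=operator.itemgetter("started_at"), reverse=True)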
|
diff --git a/.github/scripts/get_workflow_job_id.py b/.github/scripts/get_workflow_job_id.py
index 75bc7e0161..28f337a307 100644
--- a/.github/scripts/get_workflow_job_id.py
+++ b/.github/scripts/get_workflow_job_id.py
@@ -4,6 +4,7 @@
import argparse
import json
+import operator
import os
import re
import sys
@@ -126,7 +127,7 @@ def find_job_id_name(args: Any) -> Tuple[str, str]:
# Sort the jobs list by start time, in descending order. We want to get the most
# recently scheduled job on the runner.
- jobs.sort(key=lambda job: job["started_at"], reverse=True)
+ jobs.sort(key=operator.itemgetter("started_at"), reverse=True)
for job in jobs:
if job["runner_name"] == args.runner_name:
diff --git a/torch/_export/serde/serialize.py b/torch/_export/serde/serialize.py
index 31483e68f0..77f0cf3d3c 100644
--- a/torch/_export/serde/serialize.py
+++ b/torch/_export/serde/serialize.py
@@ -2630,12 +2630,12 @@ def _canonicalize_graph(
n.metadata.clear()
# Stage 4: Aggregate values.
- sorted_tensor_values = dict(sorted(graph.tensor_values.items(), key=lambda x: x[0]))
+ sorted_tensor_values = dict(sorted(graph.tensor_values.items(), key=operator.itemgetter(0)))
sorted_sym_int_values = dict(
- sorted(graph.sym_int_values.items(), key=lambda x: x[0])
+ sorted(graph.sym_int_values.items(), key=operator.itemgetter(0))
)
sorted_sym_bool_values = dict(
- sorted(graph.sym_bool_values.items(), key=lambda x: x[0])
+ sorted(graph.sym_bool_values.items(), key=operator.itemgetter(0))
)
# Stage 5: Recurse in subgraphs.
@@ -2683,8 +2683,8 @@ def canonicalize(ep: ExportedProgram) -> ExportedProgram:
"""
ep = copy.deepcopy(ep)
- opset_version = dict(sorted(ep.opset_version.items(), key=lambda x: x[0]))
- range_constraints = dict(sorted(ep.range_constraints.items(), key=lambda x: x[0]))
+ opset_version = dict(sorted(ep.opset_version.items(), key=operator.itemgetter(0)))
+ range_constraints = dict(sorted(ep.range_constraints.items(), key=operator.itemgetter(0)))
module_call_graph = sorted(ep.graph_module.module_call_graph, key=lambda x: x.fqn)
signature = ep.graph_module.signature
graph = ep.graph_module.graph
diff --git a/torch/_functorch/benchmark_utils.py b/torch/_functorch/benchmark_utils.py
index af606f20f9..e0bcae4c83 100644
--- a/torch/_functorch/benchmark_utils.py
+++ b/torch/_functorch/benchmark_utils.py
@@ -2,6 +2,7 @@
import contextlib
import json
+import operator
import os
import time
@@ -94,7 +95,7 @@ def get_sorted_gpu_events(events):
if not is_gpu_compute_event(event):
continue
sorted_gpu_events.append(event)
- return sorted(sorted_gpu_events, key=lambda x: x["ts"])
+ return sorted(sorted_gpu_events, key=operator.itemgetter("ts"))
def get_duration(sorted_gpu_events):
diff --git a/torch/_functorch/partitioners.py b/torch/_functorch/partitioners.py
index 873441a971..78d580d025 100644
--- a/torch/_functorch/partitioners.py
+++ b/torch/_functorch/partitioners.py
@@ -407,7 +407,7 @@ def _count_ops(graph):
for node in graph.nodes:
if node.op == "call_function":
cnt[node.target.__name__] += 1
- print(sorted(cnt.items(), key=lambda x: x[1], reverse=True))
+ print(sorted(cnt.items(), key=operator.itemgetter(1), reverse=True))
@functools.lru_cache(None)
@@ -432,7 +432,7 @@ def sort_depths(args, depth_map):
arg_depths = {
arg: depth_map[arg] for arg in args if isinstance(arg, torch.fx.node.Node)
}
- return sorted(arg_depths.items(), key=lambda x: x[1], reverse=True)
+ return sorted(arg_depths.items(), key=operator.itemgetter(1), reverse=True)
def reordering_to_mimic_autograd_engine(gm):
@@ -1315,7 +1315,7 @@ def min_cut_rematerialization_partition(
)
print(
"Count of Ops Rematerialized: ",
- sorted(counts.items(), key=lambda x: x[1], reverse=True),
+ sorted(counts.items(), key=operator.itemgetter(1), reverse=True),
)
return fw_module, bw_module
diff --git a/torch/_functorch/top_operators_github_usage.py b/torch/_functorch/top_operators_github_usage.py
index ab5c984bad..ce74f7aadf 100644
--- a/torch/_functorch/top_operators_github_usage.py
+++ b/torch/_functorch/top_operators_github_usage.py
@@ -4,6 +4,8 @@
From https://docs.google.com/spreadsheets/d/12R3nCOLskxPYjjiNkdqy4OdQ65eQp_htebXGODsjSeA/edit#gid=0
Try to keep this list in sync with that.
"""
+import operator
+
top_torch = [
("t", 6837449),
("tensor", 585786),
@@ -618,7 +620,7 @@ def get_nn_functional_top_list():
top_nn_functional_[functional_name] += count
top_nn_functional_ = list(top_nn_functional_.items())
- top_nn_functional_.sort(key=lambda x: x[1], reverse=True)
+ top_nn_functional_.sort(key=operator.itemgetter(1), reverse=True)
return top_nn_functional_
diff --git a/torch/_inductor/pattern_matcher.py b/torch/_inductor/pattern_matcher.py
index 1e0e9a5a87..f1caf01eac 100644
--- a/torch/_inductor/pattern_matcher.py
+++ b/torch/_inductor/pattern_matcher.py
@@ -894,7 +894,7 @@ class ReplacementPatternEntry(PatternEntry):
for n in output_nodes
if isinstance(n, torch.fx.Node)
]
- last_node = min(indices, key=lambda tup: tup[0])[1]
+ last_node = min(indices, key=operator.itemgetter(0))[1]
def percolate_tags(node, recompute_tag, input_stops):
queue = [node]
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index 4c896137fc..d1548b73e5 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -2290,7 +2290,7 @@ class Scheduler:
)
# return the possible fusions with highest priority
possible_fusions_with_highest_priority = min(
- possible_fusions_group_by_priority.items(), key=lambda item: item[0]
+ possible_fusions_group_by_priority.items(), key=operator.itemgetter(0)
)[1]
assert len(possible_fusions_with_highest_priority) > 0
return possible_fusions_with_highest_priority
diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index 1f277ec932..b9da3b67a9 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -3597,7 +3597,7 @@ def repeat(a: Tensor, *repeat_shape) -> Tensor:
# derive permute order by sorting urtensor strides
enumerated_stride = list(enumerate(urtensor_stride))
- enumerated_stride.sort(key=lambda item: item[1], reverse=True)
+ enumerated_stride.sort(key=operator.itemgetter(1), reverse=True)
permute_order, sorted_stride = zip(*enumerated_stride)
# add new and expand dimensions according to urtensor
diff --git a/torch/_refs/linalg/__init__.py b/torch/_refs/linalg/__init__.py
index b948e1eccc..a1b59e94d2 100644
--- a/torch/_refs/linalg/__init__.py
+++ b/torch/_refs/linalg/__init__.py
@@ -63,6 +63,8 @@ def _check_norm_dtype(dtype: Optional[torch.dtype], x_dtype: torch.dtype, fn_nam
)
+import operator
+
# Utilities should come BEFORE this import
from torch._decomp import register_decomposition
from torch._decomp.decompositions import pw_cast_for_opmath
@@ -165,7 +167,7 @@ def _backshift_permutation(dim0, dim1, ndim):
def _inverse_permutation(perm):
# Given a permutation, returns its inverse. It's equivalent to argsort on an array
- return [i for i, j in sorted(enumerate(perm), key=lambda i_j: i_j[1])]
+ return [i for i, j in sorted(enumerate(perm), key=operator.itemgetter(1))]
# CompositeImplicitAutograd
diff --git a/torch/ao/quantization/fx/_equalize.py b/torch/ao/quantization/fx/_equalize.py
index 55bcb52576..b0965b9a70 100644
--- a/torch/ao/quantization/fx/_equalize.py
+++ b/torch/ao/quantization/fx/_equalize.py
@@ -19,6 +19,7 @@ from .utils import (
maybe_get_next_module,
node_arg_is_weight,
)
+import operator
CUSTOM_MODULE_SUPP_LIST: List[Any] = []
@@ -810,7 +811,7 @@ def get_equalization_qconfig_dict(
# Sort the layer_sqnr_dictionary values and get the layers with the lowest
# SQNR values (aka highest quantization errors)
- layer_sqnr_sorted = sorted(layer_sqnr_dict.items(), key=lambda item: item[1])
+ layer_sqnr_sorted = sorted(layer_sqnr_dict.items(), key=operator.itemgetter(1))
layers_to_equalize = layer_sqnr_sorted[:num_layers_to_equalize]
# Constructs an equalization_qconfig_dict that specifies to only equalize
diff --git a/torch/cuda/_memory_viz.py b/torch/cuda/_memory_viz.py
index d3838f3410..587d7e9c7c 100644
--- a/torch/cuda/_memory_viz.py
+++ b/torch/cuda/_memory_viz.py
@@ -9,6 +9,7 @@ from typing import Any
from itertools import groupby
import base64
import warnings
+import operator
cache = lru_cache(None)
@@ -492,7 +493,7 @@ def _profile_to_snapshot(profile):
# create the final snapshot state
blocks_at_end = [(to_device(tensor_key.device), event['addr'], event['size'], event['frames'])
for (tensor_key, version), event in kv_to_elem.items()]
- for device, blocks in groupby(sorted(blocks_at_end), key=lambda x: x[0]):
+ for device, blocks in groupby(sorted(blocks_at_end), key=operator.itemgetter(0)):
seg = snapshot['segments'][device] # type: ignore[index]
last_addr = seg['address']
for _, addr, size, frames in blocks:
diff --git a/torch/distributed/_shard/sharding_spec/api.py b/torch/distributed/_shard/sharding_spec/api.py
index bcfacbf035..1824b66a81 100644
--- a/torch/distributed/_shard/sharding_spec/api.py
+++ b/torch/distributed/_shard/sharding_spec/api.py
@@ -15,6 +15,7 @@ from torch.distributed._shard.metadata import ShardMetadata
import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
from torch.distributed._shard.op_registry_utils import _decorator_func
+import operator
if TYPE_CHECKING:
# Only include ShardedTensor when do type checking, exclude it
@@ -214,7 +215,7 @@ def _infer_sharding_spec_from_shards_metadata(shards_metadata):
if chunk_sharding_dim is not None:
# Ensure we infer the correct placement order from offsets
placements = [
- x for _, x in sorted(zip(chunk_offset_list, placements), key=lambda e: e[0])
+ x for _, x in sorted(zip(chunk_offset_list, placements), key=operator.itemgetter(0))
]
from .chunk_sharding_spec import ChunkShardingSpec
diff --git a/torch/distributed/_tools/memory_tracker.py b/torch/distributed/_tools/memory_tracker.py
index 96a3fa497c..fdc60acdf8 100644
--- a/torch/distributed/_tools/memory_tracker.py
+++ b/torch/distributed/_tools/memory_tracker.py
@@ -17,6 +17,7 @@ import torch
import torch.nn as nn
from torch.utils.hooks import RemovableHandle
from torch.utils._python_dispatch import TorchDispatchMode
+import operator
BYTES_PER_MB = 1024 * 1024.0
@@ -148,7 +149,7 @@ class MemoryTracker:
print("------------------------------------------------")
print(f"The number of cuda retries are: {self._num_cuda_retries}")
print(f"Top {top} ops that generates memory are:")
- for k, v in sorted(op_diff.items(), key=lambda item: item[1], reverse=True)[
+ for k, v in sorted(op_diff.items(), key=operator.itemgetter(1), reverse=True)[
:top
]:
print(f"{k}: {v}MB")
diff --git a/torch/distributed/checkpoint/filesystem.py b/torch/distributed/checkpoint/filesystem.py
index 6768843292..9b6345862c 100644
--- a/torch/distributed/checkpoint/filesystem.py
+++ b/torch/distributed/checkpoint/filesystem.py
@@ -1,6 +1,7 @@
import collections
import dataclasses
import io
+import operator
import os
import pickle
import queue
@@ -177,7 +178,7 @@ class _OverlappingCpuLoader(_TensorLoader):
if self.started:
return
self.started = True
- self.items.sort(key=lambda x: x[0])
+ self.items.sort(key=operator.itemgetter(0))
self._refill()
def values(self) -> Iterator[Tuple[torch.Tensor, object]]:
@@ -218,7 +219,7 @@ def _split_by_size_and_type(bins: int, items: List[WriteItem]) -> List[List[Writ
for wi in tensor_w:
# TODO replace with headq
- idx = min(enumerate(bucket_sizes), key=lambda x: x[1])[0]
+ idx = min(enumerate(bucket_sizes), key=operator.itemgetter(1))[0]
buckets[idx].append(wi)
bucket_sizes[idx] += _item_size(wi)
diff --git a/torch/distributed/checkpoint/planner.py b/torch/distributed/checkpoint/planner.py
index 7e29bc336c..1492f09bf2 100644
--- a/torch/distributed/checkpoint/planner.py
+++ b/torch/distributed/checkpoint/planner.py
@@ -1,5 +1,6 @@
import abc
import io
+import operator
from dataclasses import dataclass
from enum import auto, Enum
from functools import reduce
@@ -67,7 +68,7 @@ class WriteItem:
if self.tensor_data is None:
return None
- numels = reduce(lambda x, y: x * y, self.tensor_data.size, 1)
+ numels = reduce(operator.mul, self.tensor_data.size, 1)
dtype_size = torch._utils._element_size(self.tensor_data.properties.dtype)
return numels * dtype_size
diff --git a/torch/export/unflatten.py b/torch/export/unflatten.py
index f8e220b00d..ee3376204f 100644
--- a/torch/export/unflatten.py
+++ b/torch/export/unflatten.py
@@ -840,7 +840,7 @@ def _reorder_submodules(
_reorder_submodules(child, fqn_order, prefix=fqn + ".")
delattr(parent, name)
children.append((fqn_order[fqn], name, child))
- children.sort(key=lambda x: x[0])
+ children.sort(key=operator.itemgetter(0))
for _, name, child in children:
parent.register_module(name, child)
diff --git a/torch/fx/experimental/accelerator_partitioner.py b/torch/fx/experimental/accelerator_partitioner.py
index 7bb91692b3..fc28f11232 100644
--- a/torch/fx/experimental/accelerator_partitioner.py
+++ b/torch/fx/experimental/accelerator_partitioner.py
@@ -259,7 +259,7 @@ def get_device_to_partitions_mapping(
# Find devices for all the partitions without a device
found_device = True
for partition in no_device_partitions:
- device_to_left_mem_bytes = dict(sorted(device_to_left_mem_bytes.items(), key=lambda item: item[1]))
+ device_to_left_mem_bytes = dict(sorted(device_to_left_mem_bytes.items(), key=operator.itemgetter(1)))
found_device = find_device_for(partition)
if not found_device:
break
diff --git a/torch/fx/experimental/unification/multipledispatch/conflict.py b/torch/fx/experimental/unification/multipledispatch/conflict.py
index 71db96dd47..6c247bd981 100644
--- a/torch/fx/experimental/unification/multipledispatch/conflict.py
+++ b/torch/fx/experimental/unification/multipledispatch/conflict.py
@@ -1,5 +1,6 @@
from .utils import _toposort, groupby
from .variadic import isvariadic
+import operator
__all__ = ["AmbiguityWarning", "supercedes", "consistent", "ambiguous", "ambiguities", "super_signature",
"edge", "ordering"]
@@ -111,7 +112,7 @@ def ordering(signatures):
"""
signatures = list(map(tuple, signatures))
edges = [(a, b) for a in signatures for b in signatures if edge(a, b)]
- edges = groupby(lambda x: x[0], edges)
+ edges = groupby(operator.itemgetter(0), edges)
for s in signatures:
if s not in edges:
edges[s] = []
diff --git a/torch/profiler/_utils.py b/torch/profiler/_utils.py
index caacfb8303..1ad1293e3e 100644
--- a/torch/profiler/_utils.py
+++ b/torch/profiler/_utils.py
@@ -1,4 +1,5 @@
import functools
+import operator
import re
from collections import deque
from dataclasses import dataclass
@@ -316,7 +317,7 @@ class BasicEvaluation:
event
for _, event in sorted(
zip(heuristic_score_list, event_list),
- key=lambda x: x[0],
+ key=operator.itemgetter(0),
reverse=True,
)
]
diff --git a/torch/testing/_internal/common_modules.py b/torch/testing/_internal/common_modules.py
index e111b20c08..ffd0e6f95a 100644
--- a/torch/testing/_internal/common_modules.py
+++ b/torch/testing/_internal/common_modules.py
@@ -28,6 +28,7 @@ from torch.testing._internal.common_utils import (
skipIfTorchDynamo)
from types import ModuleType
from typing import List, Tuple, Type, Set, Dict
+import operator
# List of all namespaces containing modules to test.
MODULE_NAMESPACES: List[ModuleType] = [
@@ -3374,7 +3375,7 @@ module_db: List[ModuleInfo] = [
unittest.expectedFailure,
'TestModule',
'test_memory_format',
- active_if=lambda p: p['training'],
+ active_if=operator.itemgetter('training'),
),)
),
ModuleInfo(torch.nn.AdaptiveAvgPool3d,
@@ -3413,7 +3414,7 @@ module_db: List[ModuleInfo] = [
unittest.expectedFailure,
'TestModule',
'test_memory_format',
- active_if=lambda p: p['training'],
+ active_if=operator.itemgetter('training'),
device_type='cuda',
),
# error: input types 'tensor<f32>' and 'tensor<15x10xf16>' are not broadcast compatible
@@ -3440,13 +3441,13 @@ module_db: List[ModuleInfo] = [
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_symbolic_module_exhaustive',
- active_if=lambda p: p['training']
+ active_if=operator.itemgetter('training')
),
# torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_module_exhaustive',
- active_if=lambda p: p['training']
+ active_if=operator.itemgetter('training')
))
),
ModuleInfo(torch.nn.BatchNorm2d,
@@ -3461,13 +3462,13 @@ module_db: List[ModuleInfo] = [
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_symbolic_module_exhaustive',
- active_if=lambda p: p['training']
+ active_if=operator.itemgetter('training')
),
# torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_module_exhaustive',
- active_if=lambda p: p['training']
+ active_if=operator.itemgetter('training')
),)
),
ModuleInfo(torch.nn.BatchNorm3d,
@@ -3481,13 +3482,13 @@ module_db: List[ModuleInfo] = [
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_symbolic_module_exhaustive',
- active_if=lambda p: p['training']
+ active_if=operator.itemgetter('training')
),
# torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default
DecorateInfo(
unittest.expectedFailure, 'TestEagerFusionModuleInfo',
'test_aot_autograd_module_exhaustive',
- active_if=lambda p: p['training']
+ active_if=operator.itemgetter('training')
),)
),
ModuleInfo(torch.nn.CELU,
@@ -3870,7 +3871,7 @@ module_db: List[ModuleInfo] = [
unittest.expectedFailure,
'TestModule',
'test_memory_format',
- active_if=lambda p: p['training'],
+ active_if=operator.itemgetter('training'),
device_type='mps',
),)
),
@@ -4070,7 +4071,7 @@ module_db: List[ModuleInfo] = [
unittest.expectedFailure,
'TestModule',
'test_memory_format',
- active_if=lambda p: p['training'],
+ active_if=operator.itemgetter('training'),
device_type='mps',
),),
supports_gradgrad=False),
@@ -4193,7 +4194,7 @@ module_db: List[ModuleInfo] = [
unittest.expectedFailure,
'TestModule',
'test_memory_format',
- active_if=lambda p: p['training'],
+ active_if=operator.itemgetter('training'),
device_type='mps',
),)
),
@@ -4235,7 +4236,7 @@ module_db: List[ModuleInfo] = [
unittest.expectedFailure,
'TestModule',
'test_memory_format',
- active_if=lambda p: p['training'],
+ active_if=operator.itemgetter('training'),
device_type='mps',
),)
),
@@ -4298,7 +4299,7 @@ module_db: List[ModuleInfo] = [
unittest.expectedFailure,
'TestModule',
'test_memory_format',
- active_if=lambda p: p['training'],
+ active_if=operator.itemgetter('training'),
device_type='mps',
),)
),
@@ -4311,7 +4312,7 @@ module_db: List[ModuleInfo] = [
unittest.expectedFailure,
'TestModule',
'test_memory_format',
- active_if=lambda p: p['training'],
+ active_if=operator.itemgetter('training'),
device_type='mps',
),)
),
diff --git a/torch/testing/_internal/distributed/rpc/rpc_test.py b/torch/testing/_internal/distributed/rpc/rpc_test.py
index ee98aaa161..5d2a67cd47 100644
--- a/torch/testing/_internal/distributed/rpc/rpc_test.py
+++ b/torch/testing/_internal/distributed/rpc/rpc_test.py
@@ -55,6 +55,7 @@ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
from torch.testing._internal.common_utils import TemporaryFileName
from torch.autograd.profiler_legacy import profile as _profile
+import operator
def foo_add():
@@ -6309,7 +6310,7 @@ class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon):
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_tensor(self):
self._test_cuda_future_extraction(
- wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=False
+ wrapper=lambda t: [t], unwrapper=operator.itemgetter(0), sparse_tensor=False
)
@skip_if_lt_x_gpu(1)
@@ -6484,7 +6485,7 @@ class TensorPipeAgentCudaRpcTest(RpcAgentTestFixture, RpcTestCommon):
@skip_if_lt_x_gpu(1)
def test_cuda_future_can_extract_list_with_cuda_sparse_tensor(self):
self._test_cuda_future_extraction(
- wrapper=lambda t: [t], unwrapper=lambda v: v[0], sparse_tensor=True
+ wrapper=lambda t: [t], unwrapper=operator.itemgetter(0), sparse_tensor=True
)
@skip_if_lt_x_gpu(1)
diff --git a/torch/utils/benchmark/examples/op_benchmark.py b/torch/utils/benchmark/examples/op_benchmark.py
index b7536b9ec2..e2f0861d20 100644
--- a/torch/utils/benchmark/examples/op_benchmark.py
+++ b/torch/utils/benchmark/examples/op_benchmark.py
@@ -9,6 +9,7 @@ import torch
from torch.utils.benchmark import Timer
from torch.utils.benchmark.op_fuzzers.binary import BinaryOpFuzzer
from torch.utils.benchmark.op_fuzzers.unary import UnaryOpFuzzer
+import operator
_MEASURE_TIME = 1.0
@@ -75,7 +76,7 @@ def run(n, stmt, fuzzer_cls):
order_len = max(order_len, len(order))
steps_len = max(steps_len, len(steps))
- parsed_results.sort(key=lambda x: x[2])
+ parsed_results.sort(key=operator.itemgetter(2))
print(f"stmt: {stmt}")
print(f" diff faster{'':>17}{' ' * name_len} ", end="")
diff --git a/torch/utils/benchmark/examples/sparse/op_benchmark.py b/torch/utils/benchmark/examples/sparse/op_benchmark.py
index d7e97d33cc..f998f6d5db 100644
--- a/torch/utils/benchmark/examples/sparse/op_benchmark.py
+++ b/torch/utils/benchmark/examples/sparse/op_benchmark.py
@@ -9,6 +9,7 @@ import torch
from torch.utils.benchmark import Timer
from torch.utils.benchmark.op_fuzzers.sparse_unary import UnaryOpSparseFuzzer
from torch.utils.benchmark.op_fuzzers.sparse_binary import BinaryOpSparseFuzzer
+import operator
_MEASURE_TIME = 1.0
@@ -70,7 +71,7 @@ def run(n, stmt, fuzzer_cls):
sparse_dim_len = max(sparse_dim_len, len(sparse_dim))
is_coalesced_len = max(is_coalesced_len, len(is_coalesced))
- parsed_results.sort(key=lambda x: x[2])
+ parsed_results.sort(key=operator.itemgetter(2))
print(f"stmt: {stmt}")
print(f" diff faster{'':>17}{' ' * name_len} ", end="")
diff --git a/torch/utils/benchmark/utils/compare.py b/torch/utils/benchmark/utils/compare.py
index 9c7863e6a7..337b742ca0 100644
--- a/torch/utils/benchmark/utils/compare.py
+++ b/torch/utils/benchmark/utils/compare.py
@@ -6,6 +6,7 @@ from typing import DefaultDict, List, Optional, Tuple
from torch.utils.benchmark.utils import common
from torch import tensor as _tensor
+import operator
__all__ = ["Colorize", "Compare"]
@@ -167,7 +168,7 @@ class Table:
)
self.row_keys = common.ordered_unique([self.row_fn(i) for i in results])
- self.row_keys.sort(key=lambda args: args[:2]) # preserve stmt order
+ self.row_keys.sort(key=operator.itemgetter(slice(2))) # preserve stmt order
self.column_keys = common.ordered_unique([self.col_fn(i) for i in results])
self.rows, self.columns = self.populate_rows_and_columns()
diff --git a/torchgen/operator_versions/gen_mobile_upgraders.py b/torchgen/operator_versions/gen_mobile_upgraders.py
index dab1568580..29070761c5 100644
--- a/torchgen/operator_versions/gen_mobile_upgraders.py
+++ b/torchgen/operator_versions/gen_mobile_upgraders.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import os
from enum import Enum
+from operator import itemgetter
from pathlib import Path
from typing import Any, Dict, List
@@ -263,7 +264,7 @@ def construct_version_maps(
upgrader_bytecode_function_to_index_map: Dict[str, Any]
) -> str:
version_map = torch._C._get_operator_version_map()
- sorted_version_map_ = sorted(version_map.items(), key=lambda item: item[0]) # type: ignore[no-any-return]
+ sorted_version_map_ = sorted(version_map.items(), key=itemgetter(0)) # type: ignore[no-any-return]
sorted_version_map = dict(sorted_version_map_)
operator_list_in_version_map_part = []
|
2.41.0
|
bef5e9f67514fdd55dbab4ab839864adcb2a69f
|
Fri, 26 Apr 2024 14:36:26 +0000
|
[PATCH 0720/1000] [CI] Add retry mechanism to check if the Docker daemon is running (#124728)
|
What is done: * Skipped the 'Kill existing containers' step - ARC runners are always ephemeral. * Added a retry mechanism to check if the Docker daemon is running. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124728 Approved by: https://github.com/seemethere, https://github.com/zxiiro, https://github.com/ZainRizvi
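The retry loop added to the action is plain bash (see the diff below); purely as an illustration, the same pattern expressed in Python would look roughly like this (the command and timing constants mirror the workflow step and are not part of the change itself):

import subprocess
import sys
import time

MAX_ATTEMPTS = 30
DELAY_SECONDS = 10

def docker_daemon_ready() -> bool:
    # `docker info` exits non-zero while the daemon is still starting up
    result = subprocess.run(
        ["docker", "info"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    return result.returncode == 0

for attempt in range(1, MAX_ATTEMPTS + 1):
    print(f"Attempt {attempt} of {MAX_ATTEMPTS}: checking the Docker daemon...")
    if docker_daemon_ready():
        print("Docker is running. Proceeding with the next steps")
        sys.exit(0)
    print(f"Docker is not running yet. Retrying in {DELAY_SECONDS} seconds...")
    time.sleep(DELAY_SECONDS)

print("Reached maximum attempts to connect to Docker. Exiting.")
sys.exit(1)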
|
diff --git a/.github/actions/setup-linux/action.yml b/.github/actions/setup-linux/action.yml
index 98c796e0ca..193dc7d6fd 100644
--- a/.github/actions/setup-linux/action.yml
+++ b/.github/actions/setup-linux/action.yml
@@ -66,6 +66,7 @@ runs:
env | grep '^CI' >> "/tmp/github_env_${GITHUB_RUN_ID}"
- name: Kill any existing containers, clean up images
+ if: ${{ steps.check_arc_runner.outputs.IN_ARC_RUNNER == 'false' }}
shell: bash
run: |
# ignore expansion of "docker ps -q" since it could be empty
@@ -104,3 +105,28 @@ runs:
echo "${RESOLVED_IP} ${PT_DOMAIN}" | sudo tee -a /etc/hosts
cat /etc/hosts
+
+ - name: Check that the docker daemon is running
+ shell: bash
+ continue-on-error: true
+ if: ${{ steps.check_arc_runner.outputs.IN_ARC_RUNNER == 'true' }}
+ run: |
+ set +x
+
+ max_attempts=30
+ delay=10
+ attempt=1
+
+ for attempt in $(seq 1 $max_attempts); do
+ echo "Attempt $attempt of $max_attempts: Checking if Docker daemon is running..."
+ if docker info > /dev/null 2>&1; then
+ echo "Docker is running. Proceeding with the next steps"
+ exit 0
+ else
+ echo "Docker is not running yet."
+ echo "Retrying in $delay seconds..."
+ sleep $delay
+ fi
+ done
+ echo "Reached maximum attempts to connect to Docker. Exiting."
+ exit 1
|
2.41.0
|
b54f9d3e1b30cf270bf17ab47291a470edeac42
|
Fri, 26 Apr 2024 15:07:09 +0000
|
[PATCH 0721/1000] Revert "fix Invalid call to aoti_torch_tensor_copy_ #123039 (#124037)"
|
This reverts commit f9379ebbbf1369aad8179cac4a2eb7d72f25739e. Reverted https://github.com/pytorch/pytorch/pull/124037 on behalf of https://github.com/jeanschmidt due to introducing regressions in benchmark, see D56623194 for more details ([comment](https://github.com/pytorch/pytorch/pull/124037#issuecomment-2079574308))
|
diff --git a/test/inductor/test_cuda_cpp_wrapper.py b/test/inductor/test_cuda_cpp_wrapper.py
index fa717ab835..b662e2438c 100644
--- a/test/inductor/test_cuda_cpp_wrapper.py
+++ b/test/inductor/test_cuda_cpp_wrapper.py
@@ -109,7 +109,9 @@ if config.abi_compatible:
test_failures_cuda_wrapper[
f"{test_name}_dynamic_shapes"
] = test_torchinductor.TestFailure(("cuda_wrapper",), is_skip=False)
- skip_list = []
+ skip_list = [
+ "test_multi_device_cuda",
+ ]
for test_name in skip_list:
test_failures_cuda_wrapper[test_name] = test_torchinductor.TestFailure(
("cuda_wrapper",), is_skip=True
diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py
index f50e2582a2..95e4ef3ac7 100644
--- a/torch/_inductor/codegen/cpp_wrapper_cpu.py
+++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py
@@ -896,11 +896,9 @@ class CppWrapperCpu(WrapperCodeGen):
@cache_on_self
def get_output_refs(self):
return [
- (
- f"torch::tensor({x.codegen_reference(self.wrapper_call)})"
- if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible
- else x.codegen_reference(self.wrapper_call)
- )
+ f"torch::tensor({x.codegen_reference(self.wrapper_call)})"
+ if isinstance(x, ir.ShapeAsConstantBuffer) and not config.abi_compatible
+ else x.codegen_reference(self.wrapper_call)
for x in V.graph.graph_outputs
]
@@ -1100,11 +1098,9 @@ class CppWrapperCpu(WrapperCodeGen):
outputs_str = "output_tensors"
else:
outputs = [
- (
- f"output_tensors[{i}]"
- if self.output_is_tensor[i]
- else f"output_tensors[{i}].item()"
- )
+ f"output_tensors[{i}]"
+ if self.output_is_tensor[i]
+ else f"output_tensors[{i}].item()"
for i in range(len(V.graph.graph_outputs))
]
outputs_str = f"[{', '.join(outputs)}]"
@@ -1430,7 +1426,6 @@ class CppWrapperCpu(WrapperCodeGen):
and ir.is_contiguous_strides_for_shape(
buffer.get_stride(), buffer.get_size()
)
- and not buffer.is_extern()
)
def make_buffer_free(self, buffer):
|
2.41.0
|
003e0f29eeb4a810c47056400918924948b88c2
|
Fri, 26 Apr 2024 15:16:36 +0000
|
[PATCH 0722/1000] [TD] Query Github API for base (#122214)
|
A better query for the base commit of a PR. Some ghstack PRs are not connected to main, so git merge-base doesn't work. Instead, use the GitHub API to query for the base of the PR, which should be more accurate. Sanity checked on one of Ed's ghstack PRs. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122214 Approved by: https://github.com/seemethere
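A condensed sketch of the new lookup (GITHUB_TOKEN and the pytorch/pytorch repo come from the diff below; the real helpers live in tools/testing/target_determination/heuristics/utils.py):

import json
import os
import subprocess
from urllib.request import Request, urlopen

def merge_base_for_pr(pr_number: int) -> str:
    headers = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"token {os.environ['GITHUB_TOKEN']}",
    }
    url = f"https://api.github.com/repos/pytorch/pytorch/pulls/{pr_number}"
    with urlopen(Request(url, headers=headers)) as conn:
        # ghstack PRs may target a branch other than main, so ask the API
        # for the PR's base ref instead of assuming the default branch
        base_ref = json.loads(conn.read().decode())["base"]["ref"]
    return (
        subprocess.check_output(["git", "merge-base", f"origin/{base_ref}", "HEAD"])
        .decode()
        .strip()
    )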
|
diff --git a/.github/workflows/target_determination.yml b/.github/workflows/target_determination.yml
index f719b798c1..cd5e758345 100644
--- a/.github/workflows/target_determination.yml
+++ b/.github/workflows/target_determination.yml
@@ -53,6 +53,7 @@ jobs:
GITHUB_RUN_ID: ${{ github.run_id }}
GITHUB_RUN_NUMBER: ${{ github.run_number }}
GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
+ GITHUB_REF: ${{ github.ref }}
JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
PR_NUMBER: ${{ github.event.pull_request.number }}
diff --git a/tools/testing/target_determination/heuristics/mentioned_in_pr.py b/tools/testing/target_determination/heuristics/mentioned_in_pr.py
index 56c14b4a6a..074a375e2d 100644
--- a/tools/testing/target_determination/heuristics/mentioned_in_pr.py
+++ b/tools/testing/target_determination/heuristics/mentioned_in_pr.py
@@ -1,4 +1,3 @@
-import os
import re
from typing import Any, List
@@ -9,6 +8,7 @@ from tools.testing.target_determination.heuristics.interface import (
from tools.testing.target_determination.heuristics.utils import (
get_git_commit_info,
get_issue_or_pr_body,
+ get_pr_number,
)
from tools.testing.test_run import TestRun
@@ -32,14 +32,11 @@ class MentionedInPR(HeuristicInterface):
print(f"Can't get commit info due to {e}")
commit_messages = ""
try:
- pr_number = os.environ.get("PR_NUMBER", "")
- if pr_number == "":
- re_match = re.match(
- r"^refs/tags/.*/(\d+)$", os.environ.get("GITHUB_REF", "")
- )
- if re_match is not None:
- pr_number = re_match.group(1)
- pr_body = get_issue_or_pr_body(int(pr_number))
+ pr_number = get_pr_number()
+ if pr_number is not None:
+ pr_body = get_issue_or_pr_body(pr_number)
+ else:
+ pr_body = ""
except Exception as e:
print(f"Can't get PR body due to {e}")
pr_body = ""
diff --git a/tools/testing/target_determination/heuristics/utils.py b/tools/testing/target_determination/heuristics/utils.py
index 0c158bb934..097e51013c 100644
--- a/tools/testing/target_determination/heuristics/utils.py
+++ b/tools/testing/target_determination/heuristics/utils.py
@@ -1,10 +1,11 @@
import json
import os
+import re
import subprocess
from collections import defaultdict
from functools import lru_cache
from pathlib import Path
-from typing import cast, Dict, List, Set, Union
+from typing import cast, Dict, List, Optional, Set, Union
from urllib.request import Request, urlopen
from warnings import warn
@@ -22,27 +23,50 @@ def python_test_file_to_test_name(tests: Set[str]) -> Set[str]:
@lru_cache(maxsize=None)
-def query_changed_files() -> List[str]:
- default_branch = f"origin/{os.environ.get('GIT_DEFAULT_BRANCH', 'main')}"
+def get_pr_number() -> Optional[int]:
+ pr_number = os.environ.get("PR_NUMBER", "")
+ if pr_number == "":
+ re_match = re.match(r"^refs/tags/.*/(\d+)$", os.environ.get("GITHUB_REF", ""))
+ if re_match is not None:
+ pr_number = re_match.group(1)
+ if pr_number != "":
+ return int(pr_number)
+ return None
+
+
+@lru_cache(maxsize=None)
+def get_merge_base() -> str:
+ pr_number = get_pr_number()
+ if pr_number is not None:
+ github_token = os.environ.get("GITHUB_TOKEN")
+ headers = {
+ "Accept": "application/vnd.github.v3+json",
+ "Authorization": f"token {github_token}",
+ }
+ url = f"https://api.github.com/repos/pytorch/pytorch/pulls/{pr_number}"
+ with urlopen(Request(url, headers=headers)) as conn:
+ pr_info = json.loads(conn.read().decode())
+ base = pr_info["base"]["ref"]
+ else:
+ base = "HEAD^"
+
merge_base = (
- subprocess.check_output(["git", "merge-base", default_branch, "HEAD"])
+ subprocess.check_output(["git", "merge-base", f"origin/{base}", "HEAD"])
.decode()
.strip()
)
+ return merge_base
- head = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode().strip()
- base_commit = merge_base
- if base_commit == head:
- # We are on the default branch, so check for changes since the last commit
- base_commit = "HEAD^"
+def query_changed_files() -> List[str]:
+ base_commit = get_merge_base()
proc = subprocess.run(
["git", "diff", "--name-only", base_commit, "HEAD"],
capture_output=True,
check=False,
)
- print(f"merge_base: {merge_base}, head: {head}")
+ print(f"base_commit: {base_commit}")
if proc.returncode != 0:
raise RuntimeError("Unable to get changed files")
@@ -56,20 +80,7 @@ def query_changed_files() -> List[str]:
@lru_cache(maxsize=None)
def get_git_commit_info() -> str:
"""Gets the commit info since the last commit on the default branch."""
- default_branch = f"origin/{os.environ.get('GIT_DEFAULT_BRANCH', 'main')}"
-
- merge_base = (
- subprocess.check_output(["git", "merge-base", default_branch, "HEAD"])
- .decode()
- .strip()
- )
-
- head = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode().strip()
-
- base_commit = merge_base
- if base_commit == head:
- # We are on the default branch, so check for changes since the last commit
- base_commit = "HEAD^"
+ base_commit = get_merge_base()
return (
subprocess.check_output(
|
2.41.0
|
d12ba9acfa20ed7df438a8892c9bf8e6bef5775
|
Fri, 26 Apr 2024 15:26:20 +0000
|
[PATCH 0723/1000] add methods for open device in PackedSequence module. (#124923)
|
1) Add is_{custom_device_name}() and {custom_device_name}() methods for the open device registration; 2) fix failing open device test cases. @ezyang @bdhirsh Pull Request resolved: https://github.com/pytorch/pytorch/pull/124923 Approved by: https://github.com/ezyang
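A hedged sketch of what the generated methods look like from the user's side, assuming a PrivateUse1 extension has been loaded and renamed to "foo" as in the updated test below (without such an extension the generated attributes do not exist):

import torch
from torch.nn.utils.rnn import PackedSequence

# After torch.utils.rename_privateuse1_backend("foo") and
# torch.utils.generate_methods_for_privateuse1_backend(), PackedSequence
# gains an is_foo property (checks .data.device) and a foo() method
# (a .to() wrapper that targets the custom device).
data = torch.rand(5, 3)
batch_sizes = torch.tensor([1, 1, 1, 1, 1])
packed = PackedSequence(data, batch_sizes)

assert not packed.is_foo        # still on CPU
packed_foo = packed.foo()       # moves the underlying data to the custom device
assert packed_foo.is_foo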
|
diff --git a/test/cpp_extensions/open_registration_extension.cpp b/test/cpp_extensions/open_registration_extension.cpp
index a275899ef0..f5b61102af 100644
--- a/test/cpp_extensions/open_registration_extension.cpp
+++ b/test/cpp_extensions/open_registration_extension.cpp
@@ -492,7 +492,7 @@ TORCH_LIBRARY_IMPL(aten, PrivateUse1, m) {
m.impl("sub.Tensor", torch::CppFunction::makeFromBoxedFunction<&custom_cpu_fallback>());
m.impl("_foreach_add.List", torch::CppFunction::makeFromBoxedFunction<&custom_cpu_fallback>());
m.impl("index.Tensor", torch::CppFunction::makeFromBoxedFunction<&custom_cpu_fallback>());
- m.impl("triu.indices", torch::CppFunction::makeFromBoxedFunction<&custom_cpu_fallback>());
+ m.impl("triu_indices", torch::CppFunction::makeFromBoxedFunction<&custom_cpu_fallback>());
}
// This basic implementation doesn't bother dealing with different device indices
diff --git a/test/test_cpp_extensions_open_device_registration.py b/test/test_cpp_extensions_open_device_registration.py
index 1950644f5e..3511070ce3 100644
--- a/test/test_cpp_extensions_open_device_registration.py
+++ b/test/test_cpp_extensions_open_device_registration.py
@@ -52,6 +52,10 @@ class DummyModule:
def current_device():
return 0
+ @staticmethod
+ def is_initialized():
+ return True
+
@unittest.skipIf(IS_ARM64, "Does not work on arm")
@torch.testing._internal.common_utils.markDynamoStrictTest
@@ -96,6 +100,9 @@ class TestCppExtensionOpenRgistration(common.TestCase):
self.assertFalse(self.module.custom_add_called())
# create a tensor using our custom device object
device = self.module.custom_device()
+ # register foo module, torch.foo. This is for lazy
+ # init check.
+ torch._register_device_module("foo", DummyModule)
x = torch.empty(4, 4, device=device)
y = torch.empty(4, 4, device=device)
# Check that our device is correct.
@@ -113,6 +120,7 @@ class TestCppExtensionOpenRgistration(common.TestCase):
self.assertTrue(z.device == device)
self.assertEqual(z, z_cpu)
z2 = z_cpu + z_cpu
+ del torch.foo
# check whether the error can be reported correctly
def test_before_common_registration():
@@ -132,6 +140,8 @@ class TestCppExtensionOpenRgistration(common.TestCase):
self.assertFalse(hasattr(torch.UntypedStorage, "is_foo"))
self.assertFalse(hasattr(torch.UntypedStorage, "foo"))
self.assertFalse(hasattr(torch.nn.Module, "foo"))
+ self.assertFalse(hasattr(torch.nn.utils.rnn.PackedSequence, "is_foo"))
+ self.assertFalse(hasattr(torch.nn.utils.rnn.PackedSequence, "foo"))
def test_after_common_registration():
# check attributes after registered
@@ -142,6 +152,8 @@ class TestCppExtensionOpenRgistration(common.TestCase):
self.assertTrue(hasattr(torch.UntypedStorage, "is_foo"))
self.assertTrue(hasattr(torch.UntypedStorage, "foo"))
self.assertTrue(hasattr(torch.nn.Module, "foo"))
+ self.assertTrue(hasattr(torch.nn.utils.rnn.PackedSequence, "is_foo"))
+ self.assertTrue(hasattr(torch.nn.utils.rnn.PackedSequence, "foo"))
def test_common_registration():
# first rename custom backend
@@ -266,6 +278,15 @@ class TestCppExtensionOpenRgistration(common.TestCase):
self.assertFalse(self.module.custom_add_called())
self.assertTrue(z.is_foo)
+ def test_open_device_packed_sequence():
+ device = self.module.custom_device()
+ a = torch.rand(5, 3)
+ b = torch.tensor([1, 1, 1, 1, 1])
+ input = torch.nn.utils.rnn.PackedSequence(a, b)
+ self.assertFalse(input.is_foo)
+ input_foo = input.foo()
+ self.assertTrue(input_foo.is_foo)
+
def test_open_device_storage():
# check whether the attributes and methods for storage of the corresponding custom backend are generated correctly
x = torch.empty(4, 4)
@@ -556,6 +577,7 @@ class TestCppExtensionOpenRgistration(common.TestCase):
test_open_device_dispatchstub()
test_open_device_random()
test_open_device_tensor()
+ test_open_device_packed_sequence()
test_open_device_storage()
test_open_device_storage_pin_memory()
test_open_device_serialization()
diff --git a/torch/utils/backend_registration.py b/torch/utils/backend_registration.py
index 1fda089204..6a4cbcb843 100644
--- a/torch/utils/backend_registration.py
+++ b/torch/utils/backend_registration.py
@@ -179,6 +179,47 @@ def _generate_module_methods_for_privateuse1_backend(custom_backend_name: str) -
_check_register_once(torch.nn.Module, custom_backend_name)
setattr(torch.nn.Module, custom_backend_name, wrap_module_to)
+def _generate_packed_sequence_methods_for_privateuse1_backend(custom_backend_name: str) -> None:
+ # Generate PackedSequence Module attributes and methods depends on Tensor methods,
+ # so we need to check whether Tensor methods is already registered.
+ if not hasattr(torch.Tensor, f'is_{custom_backend_name}') or \
+ not hasattr(torch.Tensor, custom_backend_name):
+ raise RuntimeError(
+ f"Can not automatically generate is_{custom_backend_name}() or "
+ f"{custom_backend_name}() method for torch.nn.utils.rnn.PackedSequence."
+ f"Because torch.Tensor doesn't has the method is_{custom_backend_name}()"
+ f"or {custom_backend_name}()."
+ f"For this error, you can try setting for_tensor=True.")
+
+ @property # type: ignore[misc]
+ def wrap_tensor_backend(self: torch.nn.utils.rnn.PackedSequence) -> bool:
+ return self.data.device.type == custom_backend_name
+
+ _check_register_once(torch.nn.utils.rnn.PackedSequence, f'is_{custom_backend_name}')
+ setattr(torch.nn.utils.rnn.PackedSequence, f'is_{custom_backend_name}', wrap_tensor_backend)
+
+ def wrap_module_to(self: torch.nn.utils.rnn.PackedSequence,
+ *args, **kwargs) -> torch.nn.utils.rnn.PackedSequence:
+ r"""Move all model parameters and buffers to the custom device.
+
+ This also makes associated parameters and buffers different objects. So
+ it should be called before constructing optimizer if the module will
+ live on device while being optimized.
+
+ .. note::
+ This method modifies the module in-place.
+
+ Args:
+ device (int, optional): if specified, all parameters will be copied to that device
+ """
+ ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)
+ if ex.device.type == custom_backend_name:
+ return self.to(*args, **kwargs)
+ kwargs.update({'device': custom_backend_name})
+ return self.to(*args, **kwargs)
+
+ _check_register_once(torch.nn.utils.rnn.PackedSequence, custom_backend_name)
+ setattr(torch.nn.utils.rnn.PackedSequence, custom_backend_name, wrap_module_to)
def _generate_storage_methods_for_privateuse1_backend(custom_backend_name: str,
unsupported_dtype: Optional[List[torch.dtype]] = None) -> None:
@@ -251,6 +292,7 @@ def _generate_storage_methods_for_privateuse1_backend(custom_backend_name: str,
def generate_methods_for_privateuse1_backend(for_tensor: bool = True, for_module: bool = True,
+ for_packed_sequence: bool = True,
for_storage: bool = False,
unsupported_dtype: Optional[List[torch.dtype]] = None) -> None:
r"""
@@ -296,6 +338,9 @@ def generate_methods_for_privateuse1_backend(for_tensor: bool = True, for_module
if for_storage:
_generate_storage_methods_for_privateuse1_backend(custom_backend_name, unsupported_dtype)
+ if for_packed_sequence:
+ _generate_packed_sequence_methods_for_privateuse1_backend(custom_backend_name)
+
def _get_custom_mod_func(func_name: str):
r"""
Return the func named `func_name` defined in custom device module. If not defined,
|
2.41.0
|
09c958281e2142a9a9911cdb383dcac7d2af332
|
Wed, 24 Apr 2024 20:22:24 -0700
|
[PATCH 0724/1000] Fix mypy issues in fake_tensor.py (#124428)
|
fake_tensor.py had its mypy errors ignored, which seems less than desirable. Also added SafePyObjectT<T>, a tagged wrapper around a SafePyObject that provides static type checking (with no other guarantees). Used `SafePyObjectT<TorchDispatchModeKey>` in some of the TorchDispatchModeTLS APIs to ensure that we don't accidentally inject a different type than expected into the stack. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124428 Approved by: https://github.com/malfet
|
diff --git a/c10/core/SafePyObject.h b/c10/core/SafePyObject.h
index 4e56384b2f..19f8f62c71 100644
--- a/c10/core/SafePyObject.h
+++ b/c10/core/SafePyObject.h
@@ -55,6 +55,22 @@ struct C10_API SafePyObject {
c10::impl::PyInterpreter* pyinterpreter_;
};
+// A newtype wrapper around SafePyObject for type safety when a python object
+// represents a specific type. Note that `T` is only used as a tag and isn't
+// actually used for any true purpose.
+template <typename T>
+struct SafePyObjectT : private SafePyObject {
+ SafePyObjectT(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
+ : SafePyObject(data, pyinterpreter) {}
+ SafePyObjectT(SafePyObjectT&& other) noexcept : SafePyObject(other) {}
+ SafePyObjectT(SafePyObjectT const&) = delete;
+ SafePyObjectT& operator=(SafePyObjectT const&) = delete;
+
+ using SafePyObject::ptr;
+ using SafePyObject::pyinterpreter;
+ using SafePyObject::release;
+};
+
// Like SafePyObject, but non-owning. Good for references to global PyObjects
// that will be leaked on interpreter exit. You get a copy constructor/assign
// this way.
diff --git a/c10/core/impl/TorchDispatchModeTLS.cpp b/c10/core/impl/TorchDispatchModeTLS.cpp
index 37c75003e2..e558a70522 100644
--- a/c10/core/impl/TorchDispatchModeTLS.cpp
+++ b/c10/core/impl/TorchDispatchModeTLS.cpp
@@ -25,7 +25,7 @@ bool TorchDispatchModeTLS::any_modes_set(bool skip_infra_modes) {
}
void TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
- std::shared_ptr<SafePyObject> mode) {
+ std::shared_ptr<PyObject_TorchDispatchMode> mode) {
if (!any_modes_set()) {
c10::impl::tls_set_dispatch_key_included(DispatchKey::Python, true);
c10::impl::tls_set_dispatch_key_included(
@@ -34,8 +34,9 @@ void TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
torchDispatchModeState.stack_.push_back(std::move(mode));
}
-const std::shared_ptr<SafePyObject> TorchDispatchModeTLS::pop_stack() {
- std::shared_ptr<SafePyObject> out;
+const std::shared_ptr<PyObject_TorchDispatchMode> TorchDispatchModeTLS::
+ pop_stack() {
+ std::shared_ptr<PyObject_TorchDispatchMode> out;
if (!torchDispatchModeState.stack_.empty()) {
out = torchDispatchModeState.stack_.back();
torchDispatchModeState.stack_.pop_back();
@@ -60,8 +61,9 @@ const std::shared_ptr<SafePyObject> TorchDispatchModeTLS::pop_stack() {
}
return out;
}
-const std::tuple<std::shared_ptr<SafePyObject>, TorchDispatchModeKey>
-TorchDispatchModeTLS::pop_highest_infra_mode() {
+const std::
+ tuple<std::shared_ptr<PyObject_TorchDispatchMode>, TorchDispatchModeKey>
+ TorchDispatchModeTLS::pop_highest_infra_mode() {
for (int64_t i = static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS) - 1;
i >= 0;
--i) {
@@ -82,8 +84,8 @@ TorchDispatchModeTLS::pop_highest_infra_mode() {
false, "Called pop_highest_infra_mode, but no infra modes were active.")
}
-const std::shared_ptr<SafePyObject>& TorchDispatchModeTLS::get_stack_at(
- int64_t idx) {
+const std::shared_ptr<PyObject_TorchDispatchMode>& TorchDispatchModeTLS::
+ get_stack_at(int64_t idx) {
TORCH_CHECK(idx < stack_len(), "Tried to get stack at idx that's too big");
// Our "logical" stack includes both:
// - any user modes (the entire torchDispatchModeState.stack_)
@@ -119,13 +121,13 @@ int64_t TorchDispatchModeTLS::stack_len() {
return stack_len + infra_modes_len;
}
-const c10::optional<std::shared_ptr<SafePyObject>> TorchDispatchModeTLS::
- get_mode(TorchDispatchModeKey mode_key) {
+const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
+TorchDispatchModeTLS::get_mode(TorchDispatchModeKey mode_key) {
return torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)];
}
void TorchDispatchModeTLS::set_mode(
- const std::shared_ptr<SafePyObject>& mode,
+ const std::shared_ptr<PyObject_TorchDispatchMode>& mode,
TorchDispatchModeKey mode_key) {
TORCH_CHECK(
torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] ==
@@ -143,8 +145,8 @@ void TorchDispatchModeTLS::set_mode(
torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] = mode;
}
-const c10::optional<std::shared_ptr<SafePyObject>> TorchDispatchModeTLS::
- unset_mode(TorchDispatchModeKey mode_key) {
+const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
+TorchDispatchModeTLS::unset_mode(TorchDispatchModeKey mode_key) {
auto out = torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)];
torchDispatchModeState.infra_modes_[static_cast<size_t>(mode_key)] =
c10::nullopt;
diff --git a/c10/core/impl/TorchDispatchModeTLS.h b/c10/core/impl/TorchDispatchModeTLS.h
index 50a92459e6..d9ac8d8449 100644
--- a/c10/core/impl/TorchDispatchModeTLS.h
+++ b/c10/core/impl/TorchDispatchModeTLS.h
@@ -12,31 +12,35 @@ enum class TorchDispatchModeKey : int8_t {
NUM_MODE_KEYS
};
+using PyObject_TorchDispatchMode = SafePyObjectT<TorchDispatchModeKey>;
+
struct C10_API TorchDispatchModeTLS {
// This API is NOT invariant safe.
// It must not take in an infra mode that uses TorchDispatchModeKey
// If you're pushing an infra mode onto the stack, we expect
// you to use set_mode
static void push_non_infra_mode_onto_stack(
- std::shared_ptr<SafePyObject> mode);
+ std::shared_ptr<PyObject_TorchDispatchMode> mode);
// Pops the top mode of the stack,
// giving precedence to user modes before attempting to pop
// any infra modes
- static const std::shared_ptr<SafePyObject> pop_stack();
+ static const std::shared_ptr<PyObject_TorchDispatchMode> pop_stack();
// Returns the highest-priority infra mode on the stack,
// along with its mode key.
- static const std::tuple<std::shared_ptr<SafePyObject>, TorchDispatchModeKey>
- pop_highest_infra_mode();
+ static const std::
+ tuple<std::shared_ptr<PyObject_TorchDispatchMode>, TorchDispatchModeKey>
+ pop_highest_infra_mode();
- static const std::shared_ptr<SafePyObject>& get_stack_at(int64_t idx);
+ static const std::shared_ptr<PyObject_TorchDispatchMode>& get_stack_at(
+ int64_t idx);
static int64_t stack_len();
- static const c10::optional<std::shared_ptr<SafePyObject>> get_mode(
- TorchDispatchModeKey mode_key);
- static const c10::optional<std::shared_ptr<SafePyObject>> unset_mode(
- TorchDispatchModeKey mode_key);
+ static const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
+ get_mode(TorchDispatchModeKey mode_key);
+ static const c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>
+ unset_mode(TorchDispatchModeKey mode_key);
static void set_mode(
- const std::shared_ptr<SafePyObject>& mode,
+ const std::shared_ptr<PyObject_TorchDispatchMode>& mode,
TorchDispatchModeKey mode_key);
static const TorchDispatchModeTLS& get_state();
@@ -45,13 +49,13 @@ struct C10_API TorchDispatchModeTLS {
static bool any_modes_set(bool skip_infra_modes = false);
private:
- std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
+ std::vector<std::shared_ptr<PyObject_TorchDispatchMode>> stack_;
// Users are allowed to push multiple ProxyTorchDispatchMode objects onto the
// stack
// However, we only allow a single FakeTensorMode onto the stack at a time
// (Pushing additional FakeTensorModes onto the stack is a no-op)
std::array<
- c10::optional<std::shared_ptr<c10::SafePyObject>>,
+ c10::optional<std::shared_ptr<PyObject_TorchDispatchMode>>,
static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS)>
infra_modes_;
};
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index aec4a28af9..8b23117704 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -55,6 +55,7 @@ from torch.types import (
)
from torch._prims_common import DeviceLikeType
+from torch.utils._python_dispatch import TorchDispatchMode
# This module is defined in torch/csrc/Module.cpp
@@ -1332,11 +1333,11 @@ def _pop_torch_function_stack() -> Any: ...
def _get_function_stack_at(idx: _int) -> Any: ...
def _len_torch_function_stack() -> _int: ...
def _set_torch_dispatch_mode(cls: Any) -> None: ...
-def _push_on_torch_dispatch_stack(cls: Any) -> None: ...
+def _push_on_torch_dispatch_stack(cls: TorchDispatchMode) -> None: ...
def _pop_torch_dispatch_stack(mode_key: Optional[torch._C._TorchDispatchModeKey] = None) -> Any: ...
def _get_dispatch_mode(mode_key: Optional[torch._C._TorchDispatchModeKey]) -> Any: ...
-def _unset_dispatch_mode(mode: torch._C._TorchDispatchModeKey) -> Any: ...
-def _set_dispatch_mode(mode: Any) -> None: ...
+def _unset_dispatch_mode(mode: torch._C._TorchDispatchModeKey) -> Optional[TorchDispatchMode]: ...
+def _set_dispatch_mode(mode: TorchDispatchMode) -> None: ...
def _get_dispatch_stack_at(idx: _int) -> Any: ...
def _len_torch_dispatch_stack() -> _int: ...
def _activate_gpu_trace() -> None: ...
@@ -1549,6 +1550,8 @@ def _dispatch_pystub(name: str, overload: str) -> Optional[Tuple[str, str]]: ...
def _dispatch_is_alias_key(dispatch: _dispatchkey) -> _bool: ...
def _functionality_to_backend_keys(dispatch: _dispatchkey) -> List[DispatchKey]: ...
def _functionalization_reapply_views_tls() -> _bool: ...
+def _only_lift_cpu_tensors() -> _bool: ...
+def _set_only_lift_cpu_tensors(value: _bool) -> None: ...
def _set_throw_on_mutable_data_ptr(tensor: Tensor) -> None: ...
def _set_warn_deprecated_on_mutable_data_ptr(tensor: Tensor) -> None: ...
@@ -2260,6 +2263,7 @@ def _register_py_class_for_device(device: str, cls: Any) -> None: ...
# Defined in torch/csrc/Module.cpp
def _current_graph_task_id() -> _int: ...
def _current_autograd_node() -> _Node: ...
+def _dispatch_key_set(Tensor) -> str: ...
# Defined in torch/csrc/Exceptions.cpp
class OutOfMemoryError(RuntimeError): ...
diff --git a/torch/_ops.py b/torch/_ops.py
index 9ada11cd9f..6e2119f16a 100644
--- a/torch/_ops.py
+++ b/torch/_ops.py
@@ -1238,4 +1238,4 @@ class _Ops(types.ModuleType):
# The ops "namespace"
-ops = _Ops()
+ops: _Ops = _Ops()
diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py
index 8174f0658f..d291605d58 100644
--- a/torch/_subclasses/fake_tensor.py
+++ b/torch/_subclasses/fake_tensor.py
@@ -1,5 +1,3 @@
-# mypy: ignore-errors
-
import contextlib
import functools
import logging
@@ -8,7 +6,18 @@ import traceback
import weakref
from collections import defaultdict
from dataclasses import dataclass
-from typing import Any, Dict, List, Optional, Tuple, Type, TYPE_CHECKING, TypeVar
+from typing import (
+ Any,
+ cast,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Type,
+ TYPE_CHECKING,
+ TypeVar,
+ Union,
+)
from weakref import ReferenceType
import torch
@@ -30,6 +39,7 @@ from torch._utils import render_call
from torch.fx.operator_schemas import normalize_function
from torch.multiprocessing.reductions import StorageWeakRef
from torch.overrides import TorchFunctionMode
+from torch.types import _bool
from torch.utils._mode_utils import no_dispatch
from torch.utils._python_dispatch import (
is_traceable_wrapper_subclass,
@@ -42,6 +52,13 @@ from torch.utils._traceback import CapturedTraceback
if TYPE_CHECKING:
from torch.fx.experimental.symbolic_shapes import ShapeEnv
+
+class _Unassigned:
+ pass
+
+
+_UNASSIGNED = _Unassigned()
+
DimList = List
log = logging.getLogger(__name__)
@@ -718,7 +735,7 @@ def extract_tensor_metadata(t: torch.Tensor) -> "TensorMetadata":
"""
Extract the TensorMetadata of a tensor.
"""
- memory_format = suggest_memory_format(t)
+ memory_format: Optional[torch.memory_format] = suggest_memory_format(t)
if is_sparse_any(t) or not t.is_contiguous(memory_format=memory_format):
memory_format = None
@@ -806,10 +823,11 @@ class FakeTensorMode(TorchDispatchMode):
cache: Dict[_DispatchCacheKey, _DispatchCacheEntry] = {}
cache_hits: int = 0
cache_misses: int = 0
- cache_bypasses = defaultdict(int)
+ cache_bypasses: Dict[str, int] = defaultdict(int)
# Every time you retrace using the same fake tensor mode, you should
# advance the epoch so we don't reuse unbacked memos
epoch: int = 0
+ in_kernel_invocation: bool = False
def __init__(
self,
@@ -860,7 +878,9 @@ class FakeTensorMode(TorchDispatchMode):
# in_kernel_invocation
# If another fake mode was already active when we enter, we also stash it here.
# That way when we exit, we know to re-enable the previous fake mode.
- self.enter_stack: List[Tuple[bool, Optional[FakeTensorMode]]] = []
+ self.enter_stack: List[
+ Tuple[bool, Optional[TorchDispatchMode], Optional[_bool]]
+ ] = []
self.shape_env: ShapeEnv = shape_env
@@ -972,7 +992,7 @@ class FakeTensorMode(TorchDispatchMode):
Lookup a cache entry for the given arguments. If none exists, dispatch
and cache the result (if the result is eligible for caching).
"""
- output = unassigned = object()
+ output: Union[FakeTensor, _Unassigned] = _UNASSIGNED
try:
key = self._cache_key(func, args, kwargs)
entry = FakeTensorMode.cache.get(key, None)
@@ -991,7 +1011,7 @@ class FakeTensorMode(TorchDispatchMode):
except _BypassDispatchCache as e:
FakeTensorMode.cache_bypasses[e.reason] += 1
- if output is unassigned:
+ if output is _UNASSIGNED:
output = self._dispatch_impl(func, types, args, kwargs)
return output
@@ -1066,7 +1086,7 @@ class FakeTensorMode(TorchDispatchMode):
if isinstance(args, dict):
args = list(args.keys()) + list(args.values())
- result = []
+ result: List[Any] = []
for arg in args:
if isinstance(arg, FakeTensor):
if not self.is_our_fake(arg):
@@ -1177,7 +1197,7 @@ class FakeTensorMode(TorchDispatchMode):
# Synthesize a new FakeTensor with the cached metadata.
metadata = entry.metadata
- assert not metadata.is_sparse
+ assert metadata and not metadata.is_sparse
empty = torch.empty_strided(
metadata.shape,
@@ -1195,7 +1215,7 @@ class FakeTensorMode(TorchDispatchMode):
if func.is_view:
# For view ops, the storage should be the same as the tensor input.
- storage = args[entry.view_idx].untyped_storage()
+ storage = args[cast(int, entry.view_idx)].untyped_storage()
with in_kernel_invocation_manager(self):
empty.set_(
storage, metadata.storage_offset, metadata.shape, metadata.stride
@@ -1263,7 +1283,7 @@ class FakeTensorMode(TorchDispatchMode):
else:
return self._dispatch_impl(func, types, args, kwargs)
- def _dispatch_impl(self, func, types, args, kwargs):
+ def _dispatch_impl(self, func, types, args, kwargs) -> FakeTensor:
flat_args, args_spec = pytree.tree_flatten((args, kwargs))
flat_arg_fake_tensors = [
@@ -1557,7 +1577,7 @@ class FakeTensorMode(TorchDispatchMode):
If not, try to convert them to fake tensors.
Returns the original args, kwargs, and a flattened list of (args, kwargs) that are fake tensors.
"""
- flat_arg_fake_tensors = []
+ flat_arg_fake_tensors: List[Any] = []
def validate(x):
if not isinstance(x, torch.Tensor):
@@ -1684,7 +1704,7 @@ class FakeTensorMode(TorchDispatchMode):
source: Optional[Source] = None,
symbolic_context=None,
):
- shape_env = self.shape_env
+ shape_env: Optional[ShapeEnv] = self.shape_env
if static_shapes is None:
static_shapes = self.static_shapes
if static_shapes:
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index aaaa95a9d2..6c9870a5c4 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -1097,11 +1097,13 @@ static PyObject* push_on_torch_dispatch_stack(
if (maybe_mode_key_obj) {
mode_key = py::cast<c10::impl::TorchDispatchModeKey>(maybe_mode_key_obj);
c10::impl::TorchDispatchModeTLS::set_mode(
- std::make_shared<c10::SafePyObject>(arg, getPyInterpreter()),
+ std::make_shared<c10::impl::PyObject_TorchDispatchMode>(
+ arg, getPyInterpreter()),
mode_key.value());
} else {
c10::impl::TorchDispatchModeTLS::push_non_infra_mode_onto_stack(
- std::make_shared<c10::SafePyObject>(arg, getPyInterpreter()));
+ std::make_shared<c10::impl::PyObject_TorchDispatchMode>(
+ arg, getPyInterpreter()));
}
Py_INCREF(arg);
}
@@ -1165,7 +1167,9 @@ static PyObject* set_dispatch_mode(PyObject* _unused, PyObject* mode) {
Py_INCREF(mode);
c10::impl::TorchDispatchModeTLS::set_mode(
- std::make_shared<c10::SafePyObject>(mode, getPyInterpreter()), mode_key);
+ std::make_shared<c10::impl::PyObject_TorchDispatchMode>(
+ mode, getPyInterpreter()),
+ mode_key);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
diff --git a/torch/csrc/utils/torch_dispatch_mode.h b/torch/csrc/utils/torch_dispatch_mode.h
index 81729f27df..79173aeb3e 100644
--- a/torch/csrc/utils/torch_dispatch_mode.h
+++ b/torch/csrc/utils/torch_dispatch_mode.h
@@ -29,12 +29,12 @@ struct StashTorchDispatchModeGuard {
}
}
- const std::shared_ptr<c10::SafePyObject>& get_cur_mode() {
+ const std::shared_ptr<c10::impl::PyObject_TorchDispatchMode>& get_cur_mode() {
return saved_mode_;
}
private:
- std::shared_ptr<at::SafePyObject> saved_mode_;
+ std::shared_ptr<c10::impl::PyObject_TorchDispatchMode> saved_mode_;
c10::optional<c10::impl::TorchDispatchModeKey> saved_mode_key_;
};
diff --git a/torch/utils/_python_dispatch.py b/torch/utils/_python_dispatch.py
index f5f830c2f1..ec24f006a7 100644
--- a/torch/utils/_python_dispatch.py
+++ b/torch/utils/_python_dispatch.py
@@ -159,7 +159,7 @@ def _get_current_dispatch_mode_stack():
return [_get_dispatch_stack_at(i) for i in range(stack_len)]
-def _push_mode(mode):
+def _push_mode(mode: TorchDispatchMode):
k = mode._dispatch_key if hasattr(mode, "_dispatch_key") else None
assert k is None or k == torch._C.DispatchKey.PreDispatch
if k is None:
|
2.41.0
|
3744a2c009b34d818de8b1835fae75808bf58ee
|
Thu, 25 Apr 2024 11:48:49 -0700
|
[PATCH 0727/1000] torch.mtia module for MTIA device backend (#123612)
|
The MTIA device now has its own module in PyTorch. torch.mtia has the following APIs, similar to other backends, and lazy_init is also supported.

```
__all__ = [
    "init",
    "is_available",
    "synchronize",
    "device_count",
    "current_device",
    "current_stream",
    "default_stream",
    "set_stream",
    "stream",
    "device",
]
```

For device management, we expand AcceleratorHooksInterface to support generic device management, usable from both C++ and Python.

```
def _accelerator_hooks_device_count() -> _int: ...
def _accelerator_hooks_set_current_device(device_index: _int) -> None: ...
def _accelerator_hooks_get_current_device() -> _int: ...
def _accelerator_hooks_exchange_device(device_index: _int) -> _int: ...
def _accelerator_hooks_maybe_exchange_device(device_index: _int) -> _int: ...
```

We also add a get_device_module API to retrieve the device module for a given device type.

```
def get_device_module(device: Optional[Union[torch.device, str]] = None)
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123612 Approved by: https://github.com/albanD ghstack dependencies: #123611
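A rough usage sketch of the new Python surface, assuming a build in which an MTIA (or other accelerator) backend is actually registered; the device index and printed values are illustrative only:
```
import torch

# Resolve the module for whichever accelerator is present (falls back to torch.cpu).
dev_mod = torch.get_device_module()   # e.g. torch.cuda, torch.xpu, or torch.mtia
print(dev_mod.device_count())

# Target MTIA explicitly when the out-of-tree backend is loaded.
if torch.mtia.is_available():
    torch.mtia.init()                        # lazy init; a no-op once initialized
    s = torch.mtia.Stream(device="mtia:0")   # torch.mtia.Stream aliases torch.Stream
    with torch.mtia.stream(s):
        assert torch.mtia.current_stream() == s
    torch.mtia.synchronize()                 # wait for all streams on the device
```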
|
diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h
index 32b22855f9..b50f0479e2 100644
--- a/aten/src/ATen/Context.h
+++ b/aten/src/ATen/Context.h
@@ -69,6 +69,8 @@ class TORCH_API Context {
return at::detail::getMPSHooks();
} else if (device_type == at::kPrivateUse1) {
return at::detail::getPrivateUse1Hooks();
+ } else if (device_type == at::kMTIA) {
+ return at::detail::getMTIAHooks();
} else {
AT_ERROR(
c10::DeviceTypeName(device_type), " device type not an accelerator.");
@@ -156,6 +158,9 @@ class TORCH_API Context {
void lazyInitXPU() {
c10::call_once(thx_init, [&] { detail::getXPUHooks().initXPU(); });
}
+ void lazyInitMTIA() {
+ c10::call_once(th_mtia_init, [&] { detail::getMTIAHooks().initMTIA(); });
+ }
void lazyInitPrivateUse1() {
c10::call_once(thp_init, [&] {
if (isPrivateUse1HooksRegistered()) {
@@ -349,6 +354,7 @@ class TORCH_API Context {
c10::once_flag thc_init;
c10::once_flag thh_init;
c10::once_flag thx_init;
+ c10::once_flag th_mtia_init;
c10::once_flag thp_init;
bool enabled_cudnn = true;
bool deterministic_cudnn = false;
diff --git a/aten/src/ATen/DeviceAccelerator.cpp b/aten/src/ATen/DeviceAccelerator.cpp
index 05327cc219..ec3cd2a2f5 100644
--- a/aten/src/ATen/DeviceAccelerator.cpp
+++ b/aten/src/ATen/DeviceAccelerator.cpp
@@ -10,6 +10,9 @@ C10_API std::optional<DeviceType> getAccelerator(bool checked) {
#define CHECK_NO_PU1 \
TORCH_CHECK(!is_privateuse1_backend_registered(), "Cannot have both CUDA and PrivateUse1");
+#define CHECK_NO_MTIA \
+ TORCH_CHECK(!at::hasMTIA(), "Cannot have MTIA with other devices");
+
if (is_privateuse1_backend_registered()) {
// We explicitly allow PrivateUse1 and another device at the same time
// as we use this for testing.
@@ -17,7 +20,12 @@ C10_API std::optional<DeviceType> getAccelerator(bool checked) {
return kPrivateUse1;
} else if (at::hasCUDA()) {
CHECK_NO_PU1
+ CHECK_NO_MTIA
return kCUDA;
+ } else if (at::hasMTIA()) {
+ CHECK_NO_CUDA
+ CHECK_NO_PU1
+ return kMTIA;
} else {
TORCH_CHECK(!checked, "Cannot access accelerator device when none is available.")
return std::nullopt;
diff --git a/aten/src/ATen/detail/AcceleratorHooksInterface.h b/aten/src/ATen/detail/AcceleratorHooksInterface.h
index c099c9f59a..96e15e1f69 100644
--- a/aten/src/ATen/detail/AcceleratorHooksInterface.h
+++ b/aten/src/ATen/detail/AcceleratorHooksInterface.h
@@ -1,7 +1,7 @@
#pragma once
#include <c10/core/Device.h>
-
+#include <c10/core/Stream.h>
namespace at {
// AcceleratorHooksInterface is a shared interface provided by all
@@ -16,6 +16,29 @@ struct TORCH_API AcceleratorHooksInterface {
// Whether the device at device_index is fully initialized or not.
virtual bool hasPrimaryContext(DeviceIndex device_index) const = 0;
+
+ virtual DeviceIndex deviceCount() const {
+ return 0;
+ }
+
+ virtual void setCurrentDevice(DeviceIndex device) const {
+ TORCH_CHECK(false, "Backend doesn't support setCurrentDevice()");
+ }
+
+ virtual DeviceIndex getCurrentDevice() const {
+ TORCH_CHECK(false, "Backend doesn't support getCurrentDevice()");
+ return -1;
+ }
+
+ virtual DeviceIndex exchangeDevice(DeviceIndex device) const {
+ TORCH_CHECK(false, "Backend doesn't support exchangeDevice()");
+ return -1;
+ }
+
+ virtual DeviceIndex maybeExchangeDevice(DeviceIndex device) const {
+ TORCH_CHECK(false, "Backend doesn't support maybeExchangeDevice()");
+ return -1;
+ }
};
} // namespace at
diff --git a/aten/src/ATen/detail/MTIAHooksInterface.cpp b/aten/src/ATen/detail/MTIAHooksInterface.cpp
index 6b69fdb03f..0963881713 100644
--- a/aten/src/ATen/detail/MTIAHooksInterface.cpp
+++ b/aten/src/ATen/detail/MTIAHooksInterface.cpp
@@ -8,19 +8,22 @@
namespace at {
namespace detail {
-
-const MTIAHooksInterface &getMTIAHooks() {
- static MTIAHooksInterface* MTIA_hooks = nullptr;
+const MTIAHooksInterface& getMTIAHooks() {
+ static std::unique_ptr<MTIAHooksInterface> mtia_hooks = nullptr;
static c10::once_flag once;
c10::call_once(once, [] {
- MTIA_hooks =
- MTIAHooksRegistry()->Create("MTIAHooks", MTIAHooksArgs{}).release();
- if (!MTIA_hooks) {
- MTIA_hooks = new MTIAHooksInterface();
+ mtia_hooks = MTIAHooksRegistry()->Create("MTIAHooks", MTIAHooksArgs{});
+ if (!mtia_hooks) {
+ mtia_hooks = std::make_unique<MTIAHooksInterface>();
}
});
- return *MTIA_hooks;
+ return *mtia_hooks;
+}
+
+bool isMTIAHooksBuilt() {
+ return MTIAHooksRegistry()->Has("MTIAHooks");
}
+
} // namespace detail
C10_DEFINE_REGISTRY(MTIAHooksRegistry, MTIAHooksInterface, MTIAHooksArgs)
diff --git a/aten/src/ATen/detail/MTIAHooksInterface.h b/aten/src/ATen/detail/MTIAHooksInterface.h
index c843ca52c2..1da1bda4e6 100644
--- a/aten/src/ATen/detail/MTIAHooksInterface.h
+++ b/aten/src/ATen/detail/MTIAHooksInterface.h
@@ -1,7 +1,9 @@
#pragma once
+#include <c10/core/Device.h>
#include <c10/util/Exception.h>
+#include <c10/core/Stream.h>
#include <c10/util/Registry.h>
#include <ATen/detail/AcceleratorHooksInterface.h>
@@ -20,33 +22,72 @@ constexpr const char* MTIA_HELP =
"to use some MTIA's functionality without MTIA extension included.";
struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
+// This fails if MTIAHooks functions are called but the MTIA
+// backend is not present.
+#define FAIL_MTIAHOOKS_FUNC(func) \
+ TORCH_CHECK(false, "Cannot execute ", func, "() without MTIA backend.");
+
virtual ~MTIAHooksInterface() override = default;
virtual void initMTIA() const {
- TORCH_CHECK(
- false,
- "Cannot initialize MTIA without MTIA Extension for PyTorch.",
- MTIA_HELP);
+    // Avoid logging here, since MTIA needs to init devices first before it
+    // knows how many devices are available. Make this a no-op if the MTIA
+    // extension is not dynamically loaded.
+ return;
}
virtual bool hasMTIA() const {
return false;
}
+ virtual DeviceIndex deviceCount() const override {
+ return 0;
+ }
+
+ virtual void deviceSynchronize(c10::DeviceIndex device_index) const {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ }
+
virtual std::string showConfig() const {
- TORCH_CHECK(
- false,
- "Cannot query detailed MTIA version without MTIA Extension for PyTorch.",
- MTIA_HELP);
+ FAIL_MTIAHOOKS_FUNC(__func__);
}
virtual bool hasPrimaryContext(DeviceIndex device_index) const override {
- TORCH_CHECK(
- false,
- "Cannot check MTIA primary context without MTIA Extension for PyTorch.",
- MTIA_HELP);
+ return false;
+ }
+
+ virtual void setCurrentDevice(DeviceIndex device) const override {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ }
+
+ virtual DeviceIndex getCurrentDevice() const override {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ return -1;
}
+ virtual DeviceIndex exchangeDevice(DeviceIndex device) const override {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ return -1;
+ }
+
+ virtual DeviceIndex maybeExchangeDevice(DeviceIndex device) const override {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ return -1;
+ }
+
+ virtual c10::Stream getCurrentStream(DeviceIndex device) const {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA);
+ }
+
+ virtual c10::Stream getDefaultStream(DeviceIndex device) const {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA);
+ }
+
+ virtual void setCurrentStream(const c10::Stream& stream) const {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ }
};
struct TORCH_API MTIAHooksArgs {};
@@ -57,5 +98,6 @@ C10_DECLARE_REGISTRY(MTIAHooksRegistry, MTIAHooksInterface, MTIAHooksArgs);
namespace detail {
TORCH_API const MTIAHooksInterface& getMTIAHooks();
+TORCH_API bool isMTIAHooksBuilt();
} // namespace detail
} // namespace at
diff --git a/build_variables.bzl b/build_variables.bzl
index cebda39f4b..5939da825c 100644
--- a/build_variables.bzl
+++ b/build_variables.bzl
@@ -822,6 +822,7 @@ libtorch_python_core_sources = [
"torch/csrc/dynamo/init.cpp",
"torch/csrc/functorch/init.cpp",
"torch/csrc/mps/Module.cpp",
+ "torch/csrc/mtia/Module.cpp",
"torch/csrc/inductor/aoti_runner/pybind.cpp",
"torch/csrc/jit/backends/backend_init.cpp",
"torch/csrc/jit/python/init.cpp",
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 9e7cc6a9a6..a7afe60bc2 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -69,6 +69,7 @@ Features described in this documentation are classified by release status:
torch.cuda.memory <torch_cuda_memory>
mps
xpu
+ mtia
meta
torch.backends <backends>
torch.export <export>
diff --git a/docs/source/mtia.rst b/docs/source/mtia.rst
new file mode 100644
index 0000000000..f2f5b5195d
--- /dev/null
+++ b/docs/source/mtia.rst
@@ -0,0 +1,34 @@
+torch.mtia
+===================================
+
+The MTIA backend is implemented out of tree; only the interfaces are defined here.
+
+.. automodule:: torch.mtia
+.. currentmodule:: torch.mtia
+
+.. autosummary::
+ :toctree: generated
+ :nosignatures:
+
+ StreamContext
+ current_device
+ current_stream
+ default_stream
+ device_count
+ init
+ is_available
+ is_initialized
+ set_stream
+ stream
+ synchronize
+ device
+ DeferredMtiaCallError
+
+Streams and events
+------------------
+.. autosummary::
+ :toctree: generated
+ :nosignatures:
+
+ Event
+ Stream
diff --git a/docs/source/torch.rst b/docs/source/torch.rst
index b65a7a5239..32bcadc154 100644
--- a/docs/source/torch.rst
+++ b/docs/source/torch.rst
@@ -684,6 +684,7 @@ Utilities
set_float32_matmul_precision
get_float32_matmul_precision
set_warn_always
+ get_device_module
is_warn_always_enabled
vmap
_assert
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index 8b23117704..34e49e15d8 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -1719,6 +1719,24 @@ _TensorBase = TensorBase
# Defined in torch/csrc/multiprocessing/init.cpp
def _multiprocessing_init() -> None: ...
+# Defined in torch/csrc/Module.cpp
+def _accelerator_hooks_device_count() -> _int: ...
+def _accelerator_hooks_set_current_device(device_index: _int) -> None: ...
+def _accelerator_hooks_get_current_device() -> _int: ...
+def _accelerator_hooks_exchange_device(device_index: _int) -> _int: ...
+def _accelerator_hooks_maybe_exchange_device(device_index: _int) -> _int: ...
+def _get_accelerator(check: _bool = False) -> _device: ...
+
+# Defined in torch/csrc/mtia/Module.cpp
+def _mtia_init() -> None: ...
+def _mtia_isBuilt() -> _bool: ...
+def _mtia_isInBadFork() -> _bool: ...
+def _mtia_deviceSynchronize() -> None: ...
+def _mtia_getCurrentStream(device: _int) -> Stream: ...
+def _mtia_setCurrentStream(stream: Stream) -> None: ...
+def _mtia_getDefaultStream(device: _int) -> Stream: ...
+
+
# Defined in torch/csrc/mps/Module.cpp
def _mps_deviceSynchronize() -> None: ...
def _mps_get_default_generator() -> Generator: ...
diff --git a/torch/_C/_autograd.pyi b/torch/_C/_autograd.pyi
index 34eb451be0..118d913f68 100644
--- a/torch/_C/_autograd.pyi
+++ b/torch/_C/_autograd.pyi
@@ -24,6 +24,7 @@ class DeviceType(Enum):
FPGA = ...
MAIA = ...
XLA = ...
+ MTIA = ...
MPS = ...
HPU = ...
Meta = ...
diff --git a/torch/__init__.py b/torch/__init__.py
index 9a7249f220..846038e351 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -58,6 +58,7 @@ __all__ = [
'SymBool', 'sym_not', 'unravel_index',
'sym_int', 'sym_float', 'sym_max', 'sym_min', 'sym_ite', 'compile', 'vmap',
'export', 'autocast', 'cond', 'GradScaler',
+ 'get_device_module',
]
################################################################################
@@ -1579,6 +1580,7 @@ from torch import cuda as cuda
from torch import cpu as cpu
from torch import mps as mps
from torch import xpu as xpu
+from torch import mtia as mtia
from torch import autograd as autograd
from torch.autograd import (
no_grad as no_grad,
@@ -2016,6 +2018,27 @@ else:
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
+def get_device_module(device: Optional[Union[torch.device, str]] = None):
+ """
+    Returns the module associated with a given device (e.g., torch.device('cuda'), "mtia:0", "xpu", ...).
+    If no device is given, returns the module for the current accelerator, or CPU if none is present.
+ """
+ if isinstance(device, torch.device):
+ device_module_name = device.type
+ elif isinstance(device, str):
+ device_module_name = torch.device(device).type
+ elif device is None:
+ # Using default accelerator type. If no accelerator is available, it automatically returns CPU device.
+ device_module_name = torch._C._get_accelerator().type
+ else:
+ raise RuntimeError(f"Invalid value of device '{device}', expect torch.device, str, or None")
+ device_module = getattr(torch, device_module_name, None)
+ if device_module is None:
+ raise RuntimeError(
+ f"Device '{device_module_name}' does not have a corresponding module registered as 'torch.{device_module_name}'."
+ )
+ return device_module
+
def _constrain_as_value(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None):
"""
diff --git a/torch/_utils.py b/torch/_utils.py
index 7f9a1af43f..43c6284d24 100644
--- a/torch/_utils.py
+++ b/torch/_utils.py
@@ -713,6 +713,8 @@ def _get_available_device_type():
return "cuda"
if hasattr(torch, "xpu") and torch.xpu.is_available(): # type: ignore[attr-defined]
return "xpu"
+ if hasattr(torch, "mtia") and torch.mtia.is_available():
+ return "mtia"
custom_backend_name = torch._C._get_privateuse1_backend_name()
custom_device_mod = getattr(torch, custom_backend_name, None)
if custom_device_mod and custom_device_mod.is_available():
@@ -727,6 +729,8 @@ def _get_device_attr(get_member):
return get_member(torch.cuda)
if device_type and device_type.lower() == "xpu":
return get_member(torch.xpu) # type: ignore[attr-defined]
+ if device_type and device_type.lower() == "mtia":
+ return get_member(torch.mtia)
if device_type == torch._C._get_privateuse1_backend_name():
return get_member(getattr(torch, device_type))
# add more available device types here
diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp
index 5723a024e7..b446d29395 100644
--- a/torch/csrc/Module.cpp
+++ b/torch/csrc/Module.cpp
@@ -1,3 +1,4 @@
+#include <ATen/DeviceAccelerator.h>
#include <c10/util/Optional.h>
#include <fmt/core.h>
#include <sys/types.h>
@@ -16,10 +17,12 @@
#include <ATen/Parallel.h>
#include <ATen/Utils.h>
#include <ATen/core/Vitals.h>
+#include <ATen/detail/AcceleratorHooksInterface.h>
#include <ATen/dlpack.h>
#include <ATen/native/ConvUtils.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/Normalization.h>
+#include <c10/core/Device.h>
#include <c10/core/DispatchKeySet.h>
#include <c10/util/AbortHandler.h>
#include <c10/util/Backtrace.h>
@@ -72,6 +75,7 @@
#include <torch/csrc/lazy/python/init.h>
#include <torch/csrc/monitor/python_init.h>
#include <torch/csrc/mps/Module.h>
+#include <torch/csrc/mtia/Module.h>
#include <torch/csrc/multiprocessing/init.h>
#include <torch/csrc/onnx/init.h>
#include <torch/csrc/profiler/python/init.h>
@@ -1641,6 +1645,7 @@ PyObject* initModule() {
#ifdef USE_XPU
torch::xpu::initModule(module);
#endif
+ torch::mtia::initModule(module);
torch::cpu::initModule(module);
torch::initVerboseBindings(module);
ASSERT_TRUE(THPStorage_init(module));
@@ -1975,6 +1980,70 @@ Call this whenever a new thread is created in order to propagate values from
return at::impl::ThreadLocalPythonObjects::get_state().contains(key);
});
+ py_module.def("_accelerator_hooks_device_count", []() {
+ auto device_type = at::getAccelerator();
+ if (device_type.has_value()) {
+ return at::globalContext()
+ .getAcceleratorHooksInterface(device_type.value())
+ .deviceCount();
+ }
+ return c10::DeviceIndex(-1);
+ });
+
+ py_module.def(
+ "_accelerator_hooks_set_current_device",
+ [](c10::DeviceIndex device_index) {
+ auto device_type = at::getAccelerator();
+ if (device_type.has_value()) {
+ at::globalContext()
+ .getAcceleratorHooksInterface(device_type.value())
+ .setCurrentDevice(device_index);
+ }
+ });
+
+ py_module.def("_accelerator_hooks_get_current_device", []() {
+ auto device_type = at::getAccelerator();
+ if (device_type.has_value()) {
+ return at::globalContext()
+ .getAcceleratorHooksInterface(device_type.value())
+ .getCurrentDevice();
+ }
+ return c10::DeviceIndex(-1);
+ });
+
+ py_module.def(
+ "_accelerator_hooks_exchange_device", [](c10::DeviceIndex device_index) {
+ auto device_type = at::getAccelerator();
+ if (device_type.has_value()) {
+ return at::globalContext()
+ .getAcceleratorHooksInterface(device_type.value())
+ .exchangeDevice(device_index);
+ }
+ return c10::DeviceIndex(-1);
+ });
+
+ py_module.def(
+ "_accelerator_hooks_maybe_exchange_device",
+ [](c10::DeviceIndex device_index) {
+ auto device_type = at::getAccelerator();
+ if (device_type.has_value()) {
+ return at::globalContext()
+ .getAcceleratorHooksInterface(device_type.value())
+ .maybeExchangeDevice(device_index);
+ }
+ return c10::DeviceIndex(-1);
+ });
+
+ py_module.def(
+ "_get_accelerator",
+ [](c10::optional<bool> check = c10::nullopt) {
+ return c10::Device(
+ at::getAccelerator(check.value_or(false))
+ .value_or(c10::DeviceType::CPU),
+ -1);
+ },
+ py::arg("check") = nullptr);
+
#ifdef USE_CUDA
PyObject* has_cuda = Py_True;
#else
diff --git a/torch/csrc/mtia/Module.cpp b/torch/csrc/mtia/Module.cpp
new file mode 100644
index 0000000000..84cc11f718
--- /dev/null
+++ b/torch/csrc/mtia/Module.cpp
@@ -0,0 +1,81 @@
+#include <ATen/ATen.h>
+#include <c10/util/CallOnce.h>
+#include <torch/csrc/Generator.h>
+#include <torch/csrc/Stream.h>
+#include <torch/csrc/python_headers.h>
+#include <torch/csrc/utils/device_lazy_init.h>
+#include <torch/csrc/utils/pybind.h>
+
+#include <c10/core/DeviceType.h>
+#include <c10/core/Stream.h>
+#ifndef WIN32
+#include <pthread.h>
+#endif
+
+namespace torch {
+namespace mtia {
+
+static bool in_bad_fork = false; // True for children forked after mtia init
+
+#ifndef WIN32
+// Called in the forked child if mtia has already been initialized
+static void forked_child() {
+ in_bad_fork = true;
+ torch::utils::set_requires_device_init(at::kMTIA, true);
+}
+#endif
+
+// Should be called before the first mtia call.
+// Note: This is distinct from initExtension because a stub mtia implementation
+// has some working functions (e.g. device_count) but cannot fully initialize.
+static void poison_fork() {
+#ifndef WIN32
+ static c10::once_flag flag;
+ c10::call_once(flag, [] { pthread_atfork(nullptr, nullptr, forked_child); });
+#endif
+}
+
+void initModule(PyObject* module) {
+ auto m = py::handle(module).cast<py::module>();
+
+ m.def("_mtia_init", []() {
+ TORCH_INTERNAL_ASSERT(!in_bad_fork); // Handled at python level
+ poison_fork();
+ at::globalContext().lazyInitMTIA();
+ });
+
+ m.def("_mtia_isBuilt", []() {
+ // Check if the MTIAHooks class has been registered with the registry.
+ return at::detail::isMTIAHooksBuilt();
+ });
+
+ m.def("_mtia_isInBadFork", []() { return in_bad_fork; });
+
+ m.def("_mtia_getCurrentStream", [](c10::DeviceIndex device_index) {
+ torch::utils::device_lazy_init(at::kMTIA);
+ return at::detail::getMTIAHooks().getCurrentStream(device_index);
+ });
+
+ m.def("_mtia_deviceSynchronize", [](c10::DeviceIndex device_index) {
+ torch::utils::device_lazy_init(at::kMTIA);
+ at::detail::getMTIAHooks().deviceSynchronize(
+ at::detail::getMTIAHooks().getCurrentDevice());
+ });
+
+ m.def("_mtia_getDefaultStream", [](c10::DeviceIndex device_index) {
+ torch::utils::device_lazy_init(at::kMTIA);
+ return at::detail::getMTIAHooks().getDefaultStream(device_index);
+ });
+
+ m.def("_mtia_setCurrentStream", [](const c10::Stream& stream) {
+ torch::utils::device_lazy_init(at::kMTIA);
+ auto device = at::detail::getMTIAHooks().getCurrentDevice();
+ if (device != stream.device_index()) {
+ at::detail::getMTIAHooks().setCurrentDevice(stream.device_index());
+ }
+ at::detail::getMTIAHooks().setCurrentStream(stream);
+ });
+}
+
+} // namespace mtia
+} // namespace torch
diff --git a/torch/csrc/mtia/Module.h b/torch/csrc/mtia/Module.h
new file mode 100644
index 0000000000..96a98ed448
--- /dev/null
+++ b/torch/csrc/mtia/Module.h
@@ -0,0 +1,12 @@
+#pragma once
+
+#include <torch/csrc/python_headers.h>
+
+namespace torch {
+namespace mtia {
+
+// PyMethodDef* python_functions();
+void initModule(PyObject* module);
+
+} // namespace mtia
+} // namespace torch
diff --git a/torch/csrc/utils/pybind.h b/torch/csrc/utils/pybind.h
index 36cb83659a..1a4e7bb26f 100644
--- a/torch/csrc/utils/pybind.h
+++ b/torch/csrc/utils/pybind.h
@@ -194,6 +194,12 @@ struct type_caster<c10::Stream> {
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(c10::Stream, _("torch.Stream"));
+ // PYBIND11_TYPE_CASTER defines a member field called value. Since c10::Stream
+ // cannot be default-initialized, we provide this constructor to explicitly
+ // initialize that field. The value doesn't matter as it will be overwritten
+ // after a successful call to load.
+ type_caster() : value(c10::Stream::DEFAULT, c10::Device(c10::kCPU, 0)) {}
+
bool load(handle src, bool) {
PyObject* obj = src.ptr();
if (THPStream_Check(obj)) {
diff --git a/torch/mtia/__init__.py b/torch/mtia/__init__.py
new file mode 100644
index 0000000000..4007f0e584
--- /dev/null
+++ b/torch/mtia/__init__.py
@@ -0,0 +1,262 @@
+r"""
+This package enables an interface for accessing MTIA backend in python
+"""
+
+import threading
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import torch
+
+from torch.types import Device
+
+from .. import device as _device, Tensor
+from .._utils import _dummy_type, _LazySeedTracker, classproperty
+from ._utils import _get_device_index
+
+_device_t = Union[_device, str, int, None]
+
+# torch.mtia.Event/Stream is alias of torch.Event/Stream
+Event = torch.Event
+Stream = torch.Stream
+
+_initialized = False
+_queued_calls: List[
+ Tuple[Callable[[], None], List[str]]
+] = [] # don't invoke these until initialization occurs
+_tls = threading.local()
+_initialization_lock = threading.Lock()
+_lazy_seed_tracker = _LazySeedTracker()
+
+
+def init():
+ _lazy_init()
+
+
+def is_initialized():
+ r"""Return whether PyTorch's MTIA state has been initialized."""
+ return _initialized and not _is_in_bad_fork()
+
+
+def _is_in_bad_fork() -> bool:
+ return torch._C._mtia_isInBadFork()
+
+
+def _lazy_init() -> None:
+ global _initialized, _queued_calls
+ if is_initialized() or hasattr(_tls, "is_initializing"):
+ return
+ with _initialization_lock:
+ # We be double-checked locking, boys! This is OK because
+ # the above test was GIL protected anyway. The inner test
+ # is for when a thread blocked on some other thread which was
+ # doing the initialization; when they get the lock, they will
+ # find there is nothing left to do.
+ if is_initialized():
+ return
+ # It is important to prevent other threads from entering _lazy_init
+ # immediately, while we are still guaranteed to have the GIL, because some
+ # of the C calls we make below will release the GIL
+ if _is_in_bad_fork():
+ raise RuntimeError(
+ "Cannot re-initialize MTIA in forked subprocess. To use MTIA with "
+ "multiprocessing, you must use the 'spawn' start method"
+ )
+ if not _is_compiled():
+ raise AssertionError("Torch not compiled with MTIA enabled")
+
+ torch._C._mtia_init()
+ # Some of the queued calls may reentrantly call _lazy_init();
+ # we need to just return without initializing in that case.
+ # However, we must not let any *other* threads in!
+ _tls.is_initializing = True
+
+ for calls in _lazy_seed_tracker.get_calls():
+ if calls:
+ _queued_calls.append(calls)
+
+ try:
+ for queued_call, orig_traceback in _queued_calls:
+ try:
+ queued_call()
+ except Exception as e:
+ msg = (
+ f"MTIA call failed lazily at initialization with error: {str(e)}\n\n"
+ f"MTIA call was originally invoked at:\n\n{''.join(orig_traceback)}"
+ )
+ raise DeferredMtiaCallError(msg) from e
+ finally:
+ delattr(_tls, "is_initializing")
+ _initialized = True
+
+
+class DeferredMtiaCallError(Exception):
+ pass
+
+
+def _is_compiled() -> bool:
+ r"""Return true if compiled with MTIA support."""
+ return torch._C._mtia_isBuilt()
+
+
+def is_available() -> bool:
+ r"""Return true if MTIA device is available"""
+ if not _is_compiled():
+ return False
+ # MTIA has to init devices first to know if there is any devices available.
+ return device_count() > 0
+
+
+def synchronize() -> None:
+ r"""Waits for all jobs in all streams on a MTIA device to complete."""
+ return torch._C._mtia_deviceSynchronize()
+
+
+def device_count() -> int:
+ r"""Return the number of MTIA devices available."""
+ return torch._C._accelerator_hooks_device_count()
+
+
+def current_device() -> int:
+ r"""Return the index of a currently selected device."""
+ return torch._C._accelerator_hooks_get_current_device()
+
+
+def current_stream(device: Optional[_device_t] = None) -> Stream:
+ r"""Return the currently selected :class:`Stream` for a given device.
+
+ Args:
+ device (torch.device or int, optional): selected device. Returns
+ the currently selected :class:`Stream` for the current device, given
+ by :func:`~torch.mtia.current_device`, if :attr:`device` is ``None``
+ (default).
+ """
+ return torch._C._mtia_getCurrentStream(_get_device_index(device, optional=True))
+
+
+def default_stream(device: Optional[_device_t] = None) -> Stream:
+ r"""Return the default :class:`Stream` for a given device.
+
+ Args:
+ device (torch.device or int, optional): selected device. Returns
+ the default :class:`Stream` for the current device, given by
+ :func:`~torch.mtia.current_device`, if :attr:`device` is ``None``
+ (default).
+ """
+ return torch._C._mtia_getDefaultStream(_get_device_index(device, optional=True))
+
+
+def set_stream(stream: Stream):
+ r"""Set the current stream.This is a wrapper API to set the stream.
+ Usage of this function is discouraged in favor of the ``stream``
+ context manager.
+
+ Args:
+ stream (Stream): selected stream. This function is a no-op
+ if this argument is ``None``.
+ """
+ if stream is None:
+ return
+ torch._C._mtia_setCurrentStream(stream)
+
+
+class device:
+ r"""Context-manager that changes the selected device.
+
+ Args:
+ device (torch.device or int): device index to select. It's a no-op if
+ this argument is a negative integer or ``None``.
+ """
+
+ def __init__(self, device: Any):
+ self.idx = _get_device_index(device, optional=True)
+ self.prev_idx = -1
+
+ def __enter__(self):
+ self.prev_idx = torch._C._accelerator_hooks_maybe_exchange_device(self.idx)
+
+ def __exit__(self, type: Any, value: Any, traceback: Any):
+ self.idx = torch._C._accelerator_hooks_maybe_exchange_device(self.prev_idx)
+ return False
+
+
+class StreamContext:
+ r"""Context-manager that selects a given stream.
+
+ All MTIA kernels queued within its context will be enqueued on a selected
+ stream.
+
+ Args:
+ Stream (Stream): selected stream. This manager is a no-op if it's
+ ``None``.
+ .. note:: Streams are per-device.
+ """
+
+ cur_stream: Optional["torch.mtia.Stream"]
+
+ def __init__(self, stream: Optional["torch.mtia.Stream"]):
+ self.stream = stream
+ self.idx = _get_device_index(None, True)
+ if not torch.jit.is_scripting():
+ if self.idx is None:
+ self.idx = -1
+
+ self.src_prev_stream = (
+ None if not torch.jit.is_scripting() else torch.mtia.default_stream(None)
+ )
+ self.dst_prev_stream = (
+ None if not torch.jit.is_scripting() else torch.mtia.default_stream(None)
+ )
+
+ def __enter__(self):
+ # Local cur_stream variable for type refinement
+ cur_stream = self.stream
+ # Return if stream is None or MTIA device not available
+ if cur_stream is None or self.idx == -1:
+ return
+ self.src_prev_stream = torch.mtia.current_stream(None)
+
+ # If the stream is not on the current device, then
+ # set the current stream on the device
+ if self.src_prev_stream.device != cur_stream.device:
+ with device(cur_stream.device):
+ self.dst_prev_stream = torch.mtia.current_stream(cur_stream.device)
+ torch.mtia.set_stream(cur_stream)
+
+ def __exit__(self, type: Any, value: Any, traceback: Any):
+ # Local cur_stream variable for type refinement
+ cur_stream = self.stream
+ # If stream is None or no MTIA device available, return
+ if cur_stream is None or self.idx == -1:
+ return
+
+ # Reset the stream on the original device
+ # and destination device
+ if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr]
+ torch.mtia.set_stream(self.dst_prev_stream) # type: ignore[arg-type]
+ torch.mtia.set_stream(self.src_prev_stream) # type: ignore[arg-type]
+
+
+def stream(stream: Optional["torch.mtia.Stream"]) -> StreamContext:
+ r"""Wrap around the Context-manager StreamContext that selects a given stream.
+
+ Arguments:
+ stream (Stream): selected stream. This manager is a no-op if it's
+ ``None``.
+    .. note:: In eager mode ``stream`` is of type Stream class; torch.mtia.stream is not supported in JIT.
+ """
+ return StreamContext(stream)
+
+
+__all__ = [
+ "init",
+ "is_available",
+ "is_initialized",
+ "synchronize",
+ "device_count",
+ "current_device",
+ "current_stream",
+ "default_stream",
+ "set_stream",
+ "stream",
+ "device",
+]
diff --git a/torch/mtia/_utils.py b/torch/mtia/_utils.py
new file mode 100644
index 0000000000..090e26f321
--- /dev/null
+++ b/torch/mtia/_utils.py
@@ -0,0 +1,38 @@
+from typing import Any
+
+import torch
+
+# The _get_device_index has been moved to torch.utils._get_device_index
+from torch._utils import _get_device_index as _torch_get_device_index
+
+
+def _get_device_index(
+ device: Any, optional: bool = False, allow_cpu: bool = False
+) -> int:
+ r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``.
+
+ If :attr:`device` is a torch.device object, returns the device index if it
+ is a MTIA device. Note that for a MTIA device without a specified index,
+ i.e., ``torch.device('mtia')``, this will return the current default MTIA
+ device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
+ CPU devices will be accepted and ``-1`` will be returned in this case.
+
+ If :attr:`device` is a Python integer, it is returned as is.
+
+ If :attr:`device` is ``None``, this will return the current default MTIA
+ device if :attr:`optional` is ``True``.
+ """
+ if isinstance(device, int):
+ return device
+ if isinstance(device, str):
+ device = torch.device(device)
+ if isinstance(device, torch.device):
+ if allow_cpu:
+ if device.type not in ["mtia", "cpu"]:
+ raise ValueError(f"Expected a mtia or cpu device, but got: {device}")
+ elif device.type != "mtia":
+ raise ValueError(f"Expected a mtia device, but got: {device}")
+ if not torch.jit.is_scripting():
+ if isinstance(device, torch.mtia.device):
+ return device.idx
+ return _torch_get_device_index(device, optional, allow_cpu)
diff --git a/torch/overrides.py b/torch/overrides.py
index 728c75c090..6c521bc700 100644
--- a/torch/overrides.py
+++ b/torch/overrides.py
@@ -283,6 +283,7 @@ def get_ignored_functions() -> Set[Callable]:
torch.use_deterministic_algorithms,
torch.is_deterministic_algorithms_warn_only_enabled,
torch.set_deterministic_debug_mode,
+ torch.get_device_module,
torch.get_deterministic_debug_mode,
torch.set_float32_matmul_precision,
torch.get_float32_matmul_precision,
|
2.41.0
|
461e7ed9e68d1b7274e69d5396ff343ac120568
|
Thu, 25 Apr 2024 11:48:49 -0700
|
[PATCH 0728/1000] Add test_cpp_extensions tests for stream_and_event and mtia_backend (#123614)
|
Test the generic torch.Stream/Event with a fake device guard and hooks. Since we added a fake device backend, it is mutually exclusive with other backends; the tests are skipped if TEST_CUDA or TEST_ROCM is true. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123614 Approved by: https://github.com/albanD ghstack dependencies: #123611, #123612
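A condensed sketch of what the new test exercises once the fake backend is loaded; the build directory handling and skip conditions of the real test file are omitted here:
```
import torch
import torch.utils.cpp_extension

# Loading the extension as a non-Python module registers the fake MTIA
# device guard and hooks with the dispatcher as a side effect.
torch.utils.cpp_extension.load(
    name="mtia_extension",
    sources=["cpp_extensions/mtia_extension.cpp"],  # relative to the test/ directory
    is_python_module=False,
    verbose=True,
)

default_stream = torch.mtia.current_stream()
user_stream = torch.mtia.Stream(device="mtia:0")
assert default_stream.stream_id == 0   # default stream ids start at 0 in mtia_extension.cpp
with torch.mtia.stream(user_stream):
    assert torch.mtia.current_stream() == user_stream
default_stream.synchronize()
```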
|
diff --git a/test/cpp_extensions/mtia_extension.cpp b/test/cpp_extensions/mtia_extension.cpp
new file mode 100644
index 0000000000..3b02d3968e
--- /dev/null
+++ b/test/cpp_extensions/mtia_extension.cpp
@@ -0,0 +1,219 @@
+#include <ATen/detail/MTIAHooksInterface.h>
+#include <c10/core/Device.h>
+#include <c10/core/Stream.h>
+#include <c10/core/impl/DeviceGuardImplInterface.h>
+#include <c10/util/Logging.h>
+#include <torch/csrc/utils/device_lazy_init.h>
+#include <thread>
+namespace torch::mtia {
+
+constexpr c10::DeviceType kMTIADeviceType = c10::DeviceType::MTIA;
+constexpr c10::DeviceIndex kMTIADeviceCount = 2;
+static thread_local c10::DeviceIndex current_device = 0;
+static thread_local std::array<c10::Stream, kMTIADeviceCount> current_streams =
+ {c10::Stream::unpack3(0, 0, c10::DeviceType::MTIA),
+ c10::Stream::unpack3(0, 1, c10::DeviceType::MTIA)};
+static int64_t stream_id_gen = 1;
+static int64_t event_id_gen = 1;
+static std::array<c10::Stream, kMTIADeviceCount> default_streams = {
+ c10::Stream::unpack3(0, 0, c10::DeviceType::MTIA),
+ c10::Stream::unpack3(0, 1, c10::DeviceType::MTIA)};
+struct MTIAGuardImpl final : public c10::impl::DeviceGuardImplInterface {
+ MTIAGuardImpl() = default;
+ explicit MTIAGuardImpl(c10::DeviceType t) {
+ TORCH_INTERNAL_ASSERT(t == kMTIADeviceType);
+ }
+ c10::DeviceType type() const override {
+ return kMTIADeviceType;
+ }
+ c10::Device exchangeDevice(c10::Device d) const override {
+ c10::Device old_device = getDevice();
+ if (old_device.index() != d.index()) {
+ setDevice(d);
+ }
+ return old_device;
+ }
+ c10::Device getDevice() const override {
+ return c10::Device(kMTIADeviceType, current_device);
+ }
+
+ void setDevice(c10::Device d) const override {
+ c10::Device current_device = getDevice();
+ if (current_device.index() != d.index()) {
+ current_device = d;
+ }
+ }
+ void uncheckedSetDevice(c10::Device d) const noexcept override {
+ (void)d;
+ }
+ c10::Stream getStream(c10::Device d) const noexcept override {
+ return current_streams[d.index()];
+ }
+ c10::Stream getNewStream(c10::Device d, int priority = 0) const override {
+ (void)priority;
+ return c10::Stream::unpack3(stream_id_gen++, d.index(), d.type());
+ }
+ c10::Stream getDefaultStream(c10::Device d) const override {
+ return default_streams[d.index()];
+ }
+ c10::Stream getStreamFromGlobalPool(
+ c10::Device d,
+ bool isHighPriority = false) const override {
+ return c10::Stream::unpack3(stream_id_gen++, d.index(), d.type());
+ }
+ // NB: These do NOT set the current device
+ c10::Stream exchangeStream(c10::Stream s) const noexcept override {
+ c10::Stream old_stream = getStream(s.device());
+ return old_stream;
+ }
+ c10::DeviceIndex deviceCount() const noexcept override {
+ return kMTIADeviceCount;
+ }
+
+ void destroyEvent(void* event, const c10::DeviceIndex device_index)
+ const noexcept override {
+ (void)device_index;
+ }
+
+ void record(
+ void** event,
+ const c10::Stream& stream,
+ const c10::DeviceIndex device_index,
+ const c10::EventFlag flag) const override {
+ TORCH_CHECK(
+ device_index == -1 || device_index == stream.device_index(),
+ "Event device index ",
+ device_index,
+ " does not match recording stream's device index ",
+ stream.device_index(),
+ ".");
+
+ const auto orig_device = getDevice();
+
+ setDevice(stream.device());
+
+ if (*event == nullptr) {
+ *event = reinterpret_cast<void*>(event_id_gen++);
+ }
+ setDevice(orig_device);
+ }
+
+ void block(void* event, const c10::Stream& stream) const override {
+ (void)event;
+ (void)stream;
+ }
+
+ // May be called from any device
+ bool queryEvent(void* event) const override {
+ (void)event;
+ return true;
+ }
+
+ // Stream-related functions
+ bool queryStream(const c10::Stream& stream) const override {
+ (void)stream;
+ return true;
+ }
+
+ void synchronizeStream(const c10::Stream& stream) const override {
+ (void)stream;
+ }
+
+ void recordDataPtrOnStream(
+ const c10::DataPtr& data_ptr,
+ const c10::Stream& stream) const override {
+ (void)data_ptr;
+ (void)stream;
+ }
+
+ double elapsedTime(void* event1, void* event2) const override {
+ uint64_t elapsed_time = 1e6;
+ return (double)(elapsed_time / 1e6);
+ }
+
+ void synchronizeEvent(void* event) const override {
+ (void)event;
+ }
+};
+
+struct MTIAHooks : public at::MTIAHooksInterface {
+ explicit MTIAHooks(at::MTIAHooksArgs) {}
+ void initMTIA() const override {}
+
+ bool hasMTIA() const override {
+ return true;
+ }
+
+ c10::DeviceIndex deviceCount() const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+ return c10::DeviceIndex(2);
+ }
+
+ void deviceSynchronize(c10::DeviceIndex device_index) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+ (void)device_index;
+ }
+
+ std::string showConfig() const override {
+ return "None config";
+ }
+
+ c10::DeviceIndex exchangeDevice(c10::DeviceIndex device) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+ auto orig_device = current_device;
+ if (current_device != device) {
+ current_device = device;
+ }
+ return orig_device;
+ }
+
+ c10::DeviceIndex maybeExchangeDevice(c10::DeviceIndex device) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ auto orig_device = current_device;
+ if (current_device != device) {
+ current_device = device;
+ }
+ return orig_device;
+ }
+
+ c10::Stream getDefaultStream(c10::DeviceIndex device) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ return default_streams[device];
+ }
+
+ c10::Stream getCurrentStream(c10::DeviceIndex device) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ return current_streams[device];
+ }
+
+ void setCurrentStream(const c10::Stream& stream) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ current_streams[stream.device_index()] = stream;
+ }
+
+ c10::DeviceIndex getCurrentDevice() const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ return current_device;
+ }
+
+ void setCurrentDevice(c10::DeviceIndex device) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ if (current_device != device) {
+ current_device = device;
+ }
+ }
+};
+
+using at::MTIAHooksRegistry;
+using at::RegistererMTIAHooksRegistry;
+
+REGISTER_MTIA_HOOKS(MTIAHooks);
+C10_REGISTER_GUARD_IMPL(MTIA, MTIAGuardImpl);
+
+} // namespace torch::mtia
diff --git a/test/run_test.py b/test/run_test.py
index 3626d31fc2..516dbc753f 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -191,6 +191,8 @@ XPU_TEST = [
RUN_PARALLEL_BLOCKLIST = [
"test_cpp_extensions_jit",
"test_cpp_extensions_open_device_registration",
+ "test_cpp_extensions_stream_and_event",
+ "test_cpp_extensions_mtia_backend",
"test_jit_disabled",
"test_mobile_optimizer",
"test_multiprocessing",
diff --git a/test/test_cpp_extensions_mtia_backend.py b/test/test_cpp_extensions_mtia_backend.py
new file mode 100644
index 0000000000..f1613dcf7d
--- /dev/null
+++ b/test/test_cpp_extensions_mtia_backend.py
@@ -0,0 +1,155 @@
+# Owner(s): ["module: mtia"]
+
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+
+import torch
+import torch.testing._internal.common_utils as common
+import torch.utils.cpp_extension
+from torch.testing._internal.common_utils import (
+ IS_ARM64,
+ IS_LINUX,
+ skipIfTorchDynamo,
+ TEST_CUDA,
+ TEST_PRIVATEUSE1,
+)
+from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
+
+
+# define TEST_ROCM before changing TEST_CUDA
+TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None
+TEST_CUDA = TEST_CUDA and CUDA_HOME is not None
+
+
+def remove_build_path():
+ if sys.platform == "win32":
+ # Not wiping extensions build folder because Windows
+ return
+ default_build_root = torch.utils.cpp_extension.get_default_build_root()
+ if os.path.exists(default_build_root):
+ shutil.rmtree(default_build_root, ignore_errors=True)
+
+
+@unittest.skipIf(
+ IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1 or TEST_ROCM,
+ "Only on linux platform and mutual exclusive to other backends",
+)
+@torch.testing._internal.common_utils.markDynamoStrictTest
+class TestCppExtensionMTIABackend(common.TestCase):
+ """Tests MTIA backend with C++ extensions."""
+
+ module = None
+
+ def setUp(self):
+ super().setUp()
+ # cpp extensions use relative paths. Those paths are relative to
+ # this file, so we'll change the working directory temporarily
+ self.old_working_dir = os.getcwd()
+ os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+ def tearDown(self):
+ super().tearDown()
+ # return the working directory (see setUp)
+ os.chdir(self.old_working_dir)
+
+ @classmethod
+ def tearDownClass(cls):
+ remove_build_path()
+
+ @classmethod
+ def setUpClass(cls):
+ remove_build_path()
+ build_dir = tempfile.mkdtemp()
+ # Load the fake device guard impl.
+ cls.module = torch.utils.cpp_extension.load(
+ name="mtia_extension",
+ sources=["cpp_extensions/mtia_extension.cpp"],
+ build_directory=build_dir,
+ extra_include_paths=[
+ "cpp_extensions",
+ "path / with spaces in it",
+ "path with quote'",
+ ],
+ is_python_module=False,
+ verbose=True,
+ )
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_get_device_module(self):
+ device = torch.device("mtia:0")
+ default_stream = torch.get_device_module(device).current_stream()
+ self.assertEqual(
+ default_stream.device_type, int(torch._C._autograd.DeviceType.MTIA)
+ )
+ print(torch._C.Stream.__mro__)
+ print(torch.cuda.Stream.__mro__)
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_stream_basic(self):
+ default_stream = torch.mtia.current_stream()
+ user_stream = torch.mtia.Stream()
+ self.assertEqual(torch.mtia.current_stream(), default_stream)
+ self.assertNotEqual(default_stream, user_stream)
+ # Check mtia_extension.cpp, default stream id starts from 0.
+ self.assertEqual(default_stream.stream_id, 0)
+ self.assertNotEqual(user_stream.stream_id, 0)
+ with torch.mtia.stream(user_stream):
+ self.assertEqual(torch.mtia.current_stream(), user_stream)
+ self.assertTrue(user_stream.query())
+ default_stream.synchronize()
+ self.assertTrue(default_stream.query())
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_stream_context(self):
+ mtia_stream_0 = torch.mtia.Stream(device="mtia:0")
+ mtia_stream_1 = torch.mtia.Stream(device="mtia:0")
+ print(mtia_stream_0)
+ print(mtia_stream_1)
+ with torch.mtia.stream(mtia_stream_0):
+ current_stream = torch.mtia.current_stream()
+ msg = f"current_stream {current_stream} should be {mtia_stream_0}"
+ self.assertTrue(current_stream == mtia_stream_0, msg=msg)
+
+ with torch.mtia.stream(mtia_stream_1):
+ current_stream = torch.mtia.current_stream()
+ msg = f"current_stream {current_stream} should be {mtia_stream_1}"
+ self.assertTrue(current_stream == mtia_stream_1, msg=msg)
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_stream_context_different_device(self):
+ device_0 = torch.device("mtia:0")
+ device_1 = torch.device("mtia:1")
+ mtia_stream_0 = torch.mtia.Stream(device=device_0)
+ mtia_stream_1 = torch.mtia.Stream(device=device_1)
+ print(mtia_stream_0)
+ print(mtia_stream_1)
+ orig_current_device = torch.mtia.current_device()
+ with torch.mtia.stream(mtia_stream_0):
+ current_stream = torch.mtia.current_stream()
+ self.assertTrue(torch.mtia.current_device() == device_0.index)
+ msg = f"current_stream {current_stream} should be {mtia_stream_0}"
+ self.assertTrue(current_stream == mtia_stream_0, msg=msg)
+ self.assertTrue(torch.mtia.current_device() == orig_current_device)
+ with torch.mtia.stream(mtia_stream_1):
+ current_stream = torch.mtia.current_stream()
+ self.assertTrue(torch.mtia.current_device() == device_1.index)
+ msg = f"current_stream {current_stream} should be {mtia_stream_1}"
+ self.assertTrue(current_stream == mtia_stream_1, msg=msg)
+ self.assertTrue(torch.mtia.current_device() == orig_current_device)
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_device_context(self):
+ device_0 = torch.device("mtia:0")
+ device_1 = torch.device("mtia:1")
+ with torch.mtia.device(device_0):
+ self.assertTrue(torch.mtia.current_device() == device_0.index)
+
+ with torch.mtia.device(device_1):
+ self.assertTrue(torch.mtia.current_device() == device_1.index)
+
+
+if __name__ == "__main__":
+ common.run_tests()
diff --git a/test/test_cpp_extensions_stream_and_event.py b/test/test_cpp_extensions_stream_and_event.py
new file mode 100644
index 0000000000..728ac5f980
--- /dev/null
+++ b/test/test_cpp_extensions_stream_and_event.py
@@ -0,0 +1,109 @@
+# Owner(s): ["module: mtia"]
+
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+
+import torch
+import torch.testing._internal.common_utils as common
+import torch.utils.cpp_extension
+from torch.testing._internal.common_utils import (
+ IS_ARM64,
+ IS_LINUX,
+ skipIfTorchDynamo,
+ TEST_CUDA,
+ TEST_PRIVATEUSE1,
+)
+from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
+
+
+# define TEST_ROCM before changing TEST_CUDA
+TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None
+TEST_CUDA = TEST_CUDA and CUDA_HOME is not None
+
+
+def remove_build_path():
+ if sys.platform == "win32":
+ # Not wiping extensions build folder because Windows
+ return
+ default_build_root = torch.utils.cpp_extension.get_default_build_root()
+ if os.path.exists(default_build_root):
+ shutil.rmtree(default_build_root, ignore_errors=True)
+
+
+# Since we use a fake MTIA device backend to test generic Stream/Event, device backends are mutual exclusive to each other.
+# The test will be skipped if any of the following conditions are met:
+@unittest.skipIf(
+ IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1 or TEST_ROCM,
+ "Only on linux platform and mutual exclusive to other backends",
+)
+@torch.testing._internal.common_utils.markDynamoStrictTest
+class TestCppExtensionStreamAndEvent(common.TestCase):
+ """Tests Stream and Event with C++ extensions."""
+
+ module = None
+
+ def setUp(self):
+ super().setUp()
+ # cpp extensions use relative paths. Those paths are relative to
+ # this file, so we'll change the working directory temporarily
+ self.old_working_dir = os.getcwd()
+ os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+ def tearDown(self):
+ super().tearDown()
+ # return the working directory (see setUp)
+ os.chdir(self.old_working_dir)
+
+ @classmethod
+ def tearDownClass(cls):
+ remove_build_path()
+
+ @classmethod
+ def setUpClass(cls):
+ remove_build_path()
+ build_dir = tempfile.mkdtemp()
+ # Load the fake device guard impl.
+ src = f"{os.path.abspath(os.path.dirname(__file__))}/cpp_extensions/mtia_extension.cpp"
+ cls.module = torch.utils.cpp_extension.load(
+ name="mtia_extension",
+ sources=[src],
+ build_directory=build_dir,
+ extra_include_paths=[
+ "cpp_extensions",
+ "path / with spaces in it",
+ "path with quote'",
+ ],
+ is_python_module=False,
+ verbose=True,
+ )
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_stream_event(self):
+ s = torch.Stream()
+ self.assertTrue(s.device_type, int(torch._C._autograd.DeviceType.MTIA))
+ e = torch.Event()
+ self.assertTrue(e.device.type, "mtia")
+ # Should be nullptr by default
+ self.assertTrue(e.event_id == 0)
+ s.record_event(event=e)
+ print(f"recorded event 1: {e}")
+ self.assertTrue(e.event_id != 0)
+ e2 = s.record_event()
+ print(f"recorded event 2: {e2}")
+ self.assertTrue(e2.event_id != 0)
+ self.assertTrue(e2.event_id != e.event_id)
+ e.synchronize()
+ e2.synchronize()
+ time_elapsed = e.elapsed_time(e2)
+ print(f"time elapsed between e1 and e2: {time_elapsed}")
+ old_event_id = e.event_id
+ e.record(stream=s)
+ print(f"recorded event 1: {e}")
+ self.assertTrue(e.event_id == old_event_id)
+
+
+if __name__ == "__main__":
+ common.run_tests()
diff --git a/tools/testing/modulefinder_determinator.py b/tools/testing/modulefinder_determinator.py
index ce55fdb424..ba58d75c57 100644
--- a/tools/testing/modulefinder_determinator.py
+++ b/tools/testing/modulefinder_determinator.py
@@ -21,6 +21,8 @@ TARGET_DET_LIST = [
"test_cpp_extensions_aot_no_ninja",
"test_cpp_extensions_jit",
"test_cpp_extensions_open_device_registration",
+ "test_cpp_extensions_stream_and_event",
+ "test_cpp_extensions_mtia_backend",
"test_cuda",
"test_cuda_primary_ctx",
"test_dataloader",
|
2.41.0
|
92dc4559748f4103b38f542f1dedda5fc0a2e14
|
Thu, 25 Apr 2024 22:50:24 -0700
|
[PATCH 0730/1000] Made FlexAttention rewrite getitem calls to use aten.index in score_mod (#124799)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124799 Approved by: https://github.com/drisspg ghstack dependencies: #124444
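Since the summary above is terse, here is a minimal sketch of the mechanism this patch adds (the real class is `TransformGetItemToIndex` in `torch/_higher_order_ops/templated_attention.py`; the simplified class name and toy tensors below are illustrative only, and the example assumes a build that already includes this patch, since the patch also makes plain-tensor `__getitem__` dispatch through torch-function modes):

```python
import torch
from torch.overrides import TorchFunctionMode


class GetItemToIndex(TorchFunctionMode):
    """Reroute Tensor.__getitem__ to aten.index so that indexing with a
    scalar tensor inside score_mod stays a traceable aten op instead of
    being converted to a Python scalar."""

    def __torch_function__(self, func, types, args=(), kwargs=None):
        if func == torch.Tensor.__getitem__:
            x, idx = args
            # Normalize the index argument into the list form aten.index expects.
            if isinstance(idx, tuple):
                idx = list(idx)
            elif not isinstance(idx, (list, tuple)):
                idx = [idx]
            return torch.ops.aten.index(x, idx)
        return func(*args, **(kwargs or {}))


bias = torch.randn(8)
q = torch.tensor(3)
with GetItemToIndex():
    rewritten = bias[q]  # dispatched to aten.index under the mode
print(rewritten, bias[3])  # same value either way
```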
|
diff --git a/c10/cuda/CUDAMiscFunctions.cpp b/c10/cuda/CUDAMiscFunctions.cpp
index 11ea775366..f55bba13e9 100644
--- a/c10/cuda/CUDAMiscFunctions.cpp
+++ b/c10/cuda/CUDAMiscFunctions.cpp
@@ -12,7 +12,7 @@ const char* get_cuda_check_suffix() noexcept {
} else {
return "\nCUDA kernel errors might be asynchronously reported at some"
" other API call, so the stacktrace below might be incorrect."
- "\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1.";
+ "\nFor debugging consider passing CUDA_LAUNCH_BLOCKING=1";
}
}
std::mutex* getFreeMutex() {
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index 879d4b39d2..a0d70902c7 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -4,7 +4,7 @@ import functools
from collections import namedtuple
from typing import Callable
-from unittest import skip, skipUnless
+from unittest import expectedFailure, skip, skipUnless
from unittest.mock import patch
import torch
@@ -163,7 +163,7 @@ class TestTemplatedSDPA(InductorTestCase):
head_offset = torch.rand(H, device="cuda", dtype=dtype)
def score_mod(score, b, h, m, n):
- return score + index(head_offset, [h])
+ return score + head_offset[h]
self.run_test(score_mod, dtype)
@@ -174,9 +174,7 @@ class TestTemplatedSDPA(InductorTestCase):
seq_idx[S // 2 :] = 1
def seq_mask_mod(score, b, h, q, kv):
- return torch.where(
- index(seq_idx, [q]) == index(seq_idx, [kv]), score, float("-inf")
- )
+ return torch.where(seq_idx[q] == seq_idx[kv], score, float("-inf"))
self.run_test(seq_mask_mod, dtype)
@@ -186,7 +184,7 @@ class TestTemplatedSDPA(InductorTestCase):
bias = torch.randn(S, S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + index(bias, [q, kv])
+ return score + bias[q, kv]
self.run_test(bias_mod, dtype)
@@ -196,7 +194,7 @@ class TestTemplatedSDPA(InductorTestCase):
bias = torch.randn(B, S, S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + index(bias, [b, q, kv])
+ return score + bias[b, q, kv]
self.run_test(bias_mod, dtype)
@@ -206,7 +204,7 @@ class TestTemplatedSDPA(InductorTestCase):
bias = torch.randn(B, H, S, S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + index(bias, [b, h, q, kv])
+ return score + bias[b, h, q, kv]
self.run_test(bias_mod, dtype)
@@ -216,7 +214,7 @@ class TestTemplatedSDPA(InductorTestCase):
rel_bias = torch.randn(2 * S, device="cuda", dtype=dtype)
def bias_mod(score, b, h, q, kv):
- return score + index(rel_bias, [(q - kv) + S])
+ return score + rel_bias[(q - kv) + S]
self.run_test(bias_mod, dtype)
@@ -227,7 +225,7 @@ class TestTemplatedSDPA(InductorTestCase):
def bias_mod(score, b, h, q, kv):
causal_attention = q >= kv
- cur_num_bidirectional = index(num_bidirectional, (b,))
+ cur_num_bidirectional = num_bidirectional[b]
bidirectional_attention_on_video = (q <= cur_num_bidirectional) & (
kv <= cur_num_bidirectional
)
@@ -239,6 +237,38 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(bias_mod, dtype)
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_natten_2d(self, dtype):
+ H = 32
+ W = S // H
+ WINDOW = 3
+ assert W * H == S
+
+ def get_x_y(idx):
+ # This should be a floor divide, but we don't support that properly
+ return idx / W, idx % W
+
+ def natten_mask(score, b, h, q, kv):
+ q_x, q_y = get_x_y(q)
+ kv_x, kv_y = get_x_y(kv)
+ return torch.where(
+ ((q_x - kv_x).abs() <= WINDOW) | ((q_y - kv_y).abs() <= WINDOW),
+ score,
+ float("-inf"),
+ )
+
+ self.run_test(natten_mask, dtype)
+
+ @supported_platform
+ @expectedFailure
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_silu_on_score(self, dtype):
+ def silu_score(score, b, h, q, kv):
+ return torch.nn.functional.silu(score)
+
+ self.run_test(silu_score, dtype)
+
@supported_platform
@skip("Triton bug ") # https://github.com/pytorch/pytorch/issues/124571
@common_utils.parametrize("dtype", test_dtypes)
@@ -252,8 +282,8 @@ class TestTemplatedSDPA(InductorTestCase):
def create_njt_wrapper(orig_score_mod, offsets, seq_idx):
def njt_score_mod(qk, b, h, q, kv):
- q_nested = q - index(offsets, [index(seq_idx, [q])])
- kv_nested = kv - index(offsets, [index(seq_idx, [kv])])
+ q_nested = q - offsets[seq_idx[q]]
+ kv_nested = kv - offsets[seq_idx[kv]]
return orig_score_mod(qk, b, h, q_nested, kv_nested)
return njt_score_mod
@@ -312,9 +342,9 @@ class TestTemplatedSDPA(InductorTestCase):
tok_scale = torch.randn(S, device="cuda")
def bias_mod(score, batch, head, token_q, token_kv):
- score = score + index(tok_scale, [token_q])
- score = score + index(batch_scale, [batch])
- score = score + index(head_scale, [head])
+ score = score + tok_scale[token_q]
+ score = score + batch_scale[batch]
+ score = score + head_scale[head]
return score
self.run_test(bias_mod)
diff --git a/test/test_indexing.py b/test/test_indexing.py
index 8d0eeb2025..195a3144c3 100644
--- a/test/test_indexing.py
+++ b/test/test_indexing.py
@@ -701,10 +701,12 @@ class TestIndexing(TestCase):
boolIndices = torch.tensor([True, False, False], dtype=torch.bool, device=device)
uint8Indices = torch.tensor([1, 0, 0], dtype=torch.uint8, device=device)
with warnings.catch_warnings(record=True) as w:
- self.assertEqual(v[boolIndices].shape, v[uint8Indices].shape)
- self.assertEqual(v[boolIndices], v[uint8Indices])
+ v1 = v[boolIndices]
+ v2 = v[uint8Indices]
+ self.assertEqual(v1.shape, v2.shape)
+ self.assertEqual(v1, v2)
self.assertEqual(v[boolIndices], tensor([True], dtype=torch.bool, device=device))
- self.assertEqual(len(w), 2)
+ self.assertEqual(len(w), 1)
def test_bool_indices_accumulate(self, device):
mask = torch.zeros(size=(10, ), dtype=torch.bool, device=device)
@@ -723,9 +725,10 @@ class TestIndexing(TestCase):
v = torch.randn(5, 7, 3, device=device)
mask = torch.ByteTensor([1, 0, 1, 1, 0]).to(device)
with warnings.catch_warnings(record=True) as w:
- self.assertEqual(v[mask].shape, (3, 7, 3))
- self.assertEqual(v[mask], torch.stack([v[0], v[2], v[3]]))
- self.assertEqual(len(w), 2)
+ res = v[mask]
+ self.assertEqual(res.shape, (3, 7, 3))
+ self.assertEqual(res, torch.stack([v[0], v[2], v[3]]))
+ self.assertEqual(len(w), 1)
v = torch.tensor([1.], device=device)
self.assertEqual(v[v == 0], torch.tensor([], device=device))
diff --git a/test/test_overrides.py b/test/test_overrides.py
index d79753f78a..cb46ca6ed8 100644
--- a/test/test_overrides.py
+++ b/test/test_overrides.py
@@ -1387,6 +1387,28 @@ class TestTorchFunctionMode(TestCase):
self.assertTrue(called)
+ def test_getitem_call(self):
+ # This failed because the parser thinks the function is called to()
+ # but it's actually called _parse_to()
+
+ called = False
+
+ class A(TorchFunctionMode):
+ def __torch_function__(self, func, types, args=(), kwargs=None):
+ nonlocal called
+ if kwargs is None:
+ kwargs = {}
+ called = True
+ return func(*args, **kwargs)
+
+ a = torch.zeros(5)
+ b = torch.tensor(0)
+ with A():
+ a[b]
+
+ self.assertTrue(called)
+
+
def test_distributions_bernoulli(self):
# This failed because improper use of has_torch_function when
# is_tensor_like should have been used instead, inside the
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index a1abcb15fb..26f1eeb91c 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -1475,6 +1475,7 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
self, tx, query: "VariableTracker", score_function: "VariableTracker"
):
from torch._dynamo.symbolic_convert import InstructionTranslator
+ from torch._higher_order_ops.templated_attention import TransformGetItemToIndex
from .builder import SourcelessBuilder
tx: InstructionTranslator = tx
@@ -1499,19 +1500,21 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
bhmn = [create_scalar() for _ in range(4)]
new_args = [score, *bhmn]
- (
- (body_output, body_treespec),
- body_graph,
- body_lifted_freevars,
- ) = speculate_subgraph(
- tx,
- score_function,
- new_args,
- {}, # expect only args no kwargs for now
- description="templated_attention",
- source_target=self.value,
- set_subgraph_inputs="flatten_manual",
- )
+
+ with TransformGetItemToIndex():
+ (
+ (body_output, body_treespec),
+ body_graph,
+ body_lifted_freevars,
+ ) = speculate_subgraph(
+ tx,
+ score_function,
+ new_args,
+ {}, # expect only args no kwargs for now
+ description="templated_attention",
+ source_target=self.value,
+ set_subgraph_inputs="flatten_manual",
+ )
body_name = add_subgraph(
tx,
diff --git a/torch/_functorch/vmap.py b/torch/_functorch/vmap.py
index 5d05148faf..054a40123e 100644
--- a/torch/_functorch/vmap.py
+++ b/torch/_functorch/vmap.py
@@ -178,7 +178,7 @@ def _maybe_remove_batch_dim(name, batched_output, vmap_level, batch_size, out_di
raise ValueError(
f"vmap({name}, ...): `{name}` must only return "
f"Tensors, got type {type(batched_output)}. "
- "Did you mean to set out_dim= to None for output?"
+ "Did you mean to set out_dims= to None for output?"
)
return _remove_batch_dim(batched_output, vmap_level, batch_size, out_dim)
diff --git a/torch/_higher_order_ops/templated_attention.py b/torch/_higher_order_ops/templated_attention.py
index 388e741837..52a9156820 100644
--- a/torch/_higher_order_ops/templated_attention.py
+++ b/torch/_higher_order_ops/templated_attention.py
@@ -1,4 +1,4 @@
-from typing import Callable, Tuple
+from typing import Any, Callable, Tuple
import torch
import torch.utils._pytree as pytree
@@ -16,6 +16,29 @@ from torch.fx.experimental.proxy_tensor import (
track_tensor_tree,
)
+from torch.overrides import TorchFunctionMode
+
+
+def transform_getitem_args(x: torch.Tensor, index_args) -> Tuple[Any, ...]:
+ if isinstance(index_args, tuple):
+ return (x, list(index_args))
+ elif not isinstance(index_args, (list, tuple)):
+ return (x, [index_args])
+ return (x, index_args)
+
+
+class TransformGetItemToIndex(TorchFunctionMode):
+ # This is needed since we want to support calling
+ # A[q_idx], where q_idx is a scalar tensor in score_mod.
+ # Today, when q_idx is a scalar tensor, we implicitly convert it to a python
+ # scalar and create a view. We do not want that behavior in this case, so we
+ # use this torchfunctionmode to override that behavior for score_mod
+ # wherever we're running it.
+ def __torch_function__(self, func, types, args, kwargs=None):
+ if func == torch.Tensor.__getitem__:
+ return torch.ops.aten.index(*transform_getitem_args(*args))
+ return func(*args, **(kwargs or {}))
+
class TemplatedAttentionHOP(HigherOrderOperator):
def __init__(self):
@@ -73,7 +96,10 @@ def math_attention(
score_mod = torch.vmap(score_mod, in_dims=(0, None, 0, None, None) + in_dim_buffers)
score_mod = torch.vmap(score_mod, in_dims=(0, 0, None, None, None) + in_dim_buffers)
- scores = score_mod(scores, b, h, m, n, *other_buffers).to(torch.float32)
+ # todo: We wouldn't need these overrides in this file if Dynamo always did the
+ # rewriting.
+ with TransformGetItemToIndex():
+ scores = score_mod(scores, b, h, m, n, *other_buffers).to(torch.float32)
# TODO Unconditionally return logsumexp for backwards
# if any(t.requires_grad for t in (query, key, value)):
@@ -122,7 +148,8 @@ def trace_templated_attention(
example_vals = [
torch.zeros((), dtype=query.dtype, requires_grad=query.requires_grad)
] + [torch.zeros((), dtype=torch.int) for _ in range(4)]
- score_graph = make_fx(score_mod)(*example_vals, *other_buffers)
+ with TransformGetItemToIndex():
+ score_graph = make_fx(score_mod)(*example_vals, *other_buffers)
proxy_mode.tracer.root.register_module("sdpa_score", score_graph)
node_args = (query, key, value, score_graph, *other_buffers)
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
@@ -187,9 +214,10 @@ def templated_attention_functionalize(
with ctx.redispatch_to_next() as m:
functional_score_mod = ctx.functionalize(score_mod)
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
- mutates = _has_potential_branch_input_mutation(
- functional_score_mod, example_vals, pre_dispatch
- )
+ with TransformGetItemToIndex():
+ mutates = _has_potential_branch_input_mutation(
+ functional_score_mod, example_vals, pre_dispatch
+ )
# The only care about mutations of existing buffers since we can't replay these.
# However, we can just error if anything is detected
if mutates:
diff --git a/torch/_prims_common/__init__.py b/torch/_prims_common/__init__.py
index 61d602bd36..377fc36830 100644
--- a/torch/_prims_common/__init__.py
+++ b/torch/_prims_common/__init__.py
@@ -85,6 +85,7 @@ torch_function_passthrough = {
torch.Tensor.__format__,
torch.Tensor.__repr__,
torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined]
+ torch.Tensor.__getitem__,
}
diff --git a/torch/csrc/autograd/python_variable_indexing.cpp b/torch/csrc/autograd/python_variable_indexing.cpp
index 87b0e32293..e3cdd04f09 100644
--- a/torch/csrc/autograd/python_variable_indexing.cpp
+++ b/torch/csrc/autograd/python_variable_indexing.cpp
@@ -32,8 +32,7 @@
using namespace at;
using namespace torch::autograd::utils;
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
Py_ssize_t THPVariable_length(PyObject* self) {
HANDLE_TH_ERRORS
@@ -69,7 +68,7 @@ static inline int64_t count_specified_dimensions(PyObject* index) {
for (Py_ssize_t i = 0; i < size; i++) {
PyObject* obj = PyTuple_GET_ITEM(
index, i); // NOLINT(cppcoreguidelines-pro-type-cstyle-cast)
- if (!THPVariable_CheckExact(obj) && check_has_torch_function(obj))
+ if (check_has_torch_function(obj))
return -1;
if (THPVariable_Check(obj)) {
const auto& var = THPVariable_Unpack(obj);
@@ -341,7 +340,7 @@ static inline THPObjectPtr wrapTuple(PyObject* index) {
// indexing is needed, it calls C++ `at::indexing::dispatch_index`.
PyObject* THPVariable_getitem(PyObject* self, PyObject* index) {
HANDLE_TH_ERRORS
- if (!THPVariable_CheckExact(self) && check_has_torch_function(self)) {
+ if (check_has_torch_function(self)) {
return handle_torch_function_indexing(self, index);
}
const auto& self_ = THPVariable_Unpack(self);
@@ -438,9 +437,8 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
if (py_value == nullptr) {
throw TypeError("Tensor does not support deleting items");
}
- if ((!THPVariable_CheckExact(self) && check_has_torch_function(self)) ||
- (!THPVariable_CheckExact(py_value) &&
- check_has_torch_function(py_value))) {
+ if ((check_has_torch_function(self)) ||
+ (check_has_torch_function(py_value))) {
py::object ret = py::reinterpret_steal<py::object>(
handle_torch_function_indexing(self, index, py_value));
return 0;
@@ -553,5 +551,4 @@ int THPVariable_setitem(PyObject* self, PyObject* index, PyObject* py_value) {
END_HANDLE_TH_ERRORS_RET(-1)
}
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 46148424e5..ab73b5baf3 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -22132,11 +22132,6 @@ python_ref_db = [
"_refs.roll",
torch_opinfo_name="roll",
validate_view_consistency=False,
- skips=(
- # RuntimeError: no _refs support for torch.Tensor.__getitem__
- # Leaving it as a ref because fftshift uses it
- DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),
- ),
),
PythonRefInfo(
"_refs.rot90",
diff --git a/torch/testing/_internal/opinfo/definitions/fft.py b/torch/testing/_internal/opinfo/definitions/fft.py
index 3f1d43ee9f..0601af24bb 100644
--- a/torch/testing/_internal/opinfo/definitions/fft.py
+++ b/torch/testing/_internal/opinfo/definitions/fft.py
@@ -767,18 +767,10 @@ python_ref_db: List[OpInfo] = [
"_refs.fft.fftshift",
op_db=op_db,
torch_opinfo_name="fft.fftshift",
- skips=(
- # TODO Move fftshift to decomps
- DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
- ),
),
PythonRefInfo(
"_refs.fft.ifftshift",
op_db=op_db,
torch_opinfo_name="fft.ifftshift",
- skips=(
- # TODO Move ifftshift to decomps
- DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
- ),
),
]
diff --git a/torch/testing/_internal/opinfo/definitions/linalg.py b/torch/testing/_internal/opinfo/definitions/linalg.py
index a1b6531b15..288aaa34f2 100644
--- a/torch/testing/_internal/opinfo/definitions/linalg.py
+++ b/torch/testing/_internal/opinfo/definitions/linalg.py
@@ -2389,8 +2389,6 @@ python_ref_db: List[OpInfo] = [
supports_out=True,
op_db=op_db,
skips=(
- # no _refs support for Tensor.__getitem__
- DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"),
# TODO: is this really needed?
DecorateInfo(
unittest.expectedFailure, "TestCommon", "test_python_ref_errors"
|
2.41.0
|
58fdd8c40715baa96f9886e03992de4e3ed18a5
|
Fri, 26 Apr 2024 17:41:24 +0000
|
[PATCH 0731/1000] Remove cppwrapper option on inductor benchmark workflow (#124971)
|
I'm restoring the `training` and `inference` options after github.com/pytorch/pytorch/pull/124795 and removing the less-known `cppwrapper` option instead, per @desertfire's suggestion. The total number of parameters remains at 10. Also, the default choices for training and inference are now explicitly spelled out when dispatching the workflow manually, to catch developers' attention. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124971 Approved by: https://github.com/ezyang
|
diff --git a/.github/workflows/inductor-perf-test-nightly.yml b/.github/workflows/inductor-perf-test-nightly.yml
index 16dd92f553..e77c915749 100644
--- a/.github/workflows/inductor-perf-test-nightly.yml
+++ b/.github/workflows/inductor-perf-test-nightly.yml
@@ -8,11 +8,16 @@ on:
# out, let try to run torchao cudagraphs_low_precision as part of cudagraphs
workflow_dispatch:
inputs:
- training_and_inference:
- description: Run training and inference?
+ training:
+ description: Run training (on by default)?
required: false
- type: string
- default: training-true-inference-false
+ type: boolean
+ default: true
+ inference:
+ description: Run inference (off by default)?
+ required: false
+ type: boolean
+ default: false
default:
description: Run inductor_default?
required: false
@@ -28,11 +33,6 @@ on:
required: false
type: boolean
default: true
- cppwrapper:
- description: Run inductor_cpp_wrapper for inference?
- required: false
- type: boolean
- default: false
freezing_cudagraphs:
description: Run inductor_cudagraphs with freezing for inference?
required: false
@@ -129,7 +129,7 @@ jobs:
if: github.event_name == 'workflow_dispatch'
with:
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- dashboard-tag: ${{ inputs.training_and_inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-${{ inputs.cppwrapper }}-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }}
+ dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-false-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }}
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
use-gha: anything-non-empty-to-use-gha
|
2.41.0
|
81ea00c90aac5a6f8d09da885dac4754b8d6d63
|
Fri, 26 Apr 2024 18:21:24 +0000
|
[PATCH 0732/1000] [TD] Query Github API for base (#122214)
|
A better query for the base commit of a PR. Some ghstack PRs are not connected to main, so git merge-base doesn't work there. Instead, query the GitHub API for the base of the PR, which should be more accurate. Sanity checked on one of Ed's ghstack PRs. Pull Request resolved: https://github.com/pytorch/pytorch/pull/122214 Approved by: https://github.com/seemethere
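For readers, a condensed sketch of the lookup this patch introduces (it mirrors the `get_merge_base` helper added in the diff below; the function name and the hard-coded `pytorch/pytorch` repo here are illustrative, and a `GITHUB_TOKEN` environment variable is assumed, as on CI):

```python
import json
import os
import subprocess
from urllib.request import Request, urlopen


def merge_base_for_pr(pr_number: int) -> str:
    # Ask the GitHub API which branch the PR is based on, instead of
    # assuming the default branch (ghstack PRs often do not target main).
    headers = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"token {os.environ['GITHUB_TOKEN']}",
    }
    url = f"https://api.github.com/repos/pytorch/pytorch/pulls/{pr_number}"
    with urlopen(Request(url, headers=headers)) as conn:
        base_ref = json.loads(conn.read().decode())["base"]["ref"]
    # The merge-base against that branch is then used to diff for changed files.
    return (
        subprocess.check_output(["git", "merge-base", f"origin/{base_ref}", "HEAD"])
        .decode()
        .strip()
    )
```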
|
diff --git a/.github/workflows/target_determination.yml b/.github/workflows/target_determination.yml
index f719b798c1..cd5e758345 100644
--- a/.github/workflows/target_determination.yml
+++ b/.github/workflows/target_determination.yml
@@ -53,6 +53,7 @@ jobs:
GITHUB_RUN_ID: ${{ github.run_id }}
GITHUB_RUN_NUMBER: ${{ github.run_number }}
GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
+ GITHUB_REF: ${{ github.ref }}
JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
PR_NUMBER: ${{ github.event.pull_request.number }}
diff --git a/tools/testing/target_determination/heuristics/mentioned_in_pr.py b/tools/testing/target_determination/heuristics/mentioned_in_pr.py
index 56c14b4a6a..074a375e2d 100644
--- a/tools/testing/target_determination/heuristics/mentioned_in_pr.py
+++ b/tools/testing/target_determination/heuristics/mentioned_in_pr.py
@@ -1,4 +1,3 @@
-import os
import re
from typing import Any, List
@@ -9,6 +8,7 @@ from tools.testing.target_determination.heuristics.interface import (
from tools.testing.target_determination.heuristics.utils import (
get_git_commit_info,
get_issue_or_pr_body,
+ get_pr_number,
)
from tools.testing.test_run import TestRun
@@ -32,14 +32,11 @@ class MentionedInPR(HeuristicInterface):
print(f"Can't get commit info due to {e}")
commit_messages = ""
try:
- pr_number = os.environ.get("PR_NUMBER", "")
- if pr_number == "":
- re_match = re.match(
- r"^refs/tags/.*/(\d+)$", os.environ.get("GITHUB_REF", "")
- )
- if re_match is not None:
- pr_number = re_match.group(1)
- pr_body = get_issue_or_pr_body(int(pr_number))
+ pr_number = get_pr_number()
+ if pr_number is not None:
+ pr_body = get_issue_or_pr_body(pr_number)
+ else:
+ pr_body = ""
except Exception as e:
print(f"Can't get PR body due to {e}")
pr_body = ""
diff --git a/tools/testing/target_determination/heuristics/utils.py b/tools/testing/target_determination/heuristics/utils.py
index 0c158bb934..6ddae24d21 100644
--- a/tools/testing/target_determination/heuristics/utils.py
+++ b/tools/testing/target_determination/heuristics/utils.py
@@ -1,10 +1,11 @@
import json
import os
+import re
import subprocess
from collections import defaultdict
from functools import lru_cache
from pathlib import Path
-from typing import cast, Dict, List, Set, Union
+from typing import cast, Dict, List, Optional, Set, Union
from urllib.request import Request, urlopen
from warnings import warn
@@ -22,7 +23,36 @@ def python_test_file_to_test_name(tests: Set[str]) -> Set[str]:
@lru_cache(maxsize=None)
-def query_changed_files() -> List[str]:
+def get_pr_number() -> Optional[int]:
+ pr_number = os.environ.get("PR_NUMBER", "")
+ if pr_number == "":
+ re_match = re.match(r"^refs/tags/.*/(\d+)$", os.environ.get("GITHUB_REF", ""))
+ if re_match is not None:
+ pr_number = re_match.group(1)
+ if pr_number != "":
+ return int(pr_number)
+ return None
+
+
+@lru_cache(maxsize=None)
+def get_merge_base() -> str:
+ pr_number = get_pr_number()
+ if pr_number is not None:
+ github_token = os.environ.get("GITHUB_TOKEN")
+ headers = {
+ "Accept": "application/vnd.github.v3+json",
+ "Authorization": f"token {github_token}",
+ }
+ url = f"https://api.github.com/repos/pytorch/pytorch/pulls/{pr_number}"
+ with urlopen(Request(url, headers=headers)) as conn:
+ pr_info = json.loads(conn.read().decode())
+ base = f"origin/{pr_info['base']['ref']}"
+ merge_base = (
+ subprocess.check_output(["git", "merge-base", base, "HEAD"])
+ .decode()
+ .strip()
+ )
+ return merge_base
default_branch = f"origin/{os.environ.get('GIT_DEFAULT_BRANCH', 'main')}"
merge_base = (
subprocess.check_output(["git", "merge-base", default_branch, "HEAD"])
@@ -32,17 +62,21 @@ def query_changed_files() -> List[str]:
head = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode().strip()
- base_commit = merge_base
- if base_commit == head:
+ if merge_base == head:
# We are on the default branch, so check for changes since the last commit
- base_commit = "HEAD^"
+ merge_base = "HEAD^"
+ return merge_base
+
+
+def query_changed_files() -> List[str]:
+ base_commit = get_merge_base()
proc = subprocess.run(
["git", "diff", "--name-only", base_commit, "HEAD"],
capture_output=True,
check=False,
)
- print(f"merge_base: {merge_base}, head: {head}")
+ print(f"base_commit: {base_commit}")
if proc.returncode != 0:
raise RuntimeError("Unable to get changed files")
@@ -56,20 +90,7 @@ def query_changed_files() -> List[str]:
@lru_cache(maxsize=None)
def get_git_commit_info() -> str:
"""Gets the commit info since the last commit on the default branch."""
- default_branch = f"origin/{os.environ.get('GIT_DEFAULT_BRANCH', 'main')}"
-
- merge_base = (
- subprocess.check_output(["git", "merge-base", default_branch, "HEAD"])
- .decode()
- .strip()
- )
-
- head = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode().strip()
-
- base_commit = merge_base
- if base_commit == head:
- # We are on the default branch, so check for changes since the last commit
- base_commit = "HEAD^"
+ base_commit = get_merge_base()
return (
subprocess.check_output(
|
2.41.0
|
1d565da0c5c95a19ab0abbd47645507d7124340
|
Fri, 26 Apr 2024 18:28:10 +0000
|
[PATCH 0733/1000] [dynamo] Add support for tensor's is_complex method (#124927)
|
This PR adds support for the tensor `is_complex` method in dynamo. Take the following code as an example:

```python
def test_tensor_is_complex(x):
    if x.is_complex():
        return x + 1
    else:
        return x - 1
```

Before this fix, the `is_complex()` call caused a graph break: "torch.* op returned non-Tensor bool call_method is_complex". After this fix, the graph break is avoided. Fixes #122692 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124927 Approved by: https://github.com/ezyang
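A quick way to see the effect (a minimal sketch, assuming a PyTorch build that contains this change; on an older build the `fullgraph=True` call below errors out at the `is_complex()` branch instead of falling back silently):

```python
import torch


def f(x):
    # With this change, dynamo folds is_complex() into a constant guard
    # instead of breaking the graph.
    return x + 1 if x.is_complex() else x - 1


# backend="eager" isolates the dynamo tracing behavior; fullgraph=True
# turns any graph break into a hard error.
compiled = torch.compile(f, backend="eager", fullgraph=True)
print(compiled(torch.ones(3)))                      # tensor([0., 0., 0.])
print(compiled(torch.ones(3, dtype=torch.cfloat)))  # tensor([2.+0.j, 2.+0.j, 2.+0.j])
```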
|
diff --git a/test/dynamo/test_functions.py b/test/dynamo/test_functions.py
index bcd299a9e8..ab5d3b8c0a 100644
--- a/test/dynamo/test_functions.py
+++ b/test/dynamo/test_functions.py
@@ -681,6 +681,13 @@ class FunctionTests(torch._dynamo.test_case.TestCase):
else:
return x - 1
+ @make_test
+ def test_tensor_is_complex(x):
+ if x.is_complex():
+ return x + 1
+ else:
+ return x - 1
+
@make_test
def test_get_privateuse1_name(x):
if torch._C._get_privateuse1_backend_name() == "privateuseone":
diff --git a/torch/_dynamo/variables/tensor.py b/torch/_dynamo/variables/tensor.py
index e4cc623184..6ac50d5828 100644
--- a/torch/_dynamo/variables/tensor.py
+++ b/torch/_dynamo/variables/tensor.py
@@ -524,6 +524,10 @@ class TensorVariable(VariableTracker):
if self.dtype is not None:
return ConstantVariable.create(self.dtype.is_floating_point)
+ def method_is_complex(self):
+ if self.dtype is not None:
+ return ConstantVariable.create(self.dtype.is_complex)
+
def method_is_contiguous(self, memory_format=None):
memory_format = (
memory_format.as_python_constant()
|
2.41.0
|
ea1e84d40ee9fc11b95fb4f0d6bc7d264b5eda4
|
Fri, 26 Apr 2024 18:57:11 +0000
|
[PATCH 0734/1000] log pt2 config dict to signpost from inductor post grad (#124593)
|
Summary: previous attempts ultimately didn't work. D49720297 caused an online training SEV due to extra importing. D56299408 mitigates a tricky bug in the Distributed Shampoo constructor but unfortunately didn't fix the scuba logging either; see f552546983. Test Plan: {F1491621504} Differential Revision: D56378270 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124593 Approved by: https://github.com/anijain2305
|
diff --git a/torch/_inductor/compile_fx.py b/torch/_inductor/compile_fx.py
index 13303acd4f..7f3b2ec95f 100644
--- a/torch/_inductor/compile_fx.py
+++ b/torch/_inductor/compile_fx.py
@@ -72,7 +72,7 @@ if config.is_fbcode():
from torch._inductor.fb.utils import log_optimus_to_scuba, time_and_log
else:
# no-op decorator
- def time_and_log(attr: str, extra_loggings: Optional[Dict[str, str]] = None):
+ def time_and_log(attr: str):
return dynamo_utils.identity
@@ -398,10 +398,7 @@ def get_patched_config_dict(config_patches=None) -> Dict[str, Any]:
@DebugContext.wrap
@torch.utils._python_dispatch._disable_current_modes()
-@time_and_log(
- attr="compilation time (in seconds)",
- extra_loggings={"config_dict": str(get_patched_config_dict())},
-)
+@time_and_log(attr="compilation time (in seconds)")
# Need this decorator for compile_fx_inner even if we already have one for
# compile_fx. The reason is the compilation for backward graph may happen after
# compile_fx return and we may want to use the _LazyGraphModule for compiling
@@ -708,7 +705,9 @@ def fx_codegen_and_compile(
payload_fn=lambda: gm.print_readable(print_output=False),
)
if config.is_fbcode():
- log_optimus_to_scuba()
+ log_optimus_to_scuba(
+ extra_logging={"pt2_configs": str(get_patched_config_dict())}
+ )
with V.set_fake_mode(fake_mode), maybe_disable_comprehensive_padding(
example_inputs
|
2.41.0
|
bcb42cdd231bdfb823823b0bc0c944993abced5
|
Fri, 26 Apr 2024 00:30:01 +0000
|
[PATCH 0735/1000] Avoid COW materialize in various places (1) (#124984)
|
Most, but not all, of these cases were found automatically with `git grep -n '^\s*\<const\>.*\*.*=.*\<data_ptr\>'`. Part of #97856. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124984 Approved by: https://github.com/Skylion007
|
diff --git a/aten/src/ATen/NestedTensorImpl.cpp b/aten/src/ATen/NestedTensorImpl.cpp
index a6e4309e58..2f73b7b304 100644
--- a/aten/src/ATen/NestedTensorImpl.cpp
+++ b/aten/src/ATen/NestedTensorImpl.cpp
@@ -81,7 +81,7 @@ inline std::vector<int64_t> construct_opt_sizes(const at::Tensor& sizes) {
std::vector<int64_t> result(1, sizes.sizes()[0]);
if (sizes.dim() > 0) {
size_t nested_dim = result.size();
- int64_t* sizes_ptr = sizes.data_ptr<int64_t>();
+ const int64_t* sizes_ptr = sizes.const_data_ptr<int64_t>();
result.resize(nested_dim + sizes.sizes()[1]);
int64_t sizes_size_0 = sizes.sizes()[0];
int64_t sizes_size_1 = sizes.sizes()[1];
@@ -114,7 +114,7 @@ at::Tensor construct_nested_strides(const at::Tensor& sizes) {
return sizes;
}
at::Tensor strides = sizes.new_empty(sizes.sizes());
- const int64_t* sizes_ptr = sizes.data_ptr<int64_t>();
+ const int64_t* sizes_ptr = sizes.const_data_ptr<int64_t>();
int64_t* strides_ptr = strides.data_ptr<int64_t>();
for (int64_t i = 0; i < sizes.size(0); i++) {
strides_ptr[orig_dim - 1] = 1;
@@ -152,7 +152,7 @@ at::Tensor construct_offsets(const at::Tensor& sizes) {
std::iota(offsets_ptr, offsets_ptr + ntensors, 0);
return offsets;
}
- const int64_t* sizes_ptr = sizes.data_ptr<int64_t>();
+ const int64_t* sizes_ptr = sizes.const_data_ptr<int64_t>();
offsets_ptr[0] = 0;
for (const auto i : c10::irange(ntensors - 1)) {
const int64_t row_product = std::accumulate(sizes_ptr, sizes_ptr + orig_dim, 1, std::multiplies());
@@ -344,7 +344,7 @@ int64_t get_numel_from_nested_size_tensor(const at::Tensor& tensor) {
static_cast<uint64_t>(std::numeric_limits<int64_t>::max()),
static_cast<uint64_t>(std::numeric_limits<size_t>::max()));
- const int64_t* sizes_ptr = tensor.data_ptr<int64_t>();
+ const int64_t* sizes_ptr = tensor.const_data_ptr<int64_t>();
const auto nt_dim = tensor.size(1);
uint64_t num_elements{0};
diff --git a/aten/src/ATen/NestedTensorImpl.h b/aten/src/ATen/NestedTensorImpl.h
index ee096591ed..0bd3d98e73 100644
--- a/aten/src/ATen/NestedTensorImpl.h
+++ b/aten/src/ATen/NestedTensorImpl.h
@@ -228,7 +228,8 @@ inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) {
}
const Tensor &sizemat = nt->get_nested_sizes(),
&stridemat = nt->get_nested_strides();
- int64_t* offsets_ptr = nt->get_storage_offsets().data_ptr<int64_t>();
+ const int64_t* offsets_ptr =
+ nt->get_storage_offsets().const_data_ptr<int64_t>();
int64_t orig_dim = sizemat.size(1);
// nesting scalars
if (orig_dim == 0) {
@@ -243,8 +244,8 @@ inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) {
// nesting tensors
else {
// if any underlying tensor is non-contiguous
- const int64_t *sizemat_ptr = sizemat.data_ptr<int64_t>(),
- *stridemat_ptr = stridemat.data_ptr<int64_t>();
+ const int64_t *sizemat_ptr = sizemat.const_data_ptr<int64_t>(),
+ *stridemat_ptr = stridemat.const_data_ptr<int64_t>();
for (int64_t i = 0; i < ntensors; i++) {
if (stridemat_ptr[orig_dim - 1] != 1) {
return false;
@@ -263,8 +264,8 @@ inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) {
if (offsets_ptr[0] != 0) {
return false;
}
- sizemat_ptr = sizemat.data_ptr<int64_t>();
- stridemat_ptr = stridemat.data_ptr<int64_t>();
+ sizemat_ptr = sizemat.const_data_ptr<int64_t>();
+ stridemat_ptr = stridemat.const_data_ptr<int64_t>();
for (int64_t i = 1; i < ntensors; i++) {
if (offsets_ptr[i] !=
offsets_ptr[i - 1] + *sizemat_ptr * *stridemat_ptr) {
diff --git a/aten/src/ATen/core/Formatting.cpp b/aten/src/ATen/core/Formatting.cpp
index 29bf49bbb6..8246407052 100644
--- a/aten/src/ATen/core/Formatting.cpp
+++ b/aten/src/ATen/core/Formatting.cpp
@@ -72,7 +72,7 @@ static std::tuple<double, int> __printFormat(std::ostream& stream, const Tensor&
return std::make_tuple(1., 0);
}
bool intMode = true;
- auto self_p = self.data_ptr<double>();
+ auto self_p = self.const_data_ptr<double>();
for (const auto i : c10::irange(size)) {
auto z = self_p[i];
if(std::isfinite(z)) {
@@ -189,7 +189,7 @@ static void __printMatrix(std::ostream& stream, const Tensor& self, int64_t line
}
for (const auto l : c10::irange(self.size(0))) {
Tensor row = self.select(0,l);
- double *row_ptr = row.data_ptr<double>();
+ const double *row_ptr = row.const_data_ptr<double>();
for (const auto c : c10::irange(firstColumn, lastColumn+1)) {
stream << std::setw(sz) << row_ptr[c]/scale;
if(c == lastColumn) {
@@ -279,7 +279,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
tensor = tensor_.to(kCPU, kDouble).contiguous();
}
if(tensor.ndimension() == 0) {
- stream << defaultfloat << tensor.data_ptr<double>()[0] << '\n';
+ stream << defaultfloat << tensor.const_data_ptr<double>()[0] << '\n';
stream << "[ " << tensor_.toString() << "{}";
} else if(tensor.ndimension() == 1) {
if (tensor.numel() > 0) {
@@ -287,7 +287,7 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
if(scale != 1) {
printScale(stream, scale);
}
- double* tensor_p = tensor.data_ptr<double>();
+ const double* tensor_p = tensor.const_data_ptr<double>();
for (const auto i : c10::irange(tensor.size(0))) {
stream << std::setw(sz) << tensor_p[i]/scale << '\n';
}
diff --git a/aten/src/ATen/native/BatchLinearAlgebra.cpp b/aten/src/ATen/native/BatchLinearAlgebra.cpp
index caa15e5c5b..40e6b34dc9 100644
--- a/aten/src/ATen/native/BatchLinearAlgebra.cpp
+++ b/aten/src/ATen/native/BatchLinearAlgebra.cpp
@@ -1516,7 +1516,7 @@ void _linalg_check_errors(
} else {
// Find the first non-zero info
auto infos_cpu = infos.to(at::kCPU);
- auto ptr = infos_cpu.data_ptr<int32_t>();
+ auto ptr = infos_cpu.const_data_ptr<int32_t>();
auto n = infos.numel();
auto info_ptr = std::find_if(ptr, ptr + n, [](int32_t x) { return x != 0; });
info = *info_ptr;
@@ -2794,13 +2794,13 @@ static void linalg_eig_make_complex_eigenvectors_impl(Tensor& result, const Tens
auto matrix_stride = matrixStride(real_vectors);
auto result_data = result.data_ptr<c10::complex<scalar_t>>();
- auto real_vectors_data = real_vectors.data_ptr<scalar_t>();
- auto values_data = complex_values.data_ptr<c10::complex<scalar_t>>();
+ auto real_vectors_data = real_vectors.const_data_ptr<scalar_t>();
+ auto values_data = complex_values.const_data_ptr<c10::complex<scalar_t>>();
for (auto b = decltype(batch_size){0}; b < batch_size; b++) {
- scalar_t* vecs = &real_vectors_data[b * matrix_stride];
+ const scalar_t* vecs = &real_vectors_data[b * matrix_stride];
c10::complex<scalar_t>* res = &result_data[b * matrix_stride];
- c10::complex<scalar_t>* vals = &values_data[b * n];
+ const c10::complex<scalar_t>* vals = &values_data[b * n];
for (auto j = decltype(n){0}; j < n; j++) {
if (vals[j].imag() == 0.0) { // eigenvalue is real, then v(j) = VR(:,j)
for (auto i = decltype(n){0}; i < n; i++) {
diff --git a/aten/src/ATen/native/ForeachUtils.h b/aten/src/ATen/native/ForeachUtils.h
index 9c22c35ee9..4e8963da05 100644
--- a/aten/src/ATen/native/ForeachUtils.h
+++ b/aten/src/ATen/native/ForeachUtils.h
@@ -216,7 +216,7 @@ inline std::vector<c10::Scalar> convert_tensor_to_scalar_list(
scalarList_.scalar_type(),
"convert_tensor_to_scalar_list",
[&]() {
- const scalar_t* scalar_data = scalarList_.data_ptr<scalar_t>();
+ const scalar_t* scalar_data = scalarList_.const_data_ptr<scalar_t>();
TORCH_CHECK(
(expect_length == scalarList_.size(0)),
"Expected length of scalars to match input of length ",
diff --git a/aten/src/ATen/native/QuantizedLinear.cpp b/aten/src/ATen/native/QuantizedLinear.cpp
index c2ccdc7ddf..5fa45f3099 100644
--- a/aten/src/ATen/native/QuantizedLinear.cpp
+++ b/aten/src/ATen/native/QuantizedLinear.cpp
@@ -64,7 +64,7 @@ Tensor fbgemm_linear_int8_weight_fp32_activation(
"and will be removed in a future PyTorch release.")
const Tensor input_contig = input.contiguous();
- const float* input_ptr = input_contig.data_ptr<float>();
+ const float* input_ptr = input_contig.const_data_ptr<float>();
TORCH_CHECK(input.dim() >= 2);
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
@@ -305,7 +305,7 @@ Tensor fbgemm_pack_quantized_matrix(const Tensor& weight) {
const int64_t K = weight.size(1);
const int64_t N = weight.size(0);
const Tensor weight_contig = weight.contiguous();
- const int8_t* weight_ptr = weight_contig.data_ptr<int8_t>();
+ const int8_t* weight_ptr = weight_contig.const_data_ptr<int8_t>();
auto ptr = std::make_unique<fbgemm::PackBMatrix<int8_t>>(
/*trans=*/fbgemm::matrix_op_t::Transpose,
/*nRow=*/K,
@@ -424,7 +424,7 @@ Tensor fbgemm_linear_fp16_weight_fp32_activation(
TORCH_CHECK(fbgemm::fbgemmSupportedCPU(), "Your CPU doesn't support FBGEMM.");
const Tensor input_contig = input.contiguous();
- const float* input_ptr = input_contig.data_ptr<float>();
+ const float* input_ptr = input_contig.const_data_ptr<float>();
// Pull out the PackedGemmMatrixFP16 instance from the owning tensor
const fbgemm::PackedGemmMatrixFP16& packed_weight_fp16 =
diff --git a/aten/src/ATen/native/SummaryOps.cpp b/aten/src/ATen/native/SummaryOps.cpp
index 81a0ccd6d8..4c158f81a4 100644
--- a/aten/src/ATen/native/SummaryOps.cpp
+++ b/aten/src/ATen/native/SummaryOps.cpp
@@ -43,7 +43,7 @@ Tensor _bincount_cpu_template(
int64_t nbins = static_cast<int64_t>(*self.max().data_ptr<input_t>()) + 1L;
nbins = std::max(nbins, minlength); // at least minlength # of bins
- const input_t* self_p = self.data_ptr<input_t>();
+ const input_t* self_p = self.const_data_ptr<input_t>();
if (has_weights) {
output = at::zeros(
{nbins},
@@ -52,7 +52,7 @@ Tensor _bincount_cpu_template(
weights.options().device_opt(),
weights.options().pinned_memory_opt());
weights_t* output_p = output.data_ptr<weights_t>();
- const weights_t* weights_p = weights.data_ptr<weights_t>();
+ const weights_t* weights_p = weights.const_data_ptr<weights_t>();
for (const auto i : c10::irange(self_size)) {
output_p[self_p[i]] += weights_p[i];
}
diff --git a/aten/src/ATen/native/TensorConversions.cpp b/aten/src/ATen/native/TensorConversions.cpp
index b2fb4d7c52..c555706f4c 100644
--- a/aten/src/ATen/native/TensorConversions.cpp
+++ b/aten/src/ATen/native/TensorConversions.cpp
@@ -1479,7 +1479,7 @@ void convert_indices_from_coo_to_csr_cpu(
const Tensor& input,
const int64_t size) {
int64_t numel = input.numel();
- const input_t* data_in = input.data_ptr<input_t>();
+ const input_t* data_in = input.const_data_ptr<input_t>();
output_t* data_out = result.data_ptr<output_t>();
if (numel == 0) {
@@ -1525,7 +1525,7 @@ void convert_indices_from_csr_to_coo_cpu(
batch_indices.copy_(at::sparse::full_coo_indices(crow_indices.sizes().slice(0, batch_ndim), crow_indices.options())
.repeat_interleave(nnz, 1));
}
- const input_t* crow_indices_data_in = crow_indices_->data_ptr<input_t>();
+ const input_t* crow_indices_data_in = crow_indices_->const_data_ptr<input_t>();
TORCH_INTERNAL_ASSERT(indices.is_contiguous());
auto row0 = indices.select(0, transpose ? batch_ndim + 1 : batch_ndim + 0);
auto row1 = indices.select(0, transpose ? batch_ndim + 0 : batch_ndim + 1);
diff --git a/aten/src/ATen/native/TensorShape.cpp b/aten/src/ATen/native/TensorShape.cpp
index 1873201d20..b7d8eeb00f 100644
--- a/aten/src/ATen/native/TensorShape.cpp
+++ b/aten/src/ATen/native/TensorShape.cpp
@@ -2058,7 +2058,7 @@ Tensor index_select_sparse_cpu(const Tensor& self, int64_t dim, const Tensor& in
// fill in src_int_idx, sorted_int_idx, int_counts
{
const auto sorted_len = sorted.numel();
- const auto* ptr_sorted = sorted.data_ptr<int64_t>();
+ const auto* ptr_sorted = sorted.const_data_ptr<int64_t>();
const auto* ptr_sorted_start = ptr_sorted;
const auto* ptr_sorted_end = ptr_sorted + sorted_len;
@@ -2121,7 +2121,7 @@ Tensor index_select_sparse_cpu(const Tensor& self, int64_t dim, const Tensor& in
auto* ptr_selected_src = selected_src.data_ptr<int64_t>();
const auto thread_offsets = compressed_int_counts.cumsum(0).sub_(compressed_int_counts);
- const auto* ptr_sorted_idx = sorted_idx.data_ptr<int64_t>();
+ const auto* ptr_sorted_idx = sorted_idx.const_data_ptr<int64_t>();
at::parallel_for(0, n_threads_src, 1, [&](int64_t tid, C10_UNUSED int64_t _) {
const auto start = tid * chunk_size_src;
const auto end = std::min(start + chunk_size_src, src_len);
@@ -2163,7 +2163,7 @@ Tensor index_select_sparse_cpu(const Tensor& self, int64_t dim, const Tensor& in
bool run_in_parallel = true) -> Tensor {
auto cidx = at::empty({len + 1}, idx.options());
- const auto* ptr_idx = idx.data_ptr<int64_t>();
+ const auto* ptr_idx = idx.const_data_ptr<int64_t>();
auto* ptr_cidx = cidx.data_ptr<int64_t>();
const auto idx_len = idx.numel();
@@ -2202,7 +2202,7 @@ Tensor index_select_sparse_cpu(const Tensor& self, int64_t dim, const Tensor& in
}
else {
auto* ptr_counts = counts.data_ptr<int64_t>();
- const auto* ptr_vals = t.data_ptr<int64_t>();
+ const auto* ptr_vals = t.const_data_ptr<int64_t>();
for (C10_UNUSED const auto _ : c10::irange(t.numel())) {
++ptr_counts[*ptr_vals++];
}
@@ -2310,10 +2310,10 @@ Tensor index_select_sparse_cpu(const Tensor& self, int64_t dim, const Tensor& in
const auto src_idx_len = src_intersection_offsets.const_data_ptr<int64_t>()[size - 1];
auto src_idx = at::empty({src_idx_len}, src.options());
- const auto* ptr_src = src.data_ptr<int64_t>();
- const auto* ptr_intersection_counts = intersection_counts.data_ptr<int64_t>();
- const auto* ptr_src_intersection_counts = src_intersection_counts.data_ptr<int64_t>();
- const auto* ptr_src_intersection_offsets = src_intersection_offsets.data_ptr<int64_t>();
+ const auto* ptr_src = src.const_data_ptr<int64_t>();
+ const auto* ptr_intersection_counts = intersection_counts.const_data_ptr<int64_t>();
+ const auto* ptr_src_intersection_counts = src_intersection_counts.const_data_ptr<int64_t>();
+ const auto* ptr_src_intersection_offsets = src_intersection_offsets.const_data_ptr<int64_t>();
auto* ptr_src_idx = src_idx.data_ptr<int64_t>();
const auto src_len = src.numel();
@@ -2362,16 +2362,16 @@ Tensor index_select_sparse_cpu(const Tensor& self, int64_t dim, const Tensor& in
auto counts_per_thread = idx_counts_per_thread.mul_(src_counts).sum(-1);
return counts_per_thread.cumsum(0).sub_(counts_per_thread);
}();
- const auto* ptr_thread_offset = thread_offset.data_ptr<int64_t>();
+ const auto* ptr_thread_offset = thread_offset.const_data_ptr<int64_t>();
auto idx_selected = at::empty({res_len}, idx.options());
auto src_selected = at::empty({res_len}, src.options());
- const auto* ptr_idx = idx.data_ptr<int64_t>();
- const auto* ptr_src_counts = src_counts.data_ptr<int64_t>();
- const auto* ptr_intersection_counts = intersection_counts.data_ptr<int64_t>();
- const auto* ptr_src_idx = src_idx.data_ptr<int64_t>();
- const auto* ptr_src_idx_offsets = src_idx_offsets.data_ptr<int64_t>();
+ const auto* ptr_idx = idx.const_data_ptr<int64_t>();
+ const auto* ptr_src_counts = src_counts.const_data_ptr<int64_t>();
+ const auto* ptr_intersection_counts = intersection_counts.const_data_ptr<int64_t>();
+ const auto* ptr_src_idx = src_idx.const_data_ptr<int64_t>();
+ const auto* ptr_src_idx_offsets = src_idx_offsets.const_data_ptr<int64_t>();
auto* ptr_idx_selected = idx_selected.data_ptr<int64_t>();
auto* ptr_src_selected = src_selected.data_ptr<int64_t>();
@@ -2433,8 +2433,8 @@ Tensor index_select_sparse_cpu(const Tensor& self, int64_t dim, const Tensor& in
}
}();
- const auto* ptr_outer = outer.data_ptr<int64_t>();
- const auto* ptr_inner = inner.data_ptr<int64_t>();
+ const auto* ptr_outer = outer.const_data_ptr<int64_t>();
+ const auto* ptr_inner = inner.const_data_ptr<int64_t>();
// NOTE: if very critical, replace std::vector with
// a data structure that operates on stack up to some limit.
auto outer_selected_idx = std::vector<int64_t>();
diff --git a/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp b/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp
index bedf2f4461..8f80d920e3 100644
--- a/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp
+++ b/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp
@@ -186,7 +186,7 @@ PackedLinearWeightQnnp::PackedLinearWeightQnnp(
std::tie(w_zero_points_, w_scales_) =
make_zero_points_and_scales_tensor(weight_contig);
- const float* weight_scales_data = w_scales_.data_ptr<float>();
+ const float* weight_scales_data = w_scales_.const_data_ptr<float>();
at::Tensor qnnp_weight = at::_empty_affine_quantized(
weight_contig.sizes(),
at::device(c10::kCPU).dtype(c10::kQUInt8),
diff --git a/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_serialize.cpp b/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_serialize.cpp
index e557ec3994..d5790b5bc2 100644
--- a/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_serialize.cpp
+++ b/aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_serialize.cpp
@@ -160,7 +160,7 @@ BCSRSerializationType PackedLinearWeight::serialize() {
BCSRSerializationType PackedLinearWeightQnnp::serialize() {
at::Tensor w_scales_compact;
at::Tensor w_zero_points_compact;
- const float* w_scales_data_ptr = w_scales_.data_ptr<float>();
+ const float* w_scales_data_ptr = w_scales_.const_data_ptr<float>();
std::function<int8_t(uint8_t)> subtract_128 = [](uint8_t v) {
return static_cast<int8_t>(static_cast<int16_t>(v) - 128);
};
diff --git a/aten/src/ATen/native/cpu/HistogramKernel.cpp b/aten/src/ATen/native/cpu/HistogramKernel.cpp
index 73a16746e1..196bfd5647 100644
--- a/aten/src/ATen/native/cpu/HistogramKernel.cpp
+++ b/aten/src/ATen/native/cpu/HistogramKernel.cpp
@@ -292,10 +292,10 @@ void infer_bin_edges_from_input(const Tensor& input, const int64_t N,
TORCH_INTERNAL_ASSERT(min.is_contiguous() && max.is_contiguous());
- const scalar_t *min_data = min.data_ptr<scalar_t>();
+ const scalar_t *min_data = min.const_data_ptr<scalar_t>();
std::copy(min_data, min_data + N, leftmost_edges.begin());
- const scalar_t *max_data = max.data_ptr<scalar_t>();
+ const scalar_t *max_data = max.const_data_ptr<scalar_t>();
std::copy(max_data, max_data + N, rightmost_edges.begin());
}
diff --git a/aten/src/ATen/native/cpu/MultinomialKernel.cpp b/aten/src/ATen/native/cpu/MultinomialKernel.cpp
index 0e469d38af..1c4054abdf 100644
--- a/aten/src/ATen/native/cpu/MultinomialKernel.cpp
+++ b/aten/src/ATen/native/cpu/MultinomialKernel.cpp
@@ -140,7 +140,7 @@ multinomial_with_replacement_apply(
/* cumulative probability distribution vector */
Tensor cum_dist = at::empty({n_categories}, self.options().dtype(kFloat));
- const scalar_t* const self_ptr = self.data_ptr<scalar_t>();
+ const scalar_t* const self_ptr = self.const_data_ptr<scalar_t>();
float* const cum_dist_ptr = cum_dist.data_ptr<float>();
int64_t* const result_ptr = result.data_ptr<int64_t>();
diff --git a/aten/src/ATen/native/cpu/SparseFactories.cpp b/aten/src/ATen/native/cpu/SparseFactories.cpp
index 8f938e545f..2c0b54b8dd 100644
--- a/aten/src/ATen/native/cpu/SparseFactories.cpp
+++ b/aten/src/ATen/native/cpu/SparseFactories.cpp
@@ -29,7 +29,7 @@ void _spdiags_kernel_cpu(
"spdiags_cpu",
[&] {
auto* const values_write_ptr = values.data_ptr<scalar_t>();
- const auto* const diagonals_ptr = diagonals.data_ptr<scalar_t>();
+ const auto* const diagonals_ptr = diagonals.const_data_ptr<scalar_t>();
cpu_kernel(
iter,
diff --git a/aten/src/ATen/native/cpu/UpSampleKernelAVXAntialias.h b/aten/src/ATen/native/cpu/UpSampleKernelAVXAntialias.h
index 53ee6a603b..726a83c209 100644
--- a/aten/src/ATen/native/cpu/UpSampleKernelAVXAntialias.h
+++ b/aten/src/ATen/native/cpu/UpSampleKernelAVXAntialias.h
@@ -66,7 +66,7 @@ at::Tensor unpack_rgb(const at::Tensor& packed_tensor) {
// into as 32 bits. This generalizes to num_channels <= 4 and also works for
// non-channels_last tensors.
- const uint8_t* packed = (const uint8_t*)packed_tensor.data_ptr<uint8_t>();
+ const uint8_t* packed = (const uint8_t*)packed_tensor.const_data_ptr<uint8_t>();
auto num_pixels = packed_tensor.size(1) * packed_tensor.size(2);
auto num_channels = packed_tensor.size(0);
@@ -180,18 +180,18 @@ void ImagingResampleHorizontal(
// Although this may not be needed if / when we port all this code to use
// Vec.h since this would potentially give us another fall-back implem
- const int16_t* kk = (int16_t*)(horiz_indices_weights[3].data_ptr<double>());
+ const int16_t* kk = (int16_t*)(horiz_indices_weights[3].const_data_ptr<double>());
auto xout = unpacked_output.size(2);
auto yout = unpacked_output.size(1);
auto xin = unpacked_input.size(2);
TORCH_INTERNAL_ASSERT(num_channels == unpacked_input.size(0));
- const int64_t* idx_ptr_xmin = horiz_indices_weights[0].data_ptr<int64_t>();
- const int64_t* idx_ptr_size = horiz_indices_weights[1].data_ptr<int64_t>();
+ const int64_t* idx_ptr_xmin = horiz_indices_weights[0].const_data_ptr<int64_t>();
+ const int64_t* idx_ptr_size = horiz_indices_weights[1].const_data_ptr<int64_t>();
uint8_t* unpacked_output_p = unpacked_output.data_ptr<uint8_t>();
- const uint8_t* unpacked_input_p = unpacked_input.data_ptr<uint8_t>();
+ const uint8_t* unpacked_input_p = unpacked_input.const_data_ptr<uint8_t>();
int64_t yy = 0;
auto xout_stride = xout * num_channels;
@@ -255,13 +255,13 @@ void ImagingResampleVertical(
// basic_loop_aa_vertical<uint8_t>)
// Although this may not be needed if / when we port all this code to use
// Vec.h since this would potentially give us another fall-back implem
- const int16_t* kk = (int16_t*)(vert_indices_weights[3].data_ptr<double>());
+ const int16_t* kk = (int16_t*)(vert_indices_weights[3].const_data_ptr<double>());
- const int64_t* idx_ptr_xmin = vert_indices_weights[0].data_ptr<int64_t>();
- const int64_t* idx_ptr_size = vert_indices_weights[1].data_ptr<int64_t>();
+ const int64_t* idx_ptr_xmin = vert_indices_weights[0].const_data_ptr<int64_t>();
+ const int64_t* idx_ptr_size = vert_indices_weights[1].const_data_ptr<int64_t>();
uint8_t* unpacked_output_p = unpacked_output.data_ptr<uint8_t>();
- const uint8_t* unpacked_input_p = unpacked_input.data_ptr<uint8_t>();
+ const uint8_t* unpacked_input_p = unpacked_input.const_data_ptr<uint8_t>();
auto xout = unpacked_output.size(2);
auto yout = unpacked_output.size(1);
diff --git a/aten/src/ATen/native/cpu/group_norm_kernel.cpp b/aten/src/ATen/native/cpu/group_norm_kernel.cpp
index 92a940dce6..f6b7f2a5d4 100644
--- a/aten/src/ATen/native/cpu/group_norm_kernel.cpp
+++ b/aten/src/ATen/native/cpu/group_norm_kernel.cpp
@@ -1377,11 +1377,11 @@ void GroupNormBackwardKernelImplChannelsLastInternal(
TORCH_CHECK(!gamma.defined() || gamma.numel() == C);
int64_t D = C / group;
int64_t G = group;
- const T* dY_data = dY.data_ptr<T>();
- const T* X_data = X.data_ptr<T>();
- const PT* mean_data = mean.data_ptr<PT>();
- const PT* rstd_data = rstd.data_ptr<PT>();
- const PT* gamma_data = gamma.defined() ? gamma.data_ptr<PT>() : nullptr;
+ const T* dY_data = dY.const_data_ptr<T>();
+ const T* X_data = X.const_data_ptr<T>();
+ const PT* mean_data = mean.const_data_ptr<PT>();
+ const PT* rstd_data = rstd.const_data_ptr<PT>();
+ const PT* gamma_data = gamma.defined() ? gamma.const_data_ptr<PT>() : nullptr;
T* dX_data = dX.defined() ? dX.data_ptr<T>() : nullptr;
PT* dgamma_data = dgamma.defined() ? dgamma.data_ptr<PT>() : nullptr;
PT* dbeta_data = dbeta.defined() ? dbeta.data_ptr<PT>() : nullptr;
diff --git a/aten/src/ATen/native/cpu/int4mm_kernel.cpp b/aten/src/ATen/native/cpu/int4mm_kernel.cpp
index 57e485ab02..acb4b927f2 100644
--- a/aten/src/ATen/native/cpu/int4mm_kernel.cpp
+++ b/aten/src/ATen/native/cpu/int4mm_kernel.cpp
@@ -613,10 +613,10 @@ void int4pack_mm_kernel_(
const Tensor& qScaleAndZeros,
int N, int K) {
- const auto* A_data = A.data_ptr<T>();
- const auto* B_data = reinterpret_cast<uint8_t*>(B.data_ptr());
+ const auto* A_data = A.const_data_ptr<T>();
+ const auto* B_data = reinterpret_cast<const uint8_t*>(B.const_data_ptr());
auto* C_data = C.data_ptr<T>();
- const auto* S_data = qScaleAndZeros.data_ptr<T>();
+ const auto* S_data = qScaleAndZeros.const_data_ptr<T>();
int M = A.size(0);
diff --git a/aten/src/ATen/native/cpu/int8mm_kernel.cpp b/aten/src/ATen/native/cpu/int8mm_kernel.cpp
index 935a8180bc..4ef6cde4a8 100644
--- a/aten/src/ATen/native/cpu/int8mm_kernel.cpp
+++ b/aten/src/ATen/native/cpu/int8mm_kernel.cpp
@@ -284,10 +284,10 @@ void int8pack_mm_kernel_(
const Tensor& B,
const Tensor& scales) {
- const auto* A_data = A.data_ptr<T>();
- const auto* B_data = B.data_ptr<int8_t>();
+ const auto* A_data = A.const_data_ptr<T>();
+ const auto* B_data = B.const_data_ptr<int8_t>();
auto* C_data = C.data_ptr<T>();
- const auto* S_data = scales.data_ptr<T>();
+ const auto* S_data = scales.const_data_ptr<T>();
int M = A.size(0);
int N = B.size(0);
diff --git a/aten/src/ATen/native/cuda/TensorShape.cu b/aten/src/ATen/native/cuda/TensorShape.cu
index db6590fdfe..97cf4dade1 100644
--- a/aten/src/ATen/native/cuda/TensorShape.cu
+++ b/aten/src/ATen/native/cuda/TensorShape.cu
@@ -186,7 +186,7 @@ static inline std::vector<int64_t> get_split_base_addrs(
const at::Tensor& tensor,
at::IntArrayRef split_sizes,
int64_t dim) {
- const auto* data_ptr = static_cast<char*>(tensor.data_ptr());
+ const auto* data_ptr = static_cast<const char*>(tensor.const_data_ptr());
const auto strides = tensor.strides();
const auto element_sz = tensor.element_size();
int64_t off = 0;
diff --git a/aten/src/ATen/native/cuda/fused_adam_amsgrad_impl.cu b/aten/src/ATen/native/cuda/fused_adam_amsgrad_impl.cu
index 43527938fc..9cebb82e51 100644
--- a/aten/src/ATen/native/cuda/fused_adam_amsgrad_impl.cu
+++ b/aten/src/ATen/native/cuda/fused_adam_amsgrad_impl.cu
@@ -85,7 +85,7 @@ void _fused_adam_amsgrad_cuda_impl_(
grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
const float* found_inf_ptr =
found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
- const float* lr_ptr = lr.data_ptr<float>();
+ const float* lr_ptr = lr.const_data_ptr<float>();
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf,
diff --git a/aten/src/ATen/native/cuda/fused_adam_impl.cu b/aten/src/ATen/native/cuda/fused_adam_impl.cu
index 41fc1c304d..7f2843b3b4 100644
--- a/aten/src/ATen/native/cuda/fused_adam_impl.cu
+++ b/aten/src/ATen/native/cuda/fused_adam_impl.cu
@@ -75,7 +75,7 @@ void _fused_adam_cuda_impl_(
grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
const float* found_inf_ptr =
found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
- const float* lr_ptr = lr.data_ptr<float>();
+ const float* lr_ptr = lr.const_data_ptr<float>();
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf,
diff --git a/aten/src/ATen/native/cuda/fused_adamw_amsgrad_impl.cu b/aten/src/ATen/native/cuda/fused_adamw_amsgrad_impl.cu
index 052d1cee7d..376711c39d 100644
--- a/aten/src/ATen/native/cuda/fused_adamw_amsgrad_impl.cu
+++ b/aten/src/ATen/native/cuda/fused_adamw_amsgrad_impl.cu
@@ -86,7 +86,7 @@ void _fused_adamw_amsgrad_cuda_impl_(
grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
const float* found_inf_ptr =
found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
- const float* lr_ptr = lr.data_ptr<float>();
+ const float* lr_ptr = lr.const_data_ptr<float>();
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf,
diff --git a/aten/src/ATen/native/cuda/fused_adamw_impl.cu b/aten/src/ATen/native/cuda/fused_adamw_impl.cu
index 0411cc82eb..cc4feaa145 100644
--- a/aten/src/ATen/native/cuda/fused_adamw_impl.cu
+++ b/aten/src/ATen/native/cuda/fused_adamw_impl.cu
@@ -76,7 +76,7 @@ void _fused_adamw_cuda_impl_(
grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
const float* found_inf_ptr =
found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
- const float* lr_ptr = lr.data_ptr<float>();
+ const float* lr_ptr = lr.const_data_ptr<float>();
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf,
diff --git a/aten/src/ATen/native/nested/NestedTensorBackward.cpp b/aten/src/ATen/native/nested/NestedTensorBackward.cpp
index 54304c8f4f..e4465b792c 100644
--- a/aten/src/ATen/native/nested/NestedTensorBackward.cpp
+++ b/aten/src/ATen/native/nested/NestedTensorBackward.cpp
@@ -137,7 +137,7 @@ Tensor _nested_sum_backward_cpu(
AT_DISPATCH_ALL_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16, self_grad_buffer.scalar_type(), "nested_sum_dim_cpu", [&]() {
auto* self_grad_data = self_grad_buffer.data_ptr<scalar_t>();
- const auto* output_grad_data = grad_buffer.data_ptr<scalar_t>();
+ const auto* output_grad_data = grad_buffer.const_data_ptr<scalar_t>();
int64_t out_idx = 0, in_idx = 0;
for (const auto i : c10::irange(ntensors)) {
int64_t segments = num_segments[i].item<int64_t>();
diff --git a/aten/src/ATen/native/nested/NestedTensorMath.cpp b/aten/src/ATen/native/nested/NestedTensorMath.cpp
index bc88d732e9..7d3e826ef5 100644
--- a/aten/src/ATen/native/nested/NestedTensorMath.cpp
+++ b/aten/src/ATen/native/nested/NestedTensorMath.cpp
@@ -403,7 +403,7 @@ Tensor NestedTensor_sum_dim_CPU(
AT_DISPATCH_ALL_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16, buffer.scalar_type(), "nested_sum_dim_cpu", [&]() {
auto* output_data = output_buffer.data_ptr<scalar_t>();
- const auto* input_data = buffer.data_ptr<scalar_t>();
+ const auto* input_data = buffer.const_data_ptr<scalar_t>();
int64_t out_idx = 0, in_idx = 0;
for (const auto i : c10::irange(ntensors)) {
int64_t segments = num_segments[i].item<int64_t>();
diff --git a/aten/src/ATen/native/nested/NestedTensorUtils.h b/aten/src/ATen/native/nested/NestedTensorUtils.h
index f4d19128ab..3b4f18f11b 100644
--- a/aten/src/ATen/native/nested/NestedTensorUtils.h
+++ b/aten/src/ATen/native/nested/NestedTensorUtils.h
@@ -119,7 +119,7 @@ inline std::vector<IntArrayRef> NestedTensor_get_sizes(
if (orig_dim == 0) {
return sizes;
}
- const int64_t* sizemat_ptr = sizemat.data_ptr<int64_t>();
+ const int64_t* sizemat_ptr = sizemat.const_data_ptr<int64_t>();
for (const auto i : c10::irange(ntensors)) {
sizes[i] = IntArrayRef(sizemat_ptr, sizemat_ptr + orig_dim);
@@ -152,7 +152,7 @@ inline std::vector<IntArrayRef> NestedTensor_get_strides(
if (orig_dim == 0) {
return strides;
}
- const int64_t* stridemat_ptr = stridemat.data_ptr<int64_t>();
+ const int64_t* stridemat_ptr = stridemat.const_data_ptr<int64_t>();
for (const auto i : c10::irange(ntensors)) {
strides[i] = IntArrayRef(stridemat_ptr, stridemat_ptr + orig_dim);
stridemat_ptr += orig_dim;
diff --git a/aten/src/ATen/native/nested/cuda/NestedTensorBinaryOps.cu b/aten/src/ATen/native/nested/cuda/NestedTensorBinaryOps.cu
index f7055d7fd0..350c3a27e7 100644
--- a/aten/src/ATen/native/nested/cuda/NestedTensorBinaryOps.cu
+++ b/aten/src/ATen/native/nested/cuda/NestedTensorBinaryOps.cu
@@ -85,8 +85,8 @@ void _nested_op_dense_esuhm_kernel(Tensor& result, const Tensor& self, const Ten
auto result_offsets = at::cat({offsets, at::tensor(self_ptr->numel())});
result_offsets = result_offsets.to(kCUDA);
- const scalar_t* self_data_ptr = self_buffer.data_ptr<scalar_t>();
- const scalar_t* other_data_ptr = other.data_ptr<scalar_t>();
+ const scalar_t* self_data_ptr = self_buffer.const_data_ptr<scalar_t>();
+ const scalar_t* other_data_ptr = other.const_data_ptr<scalar_t>();
scalar_t* result_data_ptr = result_buffer.data_ptr<scalar_t>();
int64_t* result_offsets_ptr = result_offsets.data_ptr<int64_t>();
diff --git a/aten/src/ATen/native/nested/cuda/NestedTensorMatmul.cu b/aten/src/ATen/native/nested/cuda/NestedTensorMatmul.cu
index e1a364b310..252e3741c5 100644
--- a/aten/src/ATen/native/nested/cuda/NestedTensorMatmul.cu
+++ b/aten/src/ATen/native/nested/cuda/NestedTensorMatmul.cu
@@ -335,7 +335,7 @@ Tensor bmm_nested_cuda(const Tensor& self, const Tensor& mat2) {
Tensor output = wrap_buffer(out_buffer, out_sizemat);
auto out_ptr = get_nested_tensor_impl(output);
- const int64_t *out_offsets_ptr = out_ptr->get_storage_offsets().data_ptr<int64_t>();
+ const int64_t *out_offsets_ptr = out_ptr->get_storage_offsets().const_data_ptr<int64_t>();
#ifndef USE_ROCM
#ifndef _WIN32
diff --git a/aten/src/ATen/native/nested/cuda/NestedTensorTransformerFunctions.cpp b/aten/src/ATen/native/nested/cuda/NestedTensorTransformerFunctions.cpp
index 8955585b43..0da0c3e361 100644
--- a/aten/src/ATen/native/nested/cuda/NestedTensorTransformerFunctions.cpp
+++ b/aten/src/ATen/native/nested/cuda/NestedTensorTransformerFunctions.cpp
@@ -28,7 +28,7 @@ namespace {
int64_t padded_tensor_numel(const Tensor& sizes) {
const auto sizes_num_rows = sizes.sizes()[0];
const auto sizes_row_length = sizes.sizes()[1];
- const auto* sizes_data = sizes.data_ptr<int64_t>();
+ const auto* sizes_data = sizes.const_data_ptr<int64_t>();
int64_t numel = 0;
for (const auto row_num : c10::irange(sizes_num_rows)) {
const auto* row_ptr = sizes_data + row_num * sizes_row_length;
diff --git a/aten/src/ATen/native/nested/cuda/NestedTensorTransformerUtils.cpp b/aten/src/ATen/native/nested/cuda/NestedTensorTransformerUtils.cpp
index 0e26a3e6a5..f708920d04 100644
--- a/aten/src/ATen/native/nested/cuda/NestedTensorTransformerUtils.cpp
+++ b/aten/src/ATen/native/nested/cuda/NestedTensorTransformerUtils.cpp
@@ -133,8 +133,8 @@ int64_t get_nnz(Tensor nestedtensor) {
}
// Check the offsets are a constant multiple from the previous numels
- const int64_t* tensor_size_ptr = tensor_sizes.data_ptr<int64_t>();
- const int64_t* tensor_stride_ptr = tensor_strides.data_ptr<int64_t>();
+ const int64_t* tensor_size_ptr = tensor_sizes.const_data_ptr<int64_t>();
+ const int64_t* tensor_stride_ptr = tensor_strides.const_data_ptr<int64_t>();
int64_t numel_0 = (tensor_size_ptr[0] * tensor_stride_ptr[0]);
TORCH_INTERNAL_ASSERT(numel_0 > 0, "numels must be positive!");
diff --git a/aten/src/ATen/native/quantized/QTensor.cpp b/aten/src/ATen/native/quantized/QTensor.cpp
index b8841214fd..9705de0a4a 100644
--- a/aten/src/ATen/native/quantized/QTensor.cpp
+++ b/aten/src/ATen/native/quantized/QTensor.cpp
@@ -344,7 +344,7 @@ std::tuple<Tensor, Tensor> choose_qparams_optimized(
TORCH_CHECK(numel <= input_tensor.numel(), "numel ", numel,
" greater than input_tensor.numel() ", input_tensor.numel());
- const float* input_row = input_tensor.data_ptr<float>();
+ const float* input_row = input_tensor.const_data_ptr<float>();
float xmin = *std::min_element(input_row, input_row + numel);
float xmax = *std::max_element(input_row, input_row + numel);
@@ -352,7 +352,7 @@ std::tuple<Tensor, Tensor> choose_qparams_optimized(
// NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
int min_bins = n_bins * (1.0 - (float) ratio);
Tensor input_tensor_contig = input_tensor.contiguous();
- const float* input = input_tensor_contig.data_ptr<float>();
+ const float* input = input_tensor_contig.const_data_ptr<float>();
std::vector<float> q_input(numel);
float loss =
diff --git a/aten/src/ATen/native/quantized/cpu/IntReprQuant.cpp b/aten/src/ATen/native/quantized/cpu/IntReprQuant.cpp
index 9867a8f48a..cfcce3465a 100644
--- a/aten/src/ATen/native/quantized/cpu/IntReprQuant.cpp
+++ b/aten/src/ATen/native/quantized/cpu/IntReprQuant.cpp
@@ -32,7 +32,7 @@ Tensor int_repr_quantized_cpu(const Tensor& self) {
{out_size},
self.options().dtype(UNDERLYING_TYPE),
self.suggest_memory_format());
- const underlying_t* qdata = reinterpret_cast<underlying_t*>(self.data_ptr<scalar_t>());
+ const underlying_t* qdata = reinterpret_cast<const underlying_t*>(self.const_data_ptr<scalar_t>());
for (const auto i : c10::irange(dst.numel())) {
dst[i] = static_cast<underlying_t>(qdata[i]);
}
diff --git a/aten/src/ATen/native/quantized/cpu/Normalization.cpp b/aten/src/ATen/native/quantized/cpu/Normalization.cpp
index 05a9585274..0f5fb9884a 100644
--- a/aten/src/ATen/native/quantized/cpu/Normalization.cpp
+++ b/aten/src/ATen/native/quantized/cpu/Normalization.cpp
@@ -80,8 +80,8 @@ Tensor q_batch_norm1d_impl(
TORCH_CHECK(weight.numel() == C, "Expect weight size to match C");
TORCH_CHECK(bias.numel() == C, "Expect weight size to match C");
- const float* weight_data = weight.template data_ptr<float>();
- const float* bias_data = bias.template data_ptr<float>();
+ const float* weight_data = weight.template const_data_ptr<float>();
+ const float* bias_data = bias.template const_data_ptr<float>();
TORCH_CHECK(mean.numel() == C, "Mean size must match channel dimension");
TORCH_CHECK(var.numel() == C, "Variance size must match channel dimension");
@@ -91,8 +91,8 @@ Tensor q_batch_norm1d_impl(
float* alpha_data = alpha.mutable_data_ptr<float>();
float* beta_data = beta.data_ptr<float>();
- const float* mean_data = mean.template data_ptr<float>();
- const float* var_data = var.template data_ptr<float>();
+ const float* mean_data = mean.template const_data_ptr<float>();
+ const float* var_data = var.template const_data_ptr<float>();
if (ndim == 2) {
// create a fake H and W dimension so we can use NHWC
@@ -189,8 +189,8 @@ Tensor q_batch_norm2d_impl(
TORCH_CHECK(weight.numel() == C, "Expect weight size to match C");
TORCH_CHECK(bias.numel() == C, "Expect weight size to match C");
- const float* weight_data = weight.template data_ptr<float>();
- const float* bias_data = bias.template data_ptr<float>();
+ const float* weight_data = weight.template const_data_ptr<float>();
+ const float* bias_data = bias.template const_data_ptr<float>();
TORCH_CHECK(mean.numel() == C, "Mean size must match channel dimension");
TORCH_CHECK(var.numel() == C, "Variance size must match channel dimension");
@@ -200,8 +200,8 @@ Tensor q_batch_norm2d_impl(
float* alpha_data = alpha.mutable_data_ptr<float>();
float* beta_data = beta.data_ptr<float>();
- const float* mean_data = mean.template data_ptr<float>();
- const float* var_data = var.template data_ptr<float>();
+ const float* mean_data = mean.template const_data_ptr<float>();
+ const float* var_data = var.template const_data_ptr<float>();
auto oSizes = qx.sizes();
auto qx_nhwc = qx.contiguous(MemoryFormat::ChannelsLast);
@@ -285,8 +285,8 @@ Tensor q_batch_norm3d_impl(
TORCH_CHECK(weight.numel() == C, "Expect weight size to match C");
TORCH_CHECK(bias.numel() == C, "Expect weight size to match C");
- const float* weight_data = weight.template data_ptr<float>();
- const float* bias_data = bias.template data_ptr<float>();
+ const float* weight_data = weight.template const_data_ptr<float>();
+ const float* bias_data = bias.template const_data_ptr<float>();
TORCH_CHECK(mean.numel() == C, "Mean size must match channel dimension");
TORCH_CHECK(var.numel() == C, "Variance size must match channel dimension");
@@ -296,8 +296,8 @@ Tensor q_batch_norm3d_impl(
float* alpha_data = alpha.mutable_data_ptr<float>();
float* beta_data = beta.data_ptr<float>();
- const float* mean_data = mean.template data_ptr<float>();
- const float* var_data = var.template data_ptr<float>();
+ const float* mean_data = mean.template const_data_ptr<float>();
+ const float* var_data = var.template const_data_ptr<float>();
auto oSizes = qx.sizes();
auto qx_nhwc = qx.contiguous(MemoryFormat::ChannelsLast3d);
diff --git a/aten/src/ATen/native/quantized/cpu/fused_obs_fake_quant.cpp b/aten/src/ATen/native/quantized/cpu/fused_obs_fake_quant.cpp
index 77c60141b0..409f6e38d3 100644
--- a/aten/src/ATen/native/quantized/cpu/fused_obs_fake_quant.cpp
+++ b/aten/src/ATen/native/quantized/cpu/fused_obs_fake_quant.cpp
@@ -41,8 +41,8 @@ void calculate_moving_average(
} else {
std::tie(x_min, x_max) = at::aminmax(x);
}
- const float* min_curr_val = x_min.data_ptr<float>();
- const float* max_curr_val = x_max.data_ptr<float>();
+ const float* min_curr_val = x_min.const_data_ptr<float>();
+ const float* max_curr_val = x_max.const_data_ptr<float>();
// Moving Average Min/Max observer for input tensor
float* running_min_val = running_min.data_ptr<float>();
float* running_max_val = running_max.data_ptr<float>();
diff --git a/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp b/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
index fee759c3a9..dc9063ecf4 100644
--- a/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
+++ b/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
@@ -2797,8 +2797,8 @@ void quantized_normalize_kernel(
"Unexpected size of beta");
scalar_t* X_data = X.data_ptr<scalar_t>();
- const float* gamma_data = gamma.defined() ? gamma.data_ptr<float>() : nullptr;
- const float* beta_data = beta.defined() ? beta.data_ptr<float>() : nullptr;
+ const float* gamma_data = gamma.defined() ? gamma.const_data_ptr<float>() : nullptr;
+ const float* beta_data = beta.defined() ? beta.const_data_ptr<float>() : nullptr;
scalar_t* Y_data = Y->data_ptr<scalar_t>();
const bool gamma_null = gamma_data == nullptr;
const bool beta_null = beta_data == nullptr;
@@ -3085,8 +3085,8 @@ void quantized_groupnorm_nhwc_kernel(
"Unexpected size of beta");
scalar_t* X_data = X.data_ptr<scalar_t>();
- const float* gamma_data = gamma.defined() ? gamma.data_ptr<float>() : nullptr;
- const float* beta_data = beta.defined() ? beta.data_ptr<float>() : nullptr;
+ const float* gamma_data = gamma.defined() ? gamma.const_data_ptr<float>() : nullptr;
+ const float* beta_data = beta.defined() ? beta.const_data_ptr<float>() : nullptr;
scalar_t* Y_data = Y->data_ptr<scalar_t>();
const bool gamma_null = gamma_data == nullptr;
const bool beta_null = beta_data == nullptr;
@@ -3336,7 +3336,7 @@ void quantize_tensor_per_tensor_affine_cpu(
AT_DISPATCH_QINT_TYPES(
qtensor.scalar_type(), "quantize_tensor_per_tensor_affine_cpu", [&]() {
check_tensor_memory_format(rtensor, qtensor);
- const float* rd = rtensor.data_ptr<float>();
+ const float* rd = rtensor.const_data_ptr<float>();
auto qd = reinterpret_cast<underlying_t*>(qtensor.data_ptr<scalar_t>());
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
fbgemm::TensorQuantizationParams qparams;
@@ -3668,7 +3668,7 @@ void quantize_tensor_per_tensor_affine_cpu(
double scale,
int64_t zero_point) {
check_tensor_memory_format(rtensor, qtensor);
- const float* rdata = rtensor.data_ptr<float>();
+ const float* rdata = rtensor.const_data_ptr<float>();
int numel = rtensor.numel();
#if defined(__ARM_NEON__) || defined(__aarch64__)
AT_DISPATCH_QINT_TYPES(
@@ -3707,7 +3707,7 @@ void dequantize_tensor_per_tensor_affine_cpu(
#if defined(__ARM_NEON__) || defined(__aarch64__)
AT_DISPATCH_QINT_TYPES(
qtensor.scalar_type(), "dequantize_tensor_per_tensor_affine_cpu", [&]() {
- const scalar_t* qdata = qtensor.data_ptr<scalar_t>();
+ const scalar_t* qdata = qtensor.const_data_ptr<scalar_t>();
auto dequantize_range = [&](int64_t begin, int64_t end) {
dequantize_tensor_arm<scalar_t>(
qdata + begin, rdata + begin, end - begin, scale, zero_point);
@@ -3722,7 +3722,7 @@ void dequantize_tensor_per_tensor_affine_cpu(
// Fallback path
AT_DISPATCH_QINT_TYPES(
qtensor.scalar_type(), "dequantize_tensor_per_tensor_affine_cpu", [&]() {
- const scalar_t* qdata = qtensor.data_ptr<scalar_t>();
+ const scalar_t* qdata = qtensor.const_data_ptr<scalar_t>();
for (const auto i : c10::irange(numel)) {
rdata[i] = dequantize_val<scalar_t>(scale, zero_point, qdata[i]);
}
@@ -3752,7 +3752,7 @@ void quantize_tensor_per_channel_impl(
int64_t channels = rtensor.size(axis);
auto scales_data = scales.data_ptr<double>();
auto zero_points_data = zero_points.data_ptr<int64_t>();
- const float* in = rtensor.data_ptr<float>();
+ const float* in = rtensor.const_data_ptr<float>();
auto out = qtensor.data_ptr<T>();
if (axis == 1 &&
(rtensor.is_contiguous(MemoryFormat::ChannelsLast) ||
@@ -3804,7 +3804,7 @@ void quantize_tensor_per_channel_impl<c10::quint8>(
int64_t channels = rtensor.size(axis);
auto scales_data = scales.data_ptr<double>();
auto zero_points_data = zero_points.data_ptr<int64_t>();
- const float* in = rtensor.data_ptr<float>();
+ const float* in = rtensor.const_data_ptr<float>();
auto out = (uint8_t*)qtensor.data_ptr<c10::quint8>();
#if defined(__ARM_NEON__)
// magic float and magic int to take care of rounding
@@ -4022,7 +4022,7 @@ void dequantize_per_channel_affine_kernel(
auto scales_data = scales.data_ptr<T>();
auto zero_points_data = zero_points.data_ptr<N>();
check_tensor_memory_format(qtensor, rtensor);
- const auto* qd = qtensor.data_ptr<Q>();
+ const auto* qd = qtensor.const_data_ptr<Q>();
float* rd = rtensor.data_ptr<float>();
const auto elem_per_byte = 8 / bit_width;
if (axis == 1 && (rtensor.is_contiguous(MemoryFormat::ChannelsLast) ||
@@ -4099,7 +4099,7 @@ void quantize_tensor_per_channel_float_qparams_cpu(
auto scales_data = scales.data_ptr<float>();
auto zero_points_data = zero_points.data_ptr<float>();
check_tensor_memory_format(rtensor, qtensor);
- const float* rdata = rtensor.data_ptr<float>();
+ const float* rdata = rtensor.const_data_ptr<float>();
auto qdata = reinterpret_cast<underlying_t*>(qtensor.data_ptr<scalar_t>());
const auto elem_per_byte = CHAR_BIT / bit_width;
int qvalue = 0;
@@ -4163,7 +4163,7 @@ void quantize_tensor_per_tensor_affine_sub_byte_cpu(
AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(
qtensor.scalar_type(), "quantize_tensor_per_tensor_affine_sub_byte_cpu", [&]() {
check_tensor_memory_format(rtensor, qtensor);
- const float* const rdata = rtensor.data_ptr<float>();
+ const float* const rdata = rtensor.const_data_ptr<float>();
auto qdata = reinterpret_cast<underlying_t*>(qtensor.data_ptr<scalar_t>());
auto numel = rtensor.numel();
const auto elem_per_byte = CHAR_BIT / bit_width;
@@ -4196,7 +4196,7 @@ void dequantize_tensor_per_tensor_affine_sub_byte_cpu(
qtensor.scalar_type(), "dequantize_tensor_per_tensor_affine_sub_byte_cpu", [&]() {
check_tensor_memory_format(rtensor, qtensor);
auto rdata = rtensor.data_ptr<float>();
- const underlying_t* qdata = reinterpret_cast<underlying_t*>(qtensor.data_ptr<scalar_t>());
+ const underlying_t* qdata = reinterpret_cast<const underlying_t*>(qtensor.const_data_ptr<scalar_t>());
auto numel = rtensor.numel();
const auto elem_per_byte = CHAR_BIT / bit_width;
diff --git a/aten/src/ATen/native/quantized/cpu/qconv.cpp b/aten/src/ATen/native/quantized/cpu/qconv.cpp
index 596b16370d..50155d85d4 100644
--- a/aten/src/ATen/native/quantized/cpu/qconv.cpp
+++ b/aten/src/ATen/native/quantized/cpu/qconv.cpp
@@ -647,7 +647,7 @@ at::Tensor PackedConvWeightsQnnp<kSpatialDim>::apply_impl_xnnp(
// create an empty tensor for packing the weights
const at::Tensor weight_contig =
orig_weight.contiguous(c10::MemoryFormat::ChannelsLast);
- const float* w_scales_data = w_scales.data_ptr<float>();
+ const float* w_scales_data = w_scales.const_data_ptr<float>();
underlying_t w_zp = 0;
at::Tensor weight_tensor;
diff --git a/aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp b/aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp
index 3612f8eba2..7c1093a1c4 100644
--- a/aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp
+++ b/aten/src/ATen/native/quantized/cpu/qembeddingbag_unpack.cpp
@@ -37,7 +37,7 @@ at::Tensor PackedEmbeddingBagWeight::unpack() {
scale_bias_bytes = 4;
}
- const auto* input = packed_weight.data_ptr<uint8_t>();
+ const auto* input = packed_weight.const_data_ptr<uint8_t>();
// Calculate the output shape, accounting for the last n bytes to be used
// for scale/bias rest of the entries are packed depending on the bit_width.
std::vector<int64_t> output_shape = {
@@ -125,7 +125,7 @@ Tensor& qembeddingbag_byte_unpack_out(Tensor& output, const Tensor& packed_weigh
// The last 2 values are used to store the FP32 scale and zero_point values
// per row.
const int32_t output_columns = input_columns - 2 * sizeof(float);
- const auto* input_data = packed_weight.data_ptr<uint8_t>();
+ const auto* input_data = packed_weight.const_data_ptr<uint8_t>();
std::vector<int64_t> output_shape = packed_weight_sizes.vec();
output_shape[col_dim] = output_columns;
@@ -187,7 +187,7 @@ Tensor _qembeddingbag_nbit_unpack_helper(
int BIT_RATE) {
const auto input_rows = packed_weight.size(0);
const auto input_columns = packed_weight.size(1);
- const auto* input_data = packed_weight.data_ptr<uint8_t>();
+ const auto* input_data = packed_weight.const_data_ptr<uint8_t>();
int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
// The last 4 bytes per row are two fp16 scale and zero_point.
diff --git a/aten/src/ATen/native/quantized/cpu/qlinear.cpp b/aten/src/ATen/native/quantized/cpu/qlinear.cpp
index 29e6726c47..166d0fd617 100644
--- a/aten/src/ATen/native/quantized/cpu/qlinear.cpp
+++ b/aten/src/ATen/native/quantized/cpu/qlinear.cpp
@@ -316,7 +316,7 @@ at::Tensor PackedLinearWeight::apply_with_input_q_dq_qweight_dq_output_fp32_impl
fbgemm::fbgemmSupportedCPU(), "Your CPU does not support FBGEMM.");
auto input_contig = input.expect_contiguous();
- const auto* input_ptr = input_contig->data_ptr<float>();
+ const auto* input_ptr = input_contig->const_data_ptr<float>();
TORCH_CHECK(
input.dim() >= 2,
@@ -485,7 +485,7 @@ at::Tensor PackedLinearWeightsQnnp::apply_impl_xnnp(
xnn_operator_t xnnp_op = nullptr;
- const float* weight_scales_data = w_scales.data_ptr<float>();
+ const float* weight_scales_data = w_scales.const_data_ptr<float>();
// prepare weights
underlying_t w_zp = static_cast<underlying_t>(
diff --git a/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp b/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp
index 3c267c7ebc..935ad081bd 100644
--- a/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp
+++ b/aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp
@@ -46,7 +46,7 @@ at::Tensor PackedLinearWeight::apply_dynamic_impl(
// TODO: contiguous is called for further jit optimizations.
auto input_contig = input.contiguous();
- const auto* input_ptr = input_contig.data_ptr<float>();
+ const auto* input_ptr = input_contig.const_data_ptr<float>();
TORCH_CHECK(
input.dim() >= 2,
@@ -269,7 +269,7 @@ at::Tensor PackedLinearWeightsQnnp::apply_dynamic_impl(
TORCH_CHECK(bias_vec.dim() == 1, "bias should be a vector (1D Tensor)");
auto bias_contig = bias_vec.contiguous();
- const float* bias_ptr = bias_contig.data_ptr<float>();
+ const float* bias_ptr = bias_contig.const_data_ptr<float>();
// Calculate statistics for quantization of input Tensor
// TODO: optimized kernel
@@ -410,7 +410,7 @@ at::Tensor& PackedLinearWeightFp16::apply_dynamic_impl(
const at::Tensor& input,
at::Tensor& output) {
const at::Tensor input_contig = input.contiguous();
- const float* input_ptr = input_contig.data_ptr<float>();
+ const float* input_ptr = input_contig.const_data_ptr<float>();
auto& packed_weight_fp16 = *w;
diff --git a/aten/src/ATen/native/quantized/cuda/EmbeddingBag.cu b/aten/src/ATen/native/quantized/cuda/EmbeddingBag.cu
index 0580c47b8c..3574bfe28f 100644
--- a/aten/src/ATen/native/quantized/cuda/EmbeddingBag.cu
+++ b/aten/src/ATen/native/quantized/cuda/EmbeddingBag.cu
@@ -545,7 +545,7 @@ Tensor qembeddingbag_4bit_unpack(const Tensor& packed_weight) {
int BIT_RATE = 4;
const auto input_rows = packed_weight.size(0);
const auto input_columns = packed_weight.size(1);
- const auto* input_data = packed_weight.data_ptr<uint8_t>();
+ const auto* input_data = packed_weight.const_data_ptr<uint8_t>();
int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
// The last 4 bytes per row are two fp16 scale and zero_point.
diff --git a/aten/src/ATen/native/sparse/FlattenIndicesCommon.h b/aten/src/ATen/native/sparse/FlattenIndicesCommon.h
index 26c4f02902..0e79ed809a 100644
--- a/aten/src/ATen/native/sparse/FlattenIndicesCommon.h
+++ b/aten/src/ATen/native/sparse/FlattenIndicesCommon.h
@@ -62,7 +62,7 @@ Tensor _flatten_indices_impl(const Tensor& indices, IntArrayRef size) {
.build();
{
- const auto* RESTRICT ptr_indices = indices.data_ptr<index_t>();
+ const auto* RESTRICT ptr_indices = indices.const_data_ptr<index_t>();
KernelLauncher<kernel_t>::launch(iter,
// NOTE: capture by value required by CUDA
diff --git a/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionCommon.h b/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionCommon.h
index 2a1ca9e2e5..8782031c49 100644
--- a/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionCommon.h
+++ b/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionCommon.h
@@ -270,7 +270,7 @@ void _sparse_binary_op_intersection_kernel_impl(
.build();
{
- const auto* RESTRICT ptr_indices = indices.data_ptr<index_t>();
+ const auto* RESTRICT ptr_indices = indices.const_data_ptr<index_t>();
KernelLauncher::launch(iter,
// NOTE: capture by value required by CUDA
@@ -348,8 +348,8 @@ void _sparse_binary_op_intersection_kernel_impl(
.build();
{
- const auto* RESTRICT ptr_indices = source_indices.data_ptr<index_t>();
- const auto* RESTRICT ptr_sorted_hash = sorted_hash.data_ptr<int64_t>();
+ const auto* RESTRICT ptr_indices = source_indices.const_data_ptr<index_t>();
+ const auto* RESTRICT ptr_sorted_hash = sorted_hash.const_data_ptr<int64_t>();
const auto sorted_hash_len = sorted_hash.numel();
auto* RESTRICT ptr_intersection_count = intersection_count.data_ptr<int64_t>();
auto* RESTRICT ptr_intersection_first_idx = intersection_first_idx.data_ptr<int64_t>();
diff --git a/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionKernel.cpp b/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionKernel.cpp
index b48822e32f..2db8c9e940 100644
--- a/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionKernel.cpp
+++ b/aten/src/ATen/native/sparse/SparseBinaryOpIntersectionKernel.cpp
@@ -76,7 +76,7 @@ struct CPUValueSelectionIntersectionKernel {
const auto* ptr_rhs_values_bytes = data[3];
const auto* ptr_rhs_select_idx_bytes = data[4];
const auto* ptr_intersection_counts_bytes = data[5];
- const auto* ptr_argsort = argsort.data_ptr<index_t>();
+ const auto* ptr_argsort = argsort.const_data_ptr<index_t>();
for (int64_t i = 0; i < n; ++i) {
// Exctract data
diff --git a/aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h b/aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h
index 49ea0e1a19..ec4c084a39 100644
--- a/aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h
+++ b/aten/src/ATen/native/sparse/ValidateCompressedIndicesCommon.h
@@ -312,7 +312,7 @@ void _validate_compressed_sparse_indices_kernel(
idx.scalar_type(),
NAME,
[&iter, &idx, dim, nnz, idx_ndims, &idx_sizes, &idx_strides]() {
- const auto* RESTRICT ptr_idx = idx.data_ptr<index_t>();
+ const auto* RESTRICT ptr_idx = idx.const_data_ptr<index_t>();
const auto zero = index_t{0};
KernelLauncher::launch(
iter,
diff --git a/aten/src/ATen/native/sparse/cuda/SparseCsrTensorMath.cu b/aten/src/ATen/native/sparse/cuda/SparseCsrTensorMath.cu
index 7ea6823b62..75474e77ea 100644
--- a/aten/src/ATen/native/sparse/cuda/SparseCsrTensorMath.cu
+++ b/aten/src/ATen/native/sparse/cuda/SparseCsrTensorMath.cu
@@ -67,7 +67,7 @@ __global__ void convert_indices_from_coo_to_csr_cuda_kernel(output_t* data_out,
template <typename input_t, typename output_t>
void convert_indices_from_coo_to_csr_cuda(const Tensor& result, const Tensor& input, const int64_t size) {
int64_t numel = input.numel();
- const input_t* data_in = input.data_ptr<input_t>();
+ const input_t* data_in = input.const_data_ptr<input_t>();
output_t* data_out = result.data_ptr<output_t>();
if (numel == 0) {
@@ -113,7 +113,7 @@ void convert_indices_from_csr_to_coo_cuda(const Tensor& indices, const Tensor& c
}
auto crow_indices_ = crow_indices.expect_contiguous();
- const input_t* crow_indices_data_in = crow_indices_->data_ptr<input_t>();
+ const input_t* crow_indices_data_in = crow_indices_->const_data_ptr<input_t>();
TORCH_INTERNAL_ASSERT(indices.is_contiguous());
auto row0 = indices.select(0, transpose?batch_ndim + 1:batch_ndim + 0);
auto row1 = indices.select(0, transpose?batch_ndim + 0:batch_ndim + 1);
diff --git a/aten/src/ATen/native/vulkan/ops/Mm.cpp b/aten/src/ATen/native/vulkan/ops/Mm.cpp
index 33181aef2f..e5893e8172 100644
--- a/aten/src/ATen/native/vulkan/ops/Mm.cpp
+++ b/aten/src/ATen/native/vulkan/ops/Mm.cpp
@@ -181,7 +181,7 @@ vTensor pack_biases_quantized_weights(
if (bias_arg) {
const Tensor bias = bias_arg->contiguous();
const IntArrayRef b_sizes = bias.sizes();
- const float* const src_bias_ptr = bias.data_ptr<float>();
+ const float* const src_bias_ptr = bias.const_data_ptr<float>();
/* Source */
int64_t src_kb_sz = 0;
diff --git a/aten/src/ATen/native/vulkan/ops/Mm.h b/aten/src/ATen/native/vulkan/ops/Mm.h
index f7ffd1a5fc..b4fcb31bc3 100644
--- a/aten/src/ATen/native/vulkan/ops/Mm.h
+++ b/aten/src/ATen/native/vulkan/ops/Mm.h
@@ -26,7 +26,7 @@ void stage_pack_weights(
const int64_t src_matrix_sz = src_kw_sz * src_kh_sz;
const int64_t dst_plane_sz = dst_kw_sz * dst_kh_sz;
const int64_t dst_matrix_sz = dst_plane_sz * 4;
- const T* const src_weight_ptr = weight.data_ptr<T>();
+ const T* const src_weight_ptr = weight.const_data_ptr<T>();
api::StorageBuffer staging(context, api::kFloat, v_weight.gpu_numel());
{
api::MemoryMap mapping(staging.buffer(), api::MemoryAccessType::WRITE);
diff --git a/aten/src/ATen/test/quantized_test.cpp b/aten/src/ATen/test/quantized_test.cpp
index 2363f83137..0262052d52 100644
--- a/aten/src/ATen/test/quantized_test.cpp
+++ b/aten/src/ATen/test/quantized_test.cpp
@@ -316,7 +316,7 @@ TEST(TestQTensor, TestArmVectorizedQuantizeDequantize) {
quantize_val_with_datatype(scale, zero_point, x_values[i]).val_);
}
const Tensor r = q.dequantize();
- const float* r_data = r.data_ptr<float>();
+ const float* r_data = r.const_data_ptr<float>();
for (const auto i : c10::irange(numel)) {
ASSERT_FLOAT_EQ(
r_data[i],
diff --git a/test/cpp/c10d/ProcessGroupGlooAsyncTest.cpp b/test/cpp/c10d/ProcessGroupGlooAsyncTest.cpp
index 0815de7e6b..0059560a60 100644
--- a/test/cpp/c10d/ProcessGroupGlooAsyncTest.cpp
+++ b/test/cpp/c10d/ProcessGroupGlooAsyncTest.cpp
@@ -243,7 +243,7 @@ void runAsyncBroadcastTest(
for (const auto i : c10::irange(numProcesses)) {
auto tensors = tests[i].getTensors();
for (const auto& tensor : tensors) {
- const auto* const data = tensor.data_ptr<float>();
+ const auto* const data = tensor.const_data_ptr<float>();
for (const auto k : c10::irange(tensor.numel())) {
EXPECT_EQ(data[k], expected);
}
diff --git a/test/cpp/c10d/ProcessGroupNCCLTest.cpp b/test/cpp/c10d/ProcessGroupNCCLTest.cpp
index 305a774aae..d2dc02d323 100644
--- a/test/cpp/c10d/ProcessGroupNCCLTest.cpp
+++ b/test/cpp/c10d/ProcessGroupNCCLTest.cpp
@@ -417,7 +417,7 @@ void testAllreduce(const std::string& path, int rank, int size) {
const auto expected = (totalNumGPUs * (totalNumGPUs - 1)) / 2;
const auto tensors = test.getTensors();
for (const auto& tensor : tensors) {
- const auto* const data = tensor.data_ptr<float>();
+ const auto* const data = tensor.const_data_ptr<float>();
for (const auto k : c10::irange(tensor.numel())) {
EXPECT_EQ(data[k], expected)
<< "Allreduce outputs do not match expected outputs";
@@ -463,7 +463,7 @@ void testSparseAllreduce(const std::string& path, int rank, int size) {
}
// validate all tensor values are expected value
- const auto* const data = values.data_ptr<float>();
+ const auto* const data = values.const_data_ptr<float>();
for (const auto k : c10::irange(values.numel())) {
EXPECT_EQ(data[k], expected)
<< "Allreduce outputs do not match expected outputs";
@@ -514,7 +514,7 @@ void testSparseAllreduceLarge(const std::string& path, int rank, int size) {
}
// validate all tensor values are expected value
- const auto* const data = values.data_ptr<float>();
+ const auto* const data = values.const_data_ptr<float>();
for (const auto k : c10::irange(values.numel())) {
EXPECT_EQ(data[k], expected)
<< "Allreduce outputs do not match expected outputs";
@@ -544,7 +544,7 @@ void testBroadcast(const std::string& path, int rank, int size) {
const auto expected = (rootRank * numDevices + rootTensor);
const auto tensors = test.getTensors();
for (const auto& tensor : tensors) {
- const auto* const data = tensor.data_ptr<float>();
+ const auto* const data = tensor.const_data_ptr<float>();
for (const auto k : c10::irange(tensor.numel())) {
EXPECT_EQ(data[k], expected)
<< "Broadcast outputs do not match expected outputs";
@@ -703,7 +703,7 @@ void testSplittingCommunicator(const std::string& path, int rank, int size) {
const auto expected = (rootRank * numDevices + rootTensor);
const auto tensors = test->getTensors();
for (const auto& tensor : tensors) {
- const auto* const data = tensor.data_ptr<float>();
+ const auto* const data = tensor.const_data_ptr<float>();
for (const auto k : c10::irange(tensor.numel())) {
EXPECT_EQ(data[k], expected)
<< "Broadcast outputs do not match expected outputs";
diff --git a/test/cpp/jit/test_custom_class_registrations.cpp b/test/cpp/jit/test_custom_class_registrations.cpp
index e92c397a03..8b83b7f0a8 100644
--- a/test/cpp/jit/test_custom_class_registrations.cpp
+++ b/test/cpp/jit/test_custom_class_registrations.cpp
@@ -131,7 +131,7 @@ struct TensorQueue : torch::CustomClassHolder {
const std::string key = "queue";
at::Tensor size_tensor;
size_tensor = dict.at(std::string(key + "/size")).cpu();
- const auto* size_tensor_acc = size_tensor.data_ptr<int64_t>();
+ const auto* size_tensor_acc = size_tensor.const_data_ptr<int64_t>();
int64_t queue_size = size_tensor_acc[0];
for (const auto index : c10::irange(queue_size)) {
diff --git a/torch/csrc/cuda/nccl.cpp b/torch/csrc/cuda/nccl.cpp
index f47f9502a5..f9b29c38dc 100644
--- a/torch/csrc/cuda/nccl.cpp
+++ b/torch/csrc/cuda/nccl.cpp
@@ -818,7 +818,7 @@ void all2all_single_equal_split(
auto type = to_nccl_data_type(input);
size_t count = input.numel() / size;
size_t rankdiff = input.nbytes() / size;
- const auto* sendbuff = reinterpret_cast<char*>(input.data_ptr());
+ const auto* sendbuff = reinterpret_cast<const char*>(input.const_data_ptr());
auto* recvbuff = reinterpret_cast<char*>(output.data_ptr());
auto comm = to_nccl_comm(_comm);
#if defined(USE_ROCM) && ROCM_VERSION >= 50000
@@ -1040,7 +1040,7 @@ void gather(
size_t count = inputs.numel();
auto type = to_nccl_data_type(inputs);
- const auto* sendbuff = reinterpret_cast<char*>(inputs.data_ptr());
+ const auto* sendbuff = reinterpret_cast<const char*>(inputs.const_data_ptr());
NCCL_CHECK(ncclGroupStart());
@@ -1097,7 +1097,8 @@ void scatter(
if (r != root) {
size_t send_count = inputs[r].numel();
auto send_type = to_nccl_data_type(inputs[r]);
- const auto* sendbuff = reinterpret_cast<char*>(inputs[r].data_ptr());
+ const auto* sendbuff =
+ reinterpret_cast<const char*>(inputs[r].const_data_ptr());
NCCL_CHECK(ncclSend(sendbuff, send_count, send_type, r, comm, stream));
} else {
// on its own rank, simply copy it to the output
|
2.41.0
|
266e472e2582ecc3f4780ea23db6e4386bdf6a6
|
Fri, 26 Apr 2024 19:09:29 +0000
|
[PATCH 0736/1000] rename ort to maia in dynamo's ort backend. (#124967)
|
Fixes #124966 Co-authored-by: Thiago Crepaldi <thiagofc@microsoft.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124967 Approved by: https://github.com/thiagocrepaldi
|
diff --git a/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py b/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py
index 646ec4868f..df981e0c56 100644
--- a/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py
+++ b/test/onnx/dynamo/test_dynamo_with_onnxruntime_backend.py
@@ -50,6 +50,20 @@ class TestDynamoWithONNXRuntime(onnx_test_common._TestONNXRuntime):
torch._dynamo.reset()
OrtBackend.clear_cached_instances()
+ def test_get_ort_device_type(self):
+ self.assertEqual(
+ torch.onnx._internal.onnxruntime._get_ort_device_type("cuda"),
+ torch.onnx._internal.onnxruntime.ORTC.OrtDevice.cuda(),
+ )
+ self.assertEqual(
+ torch.onnx._internal.onnxruntime._get_ort_device_type("cpu"),
+ torch.onnx._internal.onnxruntime.ORTC.OrtDevice.cpu(),
+ )
+ self.assertEqual(
+ torch.onnx._internal.onnxruntime._get_ort_device_type("maia"),
+ torch.onnx._internal.onnxruntime.ORTC.OrtDevice.npu(),
+ )
+
def test_torch_compile_backend_registration(self):
self.assertIn("onnxrt", torch._dynamo.backends.registry.list_backends())
backend = torch._dynamo.backends.registry.lookup_backend("onnxrt")
diff --git a/torch/onnx/_internal/onnxruntime.py b/torch/onnx/_internal/onnxruntime.py
index 4be4d3fbff..0505b9f470 100644
--- a/torch/onnx/_internal/onnxruntime.py
+++ b/torch/onnx/_internal/onnxruntime.py
@@ -145,7 +145,7 @@ def _get_ort_device_type(device_type: str):
if device_type == "cpu":
return ORTC.OrtDevice.cpu()
# ort pytorch device is mapped to NPU OrtDevice type
- if device_type == "ort":
+ if device_type == "maia":
return ORTC.OrtDevice.npu()
raise ValueError("Unsupported device type: " + device_type)
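To exercise the renamed mapping in isolation, here is a minimal sketch mirroring the unit test added above; it assumes the onnxruntime packages are installed so that `ORTC` resolves inside `torch.onnx._internal.onnxruntime`:
```python
# Minimal sketch, assuming onnxruntime is available; mirrors the new unit test.
from torch.onnx._internal import onnxruntime as ort_backend

# "maia" now maps to the NPU OrtDevice type; the old "ort" string is no longer accepted.
assert ort_backend._get_ort_device_type("maia") == ort_backend.ORTC.OrtDevice.npu()
assert ort_backend._get_ort_device_type("cpu") == ort_backend.ORTC.OrtDevice.cpu()
```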
|
2.41.0
|
e2b4c6ed60c92d23f46308109d942ad11c5d84d
|
Fri, 26 Apr 2024 08:55:44 -0700
|
[PATCH 0737/1000] Fix broken docs (#124940)
|
These were causing doctest to be unhappy. In particular, the doc from #124496 caused the "trunk / win-vs2019-cpu-py3 / test" job on #124771 to fail when pushing. Not sure why it wasn't a problem on the original PR. Testing: `./test/run_doctests.sh`: before: ``` === 4 warnings in 11.21 seconds === ``` after: ``` === in 11.11 seconds === ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/124940 Approved by: https://github.com/zou3519, https://github.com/atalman, https://github.com/huydhn
|
diff --git a/.github/workflows/_win-test.yml b/.github/workflows/_win-test.yml
index ebc8434407..99d037f035 100644
--- a/.github/workflows/_win-test.yml
+++ b/.github/workflows/_win-test.yml
@@ -92,7 +92,7 @@ jobs:
retry_wait_seconds: 30
command: |
set -eu
- python3 -m pip install rockset==1.0.3
+ python3 -m pip install rockset==1.0.3 'xdoctest>=1.1.0'
- name: Start monitoring script
id: monitor-script
diff --git a/torch/compiler/__init__.py b/torch/compiler/__init__.py
index cd3d032ea1..cf0b544e92 100644
--- a/torch/compiler/__init__.py
+++ b/torch/compiler/__init__.py
@@ -166,9 +166,9 @@ def is_compiling() -> bool:
>>> def forward(self, x):
>>> if not torch.compiler.is_compiling():
- >>> ...logic that is not needed in a compiled/traced graph...
+ >>> pass # ...logic that is not needed in a compiled/traced graph...
>>>
- >>> ...rest of the function...
+ >>> # ...rest of the function...
"""
if torch.jit.is_scripting():
return False
@@ -186,8 +186,8 @@ def is_dynamo_compiling() -> bool:
>>> def forward(self, x):
>>> if not torch.compiler.is_dynamo_compiling():
- >>> ...logic that is not needed in a TorchDynamo-traced graph...
+ >>> pass # ...logic that is not needed in a TorchDynamo-traced graph...
>>>
- >>> ...rest of the function...
+ >>> # ...rest of the function...
"""
return False
diff --git a/torch/library.py b/torch/library.py
index 0515fa5bc1..2c413a7c9a 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -849,11 +849,11 @@ def opcheck(
>>> def _(x, y):
>>> return torch.empty_like(x)
>>>
- >>> def setup_context(ctx, inputs, output)
+ >>> def setup_context(ctx, inputs, output):
>>> y, = inputs
>>> ctx.y = y
>>>
- >>> def backward(ctx, grad)
+ >>> def backward(ctx, grad):
>>> return grad * ctx.y, None
>>>
>>> numpy_sin.register_autograd(backward, setup_context=setup_context)
diff --git a/torch/utils/data/_utils/collate.py b/torch/utils/data/_utils/collate.py
index 5262497a90..4c17597bd6 100644
--- a/torch/utils/data/_utils/collate.py
+++ b/torch/utils/data/_utils/collate.py
@@ -121,7 +121,7 @@ def collate(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]
Examples:
>>> def collate_tensor_fn(batch, *, collate_fn_map):
- >>> # Extend this function to handle batch of tensors
+ ... # Extend this function to handle batch of tensors
... return torch.stack(batch, 0)
>>> def custom_collate(batch):
... collate_map = {torch.Tensor: collate_tensor_fn}
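For a quick local spot-check of the repaired snippets, a hedged sketch using the `xdoctest` package that the workflow change above installs; the exact entry point is taken from xdoctest's documented API and is an assumption here, not part of this patch, and `./test/run_doctests.sh` remains the canonical runner:
```python
# Illustration only: spot-check the doctests of one affected module.
# Assumes xdoctest>=1.1.0 is installed, as the CI step above now ensures.
import xdoctest

xdoctest.doctest_module("torch.compiler", command="all")
```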
|
2.41.0
|
a6dfbe4806b361c43210dfd56db64c4097c66bb
|
Fri, 26 Apr 2024 19:58:56 +0000
|
[PATCH 0738/1000] Add label to label config to auto apply labels based on other labels (#125042)
|
* Implemented in https://github.com/pytorch/test-infra/pull/5127 * Tested in malfet/deleteme: https://github.com/malfet/deleteme/issues/85 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125042 Approved by: https://github.com/huydhn
|
diff --git a/.github/label_to_label.yml b/.github/label_to_label.yml
new file mode 100644
index 0000000000..e6c66a5e56
--- /dev/null
+++ b/.github/label_to_label.yml
@@ -0,0 +1,13 @@
+# Use this to auto apply labels based on other labels. Applies to both PRs and
+# issues. Currently only supports any and all
+- any:
+ - "module: custom operators"
+ - "module: aotdispatch"
+ then:
+ - "module: pt2-dispatcher"
+- any:
+ - "module: dynamo"
+ - "module: pt2-dispatcher"
+ - "module: inductor"
+ then:
+ - "oncall: pt2"
diff --git a/.github/pytorch-probot.yml b/.github/pytorch-probot.yml
index 3f9fd75485..c7b554ce44 100644
--- a/.github/pytorch-probot.yml
+++ b/.github/pytorch-probot.yml
@@ -24,3 +24,4 @@ retryable_workflows:
- linux-binary
- windows-binary
labeler_config: labeler.yml
+label_to_label_config: label_to_label.yml
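To illustrate the `any` / `all` semantics this config encodes, here is a minimal sketch; the real rule evaluation lives in pytorch/test-infra (linked in the commit message), and every name below is made up for illustration:
```python
# Illustration of .github/label_to_label.yml semantics; NOT the test-infra code.
from typing import Dict, Iterable, List

def labels_to_add(rules: List[Dict], existing: Iterable[str]) -> List[str]:
    existing = set(existing)
    added: List[str] = []
    for rule in rules:
        if "any" in rule:
            matched = any(label in existing for label in rule["any"])
        else:  # the only other supported key is "all"
            matched = all(label in existing for label in rule["all"])
        if matched:
            added.extend(label for label in rule["then"] if label not in existing)
    return added

rules = [
    {"any": ["module: custom operators", "module: aotdispatch"],
     "then": ["module: pt2-dispatcher"]},
    {"any": ["module: dynamo", "module: pt2-dispatcher", "module: inductor"],
     "then": ["oncall: pt2"]},
]
print(labels_to_add(rules, {"module: dynamo"}))  # ['oncall: pt2']
```
This sketch does a single pass; whether the real bot re-evaluates rules after applying new labels (so that "module: custom operators" would eventually also yield "oncall: pt2") is determined by the test-infra implementation, not here.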
|
2.41.0
|
d06c73cbd398811efc4afe85ee29dee64ebfd45
|
Thu, 25 Apr 2024 14:23:58 +0200
|
[PATCH 0739/1000] [Inductor Cutlass backend] Improved GEMM template (#124577)
|
Improves the Cutlass backend GEMM template: * Adds code that makes it possible to create stand-alone test runners for Cutlass GEMM kernels, which allows (manual) debugging of, for example, CUDA IMA errors or similar problems that occur in practice. Includes some utility code and tests to actually compile and run these standalone tests. * Cleans up the GEMM template code through various refactorings. * Eliminates code sections and options that are unnecessary now that epilogue fusions are being removed. * Limits the scope of a workaround for (flaky) Cutlass issues with bias broadcasting to the necessary cases. * Puts some CPU runtime checks into #if / #endif blocks, so that it's possible to compile CUTLASS kernels with lower CPU overhead. * Adds documentation comments. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124577 Approved by: https://github.com/jansel ghstack dependencies: #124576
|
diff --git a/torch/_inductor/codegen/cuda/cuda_kernel.py b/torch/_inductor/codegen/cuda/cuda_kernel.py
index 1bb536f289..4aa095a217 100644
--- a/torch/_inductor/codegen/cuda/cuda_kernel.py
+++ b/torch/_inductor/codegen/cuda/cuda_kernel.py
@@ -189,6 +189,23 @@ class CUDATemplateKernel(CUDAKernel):
return "void"
return DTYPE_TO_CPP.get(node.get_layout().dtype)
+ def cutlass_dtype(self, node: IRNode, default_dtype="void") -> Optional[str]:
+ # Helper method, called into from CUTLASSGemmTemplate
+ if node is None:
+ return default_dtype
+ from torch._inductor.codegen.cuda.cuda_template import CUTLASSTemplate
+
+ return CUTLASSTemplate._DTYPE_TO_CUTLASS[node.get_layout().dtype]
+
+ def max_valid_index(self, node: IRNode, default=-1):
+ # Helper method, called into from CUTLASSGemmTemplate
+ if node is None:
+ return default
+ max_valid_offset = 0
+ for i in range(len(node.get_size())):
+ max_valid_offset += (node.get_size()[i] - 1) * node.get_stride()[i]
+ return max_valid_offset
+
def offset(self, node: IRNode) -> str:
"""
Generates code which represents offset of a given node.
diff --git a/torch/_inductor/codegen/cuda/cutlass_utils.py b/torch/_inductor/codegen/cuda/cutlass_utils.py
index 40daf6da1c..ff60525548 100644
--- a/torch/_inductor/codegen/cuda/cutlass_utils.py
+++ b/torch/_inductor/codegen/cuda/cutlass_utils.py
@@ -2,7 +2,9 @@ import functools
import logging
import os
import sys
+
from dataclasses import dataclass
+from pathlib import Path
from typing import Any, List, Optional
import sympy
@@ -179,6 +181,23 @@ def gen_ops() -> List[Any]:
return _gen_ops_cached(arch, version)
+def torch_dtype_to_cutlass_type(
+ torch_dtype: torch.dtype,
+) -> "cutlass_library.library.DataType": # type: ignore[name-defined] # noqa: F821
+ # Import cutlass python scripts.
+ assert try_import_cutlass()
+ import cutlass_library # type: ignore[import]
+
+ if torch_dtype == torch.float:
+ return cutlass_library.library.DataType.f32
+ elif torch_dtype == torch.half:
+ return cutlass_library.library.DataType.f16
+ elif torch_dtype == torch.bfloat16:
+ return cutlass_library.library.DataType.bf16
+ else:
+ raise NotImplementedError(f"Unsupported data type: {torch_dtype=}")
+
+
def dtype_match(
torch_dtype: Optional[torch.dtype],
cutlass_dtype: "cutlass_library.library.DataType", # type: ignore[name-defined] # noqa: F821
@@ -281,3 +300,43 @@ def get_max_alignment(inductor_layout: Layout) -> int:
return alignment
return 1
+
+
+class CUDACompileSourceCapturingContext:
+ # Helper class for Benchmarking and Testing CUTLASS Kernels in isolation.
+ # Can be used to capture the sourcecode passed to CUDACodeCache.compile
+
+ def __init__(self):
+ self.sources = []
+ self._compile_patch = None
+
+ def __enter__(self, *args, **kwargs):
+ import unittest.mock as mock
+
+ import torch._inductor.codecache
+
+ _compile_method_orig = torch._inductor.codecache.CUDACodeCache.compile
+
+ def my_compile(source_code, dst_file_ext):
+ self.sources.append(source_code)
+ return _compile_method_orig(source_code, dst_file_ext)
+
+ self._compile_patch = mock.patch(
+ "torch._inductor.codecache.CUDACodeCache.compile", my_compile
+ )
+ return self._compile_patch.__enter__(*args, **kwargs) # type: ignore[union-attr]
+
+ def __exit__(self, *args, **kwargs):
+ return self._compile_patch.__exit__(*args, **kwargs) # type: ignore[union-attr]
+
+
+def cuda_standalone_runner_compile_command(srcpath: Path, exepath: Path):
+ # returns command string to compile a (captured) CUDA GEMM Kernel source to a standalone executable that's ready to run
+ # Passes the correct preprocessor define to nvcc to ensure the standalone runner is enabled.
+ from torch._inductor.codecache import cuda_compile_command
+
+ extra_args = ["-DGENERATE_STANDALONE_RUNNER=1", "-DCUTLASS_DEBUG_TRACE_LEVEL=1"]
+ compile_command = cuda_compile_command(
+ [str(srcpath)], str(exepath), "exe", extra_args=extra_args
+ )
+ return compile_command
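A hedged usage sketch of the two debugging helpers added above: capture the CUDA source that `CUDACodeCache.compile` receives, dump it to a file, and print the nvcc command for a standalone runner. The trigger function and the output paths are placeholders chosen for illustration, not part of the patch:
```python
# Illustration only; assumes a CUDA build with the CUTLASS backend enabled.
from pathlib import Path
from torch._inductor.codegen.cuda.cutlass_utils import (
    CUDACompileSourceCapturingContext,
    cuda_standalone_runner_compile_command,
)

def run_my_cutlass_autotuning():
    # Placeholder: in practice, run any workload here that makes Inductor
    # compile a CUTLASS GEMM (e.g. torch.compile with max-autotune and the
    # CUTLASS GEMM backend enabled).
    ...

capture = CUDACompileSourceCapturingContext()
with capture:
    run_my_cutlass_autotuning()

src = Path("/tmp/gemm_kernel.cu")
src.write_text(capture.sources[-1])       # last kernel source seen by the compiler
exe = Path("/tmp/gemm_kernel_standalone")
print(cuda_standalone_runner_compile_command(src, exe))  # nvcc command to run manually
```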
diff --git a/torch/_inductor/codegen/cuda/gemm_template.py b/torch/_inductor/codegen/cuda/gemm_template.py
index 5112d11b56..89c326cef5 100644
--- a/torch/_inductor/codegen/cuda/gemm_template.py
+++ b/torch/_inductor/codegen/cuda/gemm_template.py
@@ -1,23 +1,29 @@
import copy
+import enum
import logging
import re
-from typing import cast, Dict, List, Optional, Tuple, Union
+from typing import Dict, List, Optional, Tuple, Union
from ... import ir
from ...config import cuda as inductor_cuda_config
-from ...ir import Buffer, ChoiceCaller, CUDATemplateBuffer, FixedLayout, IRNode, Layout
+from ...ir import (
+ Buffer,
+ ChoiceCaller,
+ CUDATemplateBuffer,
+ FixedLayout,
+ IRNode,
+ Layout,
+ ReinterpretView,
+)
from ..common import IndentedBuffer
from . import cutlass_utils
from .cuda_kernel import CUDATemplateKernel
from .cuda_template import CUTLASSTemplate
-from .cutlass_epilogue_gen import (
- CutlassEVTEpilogueArgumentFormatter,
- CutlassEVTEpilogueTypeFormatter,
-)
log = logging.getLogger(__name__)
+# Jinja template for GEMM Kernel, used by the CUTLASSGemmTemplate class below.
GEMM_TEMPLATE = r"""
{{template.header().getvalue()}}
{{template.globals().getvalue()}}
@@ -25,18 +31,19 @@ GEMM_TEMPLATE = r"""
// When workspace_size is not a nullptr, populates requested workspace_size and returns.
// Otherwise, computes the Gemm kernel using the given workspace ptr.
extern "C" {
-{{kernel.def_kernel(inputs=[X, W, Bias], outputs=[Y], names_str="X, W, Bias, Y", input_reorder=input_reorder)}} {
+{{kernel_call_signature}} {
try {
- {{kernel.check_not_null(X)}}
- {{kernel.check_not_null(W)}}
- {{kernel.check_not_null(Bias)}}
- {{kernel.check_not_null(Y)}}
int64_t B = {{kernel.size(Y, 0, -3, default_value=1)}};
int64_t M = {{kernel.size(X, -2)}};
int64_t K = {{kernel.size(X, -1)}};
int64_t N = {{kernel.size(W, -1)}};
using ElementComputeEpilogue = {{instance_type}}::ElementAccumulator;
using coord_t = cutlass::gemm::GemmCoord::Index;
+ static cutlass::KernelHardwareInfo hw_info;
+ if (hw_info.sm_count == 0) {
+ hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(0);
+ CUTLASS_TRACE_HOST("Query result for SM count per device: " << hw_info.sm_count);
+ }
{{instance_type}}::Arguments arguments;
{{template.render_gemm_arguments(argument_template, epilogue_template, should_swap_xw,
X, W, Bias, Y, alpha, beta, kernel, epilogue_args)}}
@@ -45,10 +52,26 @@ extern "C" {
*workspace_size = gemm_op.get_workspace_size(arguments);
return 0;
}
+ // check for null pointers after workspace size, since querying workspace size doesn't require valid data pointers
+#ifndef CUTLASS_BACKEND_DISABLE_CHECKS
+ {{kernel.check_not_null(X)}}
+ {{kernel.check_not_null(W)}}
+ {{kernel.check_not_null(Bias)}}
+ {{kernel.check_not_null(Y)}}
{
auto status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
}
+#endif
+#ifdef CUTLASS_DEBUG_TRACE_LEVEL
+#if CUTLASS_DEBUG_TRACE_LEVEL == 1
+ {
+ // Print the maximum number of active blocks per SM for the kernel if CUTLASS_DEBUG_TRACE_LEVEL == 1
+ // we don't need a print statement, it's happening inside the function.
+ gemm_op.maximum_active_blocks();
+ }
+#endif
+#endif
{
auto status = gemm_op.initialize(arguments, workspace, stream);
CUTLASS_CHECK(status);
@@ -70,42 +93,7 @@ extern "C" {
}
"""
-
-GEMM_ARGS_CUTLASS_2X = r"""
- int64_t batch_stride_x = {{kernel.stride(X, -3)}};
- int64_t row_stride_x = {{kernel.row_or_column_stride(X)}};
- int64_t batch_stride_w = {{kernel.stride(W, -3)}};
- int64_t row_stride_w = {{kernel.row_or_column_stride(W)}};
- int64_t batch_stride_bias = {{kernel.stride(Bias, -3)}};
- int64_t row_stride_bias = {{kernel.row_or_column_stride(Bias)}};
- int64_t batch_stride_y = {{kernel.stride(Y, -3)}};
- int64_t row_stride_y = {{kernel.row_or_column_stride(Y)}};
- // Initialize GemmUniversalInstance arguments.
- arguments = {
- {{template.gemm_mode()}}, // GemmUniversalMode mode
- {
- static_cast<coord_t>(M),
- static_cast<coord_t>(N),
- static_cast<coord_t>(K)
- }, // GemmCoord problem_size
- {{split_k if split_k > 1 else 'B'}}, // int batch_count
- {ElementComputeEpilogue({{alpha}}), ElementComputeEpilogue({{beta}})}, // typename EpilogueOutputOp::Params epilogue
- {{template.cutlass_type_cast(X, kernel.ptr(X))}}, // void const * ptr_A
- {{template.cutlass_type_cast(W, kernel.ptr(W))}}, // void const * ptr_B
- {{template.cutlass_type_cast(Bias, kernel.ptr(Bias))}}, // void const * ptr_C
- {{template.cutlass_type_cast(Y, kernel.ptr(Y))}}, // void * ptr_D
- batch_stride_x, // int64_t batch_stride_A
- batch_stride_w, // int64_t batch_stride_B
- batch_stride_bias, // int64_t batch_stride_C
- batch_stride_y, // int64_t batch_stride_D
- row_stride_x, // typename LayoutA::Stride::LongIndex lda
- row_stride_w, // typename LayoutB::Stride::LongIndex ldb
- row_stride_bias, // typename LayoutC::Stride::LongIndex ldc
- row_stride_y, // typename LayoutC::Stride::LongIndex ldd
- };
-"""
-
-
+# Jinja template for Cutlass 3.x GEMM Kernel arguments, used by the CUTLASSGemmTemplate class below.
GEMM_ARGS_CUTLASS_3X = r"""
// Initialize GemmUniversal3xInstance arguments.
arguments = {
@@ -130,10 +118,13 @@ GEMM_ARGS_CUTLASS_3X = r"""
{{template.cute_int(kernel.stride(W, -3), "batch_stride_w")}}
}, // StrideB dB
}, // MainloopArguments mainloop
- {{epilogue_arguments}}
+ {{epilogue_arguments}},
+ hw_info
};
"""
+# Jinja template for Cutlass 3.x GEMM Kernel arguments if epilogue fusion is applied,
+# used by the CUTLASSGemmTemplate class below.
GEMM_ARGS_CUTLASS_3X_EPILOGUE = r"""
// see https://tinyurl.com/4rk89z48
{
@@ -153,10 +144,92 @@ GEMM_ARGS_CUTLASS_3X_EPILOGUE = r"""
}, // EpilogueArguments epilogue
"""
+# Additional includes which are necessary if the standalone test / debug runner is generated as well
+GEMM_STANDALONE_RUNNER_ADDITIONAL_INCLUDES = r"""
+#ifdef GENERATE_STANDALONE_RUNNER
+#include "cutlass/util/distribution.h"
+#include "cutlass/util/host_tensor.h"
+#include "cutlass/util/packed_stride.hpp"
+#include "cutlass/util/tensor_view_io.h"
+#include "cutlass/util/reference/device/gemm_complex.h"
+#include "cutlass/util/reference/device/tensor_compare.h"
+#include "cutlass/util/reference/device/tensor_fill.h"
+#include <iostream>
+#endif
+"""
+
+# Jinja template for the standalone runner that may be generated as part of the code.
+GEMM_STANDALONE_RUNNER_TEMPLATE = r"""
+#ifdef GENERATE_STANDALONE_RUNNER
+/// Helper to initialize a block of device data
+template <class Element>
+bool initialize_block(
+ cutlass::DeviceAllocation<Element>& block,
+ uint64_t seed, float max=1.0, float min=-1.0) {
+ if (block.size()<=0) return false;
+ Element scope_max(static_cast<Element>(max)), scope_min(static_cast<Element>(min));
+ cutlass::reference::device::BlockFillRandomUniform(
+ block.get(), block.size(), seed, scope_max, scope_min, 0);
+
+ return true;
+}
+
+extern "C" int run_standalone(uint64_t seed, int repetitions) {
+ std::cout << "Starting GEMM Standalone test run with seed " << seed << std::endl;
+ size_t workspace_size = 0;
+ size_t* workspace_size_ptr = &workspace_size;
+
+ using ElementA = {{kernel.cutlass_dtype(X)}};
+ using ElementB = {{kernel.cutlass_dtype(W)}};
+ using ElementC = {{kernel.cutlass_dtype(Bias, default_dtype='uint8_t')}}; // may not be void
+ using ElementD = {{kernel.cutlass_dtype(Y)}};
+
+ cutlass::DeviceAllocation<ElementA> X_data({{kernel.max_valid_index(X)+1}});
+ initialize_block(X_data, seed++);
+ cutlass::DeviceAllocation<ElementB> W_data({{kernel.max_valid_index(W)+1}});
+ initialize_block(W_data, seed++);
+ cutlass::DeviceAllocation<ElementC> Bias_data({{kernel.max_valid_index(Bias)+1}});
+ initialize_block(Bias_data, seed++);
+ cutlass::DeviceAllocation<ElementD> Y_data({{kernel.max_valid_index(Y)+1}});
+
+ cutlass::DeviceAllocation<uint8_t> workspace_data;
+ // Call once with workspace_size_ptr set to get workspace size
+
+ std::cout << "Calling once to get workspace size" << std::endl;
+ {{test_call_statement}};
+  // Allocate workspace if necessary
+ if (workspace_size > 0) {
+ workspace_data.reset(workspace_size);
+ std::cout << "Allocated workspace size of " << workspace_size << " bytes" << std::endl;
+ }
+ std::cout << "Calling Kernel as {{test_call_statement}};" << std::endl;
+ workspace_size_ptr = nullptr;
+ for (int i=0; i<repetitions; i++) {
+ {{test_call_statement}};
+ }
+ cudaError_t result = cudaDeviceSynchronize();
+ if (result != cudaSuccess) {
+ std::cerr << "Device synchronize failed with error "
+ << cudaGetErrorString(result) << std::endl;
+ return result;
+ }
+ return 0;
+}
+
+int main(int argc, char** argv) {
+ // warmup
+ run_standalone(1, 2);
+ // repeat
+ return run_standalone(2, 10);
+}
+
+#endif
+""" # noqa: B950
+
class CUTLASSGemmTemplate(CUTLASSTemplate):
"""
- CUTLASS GEMM template, which is used to generate CUTLASS GEMM kernels
+ CUTLASS GEMM Template, which is used to generate CUTLASS GEMM kernels
including those which allow flexible fusions with epilogues.
"""
@@ -167,23 +240,97 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
alpha: float,
beta: float,
input_reorder: Optional[List[int]] = None,
- can_fuse_epilogue: Optional[bool] = None,
):
"""
Args:
- input_nodes: input nodes of the kernel
- layout: layout of the output node
- alpha: alpha value of the GEMM operation
- beta: beta value of the GEMM operation
- input_reorder: reorder of the input nodes
- can_fuse_epilogue: If set to True, will only list and use operators capable of flexible epilogue fusions.
- If False, it will not use those. If None, both may be listed, but it will not allow fusions.
- Defaults to None
+ input_nodes (List[Buffer]): List of input nodes of the GEMM kernel.
+ layout (Layout): Layout type of the resulting output node.
+ alpha (float): The scaling factor for the product of the inputs in the GEMM operation.
+ beta (float): The scaling factor applied to the output matrix.
+ input_reorder (Optional[List[int]]): Specifies the reordering of the input nodes. If not provided,
+ no reordering is performed. Defaults to None.
"""
super().__init__("cutlass_gemm", input_nodes, layout, input_reorder)
self.alpha = alpha
self.beta = beta
- self.can_fuse_epilogue = can_fuse_epilogue
+ assert len(input_nodes) == 2 or len(input_nodes) == 3
+ assert self._are_inputs_layout_compatible(
+ [node.get_layout() for node in input_nodes]
+ )
+
+ def _are_inputs_layout_compatible(self, layouts: List[Layout]) -> bool:
+ """
+ Evaluates whether input layouts are compatible for General Matrix Multiply (GEMM).
+
+ This function checks compatibility of A, B, and possibly C operand layouts for
+ a General Matrix Multiply (GEMM) operation, expressed as 'alpha * matmul(A, B) + beta * C'.
+ It verifies requirements such as matching data types, minimum rank, and suitability
+ for broadcasting, as defined by PyTorch operations like `torch.matmul`, `torch.aten.mm`,
+ `addmm`, `bmm`, `baddbmm`, etc.
+
+ Args:
+ layouts (List[Layout]): List containing 2 or 3 Layout objects representing
+ the input matrices A, B, and possibly C.
+
+ Returns:
+ bool: True if layouts are GEMM compatible, otherwise False.
+ """
+ assert len(layouts) == 2 or len(layouts) == 3
+ # Check if A and B are compatible
+ A_layout, B_layout = layouts[:2]
+ if len(A_layout.size) < 1:
+ return False
+ if len(B_layout.size) < 1:
+ return False
+ A_size = [int(i) for i in A_layout.size]
+ B_size = [int(i) for i in B_layout.size]
+ if len(A_size) < 2:
+ A_size.insert(0, 1)
+ if len(B_size) < 2:
+            B_size.insert(1, 1)
+ # Are batch dims broadcastable?
+ while len(A_size) < len(B_size):
+ A_size.insert(0, 1)
+ while len(B_size) < len(A_size):
+ B_size.insert(0, 1)
+ K = max(A_size[-1], B_size[-2])
+ M = A_size[-2]
+ N = B_size[-1]
+ if K != A_size[-1] and A_size[-1] != 1:
+ return False
+        if K != B_size[-2] and B_size[-2] != 1:
+ return False
+ # check batch dim broadcastable
+ for i in range(len(A_size) - 2):
+ if A_size[i] != B_size[i] and A_size[i] != 1 and B_size[i] != 1:
+ return False
+ if len(layouts) == 3:
+ C_layout = layouts[2]
+ C_size = [int(i) for i in C_layout.size]
+ while len(C_size) < len(A_size):
+ C_size.insert(0, 1)
+ # check batch dims
+ for i in range(len(A_size) - 2):
+ bd = max(A_size[i], B_size[i])
+ if bd != C_size[i] and C_size[i] != 1:
+ return False
+ if len(C_size) > len(A_size):
+ # This may happen if the last elements of C are contiguous and
+ # their multiplied size equals the last dim size of B
+ if M != C_size[len(A_size) - 2] and C_size[len(A_size) - 2] != 1:
+ return False
+ remaining_size = 1
+ for i in range(len(A_size) - 1, len(C_size)):
+ remaining_size *= C_size[i]
+ if N != remaining_size and remaining_size != 1:
+ return False
+ return True
+ assert len(C_size) == len(A_size)
+ if M != C_size[-2] and C_size[-2] != 1:
+ return False
+ if N != C_size[-1] and C_size[-1] != 1:
+ return False
+ return True
@staticmethod
def add_cutlass_gemm_choices(
@@ -236,6 +383,13 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
)
def header(self) -> IndentedBuffer:
+ """
+ Returns a buffer containing CUDA C++ code for the header section of the CUTLASS GEMM template.
+ This section primarily includes the necessary header files.
+
+ Returns:
+ IndentedBuffer: An instance of IndentedBuffer that contains the generated CUDA C++ header code.
+ """
res = super().header()
res.splice(
"""
@@ -247,6 +401,7 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
+ #include "cutlass/epilogue/thread/activation.h"
#include "cutlass/gemm/dispatch_policy.hpp"
#include "cutlass/gemm/kernel/tile_scheduler.hpp"
#include "cutlass/util/distribution.h"
@@ -254,10 +409,23 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
#include "cutlass/util/tensor_view_io.h"
"""
)
+ if inductor_cuda_config.generate_test_runner:
+ res.splice(GEMM_STANDALONE_RUNNER_ADDITIONAL_INCLUDES)
return res
@staticmethod
- def cutlass_layout(torch_layout) -> "Optional[cutlass_lib.LayoutType]": # type: ignore[name-defined] # noqa: F821
+ def cutlass_layout(torch_layout: ir.Layout) -> "Optional[cutlass_lib.LayoutType]": # type: ignore[name-defined] # noqa: F821
+ """
+ Converts an ir.Layout instance into the corresponding cutlass_library.LayoutType enum value
+        (RowMajor, ColumnMajor, or None if no matching value is found).
+
+ Args:
+ torch_layout (ir.Layout): The layout that needs to be looked up.
+
+ Returns:
+ cutlass_lib.LayoutType: The converted layout corresponding to the `torch_layout` or None if no matching
+ value is found.
+ """
assert cutlass_utils.try_import_cutlass()
import cutlass_library.library as cutlass_lib
@@ -272,6 +440,8 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
def flip_cutlass_layout(
cutlass_layout: "cutlass_lib.LayoutType", # type: ignore[name-defined] # noqa: F821
) -> "cutlass_lib.LayoutType": # type: ignore[name-defined] # noqa: F821
+ """Helper method: Flips a given cutlass layout (cutlass_lib.LayoutType) from RowMajor
+ to ColumnMajor or vice versa"""
assert cutlass_utils.try_import_cutlass()
import cutlass_library.library as cutlass_lib
@@ -281,11 +451,28 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
return cutlass_lib.LayoutType.RowMajor
@staticmethod
- def layout_match(torch_layout, cutlass_layout) -> bool:
+ def layout_match(
+ torch_layout: ir.Layout,
+ cutlass_layout: "cutlass_lib.LayoutType", # type: ignore[name-defined] # noqa: F821
+ ) -> bool:
+ """Helper Method: Determines whether a given torch layout matches a given Cutlass layout"""
return CUTLASSGemmTemplate.cutlass_layout(torch_layout) == cutlass_layout
@staticmethod
def set_alignment(torch_layout, op_element) -> bool:
+ """
+ Helper method to update the alignment of a given CUTLASS GEMM op operand's element.
+
+ This method modifies the alignment of the given Cutlass GEMM op operand's element to match the
+ layout of the corresponding ir.Buffer node.
+
+ Args:
+ torch_layout: The layout of the corresponding ir.Buffer node.
+ op_element: The Cutlass GEMM op operand's element whose alignment is to be updated.
+
+ Returns:
+ bool: True if the alignment was successfully updated, False otherwise.
+ """
alignment = cutlass_utils.get_max_alignment(torch_layout)
cuda_arch = cutlass_utils.get_cuda_arch()
if cuda_arch and int(cuda_arch) >= 90 and alignment < op_element.alignment:
@@ -295,7 +482,10 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
return True
@staticmethod
- def has_tma_epilogue(op) -> bool:
+ def has_tma_epilogue( # noqa: F821 # type: ignore[arg-type,name-defined]
+ op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined,arg-type] # noqa: F821
+ ) -> bool: # type: ignore[name-defined]
+ """Helper method: Determine whether a given Cutlass GEMM op has a TMA Epilogue"""
assert cutlass_utils.try_import_cutlass()
import cutlass_library.library as cutlass_lib
@@ -305,76 +495,37 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
result = epilogue_schedule_str.lower().startswith("tma")
return result
- @staticmethod
- def supports_evt(op: "cutlass_library.gemm_op.GemmOperation") -> bool: # type: ignore[name-defined] # noqa: F821
- """
- returns True if the op is capable of flexible epilogue fusions
- using epilogue visitor trees.
-
- See https://github.com/NVIDIA/cutlass/blob/e01b9b5029b7caca5a43c29f7d2714d7cf1dcae8/examples/49_hopper_gemm_with_collective_builder/49_collective_builder.cu#L283-L285 # noqa: B950
- """
- assert cutlass_utils.try_import_cutlass()
- import cutlass_library.library as cutlass_lib
-
- if op.gemm_kind != cutlass_lib.GemmKind.Universal3x:
- return False
- if op.epilogue_schedule not in (
- cutlass_lib.EpilogueScheduleType.TmaWarpSpecialized,
- cutlass_lib.EpilogueScheduleType.TmaWarpSpecializedCooperative,
- ):
- return False
-
- return True
-
- def render_evt_epilogue_declaration(
- self,
- template_output_node_name: str,
- evt_type_name: str,
- epilogue_nodes: List[IRNode],
- ) -> str:
- """Generates the epilogue for the EVT epilogue fusion"""
- return CutlassEVTEpilogueTypeFormatter.ir_to_evt_string(
- template_output_node_name, evt_type_name, epilogue_nodes
- )
-
def define_gemm_instance(
self,
op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821
- output_buffer_name: str,
- epilogue_nodes: Optional[List[IRNode]] = None,
) -> Tuple[str, str]:
+ """Defines and renders the Cutlass / CUDA C++ code for a given GEMM operation instance.
+
+        This function uses the Cutlass library emitters to generate the C++ source for the GEMM operation
+        instance and to extract the name of the emitted instance type, both of which are required by the
+        surrounding code generation.
+
+ Args:
+ op (cutlass_library.gemm_op.GemmOperation): This is the core GEMM operation that we are defining and rendering.
+
+ Returns:
+ Tuple[str, str]: A tuple where the first part is a string that constitutes the defined GEMM operation in C++
+ code (render) and the second part is the string that specifies the operation type.
+ """
assert cutlass_utils.try_import_cutlass()
import cutlass_library.gemm_operation as cutlass_gemm_op
import cutlass_library.library as cutlass_lib
- from torch._inductor.codegen.cuda.cutlass_lib_extensions.gemm_operation_extensions import (
- EmitGemmUniversal3xInstanceWithEVT,
- )
+ emitter = cutlass_gemm_op.EmitGemmUniversal3xInstance()
+ if not hasattr(op, "epilogue_functor") or not isinstance(
+ op.epilogue_functor, enum.Enum
+ ):
+ op = copy.deepcopy(op)
+ op.epilogue_functor = cutlass_lib.EpilogueFunctor.LinearCombination
+ op_def = emitter.emit(op)
+ pattern = re.compile(r"\s*struct\s(.*?)\s:")
+ decl = [line for line in op_def.split("\n") if "struct " in line][-1]
- if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
- if epilogue_nodes is not None and len(epilogue_nodes) > 0:
- emitter = EmitGemmUniversal3xInstanceWithEVT()
- op.epilogue_functor = lambda epilogue_functor_type_name: self.render_evt_epilogue_declaration(
- output_buffer_name, epilogue_functor_type_name, epilogue_nodes
- )
- else:
- emitter = cutlass_gemm_op.EmitGemmUniversal3xInstance()
- op_def = emitter.emit(op)
- pattern = re.compile(r"\s*struct\s(.*?)\s:")
- decl = [line for line in op_def.split("\n") if "struct " in line][-1]
- else:
- if epilogue_nodes is not None and len(epilogue_nodes) > 0:
- raise RuntimeError(
- "EVT epilogue fusion is not supported for Cutlass 2.x ops."
- )
- emitter = cutlass_gemm_op.EmitGemmInstance()
- op_def = emitter.emit(op)
- op_def = op_def.replace(
- "cutlass::gemm::device::Gemm", "cutlass::gemm::device::GemmUniversal"
- )
- op_def = op_def.replace("false,", "")
- pattern = re.compile(r"\s*using\s(.*?)\s=")
- decl = op_def.split("\n")[2]
match = pattern.match(decl)
if match is None:
raise RuntimeError("Invalid Gemm config: \n" + op_def)
@@ -387,24 +538,37 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
@staticmethod
def should_swap_XW(
bias: IRNode,
- beta: float,
) -> bool:
- return True
-
- # TODO(ipiszy): Check whether it's necessary to swap X/W.
- # strides = bias.get_stride()
- # if strides[-1] != 1:
- # return True
- # for stride in strides[:-1]:
- # if stride != 0:
- # return True
- # return False
+ """
+ Helper method to determine whether we should do an explicit transpose by switching the order of the
+        matmul operands. This might be necessary when we can't otherwise arrive at the right memory
+ layout for the given Bias operand.
+
+ Note: This method is a workaround for CUDA Errors that seemingly non-deterministically
+ occurred in practice in some CUTLASS GEMM Kernels with Linear epilogues that have a bias term.
+        It might be worth checking on newer Cutlass releases whether returning True in certain cases
+        is still required or whether it becomes unnecessary.
+ """
+ # If bias is row major, swap all M and N dimensions
+ if (
+ bias is not None
+ and len(bias.get_stride()) >= 2
+ and bias.get_stride()[-1] in (0, 1)
+ ):
+ log.debug("GEMM Layout swapped X and W -> explicit transpose")
+ return True
+ return False
@staticmethod
def swap_XW(
op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821
) -> "cutlass_library.gemm_op.GemmOperation": # type: ignore[name-defined] # noqa: F821
- # Swap X and W in GemmOperation.
+ """
+        Swap operands X and W (aka operands A and B) of the GEMM operation. This
+ requires transposing the operands, which is done by swapping the strides.
+ Note that we don't change the apparent external layout, just the operand layout.
+        This is intentional.
+ """
new_op = copy.deepcopy(op)
new_op.A.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.A.layout)
new_op.B.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.B.layout)
@@ -413,6 +577,53 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
new_op.D.layout = CUTLASSGemmTemplate.flip_cutlass_layout(new_op.D.layout)
return new_op
+ def fix_op_layout(
+ self,
+ op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821
+ X: Buffer,
+ W: Buffer,
+ Bias: Optional[Buffer],
+ Y: Union[Buffer, ReinterpretView],
+ ) -> "cutlass_library.gemm_op.GemmOperation": # type: ignore[name-defined] # noqa: F821
+ # This is a workaround to deal with cases where the input layouts have changed
+        # between autotuning and rendering. This happens if the input layouts
+        # are FlexibleLayout instances. In this case, we need to update the
+ # op's input layouts. It is a hack, because now the op
+ # we benchmarked is not the same as the op we render,
+ # but there is no simple way to fix this in the autotuner, since that would
+ # potentially disable other optimizations.
+ a_layout = X.get_layout()
+ b_layout = W.get_layout()
+ c_layout = Bias.get_layout() if Bias is not None else None
+
+ d_layout = copy.deepcopy(Y.get_layout())
+ match_list = [
+ CUTLASSGemmTemplate.layout_match(buf.get_layout(), op_layout)
+ for buf, op_layout in zip(
+ (X, W, Bias, Y),
+ (op.A.layout, op.B.layout, op.C.layout, op.D.layout),
+ )
+ if buf is not None
+ ]
+ all_match = all(match_list)
+ if all_match:
+ return op
+ log.warning(
+ f"Cutlass GEMM Layout change: Input and/or output layouts have changed between autotuning/retuning and call to render on {self}. Applying workaround. This can lead to suboptimal performance. Match List: {match_list}" # noqa: G004, B950
+ )
+ new_op = copy.deepcopy(op)
+
+ if a_layout is not None:
+ new_op.A.layout = CUTLASSGemmTemplate.cutlass_layout(a_layout)
+ if b_layout is not None:
+ new_op.B.layout = CUTLASSGemmTemplate.cutlass_layout(b_layout)
+ if c_layout is not None:
+ new_op.C.layout = CUTLASSGemmTemplate.cutlass_layout(c_layout)
+ new_op.C.element = cutlass_utils.torch_dtype_to_cutlass_type(c_layout.dtype)
+ if d_layout is not None:
+ new_op.D.layout = CUTLASSGemmTemplate.cutlass_layout(d_layout)
+ return new_op
+
def filter_op(
self,
op: "cutlass_library.gemm_op.GemmOperation", # type: ignore[name-defined] # noqa: F821
@@ -514,22 +725,25 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
op.C.element = cutlass_lib.DataType.void
else:
op.C.layout = op.D.layout
- supports_evt: bool = self.supports_evt(op)
- if (self.can_fuse_epilogue is not None) and (
- self.can_fuse_epilogue != supports_evt
- ):
- return None
return op
def gen_ops(self) -> "List[cutlass_gemm_op.GemmOperation]": # type: ignore[name-defined] # noqa: F821
+ """
+ Creates a list of Cutlass GemmOperation instances that match the operation this template is designed to represent.
+ The matching is carried out with respect to the input and output specifications of the operation.
+
+ No function arguments.
+
+ Returns:
+ List[cutlass_gemm_op.GemmOperation]: A list of GemmOperation instances that are compatible with the
+ operation requirements of this template.
+ """
assert cutlass_utils.try_import_cutlass()
import cutlass_library.gemm_operation as cutlass_gemm_op
import cutlass_library.library as cutlass_lib
ops = cutlass_utils.gen_ops()[cutlass_lib.OperationKind.Gemm]
res: Dict[str, cutlass_gemm_op.GemmOperation] = dict()
- num_3x_ops = 0
- num_2x_ops = 0
for op_dict in ops.values():
for op_list in op_dict.values():
for op in op_list:
@@ -540,21 +754,19 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
and res.get(filter_res.configuration_name(), None) is None
):
res[filter_res.configuration_name()] = filter_res
- for op in res.values():
- if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
- num_3x_ops += 1
- else:
- num_2x_ops += 1
- log.debug(
- "Got cutlass configs: total number of ops: %d, "
- "total number of 3x ops: %d, total number of 2x ops: %d",
- len(res),
- num_3x_ops,
- num_2x_ops,
- )
+ log.debug("Got cutlass configs: total number of ops: %d, ", len(res))
return list(res.values())[: inductor_cuda_config.cutlass_max_profiling_configs]
def gemm_mode(self) -> str:
+ """
+ Returns a Cutlass GEMM mode string for the current operation, dependent on whether this op implements
+ a batched GEMM or a simple GEMM without batch dimension.
+
+ Returns:
+ str: A string indicating the Cutlass GEMM mode. If the output node has more than two dimensions,
+ "cutlass::gemm::GemmUniversalMode::kBatched" is returned, otherwise
+ "cutlass::gemm::GemmUniversalMode::kGemm" is returned.
+ """
sizes = self.output_node.get_size()
if len(sizes) > 2:
return "cutlass::gemm::GemmUniversalMode::kBatched"
@@ -575,9 +787,33 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
kernel: CUDATemplateKernel,
epilogue_args,
) -> str:
+ """
+ Render the Cutlass CUDA C++ code required for passing arguments to the GEMM operation.
+
+ Args:
+ argument_template (str): Template for the GEMM operation arguments.
+ epilogue_template (str): Template for the epilogue arguments.
+ should_swap_xw (bool): Determines whether X, W operands should be swapped. If True, applies an explicit
+ transpose operation to X and W.
+ X (IRNode): The X input tensor.
+ W (IRNode): The W input tensor.
+ Bias (IRNode): The bias tensor.
+ Y (IRNode): The output tensor.
+ alpha (float): Scaling factor for the product of the inputs.
+ beta (float): Scaling factor for the output tensor.
+ kernel (CUDATemplateKernel): CUDA Template kernel for the operation.
+ epilogue_args (any): Additional arguments for the epilogue state.
+
+ Returns:
+ str: A block of CUDA C++ code as a string, ready to be used as arguments for the GEMM operation.
+
+ Note: If `should_swap_xw` is True, a transpose operation will be applied to the X, W, Bias, and Y
+        tensors. This operation also implies that the M and N dimensions of Bias and the GEMM output are swapped
+ before the function call.
+ """
options = dict(
- alpha=self.alpha,
- beta=self.beta,
+ alpha=alpha,
+ beta=beta,
X=X,
W=W,
Y=Y,
@@ -588,45 +824,42 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
N="N",
epilogue_args=epilogue_args,
)
-
- if epilogue_template is not None:
- if should_swap_xw:
- # Swap
- def clone_with_transposed_stride(node: IRNode) -> IRNode:
- old_layout = node.get_layout()
- new_stride = list(old_layout.stride)
- new_stride[-2], new_stride[-1] = new_stride[-1], new_stride[-2]
- new_layout = FixedLayout(
- old_layout.device,
- old_layout.dtype,
- list(old_layout.size),
- new_stride,
- old_layout.offset,
- )
- return Buffer(node.get_name(), new_layout)
-
- new_X = clone_with_transposed_stride(X)
- new_W = clone_with_transposed_stride(W)
- new_Bias = clone_with_transposed_stride(Bias)
- new_Y = clone_with_transposed_stride(Y)
- options["X"], options["W"], options["Bias"], options["Y"] = (
- new_W,
- new_X,
- new_Bias,
- new_Y,
+ assert epilogue_template is not None
+
+ if should_swap_xw:
+ # Swap
+ def clone_with_transposed_stride(node: IRNode) -> IRNode:
+ old_layout = node.get_layout()
+ new_stride = list(old_layout.stride)
+ new_stride[-2], new_stride[-1] = new_stride[-1], new_stride[-2]
+ new_layout = FixedLayout(
+ old_layout.device,
+ old_layout.dtype,
+ list(old_layout.size),
+ new_stride,
+ old_layout.offset,
)
- options["M"], options["N"] = "N", "M"
-
- epilogue_arguments = self._template_from_string(epilogue_template).render(
- **options
- )
- arguments = self._template_from_string(argument_template).render(
- epilogue_arguments=epilogue_arguments, **options
- )
- else:
- arguments = self._template_from_string(GEMM_ARGS_CUTLASS_2X).render(
- split_k=1, **options
+ return Buffer(node.get_name(), new_layout)
+
+ new_X = clone_with_transposed_stride(X)
+ new_W = clone_with_transposed_stride(W)
+ new_Bias = clone_with_transposed_stride(Bias)
+ new_Y = clone_with_transposed_stride(Y)
+ options["X"], options["W"], options["Bias"], options["Y"] = (
+ new_W,
+ new_X,
+ new_Bias,
+ new_Y,
)
+ options["M"], options["N"] = "N", "M"
+
+ epilogue_arguments = self._template_from_string(epilogue_template).render(
+ **options
+ )
+ arguments = self._template_from_string(argument_template).render(
+ epilogue_arguments=epilogue_arguments, **options
+ )
+
return arguments
def render( # type: ignore[override]
@@ -634,28 +867,27 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
kernel: CUDATemplateKernel,
op: "cutlass_gemm_op.GemmOperation" = None, # type: ignore[name-defined] # noqa: F821
template_buffer_node: Optional[CUDATemplateBuffer] = None,
- epilogue_nodes: Optional[List[IRNode]] = None,
**kwargs,
) -> str:
- if epilogue_nodes is not None and len(epilogue_nodes) > 0:
- assert self.can_fuse_epilogue and CUTLASSGemmTemplate.supports_evt(
- op
- ), "op does not support EVT epilogue fusion"
- assert (
- template_buffer_node is not None
- ), "Template node is required for epilogue fusion"
- assert isinstance(
- template_buffer_node, CUDATemplateBuffer
- ), f"Template node has to be a CUDATemplateBuffer, is type {type(template_buffer_node)}"
- assert (
- template_buffer_node.name is not None
- ), "Output node has to be a Buffer with a name"
- # This is the name of the output of the Matmul, before epilogues are applied.
- # it is not necessarily materialized in global memory if we have an epilogue
-
- template_output_node_name = (
- template_buffer_node.name if template_buffer_node is not None else None
- )
+ """
+ The primary entry point for the code rendering process used in this template.
+ Renders the Cutlass based CUDA C++ code for the GEMM Kernel that this template is designed to implement,
+ including potentially fused epilogues.
+
+ Args:
+ kernel (CUDATemplateKernel): The kernel to be rendered.
+ op (cutlass_gemm_op.GemmOperation, optional): A GEMM operation that is required to be compatible with the
+ input and output definitions as well as a possible epilogue. Defaults to None.
+ **kwargs: Additional keyword arguments. Currently unused.
+
+ Returns:
+ str: Cutlass based CUDA C++ code fragment as a string, to be used by the current
+ CUDATemplateKernel or autotuning code.
+
+ Note:
+ All inputs and their corresponding buffer addresses and names take precedence over previously
+ passed inputs to the template at construction time. However, they should be layout compatible.
+ """
assert cutlass_utils.try_import_cutlass()
import cutlass_library.gemm_operation as cutlass_gemm_op
@@ -664,46 +896,66 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
assert isinstance(
op, cutlass_gemm_op.GemmOperation
), "op argument is required and has to be an instance of GemmOperation"
- if template_buffer_node is not None:
- self.output_node = template_buffer_node
- if epilogue_nodes is not None and len(epilogue_nodes) > 0:
- self.output_node = cast(Buffer, epilogue_nodes[-1])
assert len(self.input_nodes) >= 2 and self.output_node is not None
X, W = self.input_nodes[0], self.input_nodes[1]
+ assert isinstance(X.layout, FixedLayout), "X.layout is not fixed"
+ assert isinstance(W.layout, FixedLayout), "W.layout is not fixed"
Y = self.output_node
+ if template_buffer_node is not None:
+ Y = template_buffer_node
Bias = None if len(self.input_nodes) == 2 else self.input_nodes[2]
- epilogue_template: Optional[str] = None
+ # to make op mutable without affecting others
+ op = copy.deepcopy(op)
+ if Bias is not None:
+ assert Bias.get_layout().dtype == X.get_layout().dtype
+ # This might have been set to void during filtering, when the assumption was still that there's no C
+ # operand
+ op.C.element = op.A.element
+
+ # Define Kernel call signature
+ # Important: This step also populates Kernel name to node mapping data structures,
+        # which are required further below (for example by CutlassEVTEpilogueArgumentFormatter and
+        # the template renderer)
+ inputs = [X, W, Bias]
+ names = ["X", "W", "Bias"] + ["Y"]
+ names_str = ",".join(names)
+ if self.input_reorder is not None:
+ input_reorder = self.input_reorder
+ else:
+ input_reorder = None
+ kernel_call_signature = kernel.def_kernel(
+ inputs=inputs, outputs=[Y], names_str=names_str, input_reorder=input_reorder # type: ignore[arg-type]
+ )
+ test_call_statement = self.test_call_statement(kernel, inputs, names_str)
+        # The layouts might have changed between autotuning and this call if they were FlexibleLayout;
+ # we need to adapt, which might lead to suboptimal performance.
+
+ op = self.fix_op_layout(op, X, W, Bias, Y)
+ epilogue_template: str = GEMM_ARGS_CUTLASS_3X_EPILOGUE
+ argument_template: str = GEMM_ARGS_CUTLASS_3X
should_swap_xw: bool = False
epilogue_args = f"{{ElementComputeEpilogue({self.alpha}), ElementComputeEpilogue({self.beta})}}"
- if op.gemm_kind == cutlass_lib.GemmKind.Universal3x:
- if Bias is not None and self.has_tma_epilogue(op):
- if self.should_swap_XW(Bias, self.beta):
- # TMA epilogue requires bias vector in column major to get best perf.
- op = self.swap_XW(op)
- should_swap_xw = True
- if epilogue_nodes is not None and len(epilogue_nodes) > 0:
- epilogue_args = (
- CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string(
- cast(str, template_output_node_name), epilogue_nodes
- )
- )
- epilogue_template = GEMM_ARGS_CUTLASS_3X_EPILOGUE
- argument_template = GEMM_ARGS_CUTLASS_3X
- else:
- # TODO: Support split_k.
- argument_template = GEMM_ARGS_CUTLASS_2X
+ if Bias is not None and self.has_tma_epilogue(op):
+ if (
+ op.epilogue_schedule
+ != cutlass_lib.EpilogueScheduleType.EpilogueTransposed
+ and self.should_swap_XW(Bias)
+ ):
+ # TMA epilogue requires bias vector in column major to get best perf.
+ op = self.swap_XW(op)
+ should_swap_xw = True
+
+ instance_definition, instance_type = self.define_gemm_instance(op)
- instance_definition, instance_type = self.define_gemm_instance(
- op, cast(str, template_output_node_name), epilogue_nodes
- )
options = dict(
alpha=self.alpha,
beta=self.beta,
X=X,
W=W,
Y=Y,
+ kernel_call_signature=kernel_call_signature,
Bias=Bias,
epilogue_template=epilogue_template,
argument_template=argument_template,
@@ -714,6 +966,35 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
instance_type=instance_type,
input_reorder=self.input_reorder,
epilogue_args=epilogue_args,
+ test_call_statement=test_call_statement,
)
res = self._template_from_string(GEMM_TEMPLATE).render(**options)
+ if inductor_cuda_config.generate_test_runner:
+ test_runner_code = self._template_from_string(
+ GEMM_STANDALONE_RUNNER_TEMPLATE
+ ).render(**options)
+ res += "\n\n" + test_runner_code
return res
+
+ def test_call_statement(
+ self,
+ kernel,
+ input_nodes,
+ names_str: str = "",
+ ) -> str:
+ """
+ Helper method to render the Cutlass CUDA C++ code required for calling the GEMM operation in the standalone
+ test runner that might also be generated along with the rest of the code, if the corresponding config is
+ enabled.
+
+ Returns a C++ statement that calls the GEMM operation with the correct arguments.
+ """
+ _, __, arg_types = kernel.args.cpp_argdefs()
+ arg_names = [name.strip() for name in names_str.strip().split(",")]
+ if input_nodes[2] is None:
+ del arg_names[2]
+ arguments = [
+ f"(({arg_type}){arg_name}_data.get())"
+ for arg_type, arg_name in zip(arg_types, arg_names)
+ ]
+ return f"{kernel.kernel_name}({', '.join(arguments)}, workspace_size_ptr, (uint8_t*)workspace_data.get(), 0);"
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index 90279c5be9..ce69e81656 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -774,10 +774,11 @@ class cuda:
# Minimum value of M*N*K to consider the CUTLASS backend for GEMM ops.
cutlass_backend_min_gemm_size: int = 1
- # If set to True, it will ensure that only GEMM ops capable of
- # epilogue fusion via CUTLASS Epilogue Visitor Trees ( EVT )
- # are enabled for the CUTLASS backend.
- cutlass_only_evt_capable_ops: bool = False
+ # enable generation of inline standalone runner in CUDA CPP generated code
+    # which allows compiling the generated code into a standalone executable.
+ generate_test_runner: bool = (
+ os.environ.get("INDUCTOR_CUDA_BACKEND_GENERATE_TEST_RUNNER_CODE", "1") == "1"
+ )
# Keep only Cutlass op configs which contain this regular expression pattern
# Set this to "warpspecialized_cooperative_epi_tma" to enable only SM90 TMA Cutlass Kernels for large GEMMs
|
2.41.0
|
6b7504d47003a731d80418b2f008a15d0e417a3
|
Fri, 26 Apr 2024 10:27:29 -0700
|
[PATCH 0740/1000] Fix torch.library.register_fake's module reporting (#125037)
|
torch.library.register_fake reports the Python module the fake impl is located in. This is used to check against `m.set_python_module("foo.bar")` calls in C++. The module reporting logic was wrong in most cases; this PR fixes it. Test Plan: exhaustive tests. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125037 Approved by: https://github.com/williamwen42
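For context, a minimal sketch of the registration pattern the new tests exercise (the namespace `my_lib::my_clone` is illustrative, not part of this PR, and `torch._library.simple_registry` is an internal API that may change):

```python
import torch
from torch import Tensor

@torch.library.custom_op("my_lib::my_clone", mutates_args=())
def my_clone(x: Tensor) -> Tensor:
    return x.clone()

@my_clone.register_fake
def _(x: Tensor) -> Tensor:
    # The fake impl only needs to produce correct metadata (shape/dtype/device).
    return torch.empty_like(x)

# Mirroring the new test: the recorded source should point at the file that
# defines the fake impl above, not at torch.library internals.
entry = torch._library.simple_registry.singleton.find("my_lib::my_clone")
print(entry.abstract_impl.kernel.source)
```

With this fix, the reported source names the defining file (e.g. `custom_op_db.py` in the new tests) regardless of which registration path is used.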
|
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index a9d92c57d0..b5901bcc4b 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -2422,6 +2422,16 @@ class TestCustomOpAPI(TestCase):
continue
self.assertGreater(after, prev)
+ @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
+ @parametrize("idx", [0, 1, 2, 3, 4, 5])
+ def test_library_register_fake_source(self, idx):
+ opname = f"source{idx}"
+ op = getattr(torch.ops._torch_testing, opname).default
+ entry = torch._library.simple_registry.singleton.find(op._name)
+ source = entry.abstract_impl.kernel.source
+ assert source is not None
+ self.assertTrue("custom_op_db.py" in source)
+
@skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
def test_library_register_fake(self):
for mode in ["function", "qualname", "opoverload"]:
@@ -2436,10 +2446,15 @@ class TestCustomOpAPI(TestCase):
if mode == "function":
dec = torch.library.register_fake(add)
+ self.assertIsNotNone(dec)
elif mode == "qualname":
dec = torch.library.register_fake("_torch_testing::add")
+ self.assertIsNotNone(dec)
elif mode == "opoverload":
dec = torch.library.register_fake(torch.ops._torch_testing.add.default)
+ self.assertIsNotNone(dec)
+ else:
+ raise AssertionError("should not get here")
@dec
def _(x, y):
diff --git a/torch/_library/custom_ops.py b/torch/_library/custom_ops.py
index d1eddccbbc..bbe4ac7a2a 100644
--- a/torch/_library/custom_ops.py
+++ b/torch/_library/custom_ops.py
@@ -466,7 +466,7 @@ class CustomOpDef:
)
return self._abstract_fn(*args, **kwargs)
- lib._register_fake(self._name, fake_impl)
+ lib._register_fake(self._name, fake_impl, _stacklevel=4)
autograd_impl = _library.autograd.make_autograd_impl(self._opoverload, self)
lib.impl(self._name, autograd_impl, "Autograd", with_keyset=True)
diff --git a/torch/library.py b/torch/library.py
index 2c413a7c9a..6bd4bd8110 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -418,7 +418,9 @@ def impl_abstract(qualname, func=None, *, lib=None, _stacklevel=1):
"we will remove torch.library.impl_abstract in a future "
"version of PyTorch.",
DeprecationWarning, stacklevel=2)
- return register_fake(qualname, func, lib=lib, _stacklevel=_stacklevel + 1)
+ if func is not None:
+ _stacklevel = _stacklevel + 1
+ return register_fake(qualname, func, lib=lib, _stacklevel=_stacklevel)
_op_identifier = Union[str, "torch._ops.OpOverload", "torch._library.custom_ops.CustomOpDef"]
@@ -592,7 +594,7 @@ def register_fake(
_keep_alive.append(use_lib)
else:
use_lib = lib
- use_lib._register_fake(op_name, func, _stacklevel=stacklevel)
+ use_lib._register_fake(op_name, func, _stacklevel=stacklevel + 1)
return func
if func is None:
diff --git a/torch/testing/_internal/custom_op_db.py b/torch/testing/_internal/custom_op_db.py
index 4e5bdfe8a4..3177fb9c8b 100644
--- a/torch/testing/_internal/custom_op_db.py
+++ b/torch/testing/_internal/custom_op_db.py
@@ -435,3 +435,54 @@ custom_op_db = [
supports_out=False,
),
]
+
+
+# ==============================================================
+# some mechanical test cases
+# ==============================================================
+
+lib = torch.library.Library("_torch_testing", "FRAGMENT") # noqa: TOR901
+
+lib.define("source0(Tensor x) -> Tensor")
+
+@torch.library.register_fake("_torch_testing::source0", lib=lib)
+def _(x):
+ return x.clone()
+
+lib.define("source1(Tensor x) -> Tensor")
+
+def source1_fake(x):
+ return x.clone()
+
+torch.library.register_fake("_torch_testing::source1", source1_fake, lib=lib)
+
+lib.define("source2(Tensor x) -> Tensor")
+
+@torch.library.impl_abstract("_torch_testing::source2", lib=lib)
+def _(x):
+ return x.clone()
+
+lib.define("source3(Tensor x) -> Tensor")
+
+def source3_fake(x):
+ return x.clone()
+
+torch.library.impl_abstract("_torch_testing::source3", source3_fake, lib=lib)
+
+
+@torch.library.custom_op("_torch_testing::source4", mutates_args=())
+def source4(x: Tensor) -> Tensor:
+ return x.clone()
+
+@source4.register_fake
+def _(x):
+ return x.clone()
+
+@torch.library.custom_op("_torch_testing::source5", mutates_args=())
+def source5(x: Tensor) -> Tensor:
+ return x.clone()
+
+def source5_fake(x):
+ return x.clone()
+
+source5.register_fake(source5_fake)
|
2.41.0
|
3e7b9d25fd33cff129107738ea280806dd23db3
|
Fri, 26 Apr 2024 14:09:49 +0000
|
[PATCH 0742/1000] [Inductor] Support fusion of chained reductions even if keepdims=True (#124843)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124843 Approved by: https://github.com/shunting314
|
diff --git a/test/inductor/test_fp8.py b/test/inductor/test_fp8.py
index 499bf8877c..5d910a74c9 100644
--- a/test/inductor/test_fp8.py
+++ b/test/inductor/test_fp8.py
@@ -243,10 +243,12 @@ class TestFP8Types(TestCase):
@unittest.skipIf(not SM90OrLater, "FP8 is only supported on H100+")
@parametrize("float8_dtype", (torch.float8_e4m3fn, torch.float8_e5m2))
@parametrize("shape", ("4,2048,4096",))
+ @parametrize("keepdim", (False, True))
def test_layernorm_fp8_quant_benchmark(
self,
float8_dtype: torch.dtype,
shape: str,
+ keepdim: bool,
):
shape = [int(dim) for dim in shape.split(",")]
batch_size, sequence_length, hidden_size = shape
@@ -269,7 +271,8 @@ class TestFP8Types(TestCase):
bias=None,
eps=1e-05,
)
- amax_buffer.fill_(torch.amax(torch.abs(x)))
+ amax = torch.amax(torch.abs(x), keepdim=keepdim)
+ amax_buffer.view_as(amax).copy_(amax)
x_scaled = x * scale
bits_fp8 = _to_fp8_saturated(x_scaled, float8_dtype)
return bits_fp8
@@ -295,7 +298,7 @@ class TestFP8Types(TestCase):
ln_latency = utils.do_bench_using_profiling(functools.partial(compiled_ln, x))
print(
- f"Config: {float8_dtype=}, {shape=}. "
+ f"Config: {float8_dtype=}, {shape=}, {keepdim=}. "
f"Benchmark results: Inductor: {compiled_latency}ms, Eager: {eager_latency}ms, "
f"LN only Inductor: {ln_latency}ms."
)
diff --git a/test/inductor/test_perf.py b/test/inductor/test_perf.py
index 4326f487cd..c4394b3964 100644
--- a/test/inductor/test_perf.py
+++ b/test/inductor/test_perf.py
@@ -487,31 +487,15 @@ class FusionTests(TestCase):
inp = (T(4, 2048, hidden_size, dtype=torch.float), T(1, dtype=torch.float))
- # 3 kernels:
- # kernel 1: (input = X, scale, LN scale, LN bias, output = LN_pointwise(X), welford_reduction(X) * 2)
- # kernel 2: (input = X, welford_reduction(X) * 2, LN scale, LN bias, output = first-level amax (split-reduction))
- # kernel 3: (input = first-level amax, output = final amax)
- # scale (1) + X (4*2048*hidden_size) * 3 + welford_reduction (4*2048) * 4 +
- # LN scale (hidden_size) * 2 + LN bias (hidden_size) * 2 + amax (num_splits * 2 + 1)
- # num_splits depends on SM architectures.
- expected_amax_keep_dim_numel = (
- 1 + hidden_size * 4 + 4 * 2048 * hidden_size * 3 + 4 * 2048 * 4 + 1
- )
- self.assertGreaterAlmostEqual(
- int(count_numel(f, *inp, True)), expected_amax_keep_dim_numel
- )
-
# 2 kernels:
# kernel 1: (input = X, scale, LN scale, LN bias, output = LN_pointwise(X), first-level amax (split-reduction))
# kernel 2: (input = first-level amax, output = final amax)
# scale (1) + X (4*2048*hidden_size) * 2 + LN scale (hidden_size) + LN bias (hidden_size) + amax (4 * 2048 * 2 + 1)
-
- expected_amax_no_keep_dim_numel = (
+ expected_numel = (
1 + hidden_size * 2 + 4 * 2048 * hidden_size * 2 + 4 * 2048 * 2 + 1
)
- self.assertExpectedInline(
- count_numel(f, *inp, False), str(expected_amax_no_keep_dim_numel)
- )
+ self.assertExpectedInline(count_numel(f, *inp, True), str(expected_numel))
+ self.assertExpectedInline(count_numel(f, *inp, False), str(expected_numel))
def test_pointwise_multi_level_reduction(self):
# TODO: this can be optimized by having the first pointwise kernel leveraging block sizes
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 9e366d9b26..64a83e31e1 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -788,14 +788,7 @@ class Reduction(Loops):
if split == 1:
# No need to split.
return ReductionHint.INNER, split
- if (
- len(ranges) == 0
- and input_node is not None
- and isinstance(input_node, TensorBox)
- ):
- # Only handles the case where keep_dim = False.
- # Otherwise, we need to propagate reduction dim info to the stage where
- # the intermediate loader of the first Reduction is generated.
+ if input_node is not None and isinstance(input_node, TensorBox):
new_ranges, new_reduction_ranges = extract_input_node_reduction_ranges(
input_node
)
@@ -1173,13 +1166,20 @@ class Reduction(Loops):
new_reduction_ranges,
default,
):
- assert len(original_ranges) == 0, f"{original_ranges}= is not equal to []"
+ assert all(
+ r == 1 for r in original_ranges
+ ), f"Only enabled for numel_hint == 1, found {original_ranges=}"
reindex = View.dynamic_reshape_indexer(
original_reduction_ranges, tuple(new_ranges) + tuple(new_reduction_ranges)
)
- def wrapper_fn(index, reduction_index):
- return loader([], reindex(tuple(index) + tuple(reduction_index)))
+ def wrapper_fn(merged_index, new_reduction_index):
+ original_idx = merged_index[: len(original_ranges)]
+ new_index = merged_index[len(original_ranges) :]
+ return loader(
+ original_idx,
+ reindex(tuple(new_index) + tuple(new_reduction_index)),
+ )
return wrapper_fn
@@ -1318,7 +1318,7 @@ class Reduction(Loops):
wrapper_fn,
original_ranges,
original_reduction_ranges,
- new_ranges,
+ [*original_ranges, *new_ranges],
new_reduction_ranges,
reduction_type,
-1,
|
2.41.0
|
e2c09725af0e9ce0ba0db1688a6b258e43f7c63
|
Thu, 25 Apr 2024 16:24:47 -0700
|
[PATCH 0743/1000] [dtensor][experimental] local_map (#123676)
|
**Summary** This PR is an attempt to land an experimental feature designed in #103686. `local_map` is designed to allow users to apply to `DTensor` objects a function that was written to apply to `torch.Tensor`. As a function, `local_map` takes in 2 required arguments (`func` and `out_placements`) and 3 optional arguments (`device_mesh`, `in_placements`, `redistribute_inputs`). `func` is the function to be applied to each local shard of the input `DTensor`s. `out_placements` is the sharding specification of the output `DTensor`s. `local_map` returns a new function that does the following: 1. Infer `device_mesh` and `in_placements` from the `DTensor` inputs if they're not provided. If `device_mesh` is provided, it must be identical to the device mesh of every `DTensor` input. If `in_placements` is provided, it serves as the required sharding specification of the corresponding `DTensor` input before feeding its local shard into `func`; if it differs from the `DTensor`'s actual sharding specification, an exception is raised when `redistribute_inputs=False`, otherwise the input is resharded to the required sharding. 2. Call `func`, passing through the non-`DTensor` arguments along with `device_mesh`; for each `DTensor` argument, its local shard is passed in instead. This `func` may include collectives. 3. For each output of `func` that has a valid (i.e. not `None`) sharding specification in `out_placements`, construct a new `DTensor` from the output and the specification and use it as the output. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123676 Approved by: https://github.com/wanchaol
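To make the intended usage concrete, here is a minimal sketch distilled from the tests added in this PR; it assumes a distributed environment with 2 CUDA ranks already initialized (e.g. launched via torchrun) and the experimental `torch.distributed._tensor` APIs as of this commit:

```python
import torch
import torch.distributed._functional_collectives as funcol
from torch.distributed._tensor import (
    distribute_tensor,
    init_device_mesh,
    Replicate,
    Shard,
)
from torch.distributed._tensor.experimental import local_map

def mm_allreduce_forward(device_mesh, W, X):
    # Written against plain torch.Tensor; the collective is issued manually.
    partial_sum = torch.mm(W, X)
    return funcol.all_reduce(partial_sum, "sum", device_mesh).wait()

device_mesh = init_device_mesh("cuda", mesh_shape=(2,))
col_wise, row_wise = [Shard(1)], [Shard(0)]
W_dt = distribute_tensor(torch.randn(12, 8, device="cuda"), device_mesh, col_wise)
X_dt = distribute_tensor(torch.randn(8, 16, device="cuda"), device_mesh, row_wise)

# Wrap the Tensor function so it can be called on DTensors directly.
local_mm_allreduce_forward = local_map(
    mm_allreduce_forward,
    out_placements=[Replicate()],
    in_placements=(col_wise, row_wise),
    device_mesh=device_mesh,
)
Y_dt = local_mm_allreduce_forward(W_dt, X_dt)  # replicated DTensor holding W @ X
```

As in the PR's tests, if `in_placements` differs from the inputs' current placements, `redistribute_inputs=True` must be passed, otherwise a ValueError is raised.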
|
diff --git a/test/distributed/_tensor/experimental/test_local_map.py b/test/distributed/_tensor/experimental/test_local_map.py
new file mode 100644
index 0000000000..1035df2f5f
--- /dev/null
+++ b/test/distributed/_tensor/experimental/test_local_map.py
@@ -0,0 +1,228 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+# Owner(s): ["oncall: distributed"]
+
+import torch
+import torch.distributed._functional_collectives as funcol
+from torch.distributed._tensor import (
+ distribute_tensor,
+ init_device_mesh,
+ Replicate,
+ Shard,
+)
+from torch.distributed._tensor.debug import CommDebugMode
+from torch.distributed._tensor.experimental import local_map
+from torch.testing._internal.common_utils import run_tests
+from torch.testing._internal.distributed._tensor.common_dtensor import (
+ DTensorTestBase,
+ with_comms,
+)
+
+
+def equal_forward(device_mesh, X, Y):
+ eq = torch.tensor([torch.equal(X, Y)], device=X.device)
+ eq_gather = funcol.all_gather_tensor(eq, 0, device_mesh)
+ return torch.all(eq_gather).item()
+
+
+def mm_forward(device_mesh, W, X):
+ return torch.mm(W, X)
+
+
+def mm_allreduce_forward(device_mesh, W, X):
+ partial_sum_tensor = torch.mm(W, X)
+ reduced_tensor = funcol.all_reduce(partial_sum_tensor, "sum", device_mesh).wait()
+ return reduced_tensor
+
+
+def mul_forward(device_mesh, X, scalar):
+ return torch.mul(X, scalar)
+
+
+class TestLocalMap(DTensorTestBase):
+ @property
+ def world_size(self):
+ return 2
+
+ # simple correctness check
+ @with_comms
+ def test_local_map_correctness(self):
+ device_mesh = init_device_mesh(
+ device_type=self.device_type, mesh_shape=(self.world_size,)
+ )
+ comm_mode = CommDebugMode()
+
+ # Y = W @ X
+ W = torch.randn(12, 8, device=self.device_type, requires_grad=False)
+ X = torch.randn(8, 16, device=self.device_type, requires_grad=False)
+ Y = torch.mm(W, X)
+
+ row_wise = [Shard(0)] # row-wise sharding placements on 1-d mesh
+ col_wise = [Shard(1)] # col-wise sharding placements on 1-d mesh
+ W_dt = distribute_tensor(
+ W, device_mesh, col_wise
+ ) # col-wisely sharded W tensor
+ X_dt = distribute_tensor(
+ X, device_mesh, row_wise
+ ) # row-wisely sharded X tensor
+        # get the function wrapped with DTensor/Tensor conversion
+ # mm_allreduce_forward is a function that applies to Tensors with manual collective
+ # local_mm_allreduce_forward is the function that does the same but applies to
+ # DTensors' `_local_tensor`.
+ local_mm_allreduce_forward = local_map(
+ mm_allreduce_forward,
+ out_placements=[Replicate()],
+ in_placements=(col_wise, row_wise),
+ device_mesh=device_mesh,
+ )
+ with comm_mode:
+ Y_dt = local_mm_allreduce_forward(W_dt, X_dt)
+
+ # output redistribution to Replicate
+ self.assertEqual(comm_mode.get_total_counts(), 1)
+ # check output placements
+ for placement in Y_dt.placements:
+ self.assertTrue(placement.is_replicate())
+ # check output value
+ self.assertEqual(Y_dt.to_local(), Y)
+
+ # check for `out_placements`
+ @with_comms
+ def test_local_map_out_placements(self):
+ device_mesh = init_device_mesh(
+ device_type=self.device_type, mesh_shape=(self.world_size,)
+ )
+ comm_mode = CommDebugMode()
+
+ # X.equal(Y)
+ X = torch.randn(8, 8, device=self.device_type, requires_grad=False)
+ Y = torch.randn(8, 8, device=self.device_type, requires_grad=False)
+ row_wise = [Shard(0)]
+ X_dt = distribute_tensor(X, device_mesh, row_wise)
+ Y_dt = distribute_tensor(Y, device_mesh, row_wise)
+ local_equal_forward = local_map(equal_forward, out_placements=None)
+ with comm_mode:
+ equal_dt = local_equal_forward(X_dt, Y_dt) # a bool
+
+ self.assertEqual(comm_mode.get_total_counts(), 1)
+ self.assertTrue(not equal_dt)
+ self.assertTrue(not (X.equal(Y)))
+
+ # check for `in_placements` handling
+ @with_comms
+ def test_local_map_in_placements(self):
+ device_mesh = init_device_mesh(
+ device_type=self.device_type, mesh_shape=(self.world_size,)
+ )
+ comm_mode = CommDebugMode()
+
+ # Y = W @ X
+ W = torch.randn(12, 8, device=self.device_type, requires_grad=False)
+ X = torch.randn(8, 16, device=self.device_type, requires_grad=False)
+ Y = torch.mm(W, X)
+
+ row_wise = [Shard(0)] # row-wise sharding placements on 1-d mesh
+ replicate = [Replicate()] # replicate placements on 1-d mesh
+ W_dt = distribute_tensor(
+ W, device_mesh, row_wise
+ ) # row-wisely sharded W tensor
+ X_dt = distribute_tensor(X, device_mesh, replicate) # replicate X tensor
+
+ # Test 1: explicitly pass `in_placements`
+ local_mm_forward = local_map(
+ mm_forward,
+ out_placements=row_wise,
+ in_placements=(row_wise, replicate),
+ device_mesh=device_mesh,
+ )
+ with comm_mode:
+ Y_dt = local_mm_forward(W_dt, X_dt)
+
+ # no communication should occur in this case
+ self.assertEqual(comm_mode.get_total_counts(), 0)
+ for placement in Y_dt.placements:
+ self.assertTrue(placement.is_shard(dim=0))
+ self.assertEqual(Y_dt.full_tensor(), Y)
+
+ # Test 2: `in_placements=None`
+ local_mm_forward = local_map(
+ mm_forward,
+ out_placements=row_wise,
+ device_mesh=device_mesh,
+ )
+ with comm_mode:
+ Y_dt = local_mm_forward(W_dt, X_dt)
+
+ self.assertEqual(comm_mode.get_total_counts(), 0)
+ for placement in Y_dt.placements:
+ self.assertTrue(placement.is_shard(dim=0))
+ self.assertEqual(Y_dt.full_tensor(), Y)
+
+ # Test 3: `None` placements for non-Tensor input argument
+ local_mul_forward = local_map(
+ mul_forward,
+ in_placements=(row_wise, None),
+ out_placements=row_wise,
+ device_mesh=device_mesh,
+ )
+ Y = torch.mul(W, 2.0)
+ with comm_mode:
+ Y_dt = local_mul_forward(W_dt, 2.0)
+
+ self.assertEqual(comm_mode.get_total_counts(), 0)
+ for placement in Y_dt.placements:
+ self.assertTrue(placement.is_shard(dim=0))
+ self.assertEqual(Y_dt.full_tensor(), Y)
+
+ # check for `redistribute_inputs` handling
+ @with_comms
+ def test_local_map_redistribute(self):
+ device_mesh = init_device_mesh(
+ device_type=self.device_type, mesh_shape=(self.world_size,)
+ )
+ comm_mode = CommDebugMode()
+
+ # Y = W @ X
+ W = torch.randn(12, 8, device=self.device_type, requires_grad=False)
+ X = torch.randn(8, 16, device=self.device_type, requires_grad=False)
+ Y = torch.mm(W, X)
+
+ row_wise = [Shard(0)] # row-wise sharding placements on 1-d mesh
+ col_wise = [Shard(1)] # col-wise sharding placements on 1-d mesh
+ W_dt = distribute_tensor(
+ W, device_mesh, row_wise
+ ) # row-wisely sharded W tensor which will be redistributed
+ X_dt = distribute_tensor(
+ X, device_mesh, col_wise
+ ) # col-wisely sharded X tensor which will be redistributed
+
+ # Test 1: allow input redistribution
+ local_mm_allreduce_forward = local_map(
+ mm_allreduce_forward,
+ out_placements=[Replicate()],
+ in_placements=(col_wise, row_wise),
+ device_mesh=device_mesh,
+ redistribute_inputs=True,
+ )
+ with comm_mode:
+ Y_dt = local_mm_allreduce_forward(W_dt, X_dt)
+
+ # 2 for input redistribution and 1 for output
+ self.assertEqual(comm_mode.get_total_counts(), 3)
+ for placement in Y_dt.placements:
+ self.assertTrue(placement.is_replicate())
+ self.assertEqual(Y_dt.to_local(), Y)
+
+ # Test 2: no input redistribution is allowed
+ local_mm_allreduce_forward = local_map(
+ mm_allreduce_forward,
+ out_placements=[Replicate()],
+ in_placements=(col_wise, row_wise),
+ device_mesh=device_mesh,
+ redistribute_inputs=False,
+ )
+ with self.assertRaisesRegex(ValueError, "set redistribute_inputs=True"):
+ Y_dt = local_mm_allreduce_forward(W_dt, X_dt)
+
+
+if __name__ == "__main__":
+ run_tests()
diff --git a/torch/distributed/_tensor/experimental/__init__.py b/torch/distributed/_tensor/experimental/__init__.py
index e6a9bbe7ec..587eef3011 100644
--- a/torch/distributed/_tensor/experimental/__init__.py
+++ b/torch/distributed/_tensor/experimental/__init__.py
@@ -1,6 +1,10 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
from contextlib import contextmanager
from torch.distributed._tensor.api import DTensor
+from torch.distributed._tensor.experimental.local_map import local_map
+
+__all__ = ["local_map", "implicit_replication"]
@contextmanager
diff --git a/torch/distributed/_tensor/experimental/local_map.py b/torch/distributed/_tensor/experimental/local_map.py
new file mode 100644
index 0000000000..002ff5542a
--- /dev/null
+++ b/torch/distributed/_tensor/experimental/local_map.py
@@ -0,0 +1,182 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+from typing import Callable, Optional, Sequence, Tuple, Union
+
+import torch
+from torch.distributed._tensor import DeviceMesh, DTensor
+from torch.distributed._tensor.placement_types import Placement
+
+try:
+ from torch.utils import _cxx_pytree as pytree
+except ImportError:
+ from torch.utils import _pytree as pytree # type: ignore[no-redef]
+
+
+PlacementType = Optional[Sequence[Placement]]
+InputPlacements = Union[PlacementType, Tuple[PlacementType, ...]]
+OutputPlacements = Union[PlacementType, Tuple[PlacementType, ...]]
+
+
+def local_map(
+ func: Callable,
+ out_placements: OutputPlacements,
+ in_placements: Optional[InputPlacements] = None,
+ device_mesh: Optional[DeviceMesh] = None,
+ *,
+ redistribute_inputs: bool = False,
+):
+ """
+ ``local_map`` is an experimental API that allows users to apply a function
+ written for :class:`~torch.Tensor`s to :class:`DTensor`s.
+
+ Args:
+ func (Callable): the function to be applied on each local shard of
+ :class:`DTensor`s.
+ out_placements (Union[`PlacementType`, Tuple[`PlacementType`, ...]]):
+ the desired placements of the output :class:`DTensor`s. If the `output` of
+ `func` is a Python collection, the `out_placements` will be a Tuple of
+ `PlacementType` values 1:1 mapping to the flattened `output`. For
+ :class:`Tensor` output, the corresponding `PlacementType` will be its
+ placements (a `Tuple[Placement]` value). For non-:class:`Tensor` output,
+ the `PlacementType` will be `None`.
+ in_placements (Union[`PlacementType`, Tuple[`PlacementType`, ...]], optional):
+ the required placements of the input :class:`DTensor`s. If not specified,
+ the input :class:`DTensor` will not be redistributed before passing its local
+ tensor to `func`. Similarly to `out_placements`, `in_placements` should keep
+ a 1:1 mapping to the flattened input of `func`. If a redistribution is
+ required according to `in_placements` and `redistribute_inputs` is `False`,
+ an exception will be raised.
+ device_mesh (:class:`DeviceMesh`, optional):
+ the device mesh that all the :class:`DTensor`s are placed on. If not
+ specified, this will be inferred from the input :class:`DTensor`s' device
+ mesh. `local_map` requires every :class:`DTensor` to be placed on the same
+ device mesh.
+ redistribute_inputs (bool, optional):
+ the bool value indicating whether to reshard the input :class:`DTensor`s when
+ their placements are different from the required input placements. If this
+ value is `False` and some :class:`DTensor` input has a different placement,
+ an exception will be raised. Default: `False`.
+
+ Returns:
+ A `Callable` that applies `func` to each local shard of the input :class:`DTensor`
+ and returns a :class:`DTensor` constructed from the return value of `func`.
+
+ Raises:
+ AssertionError: If the input :class:`DTensor`s are not placed on the same device
+ mesh, or if they are placed on a different device mesh than the `device_mesh`
+ argument passed in.
+
+ AssertionError: For any non-:class:`DTensor` output, we require its corresponding
+ output placement in `out_placements` be `None`. An AssertionError will be raised
+ if this is not the case.
+
+ ValueError: If `redistribute_inputs=False` but the input :class:`DTensor` needs
+ a redistribution according to `in_placements`.
+
+ Example:
+ >>> # xdoctest: +SKIP("distributed")
+ >>> def mm_allreduce_forward(device_mesh, W, X):
+ >>> partial_sum_tensor = torch.mm(W, X)
+ >>> reduced_tensor = funcol.all_reduce(partial_sum_tensor, "sum", device_mesh)
+ >>> return reduced_tensor
+ >>>
+ >>> W = torch.randn(12, 8, requires_grad=False)
+ >>> X = torch.randn(8, 16, requires_grad=False)
+ >>> Y = torch.mm(W, X)
+ >>> row_wise = [Shard(0)] # row-wise sharding placements on 1-d mesh
+ >>> col_wise = [Shard(1)] # col-wise sharding placements on 1-d mesh
+ >>>
+ >>> # local_mm_allreduce_forward is the function wrapped with DTensor/Tensor conversion
+ >>> local_mm_allreduce_forward = local_map(
+ >>> mm_allreduce_forward,
+ >>> out_placements=[Replicate()],
+ >>> in_placements=[col_wise, row_wise],
+ >>> device_mesh=device_mesh,
+ >>> )
+ >>>
+ >>> W_dt = distribute_tensor(W, device_mesh, col_wise) # col-wisely sharded W tensor
+ >>> X_dt = distribute_tensor(X, device_mesh, row_wise) # row-wisely sharded X tensor
+ >>> Y_dt = local_mm_allreduce_forward(W_dt, X_dt) # apply local_mm_allreduce_forward to DTensors
+
+ NOTE: This API is currently experimental and subject to change
+ """
+
+ def wrapped(*args, **kwargs):
+ # process input args
+ flat_args, args_spec = pytree.tree_flatten(args)
+
+ # we assume every DTensor object is placed on the same device mesh
+ flat_local_args = []
+ nonlocal device_mesh # access var device_mesh from the outer scope
+ for idx, arg in enumerate(flat_args):
+ if isinstance(arg, DTensor):
+ # TODO: the current code doesn't consider the uneven sharding case
+ # Need to think about what the consequence is when the input DTensor
+ # is unevenly sharded.
+ if device_mesh is None: # infer device mesh from the DTensor arg
+ device_mesh = arg.device_mesh
+
+ assert arg.device_mesh == device_mesh, (
+ f"arg {arg} in local_map has a mismatched device mesh: "
+ f"{arg} has device mesh {arg.device_mesh} while "
+ f"the expected device mesh is {device_mesh}!"
+ )
+ if in_placements is not None:
+ spec = (
+ in_placements[idx]
+ if isinstance(in_placements, tuple)
+ else in_placements
+ )
+ assert (
+ spec is not None
+ ), f"DTensor input {arg} expects placements but received {spec}!"
+
+ if not isinstance(spec, tuple):
+ spec = tuple(spec)
+
+ if arg.placements != spec:
+ if redistribute_inputs:
+ # redistribute to input placements
+ arg = arg.redistribute(device_mesh, spec)
+ else:
+ raise ValueError(
+ f"arg {arg} in local_map has mismatched placements: "
+ f"arg placements are {arg.placements} but the required input "
+ f"placements are {spec}! "
+ "If redistribution is intended, set redistribute_inputs=True in local_map."
+ )
+
+ flat_local_args.append(arg.to_local())
+ else:
+ flat_local_args.append(arg)
+
+ local_args = pytree.tree_unflatten(flat_local_args, args_spec)
+
+ out = func(device_mesh, *local_args, **kwargs)
+
+ # process output
+ flat_out, out_spec = pytree.tree_flatten(out)
+ flat_dist_out = []
+ for idx, out in enumerate(flat_out):
+ spec = (
+ out_placements[idx]
+ if isinstance(out_placements, tuple)
+ else out_placements
+ )
+ if isinstance(out, torch.Tensor):
+ assert not isinstance(
+ out, DTensor
+ ), f"torch.Tensor output expected but received {type(out)}: {out}"
+
+ flat_dist_out.append(
+ DTensor.from_local(out, device_mesh, spec, run_check=False)
+ )
+ else:
+ assert (
+ spec is None
+ ), f"Non-tensor output {out} expects None placements but received {spec}!"
+
+ flat_dist_out.append(out)
+
+ return pytree.tree_unflatten(flat_dist_out, out_spec)
+
+ return wrapped
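As a complement to the docstring example above, here is a hedged sketch (not part of the patch; `scale_forward`, the mesh size, and the shapes are made up) of how `out_placements` maps 1:1 onto a flattened output that mixes a Tensor and a non-Tensor value, where the non-Tensor entry takes a `None` placement:

import torch
from torch.distributed._tensor import distribute_tensor, Shard
from torch.distributed._tensor.experimental import local_map
from torch.distributed.device_mesh import init_device_mesh

# assumes a distributed process group with 4 ranks is already initialized,
# e.g. under the same test harness as the test file above
device_mesh = init_device_mesh("cpu", (4,))
row_wise = [Shard(0)]

def scale_forward(device_mesh, X):
    # returns a Tensor plus a plain Python int
    return X * 2.0, X.ndim

local_scale_forward = local_map(
    scale_forward,
    # one PlacementType per flattened output: placements for the Tensor, None for the int
    out_placements=(row_wise, None),
    in_placements=(row_wise,),
    device_mesh=device_mesh,
)

X_dt = distribute_tensor(torch.randn(16, 8), device_mesh, row_wise)
Y_dt, ndim = local_scale_forward(X_dt)  # Y_dt is a DTensor, ndim is a plain int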
|
2.41.0
|
3069c460e87ba77fd69d2193635179779328e27
|
Fri, 26 Apr 2024 22:25:42 +0000
|
[PATCH 0744/1000] Correct check for Boolean list input type (#124899)
|
Summary: This diff fixes a bug in PyTorch where creating a tensor from a list of booleans threw an error. All credit goes to swolchok for identifying the root cause and suggesting this fix. Test Plan: Running our model end to end works as expected and no error occurs. Differential Revision: D55990810 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124899 Approved by: https://github.com/zhxchen17
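For illustration only (the failing internal model is not reproduced here, and this snippet is not from the diff), a minimal TorchScript function that builds a tensor from a list of booleans and thus exercises the `checkListInputType` check touched in the diff below:

from typing import List

import torch

@torch.jit.script
def make_mask(flags: List[bool]) -> torch.Tensor:
    # torch.tensor on a List[bool] is handled by the special ops registered below
    return torch.tensor(flags)

print(make_mask([True, False, True]))  # expected: a bool tensor [True, False, True]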
|
diff --git a/torch/csrc/jit/runtime/register_special_ops.cpp b/torch/csrc/jit/runtime/register_special_ops.cpp
index 36ede6717f..5e33d8cf27 100644
--- a/torch/csrc/jit/runtime/register_special_ops.cpp
+++ b/torch/csrc/jit/runtime/register_special_ops.cpp
@@ -33,7 +33,7 @@ c10::AliasAnalysisKind aliasAnalysisConservative() {
void checkListInputType(const c10::TypePtr& elem_type, bool empty_list) {
if (!elem_type->isSubtypeOf(*NumberType::get()) &&
- elem_type != BoolType::get()) {
+ !elem_type->isSubtypeOf(*BoolType::get())) {
std::stringstream error;
error << "Input must be of ints, floats, or bools, "
<< "got " << elem_type->repr_str();
|
2.41.0
|
d24d8c05ab62952918b1b459a9c036da25de27c
|
Fri, 26 Apr 2024 11:14:24 -0700
|
[PATCH 0745/1000] [dynamo][nn module] Use correct sources for _call_impl (#124970)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124970 Approved by: https://github.com/jansel ghstack dependencies: #124779, #124627
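A hedged sketch (assumptions: installing a forward hook is one of the cases that makes dynamo trace `mod._call_impl` rather than `mod.forward`, and the "eager" backend is used only to keep the example cheap) of the kind of module call whose guard source the diff below corrects from "__call__" to "_call_impl":

import torch
import torch.nn as nn

class M(nn.Module):
    def forward(self, x):
        return torch.relu(x)

m = M()
# With a hook installed, the call cannot be reduced to mod.forward alone,
# so dynamo traces mod._call_impl and guards on that attribute.
m.register_forward_hook(lambda mod, inp, out: out + 1)

opt = torch.compile(m, backend="eager")
print(opt(torch.randn(4)))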
|
diff --git a/torch/_dynamo/variables/nn_module.py b/torch/_dynamo/variables/nn_module.py
index ffd591793a..b2d86bad0f 100644
--- a/torch/_dynamo/variables/nn_module.py
+++ b/torch/_dynamo/variables/nn_module.py
@@ -340,9 +340,10 @@ class NNModuleVariable(VariableTracker):
# If so at least some changes are needed, we don't allow inlining
# the call_wrapped currently, and maybe other issues too
fn = mod.forward
+ fn_source = AttrSource(self.source, "forward")
else:
fn = mod._call_impl
- fn_source = AttrSource(self.source, "__call__")
+ fn_source = AttrSource(self.source, "_call_impl")
if istype(fn, types.MethodType):
fn = fn.__func__
fn_source = AttrSource(fn_source, "__func__")
|
2.41.0
|
bb8905e0c9c21f80a2428f43c9478324d1b4282
|
Sat, 27 Apr 2024 00:17:44 +0000
|
[PATCH 0746/1000] [cpu] add VecConvert between 8bits and 16bits (#124828)
|
The perf benefit was observed in https://github.com/pytorch/pytorch/issues/124697#issuecomment-2071658300. This PR adds intrinsic specializations for conversions between int8/uint8 and bf16/fp16. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124828 Approved by: https://github.com/jgong5, https://github.com/jansel
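A hedged eager-mode sketch of the dtype pairs these specializations cover (illustrative only; the patch itself targets the vectorized conversion helpers used by the CPU backend and inductor codegen, and this snippet does not check which kernel is dispatched):

import torch

x = torch.randint(0, 256, (1 << 16,), dtype=torch.uint8)

# uint8 -> bf16; the new VecConvert specialization goes uint8 -> fp32 -> bf16
y = x.to(torch.bfloat16)

# Numerically this matches an explicit round trip through fp32, since every
# uint8 value is exactly representable in bf16.
ref = x.to(torch.float32).to(torch.bfloat16)
torch.testing.assert_close(y, ref)

# The reverse direction (bf16/fp16 -> int8/uint8) is specialized as well.
z = y.to(torch.uint8)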
|
diff --git a/aten/src/ATen/cpu/vec/vec256/vec256_convert.h b/aten/src/ATen/cpu/vec/vec256/vec256_convert.h
index 19a9457193..55f26c606d 100644
--- a/aten/src/ATen/cpu/vec/vec256/vec256_convert.h
+++ b/aten/src/ATen/cpu/vec/vec256/vec256_convert.h
@@ -126,32 +126,44 @@ struct VecConvert<int32_t, 1, uint8_t, 1> {
}
};
+template <typename dst_t, typename src_t>
+struct VecConvert<
+ dst_t,
+ 1,
+ src_t,
+ 1,
+ typename std::enable_if_t<
+ (is_reduced_floating_point_v<dst_t> && is_8bit_integer_v<src_t>) ||
+ (is_reduced_floating_point_v<src_t> && is_8bit_integer_v<dst_t>),
+ void>> {
+ static inline VectorizedN<dst_t, 1> apply(const VectorizedN<src_t, 1>& src) {
+ VectorizedN<float, 1> tmp_fp32 = VecConvert<float, 1, src_t, 1>::apply(src);
+ return VecConvert<dst_t, 1, float, 1>::apply(tmp_fp32);
+ }
+};
+
template <typename dst_t>
struct VecConvert<
- dst_t,
- 1,
- float,
- 1,
- typename std::enable_if_t<
- std::is_same_v<dst_t, unsigned char> || std::is_same_v<dst_t, signed char>,
- void>> {
- static inline VectorizedN<dst_t, 1> apply(
- const VectorizedN<float, 1>& src) {
+ dst_t,
+ 1,
+ float,
+ 1,
+ typename std::enable_if_t<is_8bit_integer_v<dst_t>,
+ void>> {
+ static inline VectorizedN<dst_t, 1> apply(const VectorizedN<float, 1>& src) {
return convert_float_to_int8<dst_t>(src[0]);
}
};
template <typename src_t>
struct VecConvert<
- float,
- 1,
- src_t,
- 1,
- typename std::enable_if_t<
- std::is_same_v<src_t, unsigned char> || std::is_same_v<src_t, signed char>,
- void>> {
- static inline VectorizedN<float, 1> apply(
- const VectorizedN<src_t, 1>& src) {
+ float,
+ 1,
+ src_t,
+ 1,
+ typename std::enable_if_t<is_8bit_integer_v<src_t>,
+ void>> {
+ static inline VectorizedN<float, 1> apply(const VectorizedN<src_t, 1>& src) {
return convert_int8_to_float<src_t>(src[0]);
}
};
diff --git a/aten/src/ATen/cpu/vec/vec512/vec512_convert.h b/aten/src/ATen/cpu/vec/vec512/vec512_convert.h
index a8da148f37..e8ad662a99 100644
--- a/aten/src/ATen/cpu/vec/vec512/vec512_convert.h
+++ b/aten/src/ATen/cpu/vec/vec512/vec512_convert.h
@@ -117,32 +117,44 @@ struct VecConvert<int32_t, 1, uint8_t, 1> {
}
};
+template <typename dst_t, typename src_t>
+struct VecConvert<
+ dst_t,
+ 1,
+ src_t,
+ 1,
+ typename std::enable_if_t<
+ (is_reduced_floating_point_v<dst_t> && is_8bit_integer_v<src_t>) ||
+ (is_reduced_floating_point_v<src_t> && is_8bit_integer_v<dst_t>),
+ void>> {
+ static inline VectorizedN<dst_t, 1> apply(const VectorizedN<src_t, 1>& src) {
+ VectorizedN<float, 1> tmp_fp32 = VecConvert<float, 1, src_t, 1>::apply(src);
+ return VecConvert<dst_t, 1, float, 1>::apply(tmp_fp32);
+ }
+};
+
template <typename dst_t>
struct VecConvert<
- dst_t,
- 1,
- float,
- 1,
- typename std::enable_if_t<
- std::is_same_v<dst_t, unsigned char> || std::is_same_v<dst_t, signed char>,
- void>> {
- static inline VectorizedN<dst_t, 1> apply(
- const VectorizedN<float, 1>& src) {
+ dst_t,
+ 1,
+ float,
+ 1,
+ typename std::enable_if_t<is_8bit_integer_v<dst_t>,
+ void>> {
+ static inline VectorizedN<dst_t, 1> apply(const VectorizedN<float, 1>& src) {
return convert_float_to_int8<dst_t>(src[0]);
}
};
template <typename src_t>
struct VecConvert<
- float,
- 1,
- src_t,
- 1,
- typename std::enable_if_t<
- std::is_same_v<src_t, unsigned char> || std::is_same_v<src_t, signed char>,
- void>> {
- static inline VectorizedN<float, 1> apply(
- const VectorizedN<src_t, 1>& src) {
+ float,
+ 1,
+ src_t,
+ 1,
+ typename std::enable_if_t<is_8bit_integer_v<src_t>,
+ void>> {
+ static inline VectorizedN<float, 1> apply(const VectorizedN<src_t, 1>& src) {
return convert_int8_to_float<src_t>(src[0]);
}
};
diff --git a/aten/src/ATen/cpu/vec/vec_base.h b/aten/src/ATen/cpu/vec/vec_base.h
index e5b9927ed0..d696c97b59 100644
--- a/aten/src/ATen/cpu/vec/vec_base.h
+++ b/aten/src/ATen/cpu/vec/vec_base.h
@@ -90,6 +90,16 @@ struct is_reduced_floating_point:
template <typename T>
constexpr bool is_reduced_floating_point_v = is_reduced_floating_point<T>::value;
+template <typename T>
+struct is_8bit_integer:
+ std::integral_constant<bool,
+ std::is_same_v<T, unsigned char> ||
+ std::is_same_v<T, signed char>> {
+};
+
+template <typename T>
+constexpr bool is_8bit_integer_v = is_8bit_integer<T>::value;
+
template<size_t n> struct int_of_size;
#define DEFINE_INT_OF_SIZE(int_t) \
|
2.41.0
|
68f5212fa7a45722732e0b75d8288694af3edef
|
Sat, 27 Apr 2024 00:19:08 +0000
|
[PATCH 0747/1000] [cpu] [inductor] decompose bmm for memory bound in lowering (#124826)
|
Fixes #124697. Resolves the large regression of GPT-FAST MoE with `coordinate_descent_tuning` disabled. To get better performance in the memory-bound case, we decompose bmm into smaller ops in the lowering. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124826 Approved by: https://github.com/jgong5, https://github.com/jansel
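The decomposition used in the lowering below, written out as a hedged eager-mode equivalence check (the shapes are made up; B, K, N are illustrative):

import torch

B, K, N = 8, 64, 16
mat1 = torch.randn(B, 1, K)  # M == 1: the memory-bound shape targeted here
mat2 = torch.randn(B, K, N)

ref = torch.bmm(mat1, mat2)
# unsqueeze + mul + sum over the K dimension, mirroring the
# L.unsqueeze / L.mul / L.sum_ calls in the lowering
dec = (mat1.unsqueeze(-1) * mat2.unsqueeze(1)).sum(dim=2)

torch.testing.assert_close(ref, dec)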
|
diff --git a/torch/_inductor/kernel/bmm.py b/torch/_inductor/kernel/bmm.py
index f19a54c19f..143c616fcb 100644
--- a/torch/_inductor/kernel/bmm.py
+++ b/torch/_inductor/kernel/bmm.py
@@ -2,8 +2,7 @@ import logging
import torch
-from .. import ir
-from ..lowering import register_lowering
+from .. import ir, lowering as L
from ..select_algorithm import (
autotune_select_algorithm,
ExternKernelChoice,
@@ -97,9 +96,14 @@ aten_bmm = ExternKernelChoice(torch.bmm, "at::bmm_out")
aten_baddbmm = ExternKernelChoice(torch.baddbmm, "at::baddbmm_out")
-@register_lowering(aten.bmm)
+@L.register_lowering(aten.bmm)
def tuned_bmm(mat1, mat2, *, layout=None):
if all(x.get_device().type == "cpu" for x in [mat1, mat2]):
+ # decompose to small ops when memory bound
+ if mat1.get_size()[1] == 1 or mat2.get_size()[2] == 1:
+ mat1 = L.unsqueeze(mat1, -1)
+ mat2 = L.unsqueeze(mat2, 1)
+ return L.sum_(L.mul(mat1, mat2), axis=2)
def is_valid_to_require_contiguous(t):
if not ir.is_storage_and_layout(t):
@@ -157,7 +161,7 @@ def tuned_bmm(mat1, mat2, *, layout=None):
# Don't register this since it is slower than decomposing it
-# @register_lowering(aten.baddbmm)
+# @L.register_lowering(aten.baddbmm)
def tuned_baddbmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
m, n, k, layout, mat1, mat2, inp = mm_args(mat1, mat2, inp, layout=layout)
|
2.41.0
|