| commitId | datetime | subject | comment | diff | gitVersion |
|---|---|---|---|---|---|
0c5113dec79608941db69ae091dfe8893f9a14f
|
Mon, 22 Apr 2024 12:12:42 -0700
|
[PATCH 0504/1000] Add support for capturing tensors with score_mod (#124444)
|
```
import torch
from torch import nn
import torch.nn.functional as F
import torch._inductor.config as config
# torch.set_default_device('cuda')
import torch
from torch.nn.attention._templated_attention import _templated_attention as templated_attention
from triton.testing import do_bench
from torch.nn.attention import SDPBackend, sdpa_kernel

index = torch.ops.aten
torch.manual_seed(0)

B = 16
H = 16
S = 2048
D = 64
head_scale = torch.randn(H, device='cuda')

def alibi(score, batch, head, token_q, token_kv):
    return score + torch.ops.aten.index(head_scale, [head]) * (token_q - token_kv)

bias = torch.randn(H, S, S, dtype=torch.float16, device='cuda')
query = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16)
key = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16)
value = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16)

compiled = torch.compile(templated_attention)
out = compiled(query, key, value, score_mod=alibi)
out2 = templated_attention(query, key, value, score_mod=alibi)
print((out - out2).abs().mean())
assert (out - out2).abs().mean() < 1e-3

print("Flash (no mask): ", do_bench(lambda: F.scaled_dot_product_attention(query, key, value)))
print("Flash (mask): ", do_bench(lambda: F.scaled_dot_product_attention(query, key, value, attn_mask=bias)))
print("flexattention: ", do_bench(lambda: compiled(query, key, value, score_mod=alibi)))
```
<img width="324" alt="image" src="https://github.com/pytorch/pytorch/assets/6355099/18c175d0-2720-4dfd-8747-85b8a8f609f5">
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124444 Approved by: https://github.com/jansel, https://github.com/drisspg
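Editorial aside (not part of the commit message): the capability this commit adds, capturing tensors inside `score_mod`, can also be exercised with a relative-position bias, mirroring the `test_load_rel_bias` case added in the diff below. This is a hedged sketch using the same private `_templated_attention` entry point as the author's script; shapes are illustrative.
```
import torch
from torch.nn.attention._templated_attention import _templated_attention as templated_attention

B, H, S, D = 4, 8, 1024, 64
# Captured buffer: one bias value per relative offset (q - kv) in (-S, S).
rel_bias = torch.randn(2 * S, device="cuda", dtype=torch.float16)

def rel_bias_mod(score, b, h, q, kv):
    # aten.index is used instead of plain rel_bias[...] indexing, following
    # the pattern in the tests added by this commit.
    return score + torch.ops.aten.index(rel_bias, [(q - kv) + S])

q = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16)
k = torch.randn_like(q)
v = torch.randn_like(q)

compiled = torch.compile(templated_attention)
out = compiled(q, k, v, score_mod=rel_bias_mod)
print(out.shape)  # (B, H, S, D)
```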
|
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index b906689af9..4c8043d9bf 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -4,7 +4,7 @@ import functools
from collections import namedtuple
from typing import Callable
-from unittest import expectedFailure, skipUnless
+from unittest import skip, skipUnless
from unittest.mock import patch
import torch
@@ -28,6 +28,8 @@ supported_platform = skipUnless(
Tolerances = namedtuple("Tolerances", ["atol", "rtol"])
torch.set_float32_matmul_precision("high")
+index = torch.ops.aten.index
+
def create_attention(score_mod):
return functools.partial(_templated_attention, score_mod=score_mod)
@@ -39,6 +41,8 @@ test_dtypes = (
else [torch.float16, torch.float32]
)
+test_dtypes_fast = [torch.float16]
+
# TODO float16 was causing ERRORs for tests on ROCm
# See https://github.com/pytorch/pytorch/issues/123531
if common_utils.TEST_WITH_ROCM:
@@ -53,13 +57,19 @@ def _causal_mod(score, b, h, token_q, token_kv):
return torch.where(token_q >= token_kv, score, float("-inf"))
+B = 4
+H = 8
+S = 2048
+D = 64
+
+
class TestTemplatedSDPA(InductorTestCase):
def run_test(self, score_mod: Callable, dtype: torch.dtype = torch.float16):
sdpa_partial = create_attention(score_mod)
compiled_sdpa = torch.compile(sdpa_partial)
- q = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
- k = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
- v = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
+ q = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
+ k = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
+ v = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
golden_out = sdpa_partial(
q.to(torch.float64), k.to(torch.float64), v.to(torch.float64)
)
@@ -147,23 +157,116 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(composed_score_mod, dtype)
- # TODO We are currently not capturing free variables in the closure correctly
- @expectedFailure
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
def test_captured_buffers(self, dtype: torch.dtype):
- head_offset = torch.rand(8, device="cuda", dtype=dtype)
+ head_offset = torch.rand(H, device="cuda", dtype=dtype)
def score_mod(score, b, h, m, n):
- return score + head_offset[h]
+ return score + index(head_offset, [h])
self.run_test(score_mod, dtype)
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_seq_masking(self, dtype):
+ seq_idx = torch.zeros(S, device="cuda", dtype=torch.bool)
+ seq_idx[S // 2 :] = 1
+
+ def seq_mask_mod(score, b, h, q, kv):
+ return torch.where(
+ index(seq_idx, [q]) == index(seq_idx, [kv]), score, float("-inf")
+ )
+
+ self.run_test(seq_mask_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_from_bias_seq_only(self, dtype):
+ bias = torch.randn(S, S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(bias, [q, kv])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_from_bias_seq_batch(self, dtype):
+ bias = torch.randn(B, S, S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(bias, [b, q, kv])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_from_bias_head_seq_batch(self, dtype):
+ bias = torch.randn(B, H, S, S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(bias, [b, h, q, kv])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_rel_bias(self, dtype):
+ rel_bias = torch.randn(2 * S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(rel_bias, [(q - kv) + S])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_dependent_causal_bidirectional(self, dtype):
+ num_bidirectional = torch.randint(0, S, (B,), device="cuda", dtype=torch.int32)
+
+ def bias_mod(score, b, h, q, kv):
+ causal_attention = q >= kv
+ cur_num_bidirectional = index(num_bidirectional, (b,))
+ bidirectional_attention_on_video = (q <= cur_num_bidirectional) & (
+ kv <= cur_num_bidirectional
+ )
+ return torch.where(
+ bidirectional_attention_on_video | causal_attention,
+ score,
+ -float("inf"),
+ )
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @skip("Triton bug ") # https://github.com/pytorch/pytorch/issues/124571
+ @common_utils.parametrize("dtype", test_dtypes)
+ def test_njt_causal(self, dtype):
+ offsets = torch.tensor(
+ [0, 1024, 1024 + 512, S], device="cuda", dtype=torch.int32
+ )
+ seq_idx = torch.zeros(S, device="cuda", dtype=torch.int32)
+ for idx in range(len(offsets) - 1):
+ seq_idx[offsets[idx] : offsets[idx + 1]] = idx
+
+ def create_njt_wrapper(orig_score_mod, offsets, seq_idx):
+ def njt_score_mod(qk, b, h, q, kv):
+ q_nested = q - index(offsets, [index(seq_idx, [q])])
+ kv_nested = kv - index(offsets, [index(seq_idx, [kv])])
+ return orig_score_mod(qk, b, h, q_nested, kv_nested)
+
+ return njt_score_mod
+
+ causal_njt = create_njt_wrapper(_causal_mod, offsets, seq_idx)
+
+ self.run_test(causal_njt, dtype)
+
@supported_platform
def test_backwards_fails(self):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=torch.float32,
device="cuda",
requires_grad=True,
@@ -177,9 +280,9 @@ class TestTemplatedSDPA(InductorTestCase):
@supported_platform
def test_mixed_dtypes_fails(self):
- query = torch.randn((1, 1, 2048, 64), dtype=torch.float32, device="cuda")
- key = torch.randn((1, 1, 2048, 64), dtype=torch.float16, device="cuda")
- value = torch.randn((1, 1, 2048, 64), dtype=torch.float16, device="cuda")
+ query = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
+ key = torch.randn((1, 1, 1024, 64), dtype=torch.float16, device="cuda")
+ value = torch.randn((1, 1, 1024, 64), dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(
ValueError, "Expected query, key, and value to have the same dtype"
):
@@ -201,6 +304,21 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(score_mod)
+ @supported_platform
+ @patch.object(torch._inductor.config, "max_autotune", True)
+ def test_max_autotune_with_captured(self):
+ head_scale = torch.randn(H, device="cuda")
+ batch_scale = torch.randn(B, device="cuda")
+ tok_scale = torch.randn(S, device="cuda")
+
+ def bias_mod(score, batch, head, token_q, token_kv):
+ score = score + index(tok_scale, [token_q])
+ score = score + index(batch_scale, [batch])
+ score = score + index(head_scale, [head])
+ return score
+
+ self.run_test(bias_mod)
+
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
@common_utils.parametrize("score_mod", [_identity_mod, _causal_mod])
@@ -211,7 +329,7 @@ class TestTemplatedSDPA(InductorTestCase):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=dtype,
device="cuda",
requires_grad=True,
@@ -253,7 +371,7 @@ class TestTemplatedSDPA(InductorTestCase):
def test_logsumexp_only_return(self):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=torch.float32,
device="cuda",
requires_grad=True,
@@ -274,7 +392,7 @@ class TestTemplatedSDPA(InductorTestCase):
def test_logsumexp_is_not_fused(self):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=torch.float32,
device="cuda",
requires_grad=True,
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index e0b0233e05..723e2a95cd 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -1433,12 +1433,10 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
) -> "VariableTracker":
from .builder import wrap_fx_proxy
- query, key, value, score_mod, *other_buffers = self.normalize_to_args(
- args, kwargs
- )
+ query, key, value, score_mod = self.normalize_to_args(args, kwargs)
p_args, p_kwargs = self.create_wrapped_node(tx, query, score_mod)
- proxied_args = [query, key, value, *other_buffers]
+ proxied_args = [query, key, value]
# Store the invocation as a call
# Norm_kwargs contains the score_function and we dont want to proxy this because
diff --git a/torch/_higher_order_ops/templated_attention.py b/torch/_higher_order_ops/templated_attention.py
index 09e10754fe..388e741837 100644
--- a/torch/_higher_order_ops/templated_attention.py
+++ b/torch/_higher_order_ops/templated_attention.py
@@ -60,7 +60,7 @@ def math_attention(
"""
assert len(other_buffers) == 0, "Other buffers are not yet supported."
- scores = query @ key.transpose(-2, -1)
+ scores = (query @ key.transpose(-2, -1)).to(dtype=torch.float32)
b = torch.arange(0, scores.size(0), device=scores.device)
h = torch.arange(0, scores.size(1), device=scores.device)
@@ -179,9 +179,11 @@ def templated_attention_functionalize(
assert isinstance(other_buffers_unwrapped, tuple)
assert all(isinstance(item, torch.Tensor) for item in other_buffers_unwrapped)
- example_vals = [torch.zeros((), dtype=query.dtype)] + [
- torch.zeros((), dtype=torch.int) for _ in range(4)
- ]
+ example_vals = (
+ [torch.zeros((), dtype=query.dtype)]
+ + [torch.zeros((), dtype=torch.int) for _ in range(4)]
+ + list(other_buffers_unwrapped)
+ )
with ctx.redispatch_to_next() as m:
functional_score_mod = ctx.functionalize(score_mod)
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index 4950f5e802..152621453c 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -3413,22 +3413,14 @@ class TritonScheduling(BaseScheduling):
buffer_names.update(node.used_buffer_names())
# Get buffers objects
+
def _get_buffer(name: str) -> Union[ir.Buffer, ir.TensorBox]:
- if name in V.graph.name_to_buffer:
- return V.graph.name_to_buffer[name]
- elif name in V.graph.graph_inputs:
- return V.graph.graph_inputs[name]
- elif name in V.graph.constants:
- data = V.graph.constants[name]
- return ir.ConstantBuffer(
- name,
- ir.FixedLayout(
- data.device, data.dtype, *V.graph.static_sizes_strides(data)
- ),
- )
- raise RuntimeError(f"Failed to find buffer matching name {name}")
+ buf = V.graph.get_buffer(name)
+ if buf is None:
+ raise RuntimeError(f"Failed to find buffer matching name {name}")
+ return buf
- buffers = [_get_buffer(name) for name in buffer_names]
+ buffers = [V.graph.get_buffer(name) for name in buffer_names]
# In theory we can separately check xnumel and rnumel are <= int_max
# but some indexers do use the full linear index so we need to be
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index 97e1683120..a160055ee1 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -660,6 +660,14 @@ class GraphLowering(torch.fx.Interpreter):
return self.name_to_buffer[buffer_name]
if buffer_name in self.graph_inputs:
return self.graph_inputs[buffer_name]
+ if buffer_name in self.constants:
+ data = V.graph.constants[buffer_name]
+ return ir.ConstantBuffer(
+ buffer_name,
+ ir.FixedLayout(
+ data.device, data.dtype, *V.graph.static_sizes_strides(data)
+ ),
+ )
return None
def get_dtype(self, buffer_name: str):
diff --git a/torch/_inductor/kernel/templated_attention.py b/torch/_inductor/kernel/templated_attention.py
index 7942a367e2..4c59036fbb 100644
--- a/torch/_inductor/kernel/templated_attention.py
+++ b/torch/_inductor/kernel/templated_attention.py
@@ -3,6 +3,7 @@ import logging
from typing import Any, List
import torch
+from .. import config
from ..lowering import empty_strided, lowerings, register_lowering
from ..select_algorithm import autotune_select_algorithm, TritonTemplate
@@ -114,12 +115,14 @@ sdpa_template = TritonTemplate(
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk = tl.dot(q, k.to(MATMUL_PRECISION), acc=qk)
# ~~~~~~~~~~~~~~~~~~~ Apply score modification ~~~~~~~~~~~~~~~~~~~
+ m = offs_m[:, None]
+ n = start_n + offs_n[None, :]
{{ modification(
score="qk",
b="off_hz // H",
h="off_hz % H",
- m="offs_m[:, None]",
- n="start_n + offs_n[None, :]",
+ m="m",
+ n="n",
out="qk"
) | indent_except_first(2) }}
# TODO: In the case that score_mod is linear, this can be LICMed
@@ -170,7 +173,8 @@ sdpa_template = TritonTemplate(
)
-@register_lowering(torch.ops.higher_order.templated_attention)
+# TODO: We probably also need a layout constraint?
+@register_lowering(torch.ops.higher_order.templated_attention, type_promotion_kind=None)
def templated_attention(*args, **kwargs):
from torch._prims_common import make_contiguous_strides_for
from ..ir import (
@@ -182,7 +186,7 @@ def templated_attention(*args, **kwargs):
TensorBox,
)
- query, key, value, subgraph = args
+ query, key, value, subgraph, *other_buffers = args
def create_placeholder(name: str, dtype: torch.dtype) -> InputBuffer:
return TensorBox.create(
@@ -272,17 +276,22 @@ def templated_attention(*args, **kwargs):
configs: List[Any] = []
if query.get_dtype() == torch.float32:
configs.append((64, 64, 4, 3))
- configs += [
- (128, 64, 4, 3),
- (128, 128, 4, 3),
- (128, 128, 8, 2),
- (64, 128, 4, 3),
- ]
-
+ else:
+ configs.append((128, 64, 4, 3))
+ if config.max_autotune:
+ configs += [
+ (128, 64, 4, 3),
+ (128, 128, 4, 3),
+ (128, 128, 8, 2),
+ (64, 128, 4, 3),
+ ]
+ # Note, we don't need to pass in the captured buffers explicitly
+ # because they're implicitly added by the score_mod function
+ # We do need to explicitly pass it in for autotuning though.
for BLOCK_M, BLOCK_N, num_warps, num_stages in configs:
sdpa_template.maybe_append_choice(
choices=choices,
- input_nodes=(query, key, value, logsumexp),
+ input_nodes=[query, key, value, logsumexp],
layout=layout,
subgraphs=subgraph_buffer,
mutated_inputs=[
@@ -298,9 +307,10 @@ def templated_attention(*args, **kwargs):
ROWS_GUARANTEED_SAFE=False,
OUTPUT_LOGSUMEXP=True,
)
+ inputs_for_autotuning = [query, key, value, logsumexp] + list(other_buffers)
return (
autotune_select_algorithm(
- "sdpa", choices, [query, key, value, logsumexp], layout
+ "sdpa", choices, inputs_for_autotuning, layout
),
logsumexp,
)
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index 5360c41765..f648076e4a 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -36,7 +36,14 @@ from .codegen.triton_utils import config_of, signature_to_meta
from .exc import CUDACompileError
from .ir import ChoiceCaller, PrimitiveInfoType
from .runtime.runtime_utils import do_bench
-from .utils import get_dtype_size, Placeholder, sympy_dot, sympy_product, unique
+from .utils import (
+ get_dtype_size,
+ Placeholder,
+ sympy_dot,
+ sympy_index_symbol,
+ sympy_product,
+ unique,
+)
from .virtualized import V
log = logging.getLogger(__name__)
@@ -269,20 +276,23 @@ class TritonTemplateKernel(TritonKernel):
potential multiple modifications
"""
+ def add_input(name):
+ return self.args.input(name)
+
class PlaceholderSubstitution(V.WrapperHandler): # type: ignore[name-defined]
self.name = "PlaceholderSubstitution"
def load(self, name: str, index: sympy.Expr):
if name not in fixed_inputs:
- raise AssertionError(
- f"All loads should be coming from fixed inputs - {name}"
- )
+ # If it's not a fixed input, it's a load from a captured
+ # tensor
+ var = add_input(name)
+ return f"tl.load({var} + {index})"
+
return f"({fixed_inputs[name]})"
- # TODO Doesn't work yet
def indirect_indexing(self, index_var, size, check):
- return self._inner.indirect_indexing(index_var, size, False)
- # return sympy_symbol(str(index_var))
+ return sympy_index_symbol(str(index_var))
# if self.modification_cache is None:
with V.set_ops_handler(PlaceholderSubstitution(V.ops)):
@@ -589,16 +599,25 @@ class TritonTemplate(KernelTemplate):
+ "-"
)
mod = PyCodeCache.load(code, extra)
- _, call_args, _ = kernel.args.python_argdefs()
- expected_args = list(unique(x.get_name() for x in input_nodes))
- expected_args.extend([fake_out.get_name()])
- assert list(call_args)[: len(expected_args)] == expected_args, (
- call_args,
- expected_args,
+ input_call_args = tuple(kernel.args.input_buffers.keys())
+ output_call_args = tuple(kernel.args.output_buffers.keys())
+
+ # We expect the input_buffer order to be [*input_nodes, *captured_buffers]
+ expected_input_args = tuple(unique(x.get_name() for x in input_nodes))
+ expected_output_args = (fake_out.get_name(),)
+ assert input_call_args[: len(expected_input_args)] == expected_input_args, (
+ input_call_args,
+ expected_input_args,
+ )
+ assert output_call_args == expected_output_args, (
+ output_call_args,
+ expected_output_args,
)
+
+ full_input_nodes = tuple([V.graph.get_buffer(k) for k in input_call_args])
extra_args = V.graph.sizevars.size_hints(
- map(sympy.expand, call_args[len(expected_args) :]),
+ map(sympy.expand, tuple(kernel.args.sizevars.keys())),
fallback=config.unbacked_symint_fallback,
)
@@ -636,13 +655,13 @@ class TritonTemplate(KernelTemplate):
num_stages=num_stages,
num_warps=num_warps,
matrix_instr_nonkdim=kwargs.get("matrix_instr_nonkdim", 0),
- input_tensor_meta=TensorMeta.from_irnodes(input_nodes),
+ input_tensor_meta=TensorMeta.from_irnodes(full_input_nodes),
output_tensor_meta=TensorMeta.from_irnodes(layout),
)
return TritonTemplateCaller(
kernel_hash_name,
- input_nodes,
+ full_input_nodes,
layout,
make_kernel_render,
extra.strip("-").replace("-", ", "),
|
2.41.0
|
dcd968b51f8a9f8faba806fdd91d34210e672b2
|
Tue, 23 Apr 2024 06:26:11 +0000
|
[PATCH 0505/1000] Add out wrappers to some decompositions (#115437)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/115437 Approved by: https://github.com/lezcano
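Editorial note (not from the commit): to illustrate what the added `out=` wrappers enable, here is a hedged sketch of calling one of the affected ops with a preallocated output, using the same `torch._C._nn.upsample_nearest2d(..., out=...)` call exercised in the `test_torch.py` change below. Shapes are illustrative.
```
import torch

x = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
out = torch.empty(1, 1, 8, 8, dtype=torch.float32)

# The .out overload now also has a Python decomposition registered (see the
# decompositions.py diff), which tracing-based backends (meta, inductor) can
# use; eager CPU execution still hits the native kernel.
torch._C._nn.upsample_nearest2d(x, (8, 8), out=out)
print(out.shape)  # torch.Size([1, 1, 8, 8])
```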
|
diff --git a/test/expect/HasDecompTest.test_aten_core_operators.expect b/test/expect/HasDecompTest.test_aten_core_operators.expect
index b330aa787c..dc3d8cc389 100644
--- a/test/expect/HasDecompTest.test_aten_core_operators.expect
+++ b/test/expect/HasDecompTest.test_aten_core_operators.expect
@@ -22,6 +22,9 @@ aten::_softmax
aten::_softmax.out
aten::_to_copy
aten::_to_copy.out
+aten::_upsample_nearest_exact1d.out
+aten::_upsample_nearest_exact2d.out
+aten::_upsample_nearest_exact3d.out
aten::abs
aten::abs.out
aten::abs_
@@ -508,6 +511,10 @@ aten::uniform.out
aten::uniform_
aten::unsqueeze
aten::upsample_bicubic2d
+aten::upsample_bicubic2d.out
+aten::upsample_nearest1d.out
+aten::upsample_nearest2d.out
+aten::upsample_nearest3d.out
aten::var.correction
aten::var.correction_out
aten::var_mean.correction
diff --git a/test/expect/HasDecompTest.test_has_decomposition.expect b/test/expect/HasDecompTest.test_has_decomposition.expect
index 8fbdc431f4..2fc26d1a32 100644
--- a/test/expect/HasDecompTest.test_has_decomposition.expect
+++ b/test/expect/HasDecompTest.test_has_decomposition.expect
@@ -609,13 +609,10 @@ aten::_upsample_bilinear2d_aa
aten::_upsample_bilinear2d_aa.out
aten::_upsample_bilinear2d_aa_backward
aten::_upsample_bilinear2d_aa_backward.grad_input
-aten::_upsample_nearest_exact1d.out
aten::_upsample_nearest_exact1d_backward
aten::_upsample_nearest_exact1d_backward.grad_input
-aten::_upsample_nearest_exact2d.out
aten::_upsample_nearest_exact2d_backward
aten::_upsample_nearest_exact2d_backward.grad_input
-aten::_upsample_nearest_exact3d.out
aten::_upsample_nearest_exact3d_backward
aten::_upsample_nearest_exact3d_backward.grad_input
aten::_use_cudnn_ctc_loss
@@ -1331,20 +1328,16 @@ aten::unsafe_split_with_sizes.out
aten::unsqueeze_
aten::unsqueeze_copy
aten::unsqueeze_copy.out
-aten::upsample_bicubic2d.out
aten::upsample_bicubic2d_backward
aten::upsample_bicubic2d_backward.grad_input
aten::upsample_bilinear2d_backward
aten::upsample_bilinear2d_backward.grad_input
aten::upsample_linear1d_backward
aten::upsample_linear1d_backward.grad_input
-aten::upsample_nearest1d.out
aten::upsample_nearest1d_backward
aten::upsample_nearest1d_backward.grad_input
-aten::upsample_nearest2d.out
aten::upsample_nearest2d_backward
aten::upsample_nearest2d_backward.grad_input
-aten::upsample_nearest3d.out
aten::upsample_nearest3d_backward
aten::upsample_nearest3d_backward.grad_input
aten::upsample_trilinear3d_backward
diff --git a/test/test_torch.py b/test/test_torch.py
index 735a4f447a..25d1cc14ed 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -8847,7 +8847,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
out = torch.empty(4, 3, 16, 16, device='meta', dtype=torch.double)
self.assertExpectedRaisesInline(
RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
- """Expected out tensor to have dtype float, but got double instead"""
+ """Expected out tensor to have dtype torch.float32 but got torch.float64 instead"""
)
# Complain if out device mismatch
@@ -8857,7 +8857,7 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
if not TEST_WITH_TORCHINDUCTOR:
self.assertExpectedRaisesInline(
RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out),
- """Expected out tensor to have device meta, but got cpu instead"""
+ """Attempting to copy from device meta to device cpu, but cross-device copies are not allowed!"""
)
def test_add_meta_scalar(self):
diff --git a/torch/_decomp/decompositions.py b/torch/_decomp/decompositions.py
index 3ef43ad4b1..3b69cc5b91 100644
--- a/torch/_decomp/decompositions.py
+++ b/torch/_decomp/decompositions.py
@@ -2720,9 +2720,10 @@ def _compute_upsample_nearest_indices(input, output_size, scales, exact=False):
return indices
-@register_decomposition(aten.upsample_nearest1d.default)
+@register_decomposition([aten.upsample_nearest1d.default, aten.upsample_nearest1d.out])
@aten.upsample_nearest1d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_nearest1d.default.py_impl(DispatchKey.Autograd)
+@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def upsample_nearest1d(
input: Tensor,
output_size: List[int],
@@ -2731,9 +2732,12 @@ def upsample_nearest1d(
return _upsample_nearest(input, output_size, [scales])
-@register_decomposition(aten._upsample_nearest_exact1d.default)
+@register_decomposition(
+ [aten._upsample_nearest_exact1d.default, aten._upsample_nearest_exact1d.out]
+)
@aten._upsample_nearest_exact1d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_nearest_exact1d.default.py_impl(DispatchKey.Autograd)
+@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def upsample_nearest_exact1d(
input: Tensor,
output_size: List[int],
@@ -2742,9 +2746,10 @@ def upsample_nearest_exact1d(
return _upsample_nearest(input, output_size, [scales], exact=True)
-@register_decomposition(aten.upsample_nearest2d.default)
+@register_decomposition([aten.upsample_nearest2d.default, aten.upsample_nearest2d.out])
@aten.upsample_nearest2d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_nearest2d.default.py_impl(DispatchKey.Autograd)
+@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def upsample_nearest2d(
input: Tensor,
output_size: List[int],
@@ -2754,9 +2759,12 @@ def upsample_nearest2d(
return _upsample_nearest(input, output_size, [scales_h, scales_w])
-@register_decomposition(aten._upsample_nearest_exact2d.default)
+@register_decomposition(
+ [aten._upsample_nearest_exact2d.default, aten._upsample_nearest_exact2d.out]
+)
@aten._upsample_nearest_exact2d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_nearest_exact2d.default.py_impl(DispatchKey.Autograd)
+@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def _upsample_nearest_exact2d(
input: Tensor,
output_size: List[int],
@@ -2766,9 +2774,10 @@ def _upsample_nearest_exact2d(
return _upsample_nearest(input, output_size, [scales_h, scales_w], exact=True)
-@register_decomposition(aten.upsample_nearest3d.default)
+@register_decomposition([aten.upsample_nearest3d.default, aten.upsample_nearest3d.out])
@aten.upsample_nearest3d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten.upsample_nearest3d.default.py_impl(DispatchKey.Autograd)
+@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def upsample_nearest3d(
input: Tensor,
output_size: List[int],
@@ -2779,9 +2788,12 @@ def upsample_nearest3d(
return _upsample_nearest(input, output_size, [scales_d, scales_h, scales_w])
-@register_decomposition(aten._upsample_nearest_exact3d.default)
+@register_decomposition(
+ [aten._upsample_nearest_exact3d.default, aten._upsample_nearest_exact3d.out]
+)
@aten._upsample_nearest_exact3d.default.py_impl(DispatchKey.CompositeImplicitAutograd)
@aten._upsample_nearest_exact3d.default.py_impl(DispatchKey.Autograd)
+@out_wrapper(preserve_memory_format=True, exact_dtype=True)
def _upsample_nearest_exact3d(
input: Tensor,
output_size: List[int],
@@ -4251,8 +4263,9 @@ def matmul(tensor1, tensor2, *, is_out=False):
torch._check(False, lambda: "both arguments to matmul need to be at least 1D")
-@register_decomposition(aten.upsample_bicubic2d.default)
+@register_decomposition([aten.upsample_bicubic2d.default, aten.upsample_bicubic2d.out])
@aten.upsample_bicubic2d.default.py_impl(DispatchKey.Autograd)
+@out_wrapper()
@pw_cast_for_opmath
def upsample_bicubic2d_default(
input: Tensor,
diff --git a/torch/_prims_common/wrappers.py b/torch/_prims_common/wrappers.py
index 8b7515bbca..9057edc875 100644
--- a/torch/_prims_common/wrappers.py
+++ b/torch/_prims_common/wrappers.py
@@ -170,9 +170,13 @@ def _resize_output_check(out: TensorLikeType, shape: ShapeType):
# TODO: handle tuples of tensors
-def _maybe_resize_out(out: TensorLikeType, shape: ShapeType):
+def _maybe_resize_out(
+ out: TensorLikeType,
+ shape: ShapeType,
+ memory_format: Optional[torch.memory_format] = None,
+):
if _resize_output_check(out, shape):
- return out.resize_(shape)
+ return out.resize_(shape, memory_format=memory_format)
else:
return out
@@ -205,7 +209,12 @@ def _safe_copy_out(
return copy_to.copy_(copy_from)
-def out_wrapper(*out_names: str, exact_dtype: bool = False, pass_is_out: bool = False):
+def out_wrapper(
+ *out_names: str,
+ exact_dtype: bool = False,
+ pass_is_out: bool = False,
+ preserve_memory_format=False,
+):
# The wrapped function needs to convert the output parameters to ensure
# compatibility between the Python API (which always uses "out" as the
# parameter name and may be a tuple) and the Aten API (which may have
@@ -219,6 +228,9 @@ def out_wrapper(*out_names: str, exact_dtype: bool = False, pass_is_out: bool =
is_tensor = len(out_names) == 1
+ def maybe_compute_memory_format(t):
+ return utils.suggest_memory_format(t) if preserve_memory_format else None
+
def _out_wrapper(fn: Callable) -> Callable:
"""
Adds the out parameter to a Python reference.
@@ -277,7 +289,9 @@ def out_wrapper(*out_names: str, exact_dtype: bool = False, pass_is_out: bool =
if is_tensor:
assert isinstance(out, TensorLike)
# These two operations are done in-place
- _maybe_resize_out(out, result.shape)
+ _maybe_resize_out(
+ out, result.shape, maybe_compute_memory_format(result)
+ )
_safe_copy_out(copy_from=result, copy_to=out, exact_dtype=exact_dtype) # type: ignore[arg-type]
else:
assert isinstance(out, Tuple) # type: ignore[arg-type]
@@ -287,7 +301,7 @@ def out_wrapper(*out_names: str, exact_dtype: bool = False, pass_is_out: bool =
)
for r, o in zip(result, out):
# These two operations are done in-place
- _maybe_resize_out(o, r.shape)
+ _maybe_resize_out(o, r.shape, maybe_compute_memory_format(r))
_safe_copy_out(copy_from=r, copy_to=o, exact_dtype=exact_dtype) # type: ignore[arg-type]
else:
out = result
|
2.41.0
|
a5b4d2403e600cedfce3b51f46055a43f87f40d
|
Mon, 22 Apr 2024 18:44:32 +0000
|
[PATCH 0506/1000] Do not forward parent's value range to CSE variable for variables created within codegen. (#123099)
|
Consider that we are generating code for `ops.gt`, and within it we call `ops.to_dtype`. Before, we would forward the bounds from `gt` to the result of `to_dtype`, which is wrong. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123099 Approved by: https://github.com/jgong5, https://github.com/peterbell10
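Editorial illustration (pure Python, not Inductor code): the fix only reuses the FX node's value range when the op being lowered is that node's own target, which the toy sketch below mimics; all names here are made up for illustration.
```
# Toy model of the fix: bounds recorded for an FX node (e.g. `gt`, range [0, 1])
# must not leak onto helper ops (e.g. `to_dtype`) emitted while lowering it.
UNKNOWN = ("-inf", "+inf")

node_to_bounds = {"gt_node": (0, 1)}            # bounds computed for the fx node `gt`
current_node = {"name": "gt_node", "target": "gt"}

def cse_bounds(op_name):
    # Old behaviour: always forward current_node's bounds, so `to_dtype`
    # wrongly inherited [0, 1]. New behaviour: forward only when the op
    # being generated is the node's own target.
    if current_node and current_node["target"] == op_name:
        return node_to_bounds.get(current_node["name"], UNKNOWN)
    return UNKNOWN

print(cse_bounds("gt"))        # (0, 1): the node's own op keeps its range
print(cse_bounds("to_dtype"))  # UNKNOWN: helper op no longer inherits [0, 1]
```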
|
diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index 19b504d93d..674c131d42 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -1456,8 +1456,9 @@ class Kernel(CodeGen):
def inner(*args, **kwargs):
# TritonTemplateKernel has no current_node
buf_bounds = ValueRanges.unknown()
- if hasattr(V.interpreter, "current_node"):
- fx_node = V.interpreter.current_node
+ if (
+ fx_node := getattr(V.interpreter, "current_node", None)
+ ) and fx_node.target == name:
assert isinstance(self.node_to_bounds, dict)
buf_bounds = self.node_to_bounds.get(
fx_node, ValueRanges.unknown()
|
2.41.0
|
b6e354ecd0fbbbf2999cef19dc1947cdc9ee4a4
|
Mon, 22 Apr 2024 15:10:12 -0700
|
[PATCH 0507/1000] [DDP][PT2D] Fix some tracing bugs of DDP (#124421)
|
1. We need to clear the cache of get_legacy_mod_inlinelist to ensure the newly added rule will be captured.
2. Don't add the hook if the parameter does not require gradient.

Differential Revision: [D56315534](https://our.internmc.facebook.com/intern/diff/D56315534/)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124421 Approved by: https://github.com/yf225
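Editorial illustration of point (1): the diff calls `.cache_clear()`, which suggests `get_legacy_mod_inlinelist` is an `lru_cache`-style function, so mutating the underlying set after the first call has no effect until the cache is cleared. A generic, self-contained sketch of that pitfall (the names below are illustrative, not Dynamo's real data structures):
```
import functools

LEGACY_MOD_INLINELIST = {"torch.nn.parallel"}

@functools.lru_cache(maxsize=None)
def get_legacy_mod_inlinelist():
    # The result is a snapshot of the set at first call; later mutations are
    # invisible until the cache is cleared.
    return frozenset(LEGACY_MOD_INLINELIST)

print(get_legacy_mod_inlinelist())          # frozenset({'torch.nn.parallel'})
LEGACY_MOD_INLINELIST.add("torch.nn.parallel.distributed")
print(get_legacy_mod_inlinelist())          # stale: new entry is missing
get_legacy_mod_inlinelist.cache_clear()     # the fix: invalidate the cache
print(get_legacy_mod_inlinelist())          # now includes the new rule
```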
|
diff --git a/torch/nn/parallel/distributed.py b/torch/nn/parallel/distributed.py
index 11a2c78959..a179a592d0 100644
--- a/torch/nn/parallel/distributed.py
+++ b/torch/nn/parallel/distributed.py
@@ -894,6 +894,7 @@ class DistributedDataParallel(Module, Joinable):
torch._dynamo.trace_rules.LEGACY_MOD_INLINELIST.add(
"torch.nn.parallel.distributed"
)
+ torch._dynamo.trace_rules.get_legacy_mod_inlinelist.cache_clear()
self._force_to_disable_cpp_reducer = (
optimize_ddp == "python_reducer_without_compiled_forward"
)
@@ -926,6 +927,8 @@ class DistributedDataParallel(Module, Joinable):
param.grad.copy_(gradient)
for index, param in enumerate(self._module_parameters):
+ if not param.requires_grad:
+ continue
self._accum_grad_hooks.append(
param.register_post_accumulate_grad_hook(
functools.partial(
|
2.41.0
|
efb980e0794eaaab551861cede82ef7032697d4
|
Tue, 23 Apr 2024 06:59:43 +0000
|
[PATCH 0508/1000] [BE] Update older scipy used in CI to 1.8.1 (#124675)
|
Older SciPy versions are affected by CVE-2023-29824. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124675 Approved by: https://github.com/kit1980
|
diff --git a/.ci/docker/requirements-ci.txt b/.ci/docker/requirements-ci.txt
index fdd2fe25c9..ff9e8ccba2 100644
--- a/.ci/docker/requirements-ci.txt
+++ b/.ci/docker/requirements-ci.txt
@@ -228,8 +228,7 @@ scikit-image==0.20.0 ; python_version >= "3.10"
#Pinned versions: 0.20.3
#test that import:
-scipy==1.6.3 ; python_version < "3.10"
-scipy==1.8.1 ; python_version == "3.10"
+scipy==1.8.1 ; python_version <= "3.10"
scipy==1.10.1 ; python_version == "3.11"
scipy==1.12.0 ; python_version == "3.12"
# Pin SciPy because of failing distribution tests (see #60347)
|
2.41.0
|
c964ad1cace0e553eb46c8d2654b5591bf05701
|
Mon, 22 Apr 2024 21:36:36 +0800
|
[PATCH 0509/1000] add fused_sgd_kernel support for CPU device (#123629)
|
Add fused_sgd_kernel support for the CPU device.

## Bench result:
32 cores/sockets ICX. Test scripts:
https://gist.github.com/zhuhaozhe/688763e17e93e4c5e12f25f676ec90d9
https://gist.github.com/zhuhaozhe/ad9938694bc7fae8b66d376f4dffc6c9
```
Tensor Size: 262144, Num Tensor 4, Num Threads: 1
_single_tensor_sgd time: 0.2301 seconds
_fused_sgd time: 0.0925 seconds
Tensor Size: 4194304, Num Tensor 32, Num Threads: 32
_single_tensor_sgd time: 2.6195 seconds
_fused_sgd time: 1.7543 seconds
```
## Test Plan:
```
python test_optim.py -k test_fused_matches_forloop
python test_optim.py -k test_fused_large_tensor
python test_optim.py -k test_can_load_older_state_dict
python test_optim.py -k test_grad_scaling_autocast_fused_optimizers
python test_torch.py -k test_grad_scaling_autocast_fused
python test_torch.py -k test_params_invalidated_with_grads_invalidated_between_unscale_and_step
```
There are already some PRs under issue https://github.com/pytorch/pytorch/issues/123451 to unify the UTs, so the UTs are not modified in this PR.
Co-authored-by: Jane Xu <janeyx@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123629 Approved by: https://github.com/jgong5, https://github.com/janeyx99
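Editorial usage sketch (not from the commit): with this change, `fused=True` should be accepted by `torch.optim.SGD` for CPU floating-point parameters, since `_get_fused_kernels_supported_devices()` now includes `"cpu"` (see the diff). A minimal example under that assumption:
```
import torch

model = torch.nn.Linear(64, 64)   # CPU float32 parameters
opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, fused=True)

x = torch.randn(8, 64)
loss = model(x).sum()
loss.backward()
opt.step()        # should dispatch to _fused_sgd_kernel_cpu_ per native_functions.yaml
opt.zero_grad()
```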
|
diff --git a/aten/src/ATen/native/FusedSGD.cpp b/aten/src/ATen/native/FusedSGD.cpp
new file mode 100644
index 0000000000..56e2e91759
--- /dev/null
+++ b/aten/src/ATen/native/FusedSGD.cpp
@@ -0,0 +1,86 @@
+#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
+#include <ATen/core/Tensor.h>
+#include <ATen/native/DispatchStub.h>
+#include <ATen/native/FusedSGD.h>
+
+#ifndef AT_PER_OPERATOR_HEADERS
+#include <ATen/Functions.h>
+#include <ATen/NativeFunctions.h>
+#else
+#include <ATen/ops/_fused_sgd.h>
+#include <ATen/ops/_fused_sgd_native.h>
+#endif
+namespace at {
+
+namespace native {
+
+
+void _fused_sgd_kernel_cpu_(
+ at::TensorList params,
+ at::TensorList grads,
+ at::TensorList momentum_buffer_list,
+ const double weight_decay,
+ const double momentum,
+ const double lr,
+ const double dampening,
+ const bool nesterov,
+ const bool maximize,
+ const bool is_first_step,
+ const c10::optional<at::Tensor>& grad_scale,
+ const c10::optional<at::Tensor>& found_inf) {
+ const float* grad_scale_ptr =
+ grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
+ const float* found_inf_ptr =
+ found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
+ if (found_inf_ptr && *found_inf_ptr == 1.0) {
+ return;
+ }
+ size_t n_tensors = params.size();
+ TORCH_CHECK(grads.size() == n_tensors);
+ bool no_momentum_buffer = momentum == 0.0;
+ if (no_momentum_buffer) {
+ TORCH_CHECK(momentum_buffer_list.size() == 0);
+ } else {
+ TORCH_CHECK(momentum_buffer_list.size() == n_tensors);
+ }
+ for (size_t i = 0; i < n_tensors; i++){
+ fused_sgd_stub(
+ kCPU,
+ params[i],
+ grads[i],
+ no_momentum_buffer ? Tensor() : momentum_buffer_list[i],
+ weight_decay,
+ momentum,
+ lr,
+ dampening,
+ nesterov,
+ maximize,
+ is_first_step,
+ grad_scale_ptr);
+ }
+}
+
+void _fused_sgd_kernel_cpu_(
+ at::TensorList params,
+ at::TensorList grads,
+ at::TensorList momentum_buffer_list,
+ const double weight_decay,
+ const double momentum,
+ const at::Tensor& lr,
+ const double dampening,
+ const bool nesterov,
+ const bool maximize,
+ const bool is_first_step,
+ const c10::optional<at::Tensor>& grad_scale,
+ const c10::optional<at::Tensor>& found_inf) {
+ _fused_sgd_kernel_cpu_(
+ params, grads, momentum_buffer_list, weight_decay,
+ momentum, lr.item<double>(), dampening, nesterov,
+ maximize, is_first_step, grad_scale, found_inf
+ );
+}
+
+DEFINE_DISPATCH(fused_sgd_stub);
+
+}
+}
diff --git a/aten/src/ATen/native/FusedSGD.h b/aten/src/ATen/native/FusedSGD.h
new file mode 100644
index 0000000000..62cd3c8aef
--- /dev/null
+++ b/aten/src/ATen/native/FusedSGD.h
@@ -0,0 +1,24 @@
+#include <ATen/core/Tensor.h>
+#include <ATen/native/DispatchStub.h>
+
+namespace at {
+
+namespace native {
+
+using fused_sgd_fn = void (*)(
+ const at::Tensor& param,
+ const at::Tensor& grad,
+ const at::Tensor& momentum_buffer,
+ const double weight_decay,
+ const double momentum,
+ const double lr,
+ const double dampening,
+ const bool nesterov,
+ const bool maximize,
+ const bool is_first_step,
+ const float* grad_scale_ptr);
+
+DECLARE_DISPATCH(fused_sgd_fn, fused_sgd_stub);
+
+}
+}
diff --git a/aten/src/ATen/native/cpu/FusedSGDKernel.cpp b/aten/src/ATen/native/cpu/FusedSGDKernel.cpp
new file mode 100644
index 0000000000..3383585675
--- /dev/null
+++ b/aten/src/ATen/native/cpu/FusedSGDKernel.cpp
@@ -0,0 +1,279 @@
+#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
+#include <ATen/core/Tensor.h>
+#include <ATen/Parallel.h>
+#include <ATen/OpMathType.h>
+#include <ATen/native/DispatchStub.h>
+#include <ATen/native/FusedSGD.h>
+#include <ATen/Dispatch.h>
+#include <ATen/cpu/vec/vec.h>
+#include <ATen/cpu/vec/functional.h>
+namespace at::native {
+
+namespace{
+
+template <typename scalar_t, typename opmath_t>
+typename std::enable_if<
+ std::is_same<scalar_t, Half>::value || std::is_same<scalar_t, BFloat16>::value,
+ void>::
+ type inline sgd_math(
+ scalar_t* param_ptr,
+ scalar_t* grad_ptr,
+ scalar_t* momentum_buf_ptr,
+ const double weight_decay,
+ const double momentum,
+ const double lr,
+ const double dampening,
+ const bool nesterov,
+ const bool maximize,
+ const bool is_first_step,
+ const float* grad_scale_ptr,
+ int64_t size
+){
+ using lpVec = at::vec::Vectorized<scalar_t>;
+ using fVec = at::vec::Vectorized<opmath_t>;
+ lpVec grad_vec_to_store;
+ fVec param_vec1, param_vec2;
+ fVec grad_vec1, grad_vec2;
+ fVec momentum_buffer_vec1, momentum_buffer_vec2;
+ int64_t d = 0;
+ for (; d < size - (size % lpVec::size()); d += lpVec::size()) {
+ lpVec param_lpvec = lpVec::loadu(param_ptr + d);
+ std::tie(param_vec1, param_vec2) = vec::convert_to_float<scalar_t>(param_lpvec);
+ lpVec grad_lpvec = lpVec::loadu(grad_ptr + d);
+ std::tie(grad_vec1, grad_vec2) = vec::convert_to_float<scalar_t>(grad_lpvec);
+ if (grad_scale_ptr) {
+ grad_vec1 = grad_vec1 / fVec(float(*grad_scale_ptr));
+ grad_vec2 = grad_vec2 / fVec(float(*grad_scale_ptr));
+ grad_vec_to_store = vec::convert_from_float<scalar_t>(grad_vec1, grad_vec2);
+ grad_vec_to_store.store(grad_ptr + d);
+ }
+ if (maximize){
+ grad_vec1 = grad_vec1 * fVec(opmath_t(-1.0));
+ grad_vec2 = grad_vec2 * fVec(opmath_t(-1.0));
+ }
+ if (weight_decay != 0.0){
+ grad_vec1 += param_vec1 * fVec(scalar_t(weight_decay));
+ grad_vec2 += param_vec2 * fVec(scalar_t(weight_decay));
+ }
+ if (momentum != 0.0) {
+ fVec momentum_vec1, momentum_vec2;
+ if (is_first_step) {
+ momentum_vec1 = grad_vec1;
+ momentum_vec2 = grad_vec2;
+ } else {
+ momentum_vec1 =
+ fVec::loadu(momentum_buf_ptr + d) * fVec(scalar_t(momentum)) +
+ grad_vec1 * fVec(scalar_t(1 - dampening));
+ momentum_vec2 =
+ fVec::loadu(momentum_buf_ptr + d + fVec::size()) * fVec(scalar_t(momentum)) +
+ grad_vec2 * fVec(scalar_t(1 - dampening));
+ }
+ vec::convert_from_float<scalar_t>(momentum_vec1, momentum_vec2).store(momentum_buf_ptr + d);;
+ if (nesterov) {
+ grad_vec1 += momentum_vec1 * fVec(scalar_t(momentum));
+ grad_vec2 += momentum_vec2 * fVec(scalar_t(momentum));
+ } else {
+ grad_vec1 = momentum_vec1;
+ grad_vec2 = momentum_vec2;
+ }
+ }
+ }
+ scalar_t grad_val_to_store;
+ for (; d < size; d++) {
+ opmath_t grad_val = grad_ptr[d];
+ opmath_t param_val = param_ptr[d];
+ if (grad_scale_ptr) {
+ grad_val = grad_ptr[d] / opmath_t(*grad_scale_ptr);
+ grad_val_to_store = grad_val;
+ grad_ptr[d] = grad_val_to_store;
+ }
+ if (maximize) grad_val = -grad_val;
+ if (weight_decay != 0.0){
+ grad_val += param_val * opmath_t(weight_decay);
+ }
+ if (momentum != 0.0) {
+ opmath_t momentum_buf_var = momentum_buf_ptr[d];
+ if (is_first_step) {
+ momentum_buf_var = grad_val;
+ } else {
+ momentum_buf_var = momentum_buf_var * opmath_t(momentum) +
+ grad_val * opmath_t(1 - dampening);
+ }
+ momentum_buf_ptr[d] = momentum_buf_var;
+ if (nesterov) {
+ grad_val += momentum_buf_var * opmath_t(momentum);
+ } else {
+ grad_val = momentum_buf_var;
+ }
+ }
+ param_ptr[d] = param_val - grad_val * opmath_t(lr);
+ }
+}
+
+
+template <typename scalar_t, typename opmath_t>
+typename std::enable_if<
+ std::is_same<scalar_t, float>::value || std::is_same<scalar_t, double>::value,
+ void>::
+ type inline sgd_math(
+ scalar_t* param_ptr,
+ scalar_t* grad_ptr,
+ scalar_t* momentum_buf_ptr,
+ const double weight_decay,
+ const double momentum,
+ const double lr,
+ const double dampening,
+ const bool nesterov,
+ const bool maximize,
+ const bool is_first_step,
+ const float* grad_scale_ptr,
+ int64_t size
+){
+ using Vec = at::vec::Vectorized<scalar_t>;
+ Vec grad_vec_to_store;
+ int64_t d = 0;
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
+ Vec param_vec = Vec::loadu(param_ptr + d);
+ Vec grad_vec = Vec::loadu(grad_ptr + d);
+ if (grad_scale_ptr) {
+ grad_vec = grad_vec / Vec(scalar_t(*grad_scale_ptr));
+ grad_vec_to_store = grad_vec;
+ grad_vec_to_store.store(grad_ptr + d);
+ }
+ if (maximize) grad_vec = grad_vec * Vec(scalar_t(-1.0));
+ if (weight_decay != 0.0){
+ grad_vec += param_vec * Vec(scalar_t(weight_decay));
+ }
+ if (momentum != 0.0) {
+ Vec momentum_vec;
+ if (is_first_step) {
+ momentum_vec = grad_vec;
+ } else {
+ momentum_vec =
+ Vec::loadu(momentum_buf_ptr + d) * Vec(scalar_t(momentum)) +
+ grad_vec * Vec(scalar_t(1 - dampening));
+ }
+ momentum_vec.store(momentum_buf_ptr + d);
+ if (nesterov) {
+ grad_vec += momentum_vec * Vec(scalar_t(momentum));
+ } else {
+ grad_vec = momentum_vec;
+ }
+ }
+ param_vec += grad_vec * Vec(scalar_t(-lr));
+ param_vec.store(param_ptr + d);
+ }
+ scalar_t grad_val_to_store;
+ for (; d < size; d++) {
+ scalar_t grad_val = grad_ptr[d];
+ if (grad_scale_ptr) {
+ grad_val = grad_ptr[d] / scalar_t(*grad_scale_ptr);
+ grad_val_to_store = grad_val;
+ grad_ptr[d] = grad_val_to_store;
+ }
+ if (maximize) grad_val = -grad_val;
+ if (weight_decay != 0.0){
+ grad_val += param_ptr[d] * scalar_t(weight_decay);
+ }
+ if (momentum != 0.0) {
+ if (is_first_step) {
+ momentum_buf_ptr[d] = grad_val;
+ } else {
+ momentum_buf_ptr[d] = momentum_buf_ptr[d] * scalar_t(momentum) +
+ grad_val * scalar_t(1 - dampening);
+ }
+ if (nesterov) {
+ grad_val += momentum_buf_ptr[d] * scalar_t(momentum);
+ } else {
+ grad_val = momentum_buf_ptr[d];
+ }
+ }
+ param_ptr[d] -= grad_val * scalar_t(lr);
+ }
+}
+
+template <typename scalar_t>
+void sgd_fused_step_impl(
+ const at::Tensor& param,
+ const at::Tensor& grad,
+ const at::Tensor& momentum_buffer,
+ const double weight_decay,
+ const double momentum,
+ const double lr,
+ const double dampening,
+ const bool nesterov,
+ const bool maximize,
+ const bool is_first_step,
+ const float* grad_scale_ptr) {
+ using opmath_t = at::opmath_type<scalar_t>;
+ scalar_t* param_data = param.data_ptr<scalar_t>();
+ scalar_t* grad_data = grad.data_ptr<scalar_t>();
+ bool has_momentum_buffer = momentum != 0.0;
+ scalar_t* momentum_buffer_data = has_momentum_buffer ? momentum_buffer.data_ptr<scalar_t>() : nullptr;
+
+ constexpr size_t cache_line_size = 64;
+ constexpr int64_t cache_line_aligned_task_unit = cache_line_size / sizeof(scalar_t);
+ size_t num_units = divup(param.numel(), cache_line_aligned_task_unit);
+
+ auto sgd_fn = [&](int64_t begin, int64_t end) {
+ // local pointers
+ begin *= cache_line_aligned_task_unit;
+ end = std::min(end * cache_line_aligned_task_unit, param.numel());
+ scalar_t* param_ptr = param_data + begin;
+ scalar_t* grad_ptr = grad_data + begin;
+ scalar_t* momentum_buffer_ptr = has_momentum_buffer ? momentum_buffer_data + begin : nullptr;
+
+ const int64_t size = end - begin;
+ sgd_math<scalar_t, opmath_t>(
+ param_ptr,
+ grad_ptr,
+ momentum_buffer_ptr,
+ weight_decay,
+ momentum,
+ lr,
+ dampening,
+ nesterov,
+ maximize,
+ is_first_step,
+ grad_scale_ptr,
+ size
+ );
+ };
+ at::parallel_for(
+ 0, num_units, 0, sgd_fn);
+}
+
+void fused_sgd_kernel(
+ const at::Tensor& param,
+ const at::Tensor& grad,
+ const at::Tensor& momentum_buffer,
+ const double weight_decay,
+ const double momentum,
+ const double lr,
+ const double dampening,
+ const bool nesterov,
+ const bool maximize,
+ const bool is_first_step,
+ const float* grad_scale_ptr
+ ) {
+ Tensor grad_contiguous = grad.contiguous();
+ AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, param.scalar_type(), "fused_sgd_kernel", [&] {
+ sgd_fused_step_impl<scalar_t>(
+ param,
+ grad,
+ momentum_buffer,
+ weight_decay,
+ momentum,
+ lr,
+ dampening,
+ nesterov,
+ maximize,
+ is_first_step,
+ grad_scale_ptr);
+ });
+}
+
+}
+
+REGISTER_DISPATCH(fused_sgd_stub, &fused_sgd_kernel);
+} // namespace at::native
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index cbdb998c81..517a65fa0e 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -15565,6 +15565,7 @@
# Unlike "foreach" functions, lists of tensors should be guaranteed to be on the same device (for now).
variants: function
dispatch:
+ CPU: _fused_sgd_kernel_cpu_
CUDA: _fused_sgd_kernel_cuda_
autogen: _fused_sgd, _fused_sgd.out
@@ -15574,6 +15575,7 @@
device_check: NoCheck
variants: function
dispatch:
+ CPU: _fused_sgd_kernel_cpu_
CUDA: _fused_sgd_kernel_cuda_
autogen: _fused_sgd.tensor_lr, _fused_sgd.tensor_lr_out
diff --git a/build_variables.bzl b/build_variables.bzl
index a8b173ac3f..22e36a4d8b 100644
--- a/build_variables.bzl
+++ b/build_variables.bzl
@@ -1167,6 +1167,7 @@ aten_native_source_codegen_list = [
"aten/src/ATen/native/cpu/SparseFactories.cpp",
"aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp",
"aten/src/ATen/native/cpu/FusedAdamKernel.cpp",
+ "aten/src/ATen/native/cpu/FusedSGDKernel.cpp",
]
# This aten native source file list will not go through aten codegen process
@@ -1402,6 +1403,7 @@ aten_native_source_non_codegen_list = [
"aten/src/ATen/native/xnnpack/RegisterOpContextClass.cpp",
"aten/src/ATen/native/xnnpack/Shim.cpp",
"aten/src/ATen/native/FusedAdam.cpp",
+ "aten/src/ATen/native/FusedSGD.cpp",
# Files not in native, but depends on native symbols
# "aten/src/ATen/TensorIndexing.cpp",
"aten/src/ATen/TensorIterator.cpp",
diff --git a/torch/optim/adam.py b/torch/optim/adam.py
index e74ad4e1ab..cd45a197b3 100644
--- a/torch/optim/adam.py
+++ b/torch/optim/adam.py
@@ -76,7 +76,7 @@ class Adam(Optimizer):
# Support AMP with FP16/BF16 model params which would need
# higher prec copy of params to do update math in higher prec to
# alleviate the loss of information.
- fused_supported_devices = _get_fused_kernels_supported_devices() + ["cpu"]
+ fused_supported_devices = _get_fused_kernels_supported_devices()
if not all(
p.device.type in fused_supported_devices and torch.is_floating_point(p)
for pg in self.param_groups
diff --git a/torch/optim/adamw.py b/torch/optim/adamw.py
index 89e776558d..bbe03c1ce5 100644
--- a/torch/optim/adamw.py
+++ b/torch/optim/adamw.py
@@ -75,7 +75,7 @@ class AdamW(Optimizer):
# Suppor AMP with FP16/BF16 model params which would need
# higher prec copy of params to do update math in higher prec to
# alleviate the loss of information.
- fused_supported_devices = _get_fused_kernels_supported_devices() + ["cpu"]
+ fused_supported_devices = _get_fused_kernels_supported_devices()
if not all(
p.device.type in fused_supported_devices and torch.is_floating_point(p)
for pg in self.param_groups
diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py
index 9a21fceb6c..30d9070042 100644
--- a/torch/optim/sgd.py
+++ b/torch/optim/sgd.py
@@ -2,6 +2,7 @@ from typing import List, Optional
import torch
from torch import Tensor
+from torch.utils._foreach_utils import _get_fused_kernels_supported_devices
from .optimizer import (
_default_to_fused_or_foreach,
_differentiable_doc,
@@ -54,6 +55,17 @@ class SGD(Optimizer):
if fused:
self._step_supports_amp_scaling = True
+
+ fused_supported_devices = _get_fused_kernels_supported_devices()
+ if not all(
+ p.device.type in fused_supported_devices and torch.is_floating_point(p)
+ for pg in self.param_groups
+ for p in pg["params"]
+ ):
+ raise RuntimeError(
+ "`fused=True` requires all the params to be floating point Tensors of "
+ f"supported devices: {fused_supported_devices}."
+ )
if differentiable:
raise RuntimeError("`fused` does not support `differentiable`")
if foreach:
diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py
index 9533b22032..8b50b293ff 100644
--- a/torch/testing/_internal/common_optimizers.py
+++ b/torch/testing/_internal/common_optimizers.py
@@ -1738,7 +1738,10 @@ optim_db: List[OptimizerInfo] = [
},
[lambda opt: StepLR(opt, gamma=0.99999, step_size=300)],
),
- supports_fused_on=("cuda",),
+ supports_fused_on=(
+ "cpu",
+ "cuda",
+ ),
skips=(
DecorateInfo(
skipIfTorchDynamo("initial_value is incorrect in dynamo, see #123202"),
diff --git a/torch/utils/_foreach_utils.py b/torch/utils/_foreach_utils.py
index 9a9a6a15ca..840df0432b 100644
--- a/torch/utils/_foreach_utils.py
+++ b/torch/utils/_foreach_utils.py
@@ -11,7 +11,7 @@ def _get_foreach_kernels_supported_devices() -> List[str]:
def _get_fused_kernels_supported_devices() -> List[str]:
r"""Return the device type list that supports fused kernels in optimizer."""
- return ["cuda", "xpu", torch._C._get_privateuse1_backend_name()]
+ return ["cuda", "xpu", "cpu", torch._C._get_privateuse1_backend_name()]
TensorListList: TypeAlias = List[List[Optional[Tensor]]]
Indices: TypeAlias = List[int]
|
2.41.0
|
5f321b84fd3057514d7363b58f592d23e931bd6
|
Tue, 23 Apr 2024 09:34:45 +0000
|
[PATCH 0510/1000] Refactor autocast C++ APIs to be device-agnostic (#124359)
|
# Motivation
This PR aims to refactor the autocast **C++** APIs to be device-agnostic and to deprecate the device-specific autocast **C++** APIs. On the C++ side:
- `is_enabled()` -> `is_enabled(device_type)`
- `set_enabled(new_enabled)` -> `set_enabled(device_type, new_enabled)`
- `get_autocast_dtype()` -> `get_autocast_dtype(device_type)`
- `set_autocast_dtype(dtype)` -> `set_autocast_dtype(device_type, dtype)`

The following C++ APIs are deprecated and should be removed in PyTorch 2.5:
- `is_cpu_enabled`
- `set_cpu_enabled`
- `get_autocast_cpu_dtype`
- `set_autocast_cpu_dtype`
- `is_xpu_enabled`
- `set_xpu_enabled`
- `get_autocast_xpu_dtype`
- `set_autocast_xpu_dtype`
- `is_ipu_enabled`
- `set_ipu_enabled`
- `get_autocast_ipu_dtype`
- `set_autocast_ipu_dtype`
- `is_hpu_enabled`
- `set_hpu_enabled`
- `get_autocast_hpu_dtype`
- `set_autocast_hpu_dtype`
- `is_xla_enabled`
- `set_xla_enabled`
- `get_autocast_xla_dtype`
- `set_autocast_xla_dtype`
- `is_privateuseone_enabled`
- `set_privateuseone_enabled`
- `get_autocast_privateuseone_dtype`
- `set_autocast_privateuseone_dtype`

On the Python side, provide 4 generic autocast APIs:
- `torch.is_autocast_enabled(device_type)`
- `torch.set_autocast_enabled(device_type, new_enabled)`
- `torch.get_autocast_dtype(device_type)`
- `torch.set_autocast_dtype(device_type, dtype)`

# Additional Context
We will submit another PR to refactor the autocast **Python** APIs based on this PR.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124359 Approved by: https://github.com/jgong5, https://github.com/albanD
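Editorial sketch (not part of the commit message): a minimal Python-level use of the generic, device-keyed autocast accessors listed above. Whether these exact Python bindings are available at this commit is an assumption; the author notes the Python-side refactor lands in a follow-up PR, so treat the names and signatures as illustrative.
```
import torch

# Per-device autocast state, keyed by device type string rather than by
# backend-specific helpers (is_cpu_enabled, get_autocast_cpu_dtype, ...).
# Defaults follow the thread-local table added in the diff.
print(torch.get_autocast_dtype("cpu"))    # torch.bfloat16 (assumed binding)
print(torch.get_autocast_dtype("cuda"))   # torch.float16  (assumed binding)

torch.set_autocast_dtype("cpu", torch.float16)   # assumed binding
torch.set_autocast_enabled("cpu", True)          # assumed binding
print(torch.is_autocast_enabled("cpu"))          # True
torch.set_autocast_enabled("cpu", False)
```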
|
diff --git a/aten/src/ATen/autocast_mode.cpp b/aten/src/ATen/autocast_mode.cpp
index 0b1dac55f3..923c2e42c9 100644
--- a/aten/src/ATen/autocast_mode.cpp
+++ b/aten/src/ATen/autocast_mode.cpp
@@ -6,60 +6,14 @@
namespace at::autocast {
-bool is_enabled() {
- return !c10::impl::tls_is_dispatch_key_excluded(DispatchKey::AutocastCUDA);
+bool is_autocast_enabled(at::DeviceType device_type) {
+ at::DispatchKey dispatch_key = get_autocast_dispatch_key_from_device_type(device_type);
+ return !c10::impl::tls_is_dispatch_key_excluded(dispatch_key);
}
-void set_enabled(bool new_enabled) {
- c10::impl::tls_set_dispatch_key_excluded(DispatchKey::AutocastCUDA, !new_enabled);
-}
-
-bool is_cpu_enabled() {
- return !c10::impl::tls_is_dispatch_key_excluded(DispatchKey::AutocastCPU);
-}
-
-void set_cpu_enabled(bool new_enabled) {
- c10::impl::tls_set_dispatch_key_excluded(DispatchKey::AutocastCPU, !new_enabled);
-}
-
-bool is_xpu_enabled() {
- return !c10::impl::tls_is_dispatch_key_excluded(DispatchKey::AutocastXPU);
-}
-
-void set_xpu_enabled(bool new_enabled) {
- c10::impl::tls_set_dispatch_key_excluded(DispatchKey::AutocastXPU, !new_enabled);
-}
-
-bool is_ipu_enabled() {
- return !c10::impl::tls_is_dispatch_key_excluded(DispatchKey::AutocastIPU);
-}
-
-void set_ipu_enabled(bool new_enabled) {
- c10::impl::tls_set_dispatch_key_excluded(DispatchKey::AutocastIPU, !new_enabled);
-}
-
-bool is_hpu_enabled() {
- return !c10::impl::tls_is_dispatch_key_excluded(DispatchKey::AutocastHPU);
-}
-
-void set_hpu_enabled(bool new_enabled) {
- c10::impl::tls_set_dispatch_key_excluded(DispatchKey::AutocastHPU, !new_enabled);
-}
-
-bool is_xla_enabled() {
- return !c10::impl::tls_is_dispatch_key_excluded(DispatchKey::AutocastXLA);
-}
-
-void set_xla_enabled(bool new_enabled) {
- c10::impl::tls_set_dispatch_key_excluded(DispatchKey::AutocastXLA, !new_enabled);
-}
-
-bool is_privateuseone_enabled() {
- return !c10::impl::tls_is_dispatch_key_excluded(DispatchKey::AutocastPrivateUse1);
-}
-
-void set_privateuseone_enabled(bool new_enabled) {
- c10::impl::tls_set_dispatch_key_excluded(DispatchKey::AutocastPrivateUse1, !new_enabled);
+void set_autocast_enabled(at::DeviceType device_type, bool enabled) {
+ at::DispatchKey dispatch_key = get_autocast_dispatch_key_from_device_type(device_type);
+ c10::impl::tls_set_dispatch_key_excluded(dispatch_key, !enabled);
}
namespace {
@@ -91,30 +45,40 @@ std::mutex cached_casts_mutex;
// it calls clear_cache() to ensure cached Tensors don't leak outside the autocasting region.
thread_local int nesting = 0;
-// autocast_cpu_dtype is the lower_precision_fp used by AutocastCPU.
-thread_local at::ScalarType autocast_cpu_dtype = at::kBFloat16;
-
-// autocast_xpu_dtype is the lower_precision_fp used by AutocastXPU.
-thread_local at::ScalarType autocast_xpu_dtype = at::kBFloat16;
-
-// autocast_ipu_dtype is the lower_precision_fp used by AutocastIPU.
-thread_local at::ScalarType autocast_ipu_dtype = at::kHalf;
-
-// autocast_hpu_dtype is the lower_precision_fp used by AutocastHPU.
-thread_local at::ScalarType autocast_hpu_dtype = at::kBFloat16;
-
-// autocast_xla_dtype is the lower_precision_fp used by AutocastXLA.
-thread_local at::ScalarType autocast_xla_dtype = at::kBFloat16;
+// The order of this array MUST exactly match the definition order of DeviceType
+// in c10/core/DeviceType.h.
+static_assert(
+ at::COMPILE_TIME_MAX_DEVICE_TYPES == 21,
+ "The definition of the default autocast data type per device backend doesn't match with the definition of the device type.");
+thread_local std::array<at::ScalarType, at::COMPILE_TIME_MAX_DEVICE_TYPES>
+ autocast_dtype = {
+ at::kBFloat16, // CPU
+ at::kHalf, // CUDA.
+ at::ScalarType::Undefined, // Reserved for explicit MKLDNN
+ at::ScalarType::Undefined, // OpenGL
+ at::ScalarType::Undefined, // OpenCL
+ at::ScalarType::Undefined, // IDEEP.
+ at::kHalf, // AMD HIP
+ at::ScalarType::Undefined, // FPGA
+ at::ScalarType::Undefined, // ONNX Runtime / Microsoft
+ at::kBFloat16, // XLA / TPU
+ at::ScalarType::Undefined, // Vulkan
+ at::ScalarType::Undefined, // Metal
+ at::kBFloat16, // XPU
+ at::ScalarType::Undefined, // MPS
+ at::ScalarType::Undefined, // Meta (tensors with no data)
+ at::kBFloat16, // HPU / HABANA
+ at::ScalarType::Undefined, // SX-Aurora / NEC
+ at::ScalarType::Undefined, // Lazy Tensors
+ at::kHalf, // Graphcore IPU
+ at::ScalarType::Undefined, // Meta training and inference devices
+ at::kHalf, // PrivateUse1 device
+};
// should we enabled the cache inside autocast.
thread_local bool cache_enabled = true;
-// autocast_gpu_dtype is the lower_precision_fp used by AutocastGPU.
-thread_local at::ScalarType autocast_gpu_dtype = at::kHalf;
-
-// autocast_privateuseone_dtype is the lower_precision_fp used by AutocastPrivateUse1.
-thread_local at::ScalarType autocast_privateuseone_dtype = at::kHalf;
-}
+} // anonymous namespace
void clear_cache() {
const std::lock_guard<std::mutex> lock(cached_casts_mutex);
@@ -129,60 +93,12 @@ int decrement_nesting() {
return --nesting;
}
-at::ScalarType get_autocast_gpu_dtype() {
- return autocast_gpu_dtype;
-}
-
-at::ScalarType get_autocast_cpu_dtype() {
- return autocast_cpu_dtype;
-}
-
-at::ScalarType get_autocast_xpu_dtype() {
- return autocast_xpu_dtype;
-}
-
-at::ScalarType get_autocast_ipu_dtype() {
- return autocast_ipu_dtype;
-}
-
-at::ScalarType get_autocast_hpu_dtype() {
- return autocast_hpu_dtype;
-}
-
-at::ScalarType get_autocast_xla_dtype() {
- return autocast_xla_dtype;
-}
-
-at::ScalarType get_autocast_privateuseone_dtype() {
- return autocast_privateuseone_dtype;
-}
-
-void set_autocast_cpu_dtype(at::ScalarType dtype) {
- autocast_cpu_dtype = dtype;
-}
-
-void set_autocast_gpu_dtype(at::ScalarType dtype) {
- autocast_gpu_dtype = dtype;
-}
-
-void set_autocast_xpu_dtype(at::ScalarType dtype) {
- autocast_xpu_dtype = dtype;
-}
-
-void set_autocast_ipu_dtype(at::ScalarType dtype) {
- autocast_ipu_dtype = dtype;
-}
-
-void set_autocast_hpu_dtype(at::ScalarType dtype) {
- autocast_hpu_dtype = dtype;
-}
-
-void set_autocast_xla_dtype(at::ScalarType dtype) {
- autocast_xla_dtype = dtype;
+at::ScalarType get_autocast_dtype(at::DeviceType device_type) {
+ return autocast_dtype[static_cast<int>(device_type)];
}
-void set_autocast_privateuseone_dtype(at::ScalarType dtype) {
- autocast_privateuseone_dtype = dtype;
+void set_autocast_dtype(at::DeviceType device_type, at::ScalarType dtype) {
+ autocast_dtype[static_cast<int>(device_type)] = dtype;
}
bool is_autocast_cache_enabled() {
diff --git a/aten/src/ATen/autocast_mode.h b/aten/src/ATen/autocast_mode.h
index eead4bf2c9..f4dd7d8766 100644
--- a/aten/src/ATen/autocast_mode.h
+++ b/aten/src/ATen/autocast_mode.h
@@ -10,40 +10,120 @@
namespace at::autocast {
-TORCH_API bool is_enabled();
-TORCH_API void set_enabled(bool enabled);
+TORCH_API bool is_autocast_enabled(at::DeviceType device_type);
+TORCH_API void set_autocast_enabled(at::DeviceType device_type, bool enabled);
+TORCH_API at::ScalarType get_autocast_dtype(at::DeviceType device_type);
+TORCH_API void set_autocast_dtype(
+ at::DeviceType device_type,
+ at::ScalarType dtype);
TORCH_API void clear_cache();
TORCH_API int increment_nesting();
TORCH_API int decrement_nesting();
-TORCH_API bool is_cpu_enabled();
-TORCH_API void set_cpu_enabled(bool enabled);
-TORCH_API at::ScalarType get_autocast_gpu_dtype();
-TORCH_API at::ScalarType get_autocast_cpu_dtype();
-TORCH_API void set_autocast_gpu_dtype(at::ScalarType dtype);
-TORCH_API void set_autocast_cpu_dtype(at::ScalarType dtype);
-TORCH_API bool is_xpu_enabled();
-TORCH_API void set_xpu_enabled(bool enabled);
-TORCH_API at::ScalarType get_autocast_xpu_dtype();
-TORCH_API void set_autocast_xpu_dtype(at::ScalarType dtype);
-TORCH_API bool is_ipu_enabled();
-TORCH_API void set_ipu_enabled(bool enabled);
-TORCH_API at::ScalarType get_autocast_ipu_dtype();
-TORCH_API void set_autocast_ipu_dtype(at::ScalarType dtype);
-TORCH_API bool is_hpu_enabled();
-TORCH_API void set_hpu_enabled(bool enabled);
-TORCH_API at::ScalarType get_autocast_hpu_dtype();
-TORCH_API void set_autocast_hpu_dtype(at::ScalarType dtype);
-TORCH_API bool is_xla_enabled();
-TORCH_API void set_xla_enabled(bool enabled);
-TORCH_API at::ScalarType get_autocast_xla_dtype();
-TORCH_API void set_autocast_xla_dtype(at::ScalarType dtype);
-TORCH_API bool is_privateuseone_enabled();
-TORCH_API void set_privateuseone_enabled(bool enabled);
-TORCH_API at::ScalarType get_autocast_privateuseone_dtype();
-TORCH_API void set_autocast_privateuseone_dtype(at::ScalarType dtype);
TORCH_API bool is_autocast_cache_enabled();
TORCH_API void set_autocast_cache_enabled(bool enabled);
+// deprecated CUDA-specific autocast APIs
+C10_DEPRECATED_MESSAGE(
+ "at::autocast::is_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(at::kCUDA) instead.")
+TORCH_API inline bool is_enabled() {
+ TORCH_WARN_DEPRECATION(
+ "at::autocast::",
+ __func__,
+ "() is deprecated. Please use at::autocast::is_autocast_enabled(at::kCUDA) instead.")
+ return is_autocast_enabled(at::kCUDA);
+}
+C10_DEPRECATED_MESSAGE(
+ "at::autocast::set_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(at::kCUDA, enabled) instead.")
+TORCH_API inline void set_enabled(bool enabled) {
+ TORCH_WARN_DEPRECATION(
+ "at::autocast::",
+ __func__,
+ "(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(at::kCUDA, enabled) instead.")
+ set_autocast_enabled(at::kCUDA, enabled);
+}
+C10_DEPRECATED_MESSAGE(
+ "at::autocast::get_autocast_gpu_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(at::kCUDA) instead.")
+TORCH_API inline at::ScalarType get_autocast_gpu_dtype() {
+ TORCH_WARN_DEPRECATION(
+ "at::autocast::",
+ __func__,
+ "() is deprecated. Please use at::autocast::get_autocast_dtype(at::kCUDA) instead.")
+ return get_autocast_dtype(at::kCUDA);
+}
+C10_DEPRECATED_MESSAGE(
+ "at::autocast::set_autocast_gpu_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(at::kCUDA, dtype) instead.")
+TORCH_API inline void set_autocast_gpu_dtype(at::ScalarType dtype) {
+ TORCH_WARN_DEPRECATION(
+ "at::autocast::",
+ __func__,
+ "(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(at::kCUDA, dtype) instead.")
+ set_autocast_dtype(at::kCUDA, dtype);
+}
+
+#define DECLARE_DEPRECATED_AUTOCAST_APIS(name, device_type) \
+ C10_DEPRECATED_MESSAGE( \
+ "at::autocast::is_" #name \
+ "_enabled() is deprecated. Please use at::autocast::is_autocast_enabled(" #device_type \
+ ") instead.") \
+ TORCH_API inline bool is_##name##_enabled() { \
+ TORCH_WARN_DEPRECATION( \
+ "at::autocast::", \
+ __func__, \
+ "() is deprecated. Please use at::autocast::is_autocast_enabled(" #device_type \
+ ") instead.") \
+ return is_autocast_enabled(device_type); \
+ } \
+ \
+ C10_DEPRECATED_MESSAGE( \
+ "at::autocast::set_" #name \
+ "_enabled(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(" #device_type \
+ ", enabled) instead.") \
+ TORCH_API inline void set_##name##_enabled(bool enabled) { \
+ TORCH_WARN_DEPRECATION( \
+ "at::autocast::", \
+ __func__, \
+ "(enabled) is deprecated. Please use at::autocast::set_autocast_enabled(" #device_type \
+ ", enabled) instead.") \
+ set_autocast_enabled(device_type, enabled); \
+ } \
+ \
+ C10_DEPRECATED_MESSAGE( \
+ "at::autocast::get_autocast_" #name \
+ "_dtype() is deprecated. Please use at::autocast::get_autocast_dtype(" #device_type \
+ ") instead.") \
+ TORCH_API inline at::ScalarType get_autocast_##name##_dtype() { \
+ TORCH_WARN_DEPRECATION( \
+ "at::autocast::", \
+ __func__, \
+ "() is deprecated. Please at::autocast::get_autocast_dtype(" #device_type \
+ ") instead.") \
+ return get_autocast_dtype(device_type); \
+ } \
+ \
+ C10_DEPRECATED_MESSAGE( \
+ "at::autocast::set_autocast_" #name \
+ "_dtype(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(" #device_type \
+ ", dtype) instead.") \
+ TORCH_API inline void set_autocast_##name##_dtype(at::ScalarType dtype) { \
+ TORCH_WARN_DEPRECATION( \
+ "at::autocast::", \
+ __func__, \
+ "(dtype) is deprecated. Please use at::autocast::set_autocast_dtype(" #device_type \
+ ", dtype) instead.") \
+ set_autocast_dtype(device_type, dtype); \
+ }
+
+#define AT_FORALL_DEPRECATED_AUTOCAST_BAKCNEDS(_) \
+ _(cpu, at::kCPU) \
+ _(xpu, at::kXPU) \
+ _(xla, at::kXLA) \
+ _(hpu, at::kHPU) \
+ _(ipu, at::kIPU) \
+ _(privateuseone, at::kPrivateUse1)
+
+// deprecated other backend specific autocast APIs
+AT_FORALL_DEPRECATED_AUTOCAST_BAKCNEDS(DECLARE_DEPRECATED_AUTOCAST_APIS)
+
namespace {
inline bool is_autocast_eligible(
const Tensor& tensor,
@@ -96,24 +176,14 @@ inline DispatchKey get_autocast_dispatch_key_from_device_type(
inline at::ScalarType get_lower_precision_fp_from_device_type(
c10::DeviceType device_type) {
- switch (device_type) {
- case c10::DeviceType::CUDA:
- return get_autocast_gpu_dtype();
- case c10::DeviceType::CPU:
- return get_autocast_cpu_dtype();
- case c10::DeviceType::XPU:
- return get_autocast_xpu_dtype();
- case c10::DeviceType::IPU:
- return get_autocast_ipu_dtype();
- case c10::DeviceType::HPU:
- return get_autocast_hpu_dtype();
- case c10::DeviceType::XLA:
- return get_autocast_xla_dtype();
- case c10::DeviceType::PrivateUse1:
- return get_autocast_privateuseone_dtype();
- default:
- throw std::runtime_error(
- "unknown device type for autocast in get_lower_precision_fp_from_device_type");
+ if (device_type == at::kCPU || device_type == at::kCUDA ||
+ device_type == at::kXPU || device_type == at::kIPU ||
+ device_type == at::kHPU || device_type == at::kXLA ||
+ device_type == at::kPrivateUse1) {
+ return get_autocast_dtype(device_type);
+ } else {
+ throw std::runtime_error(
+ "unknown device type for autocast in get_lower_precision_fp_from_device_type");
}
}
diff --git a/test/test_public_bindings.py b/test/test_public_bindings.py
index 96ffb9d03e..e6a1914aa4 100644
--- a/test/test_public_bindings.py
+++ b/test/test_public_bindings.py
@@ -139,6 +139,7 @@ class TestPublicBindings(TestCase):
"Generator",
"GeneratorType",
"get_autocast_cpu_dtype",
+ "get_autocast_dtype",
"get_autocast_ipu_dtype",
"get_default_dtype",
"get_num_interop_threads",
@@ -216,6 +217,7 @@ class TestPublicBindings(TestCase):
"set_anomaly_enabled",
"set_autocast_cache_enabled",
"set_autocast_cpu_dtype",
+ "set_autocast_dtype",
"set_autocast_ipu_dtype",
"set_autocast_cpu_enabled",
"set_autocast_ipu_enabled",
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index dd7047b6b6..1b53c1b40f 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -1249,8 +1249,16 @@ def is_grad_enabled() -> _bool: ...
def _set_fwd_grad_enabled(enabled: _bool) -> None: ...
def _is_fwd_grad_enabled() -> _bool: ...
def is_inference_mode_enabled() -> _bool: ...
+@overload
+def set_autocast_enabled(device_type: str, enabled: _bool) -> None: ...
+@overload
def set_autocast_enabled(enabled: _bool) -> None: ...
+@overload
+def is_autocast_enabled(device_type: str) -> _bool: ...
+@overload
def is_autocast_enabled() -> _bool: ...
+def set_autocast_dtype(device_type: str, dtype: _dtype) -> None: ...
+def get_autocast_dtype(device_type: str) -> _dtype: ...
def clear_autocast_cache() -> None: ...
def set_autocast_cpu_enabled(enabled: _bool) -> None: ...
def is_autocast_cpu_enabled() -> _bool: ...
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index 35941ed6c4..f996358ba0 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -595,21 +595,30 @@ class OutputGraph:
self.torch_function_enabled,
)
global_state["grad_enabled"] = (torch.set_grad_enabled, torch.is_grad_enabled())
+
+ def autocast_specific_backend(
+ device_type: str, func: Callable[[str, Any], None]
+ ):
+ def decorator(value):
+ return func(device_type, value)
+
+ return decorator
+
global_state["autocast_enabled"] = (
- torch.set_autocast_enabled,
- torch.is_autocast_enabled(),
+ autocast_specific_backend("cuda", torch.set_autocast_enabled),
+ torch.is_autocast_enabled("cuda"),
)
global_state["autocast_cpu_enabled"] = (
- torch.set_autocast_cpu_enabled,
- torch.is_autocast_cpu_enabled(),
+ autocast_specific_backend("cpu", torch.set_autocast_enabled),
+ torch.is_autocast_enabled("cpu"),
)
global_state["autocast_gpu_dtype"] = (
- torch.set_autocast_gpu_dtype,
- torch.get_autocast_gpu_dtype(),
+ autocast_specific_backend("cuda", torch.set_autocast_dtype),
+ torch.get_autocast_dtype("cuda"),
)
global_state["autocast_cpu_dtype"] = (
- torch.set_autocast_cpu_dtype,
- torch.get_autocast_cpu_dtype(),
+ autocast_specific_backend("cpu", torch.set_autocast_dtype),
+ torch.get_autocast_dtype("cpu"),
)
global_state["autocast_cache_enabled"] = (
torch.set_autocast_cache_enabled,
diff --git a/torch/_functorch/_aot_autograd/utils.py b/torch/_functorch/_aot_autograd/utils.py
index 67b0974147..e23a32f10c 100644
--- a/torch/_functorch/_aot_autograd/utils.py
+++ b/torch/_functorch/_aot_autograd/utils.py
@@ -79,10 +79,10 @@ def normalize_as_list(x):
def _get_autocast_states():
return [
- torch.is_autocast_enabled(),
- torch.is_autocast_cpu_enabled(),
- torch.get_autocast_gpu_dtype(),
- torch.get_autocast_cpu_dtype(),
+ torch.is_autocast_enabled("cuda"),
+ torch.is_autocast_enabled("cpu"),
+ torch.get_autocast_dtype("cuda"),
+ torch.get_autocast_dtype("cpu"),
torch.is_autocast_cache_enabled(),
]
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index 8edf23cd2e..5fedfb9be4 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -474,24 +474,47 @@ PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused) {
Py_RETURN_TRUE;
}
-namespace torch {
-namespace autograd {
+namespace torch::autograd {
-static PyObject* set_autocast_enabled(PyObject* _unused, PyObject* arg) {
+static PyObject* set_autocast_enabled(
+ PyObject* _unused,
+ PyObject* args,
+ PyObject* kwargs) {
HANDLE_TH_ERRORS
- TORCH_CHECK_TYPE(
- PyBool_Check(arg),
- "enabled must be a bool (got ",
- Py_TYPE(arg)->tp_name,
- ")");
- at::autocast::set_enabled(arg == Py_True);
+ static PythonArgParser parser(
+ {"set_autocast_enabled(c10::string_view device_type, bool enabled)",
+ "set_autocast_enabled(bool enabled)"}); // this signature is depracated.
+ ParsedArgs<2> parsed_args;
+ auto r = parser.parse(args, kwargs, parsed_args);
+ // Set at::kCUDA as default value to prevent BC-breaking changes.
+ at::DeviceType device_type = at::kCUDA;
+ int enabled_id = 0;
+ if (r.idx == 0) {
+ device_type = at::Device(r.string(0)).type();
+ enabled_id = 1;
+ }
+ auto enabled = r.toBool(enabled_id);
+ at::autocast::set_autocast_enabled(device_type, enabled);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
-static PyObject* is_autocast_enabled(PyObject* _unused, PyObject* arg) {
+static PyObject* is_autocast_enabled(
+ PyObject* _unused,
+ PyObject* args,
+ PyObject* kwargs) {
HANDLE_TH_ERRORS
- if (at::autocast::is_enabled()) {
+ static PythonArgParser parser(
+ {"is_autocast_enabled(c10::string_view device_type)",
+ "is_autocast_enabled()"}); // this signature is depracated.
+ ParsedArgs<1> parsed_args;
+ auto r = parser.parse(args, kwargs, parsed_args);
+ // Set at::kCUDA as default value to prevent BC-breaking changes.
+ at::DeviceType device_type = at::kCUDA;
+ if (r.idx == 0) {
+ device_type = at::Device(r.string(0)).type();
+ }
+ if (at::autocast::is_autocast_enabled(device_type)) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -499,11 +522,48 @@ static PyObject* is_autocast_enabled(PyObject* _unused, PyObject* arg) {
END_HANDLE_TH_ERRORS
}
+static PyObject* get_autocast_dtype(
+ PyObject* _unused,
+ PyObject* args,
+ PyObject* kwargs) {
+ HANDLE_TH_ERRORS
+ static PythonArgParser parser(
+ {"get_autocast_dtype(c10::string_view device_type)"});
+ ParsedArgs<1> parsed_args;
+ auto r = parser.parse(args, kwargs, parsed_args);
+ auto device_type = at::Device(r.string(0)).type();
+ at::ScalarType current_dtype = at::autocast::get_autocast_dtype(device_type);
+ auto dtype = (PyObject*)torch::getTHPDtype(current_dtype);
+ Py_INCREF(dtype);
+ return dtype;
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* set_autocast_dtype(
+ PyObject* _unused,
+ PyObject* args,
+ PyObject* kwargs) {
+ HANDLE_TH_ERRORS
+ static PythonArgParser parser(
+ {"set_autocast_dtype(c10::string_view device_type, ScalarType dtype)"});
+ ParsedArgs<2> parsed_args;
+ auto r = parser.parse(args, kwargs, parsed_args);
+ auto device_type = at::Device(r.string(0)).type();
+ auto dtype = r.scalartype(1);
+ at::autocast::set_autocast_dtype(device_type, dtype);
+ Py_RETURN_NONE;
+ END_HANDLE_TH_ERRORS
+}
+
static PyObject* is_any_autocast_enabled(PyObject* _unused, PyObject* arg) {
HANDLE_TH_ERRORS
- if (at::autocast::is_enabled() || at::autocast::is_cpu_enabled() ||
- at::autocast::is_xpu_enabled() || at::autocast::is_ipu_enabled() ||
- at::autocast::is_xla_enabled() || at::autocast::is_hpu_enabled()) {
+ if (at::autocast::is_autocast_enabled(at::kCPU) ||
+ at::autocast::is_autocast_enabled(at::kCUDA) ||
+ at::autocast::is_autocast_enabled(at::kXPU) ||
+ at::autocast::is_autocast_enabled(at::kIPU) ||
+ at::autocast::is_autocast_enabled(at::kXLA) ||
+ at::autocast::is_autocast_enabled(at::kHPU) ||
+ at::autocast::is_autocast_enabled(at::kPrivateUse1)) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -518,14 +578,18 @@ static PyObject* set_autocast_cpu_enabled(PyObject* _unused, PyObject* arg) {
"enabled must be a bool (got ",
Py_TYPE(arg)->tp_name,
")");
- at::autocast::set_cpu_enabled(arg == Py_True);
+ TORCH_WARN_DEPRECATION(
+ "torch.set_autocast_cpu_enabled(enabled) is deprecated. Please use torch.set_autocast_enabled('cpu', enabled) instead.")
+ at::autocast::set_autocast_enabled(at::kCPU, arg == Py_True);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject* is_autocast_cpu_enabled(PyObject* _unused, PyObject* arg) {
HANDLE_TH_ERRORS
- if (at::autocast::is_cpu_enabled()) {
+ TORCH_WARN_DEPRECATION(
+ "torch.is_autocast_cpu_enabled() is deprecated. Please use torch.is_autocast_enabled('cpu') instead.")
+ if (at::autocast::is_autocast_enabled(at::kCPU)) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -540,14 +604,18 @@ static PyObject* set_autocast_ipu_enabled(PyObject* _unused, PyObject* arg) {
"enabled must be a bool (got ",
Py_TYPE(arg)->tp_name,
")");
- at::autocast::set_ipu_enabled(arg == Py_True);
+ TORCH_WARN_DEPRECATION(
+ "torch.set_autocast_ipu_enabled(enabled) is deprecated. Please use torch.set_autocast_enabled('ipu', enabled) instead.")
+ at::autocast::set_autocast_enabled(at::kIPU, arg == Py_True);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject* is_autocast_ipu_enabled(PyObject* _unused, PyObject* arg) {
HANDLE_TH_ERRORS
- if (at::autocast::is_ipu_enabled()) {
+ TORCH_WARN_DEPRECATION(
+ "torch.is_autocast_ipu_enabled() is deprecated. Please use torch.is_autocast_enabled('ipu') instead.")
+ if (at::autocast::is_autocast_enabled(at::kIPU)) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -562,14 +630,18 @@ static PyObject* set_autocast_xla_enabled(PyObject* _unused, PyObject* arg) {
"enabled must be a bool (got ",
Py_TYPE(arg)->tp_name,
")");
- at::autocast::set_xla_enabled(arg == Py_True);
+ TORCH_WARN_DEPRECATION(
+ "torch.set_autocast_xla_enabled(enabled) is deprecated. Please use torch.set_autocast_enabled('xla', enabled) instead.")
+ at::autocast::set_autocast_enabled(at::kXLA, arg == Py_True);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject* is_autocast_xla_enabled(PyObject* _unused, PyObject* arg) {
HANDLE_TH_ERRORS
- if (at::autocast::is_xla_enabled()) {
+ TORCH_WARN_DEPRECATION(
+ "torch.is_autocast_xla_enabled() is deprecated. Please use torch.is_autocast_enabled('xla') instead.")
+ if (at::autocast::is_autocast_enabled(at::kXLA)) {
Py_RETURN_TRUE;
} else {
Py_RETURN_FALSE;
@@ -584,8 +656,10 @@ static PyObject* set_autocast_gpu_dtype(PyObject* _unused, PyObject* arg) {
"dtype must be a torch.dtype (got ",
Py_TYPE(arg)->tp_name,
")");
+ TORCH_WARN_DEPRECATION(
+ "torch.set_autocast_gpu_dtype(dtype) is deprecated. Please use torch.set_autocast_dtype('cuda', dtype) instead.")
at::ScalarType targetType = reinterpret_cast<THPDtype*>(arg)->scalar_type;
- at::autocast::set_autocast_gpu_dtype(targetType);
+ at::autocast::set_autocast_dtype(at::kCUDA, targetType);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
@@ -597,8 +671,10 @@ static PyObject* set_autocast_cpu_dtype(PyObject* _unused, PyObject* arg) {
"dtype must be a torch.dtype (got ",
Py_TYPE(arg)->tp_name,
")");
+ TORCH_WARN_DEPRECATION(
+ "torch.set_autocast_cpu_dtype(dtype) is deprecated. Please use torch.set_autocast_dtype('cpu', dtype) instead.")
at::ScalarType targetType = reinterpret_cast<THPDtype*>(arg)->scalar_type;
- at::autocast::set_autocast_cpu_dtype(targetType);
+ at::autocast::set_autocast_dtype(at::kCPU, targetType);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
@@ -610,8 +686,10 @@ static PyObject* set_autocast_ipu_dtype(PyObject* _unused, PyObject* arg) {
"dtype must be a torch.dtype (got ",
Py_TYPE(arg)->tp_name,
")");
+ TORCH_WARN_DEPRECATION(
+ "torch.set_autocast_ipu_dtype(dtype) is deprecated. Please use torch.set_autocast_dtype('ipu', dtype) instead.")
at::ScalarType targetType = reinterpret_cast<THPDtype*>(arg)->scalar_type;
- at::autocast::set_autocast_ipu_dtype(targetType);
+ at::autocast::set_autocast_dtype(at::kIPU, targetType);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
@@ -623,15 +701,19 @@ static PyObject* set_autocast_xla_dtype(PyObject* _unused, PyObject* arg) {
"dtype must be a torch.dtype (got ",
Py_TYPE(arg)->tp_name,
")");
+ TORCH_WARN_DEPRECATION(
+ "torch.set_autocast_xla_dtype(dtype) is deprecated. Please use torch.set_autocast_dtype('xla', dtype) instead.")
at::ScalarType targetType = reinterpret_cast<THPDtype*>(arg)->scalar_type;
- at::autocast::set_autocast_xla_dtype(targetType);
+ at::autocast::set_autocast_dtype(at::kXLA, targetType);
Py_RETURN_NONE;
END_HANDLE_TH_ERRORS
}
static PyObject* get_autocast_gpu_dtype(PyObject* _unused, PyObject* arg) {
HANDLE_TH_ERRORS
- at::ScalarType current_dtype = at::autocast::get_autocast_gpu_dtype();
+ TORCH_WARN_DEPRECATION(
+ "torch.get_autocast_gpu_dtype() is deprecated. Please use torch.get_autocast_dtype('cuda') instead.")
+ at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kCUDA);
auto dtype = (PyObject*)torch::getTHPDtype(current_dtype);
Py_INCREF(dtype);
return dtype;
@@ -640,7 +722,9 @@ static PyObject* get_autocast_gpu_dtype(PyObject* _unused, PyObject* arg) {
static PyObject* get_autocast_cpu_dtype(PyObject* _unused, PyObject* arg) {
HANDLE_TH_ERRORS
- at::ScalarType current_dtype = at::autocast::get_autocast_cpu_dtype();
+ TORCH_WARN_DEPRECATION(
+ "torch.get_autocast_cpu_dtype() is deprecated. Please use torch.get_autocast_dtype('cpu') instead.")
+ at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kCPU);
auto dtype = (PyObject*)torch::getTHPDtype(current_dtype);
Py_INCREF(dtype);
return dtype;
@@ -649,7 +733,9 @@ static PyObject* get_autocast_cpu_dtype(PyObject* _unused, PyObject* arg) {
static PyObject* get_autocast_ipu_dtype(PyObject* _unused, PyObject* arg) {
HANDLE_TH_ERRORS
- at::ScalarType current_dtype = at::autocast::get_autocast_ipu_dtype();
+ TORCH_WARN_DEPRECATION(
+ "torch.get_autocast_ipu_dtype() is deprecated. Please use torch.get_autocast_dtype('ipu') instead.")
+ at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kIPU);
auto dtype = (PyObject*)torch::getTHPDtype(current_dtype);
Py_INCREF(dtype);
return dtype;
@@ -658,7 +744,9 @@ static PyObject* get_autocast_ipu_dtype(PyObject* _unused, PyObject* arg) {
static PyObject* get_autocast_xla_dtype(PyObject* _unused, PyObject* arg) {
HANDLE_TH_ERRORS
- at::ScalarType current_dtype = at::autocast::get_autocast_xla_dtype();
+ TORCH_WARN_DEPRECATION(
+ "torch.get_autocast_xla_dtype() is deprecated. Please use torch.get_autocast_dtype('xla') instead.")
+ at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kXLA);
auto dtype = (PyObject*)torch::getTHPDtype(current_dtype);
Py_INCREF(dtype);
return dtype;
@@ -1123,8 +1211,22 @@ static PyMethodDef methods[] = { // NOLINT
is_inference_mode_enabled,
METH_NOARGS,
nullptr},
- {"set_autocast_enabled", set_autocast_enabled, METH_O, nullptr},
- {"is_autocast_enabled", is_autocast_enabled, METH_NOARGS, nullptr},
+ {"set_autocast_enabled",
+ castPyCFunctionWithKeywords(set_autocast_enabled),
+ METH_VARARGS | METH_KEYWORDS,
+ nullptr},
+ {"is_autocast_enabled",
+ castPyCFunctionWithKeywords(is_autocast_enabled),
+ METH_VARARGS | METH_KEYWORDS,
+ nullptr},
+ {"set_autocast_dtype",
+ castPyCFunctionWithKeywords(set_autocast_dtype),
+ METH_VARARGS | METH_KEYWORDS,
+ nullptr},
+ {"get_autocast_dtype",
+ castPyCFunctionWithKeywords(get_autocast_dtype),
+ METH_VARARGS | METH_KEYWORDS,
+ nullptr},
{"_is_any_autocast_enabled", is_any_autocast_enabled, METH_NOARGS, nullptr},
{"clear_autocast_cache", clear_autocast_cache, METH_NOARGS, nullptr},
{"set_autocast_cpu_enabled", set_autocast_cpu_enabled, METH_O, nullptr},
@@ -1225,5 +1327,4 @@ PyMethodDef* python_functions() {
return methods;
}
-} // namespace autograd
-} // namespace torch
+} // namespace torch::autograd
diff --git a/torch/csrc/jit/api/function_impl.cpp b/torch/csrc/jit/api/function_impl.cpp
index d7e567b8f3..c0f0b4e486 100644
--- a/torch/csrc/jit/api/function_impl.cpp
+++ b/torch/csrc/jit/api/function_impl.cpp
@@ -116,8 +116,8 @@ GraphFunction::SpecializationKey GraphFunction::currentSpecialization() const {
// disabling autodiff pass for mobile build since autocast APIs don't exist
return SpecializationKey::AutocastOff;
#else
- bool cpu_enabled = at::autocast::is_cpu_enabled();
- bool gpu_enabled = at::autocast::is_enabled();
+ bool cpu_enabled = at::autocast::is_autocast_enabled(at::kCPU);
+ bool gpu_enabled = at::autocast::is_autocast_enabled(at::kCUDA);
if (cpu_enabled && gpu_enabled) {
return SpecializationKey::CpuGpuAutocastOn;
} else if (!cpu_enabled && !gpu_enabled) {
diff --git a/torch/csrc/jit/passes/autocast.cpp b/torch/csrc/jit/passes/autocast.cpp
index 564f70e9da..e16f2f0899 100644
--- a/torch/csrc/jit/passes/autocast.cpp
+++ b/torch/csrc/jit/passes/autocast.cpp
@@ -521,10 +521,10 @@ void Autocast(const std::shared_ptr<Graph>& graph) {
GRAPH_DUMP("\nBefore Autocast: ", graph);
if (autocastEnabled()) {
AutocastContext init = {
- at::autocast::is_enabled(),
- at::autocast::is_cpu_enabled(),
- at::autocast::get_autocast_gpu_dtype(),
- at::autocast::get_autocast_cpu_dtype()};
+ at::autocast::is_autocast_enabled(at::kCUDA),
+ at::autocast::is_autocast_enabled(at::kCPU),
+ at::autocast::get_autocast_dtype(at::kCUDA),
+ at::autocast::get_autocast_dtype(at::kCPU)};
handleBlock(graph->block(), init);
}
GRAPH_DUMP("\nAfter Autocast: ", graph);
diff --git a/torch/csrc/jit/runtime/register_prim_ops.cpp b/torch/csrc/jit/runtime/register_prim_ops.cpp
index 30ca033c51..485adcb5a8 100644
--- a/torch/csrc/jit/runtime/register_prim_ops.cpp
+++ b/torch/csrc/jit/runtime/register_prim_ops.cpp
@@ -799,7 +799,7 @@ static const std::vector<OperatorGeneratorArgs> opGenArgs{
#if defined BUILD_LITE_INTERPRETER || defined C10_MOBILE
bool enabled = false;
#else
- bool enabled = at::autocast::is_enabled();
+ bool enabled = at::autocast::is_autocast_enabled(at::kCUDA);
#endif
push(stack, enabled);
},
@@ -810,7 +810,7 @@ static const std::vector<OperatorGeneratorArgs> opGenArgs{
#if defined BUILD_LITE_INTERPRETER || defined C10_MOBILE
bool enabled = false;
#else
- bool enabled = at::autocast::is_cpu_enabled();
+ bool enabled = at::autocast::is_autocast_enabled(at::kCPU);
#endif
push(stack, enabled);
},
diff --git a/torch/overrides.py b/torch/overrides.py
index 9f99ee0c54..728c75c090 100644
--- a/torch/overrides.py
+++ b/torch/overrides.py
@@ -256,6 +256,8 @@ def get_ignored_functions() -> Set[Callable]:
handle_torch_function,
torch.set_autocast_enabled,
torch.is_autocast_enabled,
+ torch.set_autocast_dtype,
+ torch.get_autocast_dtype,
torch.clear_autocast_cache,
torch.set_autocast_cpu_enabled,
torch.is_autocast_cpu_enabled,
diff --git a/torch/utils/checkpoint.py b/torch/utils/checkpoint.py
index 259b1cd351..fc536dd546 100644
--- a/torch/utils/checkpoint.py
+++ b/torch/utils/checkpoint.py
@@ -194,25 +194,18 @@ def set_device_states(devices, states) -> None:
def _get_autocast_kwargs(device="cuda"):
- if device == "cuda":
+ if _supports_autocast(device):
device_autocast_kwargs = {
- "enabled": torch.is_autocast_enabled(),
- "dtype": torch.get_autocast_gpu_dtype(),
- "cache_enabled": torch.is_autocast_cache_enabled(),
- }
- elif _supports_autocast(device):
- device_module = _get_device_module(device)
- device_autocast_kwargs = {
- "enabled": device_module.is_autocast_enabled(),
- "dtype": device_module.get_autocast_dtype(),
+ "enabled": torch.is_autocast_enabled(device),
+ "dtype": torch.get_autocast_dtype(device),
"cache_enabled": torch.is_autocast_cache_enabled(),
}
else:
device_autocast_kwargs = None
cpu_autocast_kwargs = {
- "enabled": torch.is_autocast_cpu_enabled(),
- "dtype": torch.get_autocast_cpu_dtype(),
+ "enabled": torch.is_autocast_enabled('cpu'),
+ "dtype": torch.get_autocast_dtype('cpu'),
"cache_enabled": torch.is_autocast_cache_enabled(),
}
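At the Python level, the net effect of the hunks above is a single device-generic autocast state API. A minimal usage sketch, assuming a build that includes these bindings (function names are taken from the `__init__.pyi.in` stubs and the deprecation messages in this diff):
```
import torch

# Device-generic autocast state API (replaces the per-backend helpers
# deprecated above, e.g. torch.get_autocast_cpu_dtype()).
torch.set_autocast_dtype("cpu", torch.bfloat16)   # per-device lower-precision dtype
print(torch.get_autocast_dtype("cpu"))            # torch.bfloat16
prev = torch.is_autocast_enabled("cpu")           # per-device enabled flag
torch.set_autocast_enabled("cpu", True)
torch.set_autocast_enabled("cpu", prev)           # restore the previous state
```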
|
2.41.0
|
8f6c460cdf8b9ce9afd07aadf468e2b3bb9a174
|
Mon, 22 Apr 2024 17:17:28 +0200
|
[PATCH 0511/1000] [Inductor max autotune] Make autotuning robust against very slow Kernels (#123932)
|
If a kernel does not return in a reasonable amount of time during autotuning, it can delay inductor compilation significantly. This change introduces soft/hard kill timeouts and a mechanism to kill kernels being profiled in subprocesses if they take too long. Correspondingly, a few new config options are introduced in _inductor/config.py, all of them with inline docs. Test Plan: existing tests in test_max_autotune.py and test_cutlass_backend.py cover the new code paths. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123932 Approved by: https://github.com/jansel ghstack dependencies: #121497, #123930
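A minimal sketch of how the new timeouts might be configured, assuming a build that includes this patch (option names are taken from the `_inductor/config.py` hunk below; the values shown are illustrative, not defaults):
```
import torch._inductor.config as inductor_config

# The three new knobs only apply when autotuning runs in subprocesses.
# Defaults in this patch are 60.0 / 1.0 / 2.0 seconds.
inductor_config.autotune_in_subproc = True
inductor_config.max_autotune_subproc_result_timeout_seconds = 30.0    # max time per benchmark result
inductor_config.max_autotune_subproc_graceful_timeout_seconds = 1.0   # grace period before SIGTERM
inductor_config.max_autotune_subproc_terminate_timeout_seconds = 2.0  # grace period before SIGKILL
```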
|
diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py
index f45f33ffde..bab2af3cb8 100644
--- a/torch/_inductor/autotune_process.py
+++ b/torch/_inductor/autotune_process.py
@@ -171,22 +171,55 @@ class TuningProcess:
assert self.request_queue is not None
self.request_queue.put(obj)
- def get(self) -> Any:
+ def get(
+ self, result_timeout=120.0, graceful_timeout=3.0, terminate_timeout=1.0
+ ) -> Any:
"""
- Get a response from the child process.
+ Get a response from the child process. Raises queue.Empty on timeout
+ or if the process dies.
+
+ This method is (so far) only used by TuningProcessPool, where torch._inductor.config entries are being used
+ to populate the timeouts:
+
+ Arguments:
+
+ @param result_timeout: Timeout in seconds, defaults to 120.0 or to
+ config.max_autotune_subproc_result_timeout_seconds when called by TuningProcessPool
+ @param graceful_timeout: Timeout in seconds to allow graceful shutdown (SIGTERM is sent after this time).
+ Defaults to 3.0 or to config.max_autotune_subproc_graceful_timeout_seconds
+ @param terminate_timeout: Timeout in seconds after SIGTERM, until we send SIGKILL if the process
+ remains alive. Defaults to 1.0 or to
+ config.max_autotune_subproc_terminate_timeout_seconds.
+ Returns:
+ A response from the child process (Any type)
"""
assert self.process is not None
assert self.response_queue is not None
while True:
try:
- return self.response_queue.get(timeout=1.0)
+ remaining_timeout = result_timeout
+ res = None
+ while remaining_timeout is not None and remaining_timeout >= 1.0:
+ remaining_timeout -= 0.5
+ try:
+ res = self.response_queue.get(timeout=0.5)
+ break
+ except queue.Empty:
+ if not self.process.is_alive():
+ raise # is being caught a few lines below
+ if res is None:
+ res = self.response_queue.get(timeout=remaining_timeout)
+ return res
except queue.Empty:
status = self.process.exitcode
if status is None:
- # child process is still running
- continue
- # child process crashed
- self.clear()
+ self.kill(
+ graceful_timeout=graceful_timeout,
+ terminate_timeout=terminate_timeout,
+ )
+ else:
+ # child process crashed
+ self.clear()
raise
def terminate(self) -> None:
@@ -206,6 +239,29 @@ class TuningProcess:
self.process.join()
self.clear()
+ def kill(self, graceful_timeout=5.0, terminate_timeout=1.0) -> None:
+ # Tries to kill the process, using a graceful_timeout in which the process
+ # is allowed to exit gracefully. If the process is still alive,
+ # it will be terminated. If that is not sufficient to end it
+ # within terminate_timeout seconds, it will be killed.
+ if self.process is not None:
+ self.terminate()
+ self.process.join(timeout=graceful_timeout)
+ if self.process.is_alive():
+ log.warning(
+ "Sending SIGTERM to process with PID %d",
+ self.process.pid,
+ )
+ self.process.terminate()
+ self.process.join(timeout=terminate_timeout)
+ if self.process.is_alive():
+ log.error(
+ "Sending SIGKILL to process with PID %d",
+ self.process.pid,
+ )
+ self.process.kill() # This should definitely end the process
+ self.clear()
+
@dataclasses.dataclass
class TuningProcessPool:
@@ -239,7 +295,7 @@ class TuningProcessPool:
# Wait for the initialization to finish
for p in self.processes.queue:
- assert isinstance(p.get(), Pong)
+ assert isinstance(p.get(result_timeout=None), Pong)
# Use a thread pool to manage distributing work to the subprocesses.
# Threads block on an available process, so it makes sense to match
@@ -300,7 +356,11 @@ class TuningProcessPool:
process = self.processes.get()
process.put(choice.bmreq)
try:
- return process.get()
+ return process.get(
+ config.max_autotune_subproc_result_timeout_seconds,
+ config.max_autotune_subproc_graceful_timeout_seconds,
+ config.max_autotune_subproc_terminate_timeout_seconds,
+ )
except queue.Empty:
warnings.warn(
f"Failed to benchmark choice '{choice}'. It will be ignored. "
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index b5b8e16684..f31a03a365 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -247,6 +247,15 @@ save_args = os.environ.get("TORCHINDUCTOR_SAVE_ARGS") == "1"
# We will disable creating subprocess for autotuning if this is False
autotune_in_subproc = os.environ.get("TORCHINDUCTOR_AUTOTUNE_IN_SUBPROC") == "1"
+# The following three timeouts are applicable if autotune_in_subproc is True:
+
+# Max time that a a valid benchmark result may take during autotuning
+max_autotune_subproc_result_timeout_seconds = 60.0
+# Additional time we allow subprocesses to terminate gracefully after the timeout until we send a SIGTERM
+max_autotune_subproc_graceful_timeout_seconds = 1.0
+# Additional time that we grant after a SIGTERM until we do a hard SIGKILL of subprocesses
+max_autotune_subproc_terminate_timeout_seconds = 2.0
+
# If autotuning in subprocess, whether to use multiple devices
autotune_multi_device = os.environ.get("TORCHINDUCTOR_AUTOTUNE_MULTI_DEVICE") == "1"
|
2.41.0
|
07f944f226554a52d4028b37b35bcc07b427cc7
|
Tue, 23 Apr 2024 13:35:24 +0000
|
[PATCH 0512/1000] Support fp8 quantization (#123161)
|
This commit enables the float8_e5m2 and float8_e4m3fn dtypes in FX quantization and PT2E. Motivation for using fp8 quantization instead of int8:
- inference works better when run with the same datatype the model was trained with,
- fp8 handles outliers better, which is one of the problems in LLM activations.
The numerical recipe we want to use it for is fp8 inference:
- bgemms/gemms running in float8_e4m3fn,
- per-tensor quantization/scaling,
- an amax observer for measurement with input_backoff and weight_backoff.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123161 Approved by: https://github.com/jgong5, https://github.com/jerryzh168
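A minimal sketch of an fp8 activation spec for PT2E, mirroring the parametrized test added below (assumes a build with this patch, where the float8 dtypes pass the observer's dtype check and quant bounds come from torch.finfo rather than torch.iinfo):
```
import torch
from torch.ao.quantization.observer import default_observer
from torch.ao.quantization.quantizer import QuantizationSpec

# Per-tensor float8_e4m3fn activation spec, as in the new test_quantization_dtype test.
dtype = torch.float8_e4m3fn
act_fp8_qspec = QuantizationSpec(
    dtype=dtype,
    quant_min=int(torch.finfo(dtype).min),
    quant_max=int(torch.finfo(dtype).max),
    qscheme=torch.per_tensor_affine,
    is_dynamic=False,
    observer_or_fake_quant_ctr=default_observer,
)
```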
|
diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py
index e3b7eadf48..3c759fc65c 100644
--- a/test/quantization/pt2e/test_quantize_pt2e.py
+++ b/test/quantization/pt2e/test_quantize_pt2e.py
@@ -49,7 +49,11 @@ from torch.testing._internal.common_quantization import (
skipIfNoQNNPACK,
TestHelperModules,
)
-from torch.testing._internal.common_utils import TemporaryFileName
+from torch.testing._internal.common_utils import (
+ instantiate_parametrized_tests,
+ parametrize,
+ TemporaryFileName,
+)
@skipIfNoQNNPACK
@@ -1175,14 +1179,15 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
self.assertIsNot(observers[0], observers[2])
self.assertIsNot(observers[1], observers[2])
- def test_int16(self):
- class Int16ActQuantizer(Quantizer):
+ @parametrize("dtype", (torch.int16, torch.float8_e5m2, torch.float8_e4m3fn))
+ def test_quantization_dtype(self, dtype):
+ class DtypeActQuantizer(Quantizer):
def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
- # using int32 to simulate int16
- int16_qspec = QuantizationSpec(
- dtype=torch.int16,
- quant_min=-(2**15),
- quant_max=2**15 - 1,
+ info_fun = torch.iinfo if dtype == torch.int16 else torch.finfo
+ activate_qspec = QuantizationSpec(
+ dtype=dtype,
+ quant_min=int(info_fun(dtype).min),
+ quant_max=int(info_fun(dtype).max),
qscheme=torch.per_tensor_affine,
is_dynamic=False,
observer_or_fake_quant_ctr=observer.default_observer,
@@ -1196,10 +1201,10 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
observer_or_fake_quant_ctr=observer.default_weight_observer,
)
quantization_config = QuantizationConfig(
- input_activation=int16_qspec,
+ input_activation=activate_qspec,
weight=int8_qspec,
bias=None,
- output_activation=int16_qspec,
+ output_activation=activate_qspec,
)
OP_TO_ANNOTATOR["conv"](model, quantization_config)
@@ -1214,7 +1219,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def forward(self, x):
return self.conv(x)
- quantizer = Int16ActQuantizer()
+ quantizer = DtypeActQuantizer()
node_occurrence = {
# one for input of the first conv, one for output for the first conv
torch.ops.quantized_decomposed.quantize_per_tensor.default: 2,
@@ -1230,7 +1235,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
self._test_quantizer(
M().eval(),
example_inputs,
- Int16ActQuantizer(),
+ quantizer,
node_occurrence,
node_list,
)
@@ -2248,3 +2253,6 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
node_occurrence,
node_list,
)
+
+
+instantiate_parametrized_tests(TestQuantizePT2E)
diff --git a/torch/ao/quantization/fx/_decomposed.py b/torch/ao/quantization/fx/_decomposed.py
index c54a3046d5..8feafafea2 100644
--- a/torch/ao/quantization/fx/_decomposed.py
+++ b/torch/ao/quantization/fx/_decomposed.py
@@ -10,12 +10,11 @@ from torch.library import impl, Library
# name is not too long
quantized_decomposed_lib = Library("quantized_decomposed", "DEF")
-_DTYPE_TO_QVALUE_BOUNDS = {
- torch.uint8: (0, 255),
- torch.int8: (-128, 127),
- torch.int16: (-(2**15), 2**15 - 1),
- torch.int32: (-(2**31), 2**31 - 1),
-}
+_INTEGER_DTYPES = [torch.uint8, torch.int8, torch.int16, torch.int32]
+_FLOAT_DTYPES = [torch.float8_e5m2, torch.float8_e4m3fn]
+
+_DTYPE_TO_QVALUE_BOUNDS = {k : (torch.iinfo(k).min, torch.iinfo(k).max) for k in _INTEGER_DTYPES}
+_DTYPE_TO_QVALUE_BOUNDS.update({k : (int(torch.finfo(k).min), int(torch.finfo(k).max)) for k in _FLOAT_DTYPES})
# Helper to check the passed in quant min and max are valid for the dtype
def _quant_min_max_bounds_check(quant_min, quant_max, dtype):
diff --git a/torch/ao/quantization/fx/convert.py b/torch/ao/quantization/fx/convert.py
index 023abff834..ef90f8b71e 100644
--- a/torch/ao/quantization/fx/convert.py
+++ b/torch/ao/quantization/fx/convert.py
@@ -84,6 +84,18 @@ __all__ = [
"convert_weighted_module",
]
+SUPPORTED_QDTYPES = [
+ torch.quint8,
+ torch.qint8,
+ torch.qint32,
+ torch.uint8,
+ torch.int8,
+ torch.int16,
+ torch.int32,
+ torch.float8_e5m2,
+ torch.float8_e4m3fn,
+]
+
_QSCHEME_TO_CHOOSE_QPARAMS_OP = {
torch.per_tensor_affine: torch.ops.quantized_decomposed.choose_qparams.tensor,
torch.per_tensor_symmetric: torch.ops.quantized_decomposed.choose_qparams_symmetric.tensor,
@@ -136,8 +148,7 @@ def _replace_observer_with_quantize_dequantize_node_decomposed(
if hasattr(activation_post_process, "is_dynamic"):
is_dynamic = activation_post_process.is_dynamic # type: ignore[assignment]
- if dtype in [torch.quint8, torch.qint8, torch.qint32, torch.uint8, torch.int8, torch.int16, torch.int32] and \
- (not is_dynamic):
+ if dtype in SUPPORTED_QDTYPES and (not is_dynamic):
# TODO: probably should cleanup this condition check, it's hard
# to reason about this if and the following elif
@@ -372,7 +383,7 @@ def _replace_observer_with_quantize_dequantize_node(
if hasattr(activation_post_process, "is_dynamic"):
is_dynamic = activation_post_process.is_dynamic # type: ignore[attr-defined, assignment]
- if dtype in [torch.quint8, torch.qint8, torch.qint32] and \
+ if dtype in [torch.quint8, torch.qint8, torch.qint32, torch.float8_e5m2, torch.float8_e4m3fn] and \
(not is_dynamic):
# TODO: probably should cleanup this condition check, it's hard
# to reason about this if and the following elif
@@ -477,15 +488,7 @@ def _is_conversion_supported(activation_post_process: torch.nn.Module) -> bool:
is_dynamic = activation_post_process.is_dynamic # type: ignore[attr-defined, assignment]
return (
- (dtype in [
- torch.quint8,
- torch.qint8,
- torch.qint32,
- torch.uint8,
- torch.int8,
- torch.int16,
- torch.int32
- ] and (not is_dynamic)) or # type: ignore[return-value]
+ (dtype in SUPPORTED_QDTYPES and (not is_dynamic)) or # type: ignore[return-value]
is_dynamic or
dtype == torch.float16
)
diff --git a/torch/ao/quantization/fx/prepare.py b/torch/ao/quantization/fx/prepare.py
index 6a4ae0bb85..9ca91ecb49 100644
--- a/torch/ao/quantization/fx/prepare.py
+++ b/torch/ao/quantization/fx/prepare.py
@@ -138,7 +138,9 @@ _OBS_DTYPE_LIST = [
torch.uint8,
torch.int8,
torch.int16,
- torch.int32
+ torch.int32,
+ torch.float8_e5m2,
+ torch.float8_e4m3fn,
]
_DEFAULT_FP32_OBS_OR_FQ_CTR = PlaceholderObserver.with_args(dtype=torch.float)
diff --git a/torch/ao/quantization/observer.py b/torch/ao/quantization/observer.py
index 718dc7d50b..5f075df1cd 100644
--- a/torch/ao/quantization/observer.py
+++ b/torch/ao/quantization/observer.py
@@ -244,6 +244,8 @@ class UniformQuantizationObserverBase(ObserverBase):
torch.uint8,
torch.int16,
torch.int32,
+ torch.float8_e5m2,
+ torch.float8_e4m3fn,
)
assert self.dtype in _ALLOWED_DTYPES, f"Default Observer only works for {_ALLOWED_DTYPES} data type"
diff --git a/torch/ao/quantization/utils.py b/torch/ao/quantization/utils.py
index 2a225d14b1..70b45b92fb 100644
--- a/torch/ao/quantization/utils.py
+++ b/torch/ao/quantization/utils.py
@@ -151,6 +151,8 @@ def to_underlying_dtype(qdtype):
torch.int8: torch.int8,
torch.int16: torch.int16,
torch.int32: torch.int32,
+ torch.float8_e5m2: torch.float8_e5m2,
+ torch.float8_e4m3fn: torch.float8_e4m3fn,
}
assert qdtype in DTYPE_MAPPING, "Unsupported dtype: " + str(qdtype)
return DTYPE_MAPPING[qdtype]
@@ -231,7 +233,9 @@ def activation_is_statically_quantized(qconfig):
torch.uint8,
torch.int8,
torch.int16,
- torch.int32
+ torch.int32,
+ torch.float8_e5m2,
+ torch.float8_e4m3fn,
]
and (not activation_is_dynamically_quantized(qconfig))
)
@@ -269,7 +273,9 @@ def weight_is_quantized(qconfig):
torch.uint8,
torch.int8,
torch.int16,
- torch.int32
+ torch.int32,
+ torch.float8_e5m2,
+ torch.float8_e4m3fn,
]
def weight_is_statically_quantized(qconfig):
@@ -305,7 +311,18 @@ def get_quant_type(qconfig):
assert qconfig is not None
activation = qconfig.activation()
weight = qconfig.weight()
- static_dtypes = [torch.quint8, torch.qint8, torch.quint4x2, torch.qint32, torch.uint8, torch.int8, torch.int16, torch.int32]
+ static_dtypes = [
+ torch.quint8,
+ torch.qint8,
+ torch.quint4x2,
+ torch.qint32,
+ torch.uint8,
+ torch.int8,
+ torch.int16,
+ torch.int32,
+ torch.float8_e5m2,
+ torch.float8_e4m3fn
+ ]
if weight.dtype in static_dtypes:
if hasattr(activation, 'is_dynamic') and activation.is_dynamic:
return QuantType.DYNAMIC
|
2.41.0
|
b98d43488bed0836b4da5996a50bafd0dd2c11c
|
Mon, 22 Apr 2024 12:14:03 -0700
|
[PATCH 0513/1000] Verify types in custom op schemas (#124520)
|
Before this PR, we didn't check that types in a schema were valid, because TorchScript treats unknown types as type variables. This PR checks types in a schema for the TORCH_LIBRARY APIs. To do this, we add an `allow_typevars` flag to parseSchema so that TorchScript can use allow_typevars=True. We also add error messages for common mistakes (e.g. using int64_t or double in a schema). Test Plan: new tests. Differential Revision: [D56432690](https://our.internmc.facebook.com/intern/diff/D56432690) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124520 Approved by: https://github.com/albanD
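A minimal sketch of the new behavior, mirroring the test added below (it uses the same private `_scoped_library` helper as the test; on builds with this patch the second define is rejected with a hint):
```
import torch

# After this patch, C++-style types in a schema string raise an error with a hint
# instead of silently parsing as type variables.
with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
    lib.define("foo(Tensor a, int b) -> Tensor")   # fine: schema-language types
    try:
        lib.define("bar(int64_t a) -> Tensor")     # raises: use `SymInt` or `int`
    except RuntimeError as e:
        print(e)
```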
|
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 6038bd3914..f4dc19eb01 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -1740,6 +1740,17 @@ dynamic shape operator: _torch_testing.numpy_nonzero.default
res = torch._library.utils.is_functional_schema(schema)
self.assertEqual(res, expected)
+ def test_incorrect_schema_types(self):
+ with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
+ with self.assertRaisesRegex(RuntimeError, "unknown type specifier"):
+ lib.define("foo12(Tensor a) -> asdfasdf")
+ with self.assertRaisesRegex(RuntimeError, "unknown type specifier"):
+ lib.define("foo12(asdf a) -> Tensor")
+ with self.assertRaisesRegex(RuntimeError, "Use `SymInt` or `int`"):
+ lib.define("foo12(int64_t a) -> Tensor")
+ with self.assertRaisesRegex(RuntimeError, "Use `float`"):
+ lib.define("foo12(double a) -> Tensor")
+
def test_is_tensorlist_like_type(self):
tensorlists = [
# Tensor[]
diff --git a/torch/csrc/jit/frontend/function_schema_parser.cpp b/torch/csrc/jit/frontend/function_schema_parser.cpp
index 4b681055bd..94b477676d 100644
--- a/torch/csrc/jit/frontend/function_schema_parser.cpp
+++ b/torch/csrc/jit/frontend/function_schema_parser.cpp
@@ -23,14 +23,14 @@ namespace torch::jit {
namespace {
struct SchemaParser {
- explicit SchemaParser(const std::string& str)
+ explicit SchemaParser(const std::string& str, bool allow_typevars)
: L(std::make_shared<Source>(
c10::string_view(str),
c10::nullopt,
0,
nullptr,
Source::DONT_COPY)),
- type_parser(L, /*parse_complete_tensor_types*/ false) {}
+ type_parser(L, /*parse_complete_tensor_types*/ false, allow_typevars) {}
std::variant<OperatorName, FunctionSchema> parseDeclaration() {
OperatorName name = parseName();
@@ -361,16 +361,19 @@ struct SchemaParser {
}
Lexer L;
SchemaTypeParser type_parser;
+ bool allow_typevars_;
};
} // namespace
std::variant<OperatorName, FunctionSchema> parseSchemaOrName(
- const std::string& schemaOrName) {
- return SchemaParser(schemaOrName).parseExactlyOneDeclaration();
+ const std::string& schemaOrName,
+ bool allow_typevars) {
+ return SchemaParser(schemaOrName, allow_typevars)
+ .parseExactlyOneDeclaration();
}
-FunctionSchema parseSchema(const std::string& schema) {
- auto parsed = parseSchemaOrName(schema);
+FunctionSchema parseSchema(const std::string& schema, bool allow_typevars) {
+ auto parsed = parseSchemaOrName(schema, allow_typevars);
TORCH_CHECK(
std::holds_alternative<FunctionSchema>(parsed),
"Tried to parse a function schema but only the operator name was given");
diff --git a/torch/csrc/jit/frontend/function_schema_parser.h b/torch/csrc/jit/frontend/function_schema_parser.h
index a01ca7ad0b..a60215704f 100644
--- a/torch/csrc/jit/frontend/function_schema_parser.h
+++ b/torch/csrc/jit/frontend/function_schema_parser.h
@@ -8,9 +8,15 @@
namespace torch {
namespace jit {
+// allow_typevars: If true, we assume that lowercase types that we don't
+// understand are type variables. This is only needed for TorchScript (and not
+// not needed for custom ops).
TORCH_API std::variant<c10::OperatorName, c10::FunctionSchema> parseSchemaOrName(
- const std::string& schemaOrName);
-TORCH_API c10::FunctionSchema parseSchema(const std::string& schema);
+ const std::string& schemaOrName,
+ bool allow_typevars = true);
+TORCH_API c10::FunctionSchema parseSchema(
+ const std::string& schema,
+ bool allow_typevars = true);
TORCH_API c10::OperatorName parseName(const std::string& name);
} // namespace jit
diff --git a/torch/csrc/jit/frontend/schema_type_parser.cpp b/torch/csrc/jit/frontend/schema_type_parser.cpp
index 7c4b8ba0ca..68e6f7a02b 100644
--- a/torch/csrc/jit/frontend/schema_type_parser.cpp
+++ b/torch/csrc/jit/frontend/schema_type_parser.cpp
@@ -82,12 +82,27 @@ TypePtr SchemaTypeParser::parseBaseType() {
auto it = type_map.find(text);
if (it == type_map.end()) {
- if (!text.empty() && islower(text[0])) {
+ if (allow_typevars_ && !text.empty() && islower(text[0])) {
// lower case identifiers that are not otherwise valid types
// are treated as type variables
return c10::TypeFactory::createNamed<VarType>(text);
}
- throw ErrorReport(tok.range) << "unknown type specifier";
+ if (text == "double") {
+ throw ErrorReport(tok.range)
+ << "Use `float` instead of `double` in an operator's schema string. "
+ "`float` in schema corresponds to the double type in C++";
+ }
+ if (text == "int64_t") {
+ throw ErrorReport(tok.range)
+ << "Use `SymInt` or `int` instead of `int64_t` in an operator's schema string. "
+ "`SymInt` corresponds to c10::SymInt in C++ while `int` in schema corresponds "
+ "to the int64_t type in C++.";
+ }
+ throw ErrorReport(tok.range)
+ << "unknown type specifier. Common valid schema types include "
+ "Tensor, SymInt, int, float, bool, Scalar; "
+ "for a full list, please see "
+ "https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/README.md#func ";
}
return it->second;
}
diff --git a/torch/csrc/jit/frontend/schema_type_parser.h b/torch/csrc/jit/frontend/schema_type_parser.h
index c43e4363da..e43a24beb5 100644
--- a/torch/csrc/jit/frontend/schema_type_parser.h
+++ b/torch/csrc/jit/frontend/schema_type_parser.h
@@ -20,8 +20,13 @@ struct TORCH_API SchemaTypeParser {
c10::optional<at::ScalarType> parseTensorDType(const std::string& dtype);
TypePtr parseRefinedTensor();
- SchemaTypeParser(Lexer& L, bool parse_complete_tensor_types)
- : complete_tensor_types(parse_complete_tensor_types), L(L) {}
+ SchemaTypeParser(
+ Lexer& L,
+ bool parse_complete_tensor_types,
+ bool allow_typevars)
+ : complete_tensor_types(parse_complete_tensor_types),
+ L(L),
+ allow_typevars_(allow_typevars) {}
private:
c10::optional<bool> tryToParseRequiresGrad();
@@ -35,6 +40,7 @@ struct TORCH_API SchemaTypeParser {
bool complete_tensor_types;
Lexer& L;
size_t next_id = 0;
+ bool allow_typevars_;
};
} // namespace jit
} // namespace torch
diff --git a/torch/csrc/jit/ir/irparser.cpp b/torch/csrc/jit/ir/irparser.cpp
index c37988e322..30cb5ad9eb 100644
--- a/torch/csrc/jit/ir/irparser.cpp
+++ b/torch/csrc/jit/ir/irparser.cpp
@@ -35,7 +35,10 @@ class IRParser {
: L(std::make_shared<Source>(str)),
g(graph),
vmap(vmap),
- type_parser(L, /*parse_complete_tensor_types*/ true),
+ type_parser(
+ L,
+ /*parse_complete_tensor_types*/ true,
+ /*allow_type_vars*/ true),
parse_tensor_constants_(parse_tensor_constants) {}
std::string parseVar();
diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp
index 5eb4851089..2023ec27ba 100644
--- a/torch/csrc/jit/python/init.cpp
+++ b/torch/csrc/jit/python/init.cpp
@@ -1765,7 +1765,11 @@ void initJITBindings(PyObject* module) {
},
py::arg("input"),
py::arg("parse_tensor_constants") = false);
- m.def("parse_schema", parseSchema);
+ m.def(
+ "parse_schema",
+ &parseSchema,
+ py::arg("schema"),
+ py::arg("allow_typevars") = true);
m.def("unify_type_list", [](const std::vector<TypePtr>& types) {
std::ostringstream s;
auto type = unifyTypeList(types, s);
diff --git a/torch/csrc/jit/runtime/static/passes.cpp b/torch/csrc/jit/runtime/static/passes.cpp
index fcd2b79e39..68fd8a270c 100644
--- a/torch/csrc/jit/runtime/static/passes.cpp
+++ b/torch/csrc/jit/runtime/static/passes.cpp
@@ -1347,7 +1347,8 @@ bool isNoOpSlice(Node* node) {
void EliminateNoOpSlice(std::shared_ptr<Graph>& graph) {
DepthFirstGraphNodeIterator it(graph);
auto schema = torch::schema(
- "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]");
+ "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]",
+ /*allow_typevars*/ true);
Node* node = nullptr;
std::vector<Node*> to_delete;
while ((node = it.next()) != nullptr) {
diff --git a/torch/library.h b/torch/library.h
index c38179a6ee..035cc17597 100644
--- a/torch/library.h
+++ b/torch/library.h
@@ -406,8 +406,8 @@ inline CppFunction dispatch(c10::DeviceType type, Func&& raw_f) {
/// ```
///
/// \ingroup torch-schema-overloads
-inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k) {
- c10::FunctionSchema s = torch::jit::parseSchema(str);
+inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k, bool allow_typevars=false) {
+ c10::FunctionSchema s = torch::jit::parseSchema(str, /*allow_typevars*/allow_typevars);
s.setAliasAnalysis(k);
return s;
}
@@ -415,8 +415,8 @@ inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k) {
/// Function schemas can be directly constructed from string literals.
///
/// \ingroup torch-schema-overloads
-inline c10::FunctionSchema schema(const char* s) {
- return schema(s, c10::AliasAnalysisKind::FROM_SCHEMA);
+inline c10::FunctionSchema schema(const char* s, bool allow_typevars=false) {
+ return schema(s, c10::AliasAnalysisKind::FROM_SCHEMA, allow_typevars);
}
/// \private
|
2.41.0
|
f3e1f1c93c578bda5afd6ea20fd80da04db7865
|
Tue, 23 Apr 2024 14:39:37 +0000
|
[PATCH 0514/1000] Revert "Add support for capturing tensors with score_mod (#124444)"
|
This reverts commit e0c5113dec79608941db69ae091dfe8893f9a14f. Reverted https://github.com/pytorch/pytorch/pull/124444 on behalf of https://github.com/malfet due to This is weird, but somehow profile test started to timeout after this PR, see https://hud.pytorch.org/hud/pytorch/pytorch/main/1?per_page=50&name_filter=noGPU_AVX512 ([comment](https://github.com/pytorch/pytorch/pull/124444#issuecomment-2072506731))
|
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index 4c8043d9bf..b906689af9 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -4,7 +4,7 @@ import functools
from collections import namedtuple
from typing import Callable
-from unittest import skip, skipUnless
+from unittest import expectedFailure, skipUnless
from unittest.mock import patch
import torch
@@ -28,8 +28,6 @@ supported_platform = skipUnless(
Tolerances = namedtuple("Tolerances", ["atol", "rtol"])
torch.set_float32_matmul_precision("high")
-index = torch.ops.aten.index
-
def create_attention(score_mod):
return functools.partial(_templated_attention, score_mod=score_mod)
@@ -41,8 +39,6 @@ test_dtypes = (
else [torch.float16, torch.float32]
)
-test_dtypes_fast = [torch.float16]
-
# TODO float16 was causing ERRORs for tests on ROCm
# See https://github.com/pytorch/pytorch/issues/123531
if common_utils.TEST_WITH_ROCM:
@@ -57,19 +53,13 @@ def _causal_mod(score, b, h, token_q, token_kv):
return torch.where(token_q >= token_kv, score, float("-inf"))
-B = 4
-H = 8
-S = 2048
-D = 64
-
-
class TestTemplatedSDPA(InductorTestCase):
def run_test(self, score_mod: Callable, dtype: torch.dtype = torch.float16):
sdpa_partial = create_attention(score_mod)
compiled_sdpa = torch.compile(sdpa_partial)
- q = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
- k = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
- v = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
+ q = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
+ k = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
+ v = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
golden_out = sdpa_partial(
q.to(torch.float64), k.to(torch.float64), v.to(torch.float64)
)
@@ -157,116 +147,23 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(composed_score_mod, dtype)
+ # TODO We are currently not capturing free variables in the closure correctly
+ @expectedFailure
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
def test_captured_buffers(self, dtype: torch.dtype):
- head_offset = torch.rand(H, device="cuda", dtype=dtype)
+ head_offset = torch.rand(8, device="cuda", dtype=dtype)
def score_mod(score, b, h, m, n):
- return score + index(head_offset, [h])
+ return score + head_offset[h]
self.run_test(score_mod, dtype)
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes_fast)
- def test_seq_masking(self, dtype):
- seq_idx = torch.zeros(S, device="cuda", dtype=torch.bool)
- seq_idx[S // 2 :] = 1
-
- def seq_mask_mod(score, b, h, q, kv):
- return torch.where(
- index(seq_idx, [q]) == index(seq_idx, [kv]), score, float("-inf")
- )
-
- self.run_test(seq_mask_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes_fast)
- def test_load_from_bias_seq_only(self, dtype):
- bias = torch.randn(S, S, device="cuda", dtype=dtype)
-
- def bias_mod(score, b, h, q, kv):
- return score + index(bias, [q, kv])
-
- self.run_test(bias_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes_fast)
- def test_load_from_bias_seq_batch(self, dtype):
- bias = torch.randn(B, S, S, device="cuda", dtype=dtype)
-
- def bias_mod(score, b, h, q, kv):
- return score + index(bias, [b, q, kv])
-
- self.run_test(bias_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes_fast)
- def test_load_from_bias_head_seq_batch(self, dtype):
- bias = torch.randn(B, H, S, S, device="cuda", dtype=dtype)
-
- def bias_mod(score, b, h, q, kv):
- return score + index(bias, [b, h, q, kv])
-
- self.run_test(bias_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes_fast)
- def test_load_rel_bias(self, dtype):
- rel_bias = torch.randn(2 * S, device="cuda", dtype=dtype)
-
- def bias_mod(score, b, h, q, kv):
- return score + index(rel_bias, [(q - kv) + S])
-
- self.run_test(bias_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes_fast)
- def test_dependent_causal_bidirectional(self, dtype):
- num_bidirectional = torch.randint(0, S, (B,), device="cuda", dtype=torch.int32)
-
- def bias_mod(score, b, h, q, kv):
- causal_attention = q >= kv
- cur_num_bidirectional = index(num_bidirectional, (b,))
- bidirectional_attention_on_video = (q <= cur_num_bidirectional) & (
- kv <= cur_num_bidirectional
- )
- return torch.where(
- bidirectional_attention_on_video | causal_attention,
- score,
- -float("inf"),
- )
-
- self.run_test(bias_mod, dtype)
-
- @supported_platform
- @skip("Triton bug ") # https://github.com/pytorch/pytorch/issues/124571
- @common_utils.parametrize("dtype", test_dtypes)
- def test_njt_causal(self, dtype):
- offsets = torch.tensor(
- [0, 1024, 1024 + 512, S], device="cuda", dtype=torch.int32
- )
- seq_idx = torch.zeros(S, device="cuda", dtype=torch.int32)
- for idx in range(len(offsets) - 1):
- seq_idx[offsets[idx] : offsets[idx + 1]] = idx
-
- def create_njt_wrapper(orig_score_mod, offsets, seq_idx):
- def njt_score_mod(qk, b, h, q, kv):
- q_nested = q - index(offsets, [index(seq_idx, [q])])
- kv_nested = kv - index(offsets, [index(seq_idx, [kv])])
- return orig_score_mod(qk, b, h, q_nested, kv_nested)
-
- return njt_score_mod
-
- causal_njt = create_njt_wrapper(_causal_mod, offsets, seq_idx)
-
- self.run_test(causal_njt, dtype)
-
@supported_platform
def test_backwards_fails(self):
make_tensor = functools.partial(
torch.randn,
- (B, H, S, D),
+ (4, 8, 2048, 64),
dtype=torch.float32,
device="cuda",
requires_grad=True,
@@ -280,9 +177,9 @@ class TestTemplatedSDPA(InductorTestCase):
@supported_platform
def test_mixed_dtypes_fails(self):
- query = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
- key = torch.randn((1, 1, 1024, 64), dtype=torch.float16, device="cuda")
- value = torch.randn((1, 1, 1024, 64), dtype=torch.float16, device="cuda")
+ query = torch.randn((1, 1, 2048, 64), dtype=torch.float32, device="cuda")
+ key = torch.randn((1, 1, 2048, 64), dtype=torch.float16, device="cuda")
+ value = torch.randn((1, 1, 2048, 64), dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(
ValueError, "Expected query, key, and value to have the same dtype"
):
@@ -304,21 +201,6 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(score_mod)
- @supported_platform
- @patch.object(torch._inductor.config, "max_autotune", True)
- def test_max_autotune_with_captured(self):
- head_scale = torch.randn(H, device="cuda")
- batch_scale = torch.randn(B, device="cuda")
- tok_scale = torch.randn(S, device="cuda")
-
- def bias_mod(score, batch, head, token_q, token_kv):
- score = score + index(tok_scale, [token_q])
- score = score + index(batch_scale, [batch])
- score = score + index(head_scale, [head])
- return score
-
- self.run_test(bias_mod)
-
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
@common_utils.parametrize("score_mod", [_identity_mod, _causal_mod])
@@ -329,7 +211,7 @@ class TestTemplatedSDPA(InductorTestCase):
make_tensor = functools.partial(
torch.randn,
- (B, H, S, D),
+ (4, 8, 2048, 64),
dtype=dtype,
device="cuda",
requires_grad=True,
@@ -371,7 +253,7 @@ class TestTemplatedSDPA(InductorTestCase):
def test_logsumexp_only_return(self):
make_tensor = functools.partial(
torch.randn,
- (B, H, S, D),
+ (4, 8, 2048, 64),
dtype=torch.float32,
device="cuda",
requires_grad=True,
@@ -392,7 +274,7 @@ class TestTemplatedSDPA(InductorTestCase):
def test_logsumexp_is_not_fused(self):
make_tensor = functools.partial(
torch.randn,
- (B, H, S, D),
+ (4, 8, 2048, 64),
dtype=torch.float32,
device="cuda",
requires_grad=True,
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index 723e2a95cd..e0b0233e05 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -1433,10 +1433,12 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
) -> "VariableTracker":
from .builder import wrap_fx_proxy
- query, key, value, score_mod = self.normalize_to_args(args, kwargs)
+ query, key, value, score_mod, *other_buffers = self.normalize_to_args(
+ args, kwargs
+ )
p_args, p_kwargs = self.create_wrapped_node(tx, query, score_mod)
- proxied_args = [query, key, value]
+ proxied_args = [query, key, value, *other_buffers]
# Store the invocation as a call
# Norm_kwargs contains the score_function and we dont want to proxy this because
diff --git a/torch/_higher_order_ops/templated_attention.py b/torch/_higher_order_ops/templated_attention.py
index 388e741837..09e10754fe 100644
--- a/torch/_higher_order_ops/templated_attention.py
+++ b/torch/_higher_order_ops/templated_attention.py
@@ -60,7 +60,7 @@ def math_attention(
"""
assert len(other_buffers) == 0, "Other buffers are not yet supported."
- scores = (query @ key.transpose(-2, -1)).to(dtype=torch.float32)
+ scores = query @ key.transpose(-2, -1)
b = torch.arange(0, scores.size(0), device=scores.device)
h = torch.arange(0, scores.size(1), device=scores.device)
@@ -179,11 +179,9 @@ def templated_attention_functionalize(
assert isinstance(other_buffers_unwrapped, tuple)
assert all(isinstance(item, torch.Tensor) for item in other_buffers_unwrapped)
- example_vals = (
- [torch.zeros((), dtype=query.dtype)]
- + [torch.zeros((), dtype=torch.int) for _ in range(4)]
- + list(other_buffers_unwrapped)
- )
+ example_vals = [torch.zeros((), dtype=query.dtype)] + [
+ torch.zeros((), dtype=torch.int) for _ in range(4)
+ ]
with ctx.redispatch_to_next() as m:
functional_score_mod = ctx.functionalize(score_mod)
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index 152621453c..4950f5e802 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -3413,14 +3413,22 @@ class TritonScheduling(BaseScheduling):
buffer_names.update(node.used_buffer_names())
# Get buffers objects
-
def _get_buffer(name: str) -> Union[ir.Buffer, ir.TensorBox]:
- buf = V.graph.get_buffer(name)
- if buf is None:
- raise RuntimeError(f"Failed to find buffer matching name {name}")
- return buf
+ if name in V.graph.name_to_buffer:
+ return V.graph.name_to_buffer[name]
+ elif name in V.graph.graph_inputs:
+ return V.graph.graph_inputs[name]
+ elif name in V.graph.constants:
+ data = V.graph.constants[name]
+ return ir.ConstantBuffer(
+ name,
+ ir.FixedLayout(
+ data.device, data.dtype, *V.graph.static_sizes_strides(data)
+ ),
+ )
+ raise RuntimeError(f"Failed to find buffer matching name {name}")
- buffers = [V.graph.get_buffer(name) for name in buffer_names]
+ buffers = [_get_buffer(name) for name in buffer_names]
# In theory we can separately check xnumel and rnumel are <= int_max
# but some indexers do use the full linear index so we need to be
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index a160055ee1..97e1683120 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -660,14 +660,6 @@ class GraphLowering(torch.fx.Interpreter):
return self.name_to_buffer[buffer_name]
if buffer_name in self.graph_inputs:
return self.graph_inputs[buffer_name]
- if buffer_name in self.constants:
- data = V.graph.constants[buffer_name]
- return ir.ConstantBuffer(
- buffer_name,
- ir.FixedLayout(
- data.device, data.dtype, *V.graph.static_sizes_strides(data)
- ),
- )
return None
def get_dtype(self, buffer_name: str):
diff --git a/torch/_inductor/kernel/templated_attention.py b/torch/_inductor/kernel/templated_attention.py
index 4c59036fbb..7942a367e2 100644
--- a/torch/_inductor/kernel/templated_attention.py
+++ b/torch/_inductor/kernel/templated_attention.py
@@ -3,7 +3,6 @@ import logging
from typing import Any, List
import torch
-from .. import config
from ..lowering import empty_strided, lowerings, register_lowering
from ..select_algorithm import autotune_select_algorithm, TritonTemplate
@@ -115,14 +114,12 @@ sdpa_template = TritonTemplate(
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk = tl.dot(q, k.to(MATMUL_PRECISION), acc=qk)
# ~~~~~~~~~~~~~~~~~~~ Apply score modification ~~~~~~~~~~~~~~~~~~~
- m = offs_m[:, None]
- n = start_n + offs_n[None, :]
{{ modification(
score="qk",
b="off_hz // H",
h="off_hz % H",
- m="m",
- n="n",
+ m="offs_m[:, None]",
+ n="start_n + offs_n[None, :]",
out="qk"
) | indent_except_first(2) }}
# TODO: In the case that score_mod is linear, this can be LICMed
@@ -173,8 +170,7 @@ sdpa_template = TritonTemplate(
)
-# TODO: We probably also need a layout constraint?
-@register_lowering(torch.ops.higher_order.templated_attention, type_promotion_kind=None)
+@register_lowering(torch.ops.higher_order.templated_attention)
def templated_attention(*args, **kwargs):
from torch._prims_common import make_contiguous_strides_for
from ..ir import (
@@ -186,7 +182,7 @@ def templated_attention(*args, **kwargs):
TensorBox,
)
- query, key, value, subgraph, *other_buffers = args
+ query, key, value, subgraph = args
def create_placeholder(name: str, dtype: torch.dtype) -> InputBuffer:
return TensorBox.create(
@@ -276,22 +272,17 @@ def templated_attention(*args, **kwargs):
configs: List[Any] = []
if query.get_dtype() == torch.float32:
configs.append((64, 64, 4, 3))
- else:
- configs.append((128, 64, 4, 3))
- if config.max_autotune:
- configs += [
- (128, 64, 4, 3),
- (128, 128, 4, 3),
- (128, 128, 8, 2),
- (64, 128, 4, 3),
- ]
- # Note, we don't need to pass in the captured buffers explicitly
- # because they're implicitly added by the score_mod function
- # We do need to explicitly pass it in for autotuning though.
+ configs += [
+ (128, 64, 4, 3),
+ (128, 128, 4, 3),
+ (128, 128, 8, 2),
+ (64, 128, 4, 3),
+ ]
+
for BLOCK_M, BLOCK_N, num_warps, num_stages in configs:
sdpa_template.maybe_append_choice(
choices=choices,
- input_nodes=[query, key, value, logsumexp],
+ input_nodes=(query, key, value, logsumexp),
layout=layout,
subgraphs=subgraph_buffer,
mutated_inputs=[
@@ -307,10 +298,9 @@ def templated_attention(*args, **kwargs):
ROWS_GUARANTEED_SAFE=False,
OUTPUT_LOGSUMEXP=True,
)
- inputs_for_autotuning = [query, key, value, logsumexp] + list(other_buffers)
return (
autotune_select_algorithm(
- "sdpa", choices, inputs_for_autotuning, layout
+ "sdpa", choices, [query, key, value, logsumexp], layout
),
logsumexp,
)
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index f648076e4a..5360c41765 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -36,14 +36,7 @@ from .codegen.triton_utils import config_of, signature_to_meta
from .exc import CUDACompileError
from .ir import ChoiceCaller, PrimitiveInfoType
from .runtime.runtime_utils import do_bench
-from .utils import (
- get_dtype_size,
- Placeholder,
- sympy_dot,
- sympy_index_symbol,
- sympy_product,
- unique,
-)
+from .utils import get_dtype_size, Placeholder, sympy_dot, sympy_product, unique
from .virtualized import V
log = logging.getLogger(__name__)
@@ -276,23 +269,20 @@ class TritonTemplateKernel(TritonKernel):
potential multiple modifications
"""
- def add_input(name):
- return self.args.input(name)
-
class PlaceholderSubstitution(V.WrapperHandler): # type: ignore[name-defined]
self.name = "PlaceholderSubstitution"
def load(self, name: str, index: sympy.Expr):
if name not in fixed_inputs:
- # If it's not a fixed input, it's a load from a captured
- # tensor
- var = add_input(name)
- return f"tl.load({var} + {index})"
-
+ raise AssertionError(
+ f"All loads should be coming from fixed inputs - {name}"
+ )
return f"({fixed_inputs[name]})"
+ # TODO Doesn't work yet
def indirect_indexing(self, index_var, size, check):
- return sympy_index_symbol(str(index_var))
+ return self._inner.indirect_indexing(index_var, size, False)
+ # return sympy_symbol(str(index_var))
# if self.modification_cache is None:
with V.set_ops_handler(PlaceholderSubstitution(V.ops)):
@@ -599,25 +589,16 @@ class TritonTemplate(KernelTemplate):
+ "-"
)
mod = PyCodeCache.load(code, extra)
+ _, call_args, _ = kernel.args.python_argdefs()
- input_call_args = tuple(kernel.args.input_buffers.keys())
- output_call_args = tuple(kernel.args.output_buffers.keys())
-
- # We expect the input_buffer order to be [*input_nodes, *captured_buffers]
- expected_input_args = tuple(unique(x.get_name() for x in input_nodes))
- expected_output_args = (fake_out.get_name(),)
- assert input_call_args[: len(expected_input_args)] == expected_input_args, (
- input_call_args,
- expected_input_args,
- )
- assert output_call_args == expected_output_args, (
- output_call_args,
- expected_output_args,
+ expected_args = list(unique(x.get_name() for x in input_nodes))
+ expected_args.extend([fake_out.get_name()])
+ assert list(call_args)[: len(expected_args)] == expected_args, (
+ call_args,
+ expected_args,
)
-
- full_input_nodes = tuple([V.graph.get_buffer(k) for k in input_call_args])
extra_args = V.graph.sizevars.size_hints(
- map(sympy.expand, tuple(kernel.args.sizevars.keys())),
+ map(sympy.expand, call_args[len(expected_args) :]),
fallback=config.unbacked_symint_fallback,
)
@@ -655,13 +636,13 @@ class TritonTemplate(KernelTemplate):
num_stages=num_stages,
num_warps=num_warps,
matrix_instr_nonkdim=kwargs.get("matrix_instr_nonkdim", 0),
- input_tensor_meta=TensorMeta.from_irnodes(full_input_nodes),
+ input_tensor_meta=TensorMeta.from_irnodes(input_nodes),
output_tensor_meta=TensorMeta.from_irnodes(layout),
)
return TritonTemplateCaller(
kernel_hash_name,
- full_input_nodes,
+ input_nodes,
layout,
make_kernel_render,
extra.strip("-").replace("-", ", "),
|
2.41.0
|
4491c08111e19ddb17472306fbad7478fbdea0b
|
Mon, 22 Apr 2024 13:12:13 -0700
|
[PATCH 0515/1000] Restore CompileContext as well in backwards (#124626)
|
This should fix many of the unknown compile id problems currently afflicting tlparse backwards analysis. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124626 Approved by: https://github.com/bdhirsh
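The patch follows a save/restore pattern: the compile context that is active while the autograd Function is built is captured next to the tracing context, then re-entered when the backward graph is lazily compiled from a different call stack. Below is a minimal sketch of that pattern; `compile_context` mirrors the `Optional`-accepting signature from the diff, while `make_lazy_backward_compiler` and its arguments are illustrative names, not the real aot_autograd internals.

```python
import threading
from contextlib import contextmanager
from typing import Any, Callable, Optional

_TLS = threading.local()


@contextmanager
def compile_context(context: Optional[Any]):
    # Accepting None keeps the backward path safe when no compile context
    # was active at forward-compile time (restore is then a no-op swap).
    old_context = getattr(_TLS, "compile_context", None)
    _TLS.compile_context = context
    try:
        yield context
    finally:
        _TLS.compile_context = old_context


def make_lazy_backward_compiler(bw_compiler: Callable, bw_module, placeholder_list):
    # Capture whatever compile context is active while the autograd
    # Function is being built...
    saved_compile_context = getattr(_TLS, "compile_context", None)

    def compile_backward():
        # ...and re-enter it when the backward graph is finally compiled,
        # so tooling like tlparse still sees the originating compile id.
        with compile_context(saved_compile_context):
            return bw_compiler(bw_module, placeholder_list)

    return compile_backward
```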
|
diff --git a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
index 4bbed04e7e..e5e4cfae30 100644
--- a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
@@ -16,7 +16,13 @@ import torch
import torch.utils.dlpack
from torch import Tensor
from torch._dynamo.utils import lazy_format_graph_code
-from torch._guards import detect_fake_mode, tracing, TracingContext
+from torch._guards import (
+ compile_context,
+ CompileContext,
+ detect_fake_mode,
+ tracing,
+ TracingContext,
+)
from torch._logging import getArtifactLogger, trace_structured
from torch._prims_common import CUDARngStateHelper
from torch._subclasses import FakeTensor
@@ -532,6 +538,7 @@ def aot_dispatch_autograd(
_LazyGraphModule.force_recompile(bw_module)
saved_context = TracingContext.try_get()
+ saved_compile_context = CompileContext.try_get()
backward_state_indices = [
idx for idx, x in enumerate(flat_args) if isinstance(x, BackwardState)
@@ -918,9 +925,9 @@ Got grad_output types: {str(grad_output_types)}"""
ctx.maybe_clear_saved_tensors()
if CompiledFunction.compiled_bw is None:
context = torch._C._DisableAutocast if disable_amp else nullcontext
- with tracing(saved_context), context(), track_graph_compiling(
- aot_config, "backward"
- ):
+ with tracing(saved_context), compile_context(
+ saved_compile_context
+ ), context(), track_graph_compiling(aot_config, "backward"):
CompiledFunction.compiled_bw = aot_config.bw_compiler(
bw_module, placeholder_list
)
diff --git a/torch/_guards.py b/torch/_guards.py
index df62ab8e83..b4248b0f3e 100644
--- a/torch/_guards.py
+++ b/torch/_guards.py
@@ -750,7 +750,7 @@ class TracingContext:
@contextmanager
-def compile_context(context: CompileContext):
+def compile_context(context: Optional[CompileContext]):
old_context = getattr(_TLS, "compile_context", None)
_TLS.compile_context = context
try:
|
2.41.0
|
b6d052e9c1481805adf8e7c5d4f64bcdd1437f0
|
Tue, 23 Apr 2024 10:55:52 -0500
|
[PATCH 0519/1000] Specify the exact table we upload metrics to (#124321)
|
Part of https://github.com/pytorch/ci-infra/issues/113. The metrics table lives in only one AWS account, but the ARC account also needs to access it, so explicitly specify the table by its full ARN (which includes the owning account).
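A minimal usage sketch of the write path after this change, assuming the caller has cross-account permissions to the table; the payload fields below are hypothetical (the real `emit_metric()` merges reserved CI metadata with the caller-supplied metrics before writing):

```python
import boto3

# The full ARN pins the region and the owning account, so runners that live in
# a different AWS account (e.g. the ARC account) still resolve the same table.
TORCHCI_METRICS_TABLE_ARN = (
    "arn:aws:dynamodb:us-east-1:308535385114:table/torchci-metrics"
)


def put_metric(item: dict) -> None:
    session = boto3.Session(region_name="us-east-1")
    # As in the patch, the ARN is passed where a table name would normally go;
    # DynamoDB accepts either form in that parameter.
    session.resource("dynamodb").Table(TORCHCI_METRICS_TABLE_ARN).put_item(Item=item)


# Hypothetical payload for illustration only.
put_metric({"dynamo_key": "example-run", "metric_name": "example_metric", "value": 1})
```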
|
diff --git a/tools/stats/upload_metrics.py b/tools/stats/upload_metrics.py
index 8a0e93858b..16688c340c 100644
--- a/tools/stats/upload_metrics.py
+++ b/tools/stats/upload_metrics.py
@@ -20,6 +20,12 @@ try:
except ImportError as e:
print(f"Unable to import boto3. Will not be emitting metrics.... Reason: {e}")
+# Sometimes our runner machines are located in one AWS account while the metrics table may be in
+# another, so we need to specify the table's ARN explicitly.
+TORCHCI_METRICS_TABLE_ARN = (
+ "arn:aws:dynamodb:us-east-1:308535385114:table/torchci-metrics"
+)
+
class EnvVarMetric:
name: str
@@ -153,7 +159,7 @@ def emit_metric(
if EMIT_METRICS:
try:
session = boto3.Session(region_name="us-east-1")
- session.resource("dynamodb").Table("torchci-metrics").put_item(
+ session.resource("dynamodb").Table(TORCHCI_METRICS_TABLE_ARN).put_item(
Item={
**reserved_metrics,
**metrics,
|
2.41.0
|
558008a056afa12a9aa91dfd912f553ed4ceee2
|
Mon, 22 Apr 2024 18:45:29 -0700
|
[PATCH 0520/1000] [PyTorch] Add test that canEnableStaticRuntime rejects prim::CallMethod (#120853)
|
Rejecting prim::CallMethod is called out in a comment in impl.cpp, but doesn't seem to be tested. Now it is. Differential Revision: [D54338261](https://our.internmc.facebook.com/intern/diff/D54338261/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/120853 Approved by: https://github.com/houseroad
|
diff --git a/benchmarks/static_runtime/test_static_module.cc b/benchmarks/static_runtime/test_static_module.cc
index 5fc5d92749..2434e29878 100644
--- a/benchmarks/static_runtime/test_static_module.cc
+++ b/benchmarks/static_runtime/test_static_module.cc
@@ -37,6 +37,12 @@ bool testCanEnableStaticRuntime(const std::string& jit_script) {
return canEnableStaticRuntime(graph);
}
+bool testCanEnableStaticRuntimeWithIR(const std::string& ir) {
+ auto graph = std::make_shared<Graph>();
+ parseIR(ir, graph.get(), {});
+ return canEnableStaticRuntime(graph);
+}
+
bool testModuleHasOp(const std::string& jit_script, const char* op_name) {
script::Module module("module");
module.define(jit_script);
@@ -345,6 +351,15 @@ TEST(StaticRuntime, CanEnableStaticRuntime) {
EXPECT_TRUE(testCanEnableStaticRuntime(is_script_none));
EXPECT_FALSE(testCanEnableStaticRuntime(is_not_script_tensors));
EXPECT_TRUE(testCanEnableStaticRuntime(is_not_script_none));
+
+}
+TEST(StaticRuntime, CanEnableStaticRuntimeCallMethod) {
+ const auto call_method = R"IR(
+ graph(%x : Tensor):
+ %1 : Tensor = prim::CallMethod[name="offsets"](%x)
+ return (%1)
+ )IR";
+ EXPECT_FALSE(testCanEnableStaticRuntimeWithIR(call_method));
}
TEST(StaticRuntime, CanEnableStaticRuntimeSubBlocks) {
|
2.41.0
|
2fd224f270ecf12f33d4273131f5b3b73d4e2b7
|
Mon, 22 Apr 2024 12:07:02 -0700
|
[PATCH 0521/1000] [AOTI] Add more ABI-compatiblity unit test (#123900)
|
Summary: Follow-up to https://github.com/pytorch/pytorch/pull/123848; test more c10 util functions. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123900 Approved by: https://github.com/chenyang78
|
diff --git a/test/cpp/aoti_abi_check/CMakeLists.txt b/test/cpp/aoti_abi_check/CMakeLists.txt
index 8ae688ebbb..401a4c712a 100644
--- a/test/cpp/aoti_abi_check/CMakeLists.txt
+++ b/test/cpp/aoti_abi_check/CMakeLists.txt
@@ -3,7 +3,11 @@ set(AOTI_ABI_CHECK_TEST_ROOT ${TORCH_ROOT}/test/cpp/aoti_abi_check)
# Build the cpp gtest binary containing the cpp-only tests.
set(AOTI_ABI_CHECK_TEST_SRCS
${AOTI_ABI_CHECK_TEST_ROOT}/main.cpp
+ ${AOTI_ABI_CHECK_TEST_ROOT}/test_cast.cpp
${AOTI_ABI_CHECK_TEST_ROOT}/test_dtype.cpp
+ ${AOTI_ABI_CHECK_TEST_ROOT}/test_math.cpp
+ ${AOTI_ABI_CHECK_TEST_ROOT}/test_rand.cpp
+ ${AOTI_ABI_CHECK_TEST_ROOT}/test_vec.cpp
)
add_executable(test_aoti_abi_check
diff --git a/test/cpp/aoti_abi_check/test_cast.cpp b/test/cpp/aoti_abi_check/test_cast.cpp
new file mode 100644
index 0000000000..5021a14881
--- /dev/null
+++ b/test/cpp/aoti_abi_check/test_cast.cpp
@@ -0,0 +1,25 @@
+#include <gtest/gtest.h>
+
+#include <c10/util/TypeCast.h>
+#include <c10/util/bit_cast.h>
+namespace torch {
+namespace aot_inductor {
+
+TEST(TestCast, TestConvert) {
+ c10::BFloat16 a = 3.0f;
+ c10::Half b = 3.0f;
+
+ EXPECT_EQ(c10::convert<c10::Half>(a), b);
+ EXPECT_EQ(a, c10::convert<c10::BFloat16>(b));
+}
+
+TEST(TestCast, TestBitcast) {
+ c10::BFloat16 a = 3.0f;
+ c10::Half b = 3.0f;
+
+ EXPECT_EQ(c10::bit_cast<c10::BFloat16>(c10::bit_cast<c10::Half>(a)), a);
+ EXPECT_EQ(c10::bit_cast<c10::Half>(c10::bit_cast<c10::BFloat16>(b)), b);
+}
+
+} // namespace aot_inductor
+} // namespace torch
diff --git a/test/cpp/aoti_abi_check/test_math.cpp b/test/cpp/aoti_abi_check/test_math.cpp
new file mode 100644
index 0000000000..83418142bd
--- /dev/null
+++ b/test/cpp/aoti_abi_check/test_math.cpp
@@ -0,0 +1,23 @@
+#include <gtest/gtest.h>
+
+#include <ATen/NumericUtils.h>
+#include <c10/util/generic_math.h>
+#include <cmath>
+namespace torch {
+namespace aot_inductor {
+
+TEST(TestMath, TestDivFloor) {
+ EXPECT_EQ(c10::div_floor_floating(5., 0.), INFINITY);
+ EXPECT_DOUBLE_EQ(c10::div_floor_floating(5., 2.), 2.);
+ EXPECT_DOUBLE_EQ(c10::div_floor_floating(5., -2.), -3.);
+ EXPECT_EQ(c10::div_floor_integer(5, 2), 2);
+ EXPECT_EQ(c10::div_floor_integer(5, -2), -3);
+}
+
+TEST(TestMath, TestNan) {
+ EXPECT_FALSE(at::_isnan(1.0));
+ EXPECT_TRUE(at::_isnan(std::nan("")));
+}
+
+} // namespace aot_inductor
+} // namespace torch
diff --git a/test/cpp/aoti_abi_check/test_rand.cpp b/test/cpp/aoti_abi_check/test_rand.cpp
new file mode 100644
index 0000000000..98ce1a4eda
--- /dev/null
+++ b/test/cpp/aoti_abi_check/test_rand.cpp
@@ -0,0 +1,39 @@
+#include <gtest/gtest.h>
+
+#include <ATen/core/PhiloxRNGEngine.h>
+
+#include <cstdint>
+#include <iostream>
+namespace torch {
+namespace aot_inductor {
+
+int64_t randint64_cpu(
+ uint32_t seed,
+ uint32_t offset,
+ int64_t low,
+ int64_t high) {
+ auto gen = at::Philox4_32(seed, 0, offset);
+ uint64_t r0 = gen();
+ uint64_t r1 = gen();
+ uint64_t result = r0 | (r1 << 32);
+ return static_cast<int64_t>(result % (high - low)) + low;
+}
+
+TEST(TestRand, TestRandn) {
+ at::Philox4_32 engine_1(1, 0, 0);
+ float a = engine_1.randn(10);
+ at::Philox4_32 engine_2(1, 0, 0);
+ float b = engine_2.randn(10);
+
+ EXPECT_EQ(a, b);
+}
+
+TEST(TestRand, TestRandint64) {
+ int64_t a = randint64_cpu(0xffffffff, 100, 0, INT64_MAX);
+ int64_t b = randint64_cpu(0xffffffff, 100, 0, INT64_MAX);
+
+ EXPECT_EQ(a, b);
+}
+
+} // namespace aot_inductor
+} // namespace torch
diff --git a/test/cpp/aoti_abi_check/test_vec.cpp b/test/cpp/aoti_abi_check/test_vec.cpp
new file mode 100644
index 0000000000..a26576cfdd
--- /dev/null
+++ b/test/cpp/aoti_abi_check/test_vec.cpp
@@ -0,0 +1,81 @@
+#include <gtest/gtest.h>
+
+#include <ATen/cpu/vec/vec.h>
+
+#include <iostream>
+namespace torch {
+namespace aot_inductor {
+
+TEST(TestVec, TestAdd) {
+ using Vec = at::vec::Vectorized<int>;
+ std::vector<int> a(1024, 1);
+ std::vector<int> b(1024, 2);
+ Vec a_vec = Vec::loadu(a.data());
+ Vec b_vec = Vec::loadu(b.data());
+ Vec actual_vec = a_vec + b_vec;
+ std::vector<int> expected(1024, 3);
+ Vec expected_vec = Vec::loadu(expected.data());
+
+ for (int i = 0; i < Vec::size(); i++) {
+ EXPECT_EQ(expected_vec[i], actual_vec[i]);
+ }
+}
+
+TEST(TestVec, TestMax) {
+ using Vec = at::vec::Vectorized<int>;
+ std::vector<int> a(1024, -1);
+ std::vector<int> b(1024, 2);
+ Vec a_vec = Vec::loadu(a.data());
+ Vec b_vec = Vec::loadu(b.data());
+ Vec actual_vec = at::vec::maximum(a_vec, b_vec);
+ Vec expected_vec = b_vec;
+
+ for (int i = 0; i < Vec::size(); i++) {
+ EXPECT_EQ(expected_vec[i], actual_vec[i]);
+ }
+}
+
+TEST(TestVec, TestMin) {
+ using Vec = at::vec::Vectorized<int>;
+ std::vector<int> a(1024, -1);
+ std::vector<int> b(1024, 2);
+ Vec a_vec = Vec::loadu(a.data());
+ Vec b_vec = Vec::loadu(b.data());
+ Vec actual_vec = at::vec::minimum(a_vec, b_vec);
+ Vec expected_vec = a_vec;
+
+ for (int i = 0; i < Vec::size(); i++) {
+ EXPECT_EQ(expected_vec[i], actual_vec[i]);
+ }
+}
+
+TEST(TestVec, TestConvert) {
+ std::vector<int> a(1024, -1);
+ std::vector<float> b(1024, -1.0);
+ at::vec::Vectorized<int> a_vec = at::vec::Vectorized<int>::loadu(a.data());
+ at::vec::Vectorized<float> b_vec =
+ at::vec::Vectorized<float>::loadu(b.data());
+ auto actual_vec = at::vec::convert<float>(a_vec);
+ auto expected_vec = b_vec;
+
+ for (int i = 0; i < at::vec::Vectorized<int>::size(); i++) {
+ EXPECT_EQ(expected_vec[i], actual_vec[i]);
+ }
+}
+
+TEST(TestVec, TestClampMin) {
+ using Vec = at::vec::Vectorized<float>;
+ std::vector<float> a(1024, -2.0);
+ std::vector<float> min(1024, -1.0);
+ Vec a_vec = Vec::loadu(a.data());
+ Vec min_vec = Vec::loadu(min.data());
+ Vec actual_vec = at::vec::clamp_min(a_vec, min_vec);
+ Vec expected_vec = min_vec;
+
+ for (int i = 0; i < Vec::size(); i++) {
+ EXPECT_EQ(expected_vec[i], actual_vec[i]);
+ }
+}
+
+} // namespace aot_inductor
+} // namespace torch
diff --git a/torch/_inductor/codegen/cpp_prefix.h b/torch/_inductor/codegen/cpp_prefix.h
index a05a9e2b8a..ddf0ce76c1 100644
--- a/torch/_inductor/codegen/cpp_prefix.h
+++ b/torch/_inductor/codegen/cpp_prefix.h
@@ -15,7 +15,6 @@
#include <ATen/NumericUtils.h>
#include <ATen/core/PhiloxRNGEngine.h>
-#include <ATen/native/Math.h>
#include <c10/util/Float8_e4m3fn.h>
#include <c10/util/Float8_e5m2.h>
|
2.41.0
|
5db64024c49335a53957c52fcd692235a5d7c1c
|
Mon, 22 Apr 2024 15:10:14 -0700
|
[PATCH 0523/1000] [DDP][PT2D] Correctly calculate the numel with symint in DDP fusion (#124422)
|
As title Differential Revision: [D56315533](https://our.internmc.facebook.com/intern/diff/D56315533/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124422 Approved by: https://github.com/yf225 ghstack dependencies: #124421
|
diff --git a/torch/_inductor/fx_passes/ddp_fusion.py b/torch/_inductor/fx_passes/ddp_fusion.py
index 3703aa77b8..532a546dd4 100644
--- a/torch/_inductor/fx_passes/ddp_fusion.py
+++ b/torch/_inductor/fx_passes/ddp_fusion.py
@@ -2,6 +2,7 @@
import collections
import inspect
import logging
+import math
import operator
from dataclasses import dataclass
from functools import partial
@@ -329,10 +330,7 @@ def _scatter_fused_allreduce_waits(
aten.split,
(
fused_wait_node,
- [
- int(cast(torch.Size, cb.shape).numel())
- for cb in orig_comm_blocks
- ],
+ [math.prod(cast(List[int], cb.shape)) for cb in orig_comm_blocks],
),
)
with graph.inserting_after(split_node):
|
2.41.0
|
5a2d18dd923b557ec6eca33f8f452b3cd06a9a7
|
Tue, 23 Apr 2024 17:40:29 +0000
|
[PATCH 0524/1000] [Profiler] iterate frontend function events for profiler post processing (#124596)
|
The `function_events` list in `_parse_kineto_results` is used to hold all function events from the result. It contains two kinds of events. The first kind is frontend function events, whose correlation id is 0, for example `aten::add` and `aten::mul`; they sit at the top level of the profile results. The second kind is backend events, which are associated with frontend events and whose correlation ids are > 0, for example `at::native::vectorized_elementwise_kernel`, the backend event of a frontend element-wise op; they carry the device execution duration for the related frontend op. In the post-processing code below, the **frontend function events** should be iterated to find their correlated backend events in `device_corr_map`, instead of iterating all function events, because `device_corr_map` is designed as a dict whose keys are the ids of the frontend function events. https://github.com/pytorch/pytorch/blob/3af12447f85dfede191a113c052e58fa7b21a8b3/torch/autograd/profiler.py#L543-L560 https://github.com/pytorch/pytorch/blob/3af12447f85dfede191a113c052e58fa7b21a8b3/torch/autograd/profiler.py#L537-L540 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124596 Approved by: https://github.com/aaronenyeshi
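A self-contained sketch of the grouping logic described above, with simplified event records standing in for the real `FunctionEvent`/kineto objects (the field names here are made up): backend events are bucketed by the frontend event they link to, and only the frontend events are iterated to pick up their kernels and device time.

```python
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict, List


@dataclass
class Event:
    name: str
    event_id: int                  # id of the event itself
    linked_id: int = 0             # 0 for frontend ops; frontend id for backend events
    device_duration_us: float = 0.0
    kernels: List[str] = field(default_factory=list)


def attach_backend_events(events: List[Event]) -> List[Event]:
    # Bucket backend events by the frontend event they are linked to.
    device_corr_map: Dict[int, List[Event]] = defaultdict(list)
    frontend_events: List[Event] = []
    for ev in events:
        if ev.linked_id == 0:
            frontend_events.append(ev)
        else:
            device_corr_map[ev.linked_id].append(ev)

    # Iterate only the frontend events: the dict is keyed by their ids,
    # so walking every event (including backend ones) is wasted work.
    for fe in frontend_events:
        for be in device_corr_map.get(fe.event_id, []):
            fe.kernels.append(be.name)
            fe.device_duration_us += be.device_duration_us
    return frontend_events


# Example: aten::add (id 1) with one backend kernel linked to it.
evts = [
    Event("aten::add", event_id=1),
    Event("vectorized_elementwise_kernel", event_id=7, linked_id=1, device_duration_us=12.0),
]
print([(e.name, e.kernels, e.device_duration_us) for e in attach_backend_events(evts)])
```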
|
diff --git a/torch/autograd/profiler.py b/torch/autograd/profiler.py
index f233277b7e..5aa607a8e6 100644
--- a/torch/autograd/profiler.py
+++ b/torch/autograd/profiler.py
@@ -467,8 +467,13 @@ class profile:
else 0
)
- # Create and return FunctionEvent list
- function_events = []
+ # Create and return FunctionEvent list, which contains all function events
+ # Here 2 function events are created:
+ # all_function_events contains all events associated with each kineto event from result
+ all_function_events = []
+ # frontend_function_events contains the events in aten or torch frontend level,
+ # whose correlation id is 0
+ frontend_function_events = []
device_corr_map: Dict[int, List[FunctionEvent]] = {}
max_evt_id = 0
for kineto_event in result.events():
@@ -532,15 +537,21 @@ class profile:
if cuda_time > 0:
fe.append_kernel(fe.name, fe.device_index, cuda_time)
fe.is_legacy = True
- function_events.append(fe)
+ all_function_events.append(fe)
corr_id = kineto_event.linked_correlation_id()
if corr_id > 0:
if corr_id not in device_corr_map:
device_corr_map[corr_id] = []
device_corr_map[corr_id].append(fe)
+ elif corr_id == 0:
+ frontend_function_events.append(fe)
+ else:
+ raise RuntimeError(
+ f"Got negative correlation id {corr_id} in profiler post processing"
+ )
# associate device kernels and device runtime (CPU) with CPU events
- for fe in function_events:
+ for fe in frontend_function_events:
if (
fe.device_type == DeviceType.CPU
and not fe.is_async
@@ -587,17 +598,17 @@ class profile:
if not mem_record[1]:
max_evt_id += 1
fe = createFunctionEventForMemoryEvents(mem_record[0])
- function_events.append(fe)
+ all_function_events.append(fe)
for oom_record in oom_records:
max_evt_id += 1
fe = createFunctionEventForMemoryEvents(oom_record)
- function_events.append(fe)
+ all_function_events.append(fe)
- function_events.sort(
+ all_function_events.sort(
key=lambda evt: [evt.time_range.start, -evt.time_range.end]
)
- return function_events
+ return all_function_events
class record_function(_ContextDecorator):
|
2.41.0
|
d45eb77f1aeb57f13391990215b518a607b3c7e
|
Mon, 22 Apr 2024 17:07:15 -0700
|
[PATCH 0525/1000] [inductor] Remove usage of device_interface from _inductor.runtime (#124592)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124592 Approved by: https://github.com/masnesral
|
diff --git a/test/inductor/test_cuda_repro.py b/test/inductor/test_cuda_repro.py
index 684f3cef8f..db02d19310 100644
--- a/test/inductor/test_cuda_repro.py
+++ b/test/inductor/test_cuda_repro.py
@@ -14,6 +14,7 @@ from torch._dynamo.testing import rand_strided
from torch._dynamo.utils import same
from torch._inductor import config
from torch._inductor.compile_fx import compile_fx_inner
+from torch._inductor.runtime.hints import DeviceProperties
from torch._inductor.utils import run_and_get_code
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing import FileCheck
@@ -405,7 +406,7 @@ class CudaReproTests(TestCase):
],
meta={
"signature": {0: "*fp32", 1: "*fp32", 2: "i32"},
- "device": 0,
+ "device": DeviceProperties.create(torch.device("cuda")),
"configs": [instance_descriptor(divisible_by_16=(0, 1), equal_to_1=())],
"constants": {},
},
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 8707bd8a6b..8a2e1f228d 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -45,16 +45,12 @@ from typing import (
Optional,
Set,
Tuple,
- Type,
TYPE_CHECKING,
Union,
)
import torch
-from torch._dynamo.device_interface import (
- get_interface_for_device,
- get_registered_device_interfaces,
-)
+from torch._dynamo.device_interface import get_registered_device_interfaces
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
@@ -70,7 +66,6 @@ from torch._subclasses.fake_tensor import (
from torch.fx.experimental.symbolic_shapes import has_hint, hint_int, ShapeEnv
if TYPE_CHECKING:
- from torch._dynamo.device_interface import DeviceInterface
from torch._inductor.graph import GraphLowering
from torch._inductor.ir import ChoiceCaller
@@ -2772,14 +2767,9 @@ def _set_triton_ptxas_path() -> None:
def _worker_compile_triton(
load_kernel: Callable[[], Any],
- cc: int,
- device: torch.device,
- device_interface: Type[DeviceInterface],
):
_set_triton_ptxas_path()
- device_interface.Worker.set_device(device.index)
- kernel = load_kernel()
- kernel.precompile(warm_cache_only_with_cc=cc)
+ load_kernel().precompile(warm_cache_only=True)
class CodeCacheFuture:
@@ -2942,17 +2932,13 @@ class AsyncCompile:
kernel = TritonCodeCache.load(kernel_name, source_code)
if config.compile_threads > 1:
- device_interface = get_interface_for_device(device_str)
- device = torch.device(device_str, device_interface.current_device())
- cc = device_interface.get_compute_capability(device)
- future = self.process_pool().submit(
- _worker_compile_triton,
- kernel._reload_in_subproc,
- cc,
- device,
- device_interface,
+ return TritonFuture(
+ kernel,
+ self.process_pool().submit(
+ _worker_compile_triton,
+ kernel._reload_in_subproc,
+ ),
)
- return TritonFuture(kernel, future)
else:
kernel.precompile()
return kernel
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index 4950f5e802..df669d10d2 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -34,7 +34,7 @@ import torch.utils._pytree as pytree
from torch._dynamo.utils import preserve_rng_state
from torch._inductor.metrics import is_metric_table_enabled, log_kernel_metadata
-from torch._inductor.runtime.hints import AutotuneHint
+from torch._inductor.runtime.hints import AutotuneHint, DeviceProperties
from torch._prims_common import is_integer_dtype
from torch.utils._sympy.functions import FloorDiv, ModularIndexing
from torch.utils._sympy.value_ranges import ValueRanges
@@ -125,7 +125,7 @@ def gen_common_triton_imports():
"""
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
- from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor
+ from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
"""
)
return imports.getvalue()
@@ -2833,8 +2833,7 @@ class TritonKernel(Kernel):
)
triton_meta = {
"signature": triton_meta_signature,
- "device": V.graph.scheduler.current_device.index,
- "device_type": V.graph.scheduler.current_device.type,
+ "device": DeviceProperties.create(V.graph.scheduler.current_device),
"constants": {},
}
diff --git a/torch/_inductor/codegen/triton_foreach.py b/torch/_inductor/codegen/triton_foreach.py
index a0acdcdae0..210ab6b50a 100644
--- a/torch/_inductor/codegen/triton_foreach.py
+++ b/torch/_inductor/codegen/triton_foreach.py
@@ -6,6 +6,7 @@ from typing import Dict, List, Tuple
from sympy import Integer
from .. import metrics
+from ..runtime.hints import DeviceProperties
from ..scheduler import SchedulerNode
from ..utils import ceildiv, Placeholder
from ..virtualized import V
@@ -157,8 +158,7 @@ class ForeachKernel(Kernel):
_, _, signature = self.args.python_argdefs()
triton_meta = {
"signature": signature_to_meta(signature, size_dtype=size_dtype),
- "device": V.graph.scheduler.current_device.index,
- "device_type": V.graph.scheduler.current_device.type,
+ "device": DeviceProperties.create(V.graph.scheduler.current_device),
"constants": {},
}
triton_meta["configs"] = [config_of(signature)]
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index b679b1f174..3271682ef1 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -35,6 +35,7 @@ from torch.utils._sympy.singleton_int import SingletonInt
from .. import codecache, config, ir
from ..ir import ReinterpretView
from ..runtime import triton_heuristics
+from ..runtime.hints import DeviceProperties
from ..utils import (
cache_on_self,
get_benchmark_name,
@@ -1095,8 +1096,7 @@ class WrapperCodeGen(CodeGen):
size_dtype=index_dtype,
indices=non_constant_indices,
),
- "device": V.graph.scheduler.current_device.index,
- "device_type": V.graph.scheduler.current_device.type,
+ "device": DeviceProperties.create(V.graph.scheduler.current_device),
# Triton compiler includes equal_to_1 args into constants even
# when they are not constexpr. otherwise there may be a segfault
# during launching the Inductor-compiled Triton kernel.
diff --git a/torch/_inductor/runtime/hints.py b/torch/_inductor/runtime/hints.py
index 5b2b53ebff..325f37ae25 100644
--- a/torch/_inductor/runtime/hints.py
+++ b/torch/_inductor/runtime/hints.py
@@ -1,6 +1,8 @@
import collections
+import typing
from dataclasses import fields
from enum import auto, Enum
+from typing import Optional
# NOTE: if these fail asserts submit a PR to increase them
@@ -89,3 +91,39 @@ class AutotuneHint(Enum):
# which isn't valid python.
# Enum.__str__ will just return "AutotuneHint.ELEMENTS_PER_WARP_32".
__repr__ = Enum.__str__
+
+
+class DeviceProperties(typing.NamedTuple):
+ """Copy device properties into a data structure not requiring torch to be imported"""
+
+ type: str # type: ignore[assignment]
+ index: int # type: ignore[assignment]
+ cc: int
+ major: Optional[int] = None
+ regs_per_multiprocessor: Optional[int] = None
+ max_threads_per_multi_processor: Optional[int] = None
+ multi_processor_count: Optional[int] = None
+
+ @classmethod
+ def create(cls, device):
+ import torch
+ from torch._dynamo.device_interface import get_interface_for_device
+
+ device_type = device.type if torch.version.hip is None else "hip"
+ device_interface = get_interface_for_device(device)
+ if device_type == "cuda":
+ props = device_interface.get_device_properties(device)
+ return cls(
+ type=device_type,
+ index=device.index,
+ cc=device_interface.get_compute_capability(device),
+ major=props.major,
+ regs_per_multiprocessor=props.regs_per_multiprocessor,
+ max_threads_per_multi_processor=props.max_threads_per_multi_processor,
+ multi_processor_count=props.multi_processor_count,
+ )
+ return cls(
+ type=device_type,
+ index=device.index,
+ cc=device_interface.get_compute_capability(device),
+ )
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index 1b042d4f4a..85587c2093 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -16,12 +16,12 @@ from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import torch
-from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device
from .coordinate_descent_tuner import CoordescTuner
from .hints import (
_NUM_THREADS_PER_WARP,
AutotuneHint,
+ DeviceProperties,
HeuristicType,
ReductionHint,
TileHint,
@@ -144,7 +144,12 @@ class CachingAutotuner(KernelInterface):
assert len(configs) > 0, "Non-empty TritonConfig list required for compiling"
self.fn = fn
- self.triton_meta = triton_meta
+ self.device_props: DeviceProperties = triton_meta["device"]
+ self.triton_meta = {
+ **triton_meta,
+ "device": self.device_props.index,
+ "device_type": self.device_props.type,
+ }
self.inductor_meta = {} if inductor_meta is None else inductor_meta
self.save_cache_hook = save_cache_hook
self.mutated_arg_names = mutated_arg_names
@@ -152,13 +157,6 @@ class CachingAutotuner(KernelInterface):
self.heuristic_type = heuristic_type
self.custom_kernel = custom_kernel
self.cuda_kernel_saved = False
-
- # Align the default design that default as cuda
- self.device_type = (
- triton_meta["device_type"] if "device_type" in triton_meta else "cuda"
- )
- self.device_interface = get_interface_for_device(self.device_type)
-
if log.isEnabledFor(logging.DEBUG):
log.debug(
"CachingAutotuner gets %d configs for %s",
@@ -186,7 +184,7 @@ class CachingAutotuner(KernelInterface):
)
self.filename = filename
- def precompile(self, warm_cache_only_with_cc=None):
+ def precompile(self, warm_cache_only=False):
with self.lock:
if self.launchers:
return
@@ -198,7 +196,7 @@ class CachingAutotuner(KernelInterface):
for c in self.configs:
try:
compiled_binary, launcher = self._precompile_config(
- c, warm_cache_only_with_cc
+ c, warm_cache_only
)
except OutOfResources as e:
if len(self.configs) == 1:
@@ -219,19 +217,19 @@ class CachingAutotuner(KernelInterface):
seen_configs = set(self.configs)
- device_prop = self.device_interface.Worker.get_device_properties(
- self.triton_meta["device"]
- )
+ device_prop = self.device_props
if (
self.inductor_meta.get("dynamic_scale_rblock", True)
and self.heuristic_type == HeuristicType.REDUCTION
and self.size_hints is not None
- # Disable for AMDGPU as Triton is not ready to return n_regs for a compiled_binary.
- and not self.inductor_meta.get("is_hip")
- # Disable for Intel GPU as Triton is not ready to return n_regs for a compiled_binary.
- and self.device_type != "xpu"
+ # Disable for AMDGPU/Intel as Triton is not ready to return n_regs for a compiled_binary.
+ and device_prop.type == "cuda"
+ and device_prop.major
and device_prop.major >= 8
):
+ assert device_prop.regs_per_multiprocessor
+ assert device_prop.max_threads_per_multi_processor
+ assert device_prop.multi_processor_count
for triton_config, compiled_binary in zip(
self.configs, compiled_binaries
):
@@ -292,15 +290,21 @@ class CachingAutotuner(KernelInterface):
continue
seen_configs.add(new_config)
self.launchers.append(
- self._precompile_config(new_config, warm_cache_only_with_cc)[1]
+ self._precompile_config(new_config, warm_cache_only)[1]
)
self.configs = None
- def _precompile_config(self, cfg: Config, warm_cache_only_with_cc: Optional[int]):
+ def get_device_interface(self):
+ # this code cannot run in compile workers, because it imports from torch
+ from torch._dynamo.device_interface import get_interface_for_device
+
+ return get_interface_for_device(self.device_props.type.replace("hip", "cuda"))
+
+ def _precompile_config(self, cfg: Config, warm_cache_only: bool):
"""Ahead of time compile a given autotuner config."""
compile_meta = copy.deepcopy(self.triton_meta)
for k, v in cfg.kwargs.items():
- if torch.version.hip is not None:
+ if self.device_props.type != "hip":
if k == "matrix_instr_nonkdim":
compile_meta["matrix_instr_nonkdim"] = v
continue
@@ -314,22 +318,9 @@ class CachingAutotuner(KernelInterface):
"assert_indirect_indexing", True
) and not self.inductor_meta.get("is_hip", False)
- # Setting device_type="hip" required on ROCm to pass down to triton
- compile_meta["device_type"] = (
- self.device_type if torch.version.hip is None else "hip"
- )
-
- if warm_cache_only_with_cc:
- cc = warm_cache_only_with_cc
- else:
- # Use device_type 'cuda' for both cuda and hip devices to retrieve
- # the compute capability.
- device_type = self.device_type if torch.version.hip is None else "cuda"
- device_id = compile_meta["device"]
- device = torch.device(device_type, device_id)
- cc = self.device_interface.get_compute_capability(device)
-
- compile_meta["cc"] = cc
+ # device type will be "hip" rather than "cuda" here
+ compile_meta["device_type"] = self.device_props.type
+ compile_meta["cc"] = self.device_props.cc
if ASTSource:
compile_args = (
@@ -341,13 +332,13 @@ class CachingAutotuner(KernelInterface):
),
)
- target = (compile_meta["device_type"], cc)
+ target = (compile_meta["device_type"], compile_meta["cc"])
options = {
"num_warps": compile_meta["num_warps"],
"num_stages": compile_meta["num_stages"],
"debug": compile_meta["debug"],
}
- if torch.version.hip is not None:
+ if self.device_props.type != "hip":
if "waves_per_eu" in compile_meta:
options["waves_per_eu"] = compile_meta["waves_per_eu"]
if "matrix_instr_nonkdim" in compile_meta:
@@ -362,16 +353,21 @@ class CachingAutotuner(KernelInterface):
compile_args = (self.fn,)
compile_kwargs = compile_meta
- if warm_cache_only_with_cc:
+ if warm_cache_only:
return (
triton.compile(*compile_args, **compile_kwargs),
None,
)
+ # importing from torch is safe now that precompile has returned
+ from torch._dynamo.device_interface import DeviceGuard
+
+ device_interface = self.get_device_interface()
+
# load binary to the correct device
- with DeviceGuard(self.device_interface, compile_meta["device"]): # type: ignore[attr-defined]
+ with DeviceGuard(device_interface, compile_meta["device"]): # type: ignore[attr-defined]
# need to initialize context
- self.device_interface.synchronize(self.device_interface.current_device())
+ device_interface.synchronize(device_interface.current_device())
try:
binary = triton.compile(*compile_args, **compile_kwargs)
@@ -589,8 +585,9 @@ class CachingAutotuner(KernelInterface):
)
return float("inf")
- stream = self.device_interface.get_raw_stream( # type: ignore[call-arg]
- self.device_interface.current_device()
+ device_interface = self.get_device_interface()
+ stream = device_interface.get_raw_stream( # type: ignore[call-arg]
+ device_interface.current_device()
)
def kernel_call():
@@ -697,7 +694,7 @@ class CachingAutotuner(KernelInterface):
from torch._inductor.codecache import CudaKernelParamCache
- if torch.version.hip is None:
+ if self.device_props.type != "hip":
CudaKernelParamCache.set(key, params, launcher.bin.asm["cubin"])
else:
# There is some divergence between CUDA and ROCm here.
@@ -735,7 +732,7 @@ class CachingAutotuner(KernelInterface):
def benchmark_one_config(config):
with self.lock:
- _, launcher = self._precompile_config(config, None)
+ _, launcher = self._precompile_config(config, False)
config2launcher[config] = launcher
out = self.bench(launcher, *cloned_args, **kwargs)
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index 5360c41765..adf7e4e50d 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -35,6 +35,7 @@ from .codegen.triton import (
from .codegen.triton_utils import config_of, signature_to_meta
from .exc import CUDACompileError
from .ir import ChoiceCaller, PrimitiveInfoType
+from .runtime.hints import DeviceProperties
from .runtime.runtime_utils import do_bench
from .utils import get_dtype_size, Placeholder, sympy_dot, sympy_product, unique
from .virtualized import V
@@ -147,8 +148,7 @@ class TritonTemplateKernel(TritonKernel):
argdefs, _, signature = self.args.python_argdefs()
triton_meta = {
"signature": signature_to_meta(signature, size_dtype=self.index_dtype),
- "device": self.output_node.get_device().index,
- "device_type": self.output_node.get_device().type,
+ "device": DeviceProperties.create(self.output_node.get_device()),
"constants": {},
}
triton_meta["configs"] = [config_of(signature)]
|
2.41.0
|
792ceab4b6a61c6c217f65c3fecf51d75e65a9f
|
Mon, 22 Apr 2024 17:07:15 -0700
|
[PATCH 0526/1000] [dynamo] Refactor into torch/_inductor/runtime/compile_tasks.py (#124681)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124681 Approved by: https://github.com/masnesral ghstack dependencies: #124592
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 8a2e1f228d..0de182bf6a 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -54,6 +54,13 @@ from torch._dynamo.device_interface import get_registered_device_interfaces
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
+from torch._inductor.runtime.compile_tasks import (
+ _module_to_triton_kernel,
+ _reload_python_module,
+ _reload_python_module_in_subproc,
+ _set_triton_ptxas_path,
+ _worker_compile_triton,
+)
from torch._inductor.runtime.runtime_utils import cache_dir
from torch._inductor.utils import clear_on_fresh_inductor_cache, is_linux
@@ -2351,10 +2358,6 @@ class CppWrapperCodeCache(CppPythonBindingsCodeCache):
)
-def _reload_python_module_in_subproc(key, path):
- return PyCodeCache.load_by_key_path(key, path)
-
-
@clear_on_fresh_inductor_cache
class PyCodeCache:
cache: Dict[str, ModuleType] = dict()
@@ -2387,31 +2390,21 @@ class PyCodeCache:
if linemap is None:
linemap = []
if key not in cls.cache:
- with open(path) as f:
- try:
- code = compile(f.read(), path, "exec")
- except Exception as e:
- raise RuntimeError(
- f"Failed to import {path}\n{type(e).__name__}: {e}"
- ) from None
- mod = ModuleType(f"{__name__}.{key}")
- mod.__file__ = path
- mod.key = key # type: ignore[attr-defined]
- exec(code, mod.__dict__, mod.__dict__)
- sys.modules[mod.__name__] = mod
- # another thread might set this first
- cls.cache.setdefault(key, mod)
- # unzip into separate lines/nodes lists
- cls.linemaps[path] = list(zip(*linemap))
-
- if attrs is not None:
- for k, v in attrs.items():
- setattr(mod, k, v)
-
- if not (linemap or attrs):
- mod._reload_in_subproc = functools.partial( # type: ignore[attr-defined]
- _reload_python_module_in_subproc, key, path
- )
+ mod = _reload_python_module(key, path)
+
+ # another thread might set this first
+ cls.cache.setdefault(key, mod)
+ # unzip into separate lines/nodes lists
+ cls.linemaps[path] = list(zip(*linemap))
+
+ if attrs is not None:
+ for k, v in attrs.items():
+ setattr(mod, k, v)
+
+ if not (linemap or attrs):
+ mod._reload_in_subproc = functools.partial( # type: ignore[attr-defined]
+ _reload_python_module_in_subproc, key, path
+ )
return cls.cache[key]
@@ -2444,25 +2437,10 @@ class PyCodeCache:
return parse_stack_trace(entry)
-def _reload_triton_kernel_in_subproc(reload_module, kernel_name):
- return TritonCodeCache._mod_to_kernel(reload_module(), kernel_name)
-
-
class TritonCodeCache:
@classmethod
def load(cls, kernel_name: str, source_code: str) -> ModuleType:
- mod = PyCodeCache.load(source_code)
- return cls._mod_to_kernel(mod, kernel_name)
-
- @classmethod
- def _mod_to_kernel(cls, mod, kernel_name):
- kernel = getattr(mod, kernel_name)
- kernel._reload_in_subproc = functools.partial(
- _reload_triton_kernel_in_subproc,
- mod._reload_in_subproc,
- kernel_name,
- )
- return kernel
+ return _module_to_triton_kernel(PyCodeCache.load(source_code), kernel_name)
def _cuda_compiler() -> Optional[str]:
@@ -2750,28 +2728,6 @@ def caching_device_properties():
device_interface.Worker.get_device_properties()
-@functools.lru_cache(None)
-def _set_triton_ptxas_path() -> None:
- if os.environ.get("TRITON_PTXAS_PATH") is not None:
- return
- ptxas_path = os.path.abspath(
- os.path.join(os.path.dirname(__file__), "..", "bin", "ptxas")
- )
- if not os.path.exists(ptxas_path):
- return
- if os.path.isfile(ptxas_path) and os.access(ptxas_path, os.X_OK):
- os.environ["TRITON_PTXAS_PATH"] = ptxas_path
- else:
- warnings.warn(f"{ptxas_path} exists but is not an executable")
-
-
-def _worker_compile_triton(
- load_kernel: Callable[[], Any],
-):
- _set_triton_ptxas_path()
- load_kernel().precompile(warm_cache_only=True)
-
-
class CodeCacheFuture:
def result(self):
raise NotImplementedError
diff --git a/torch/_inductor/runtime/compile_tasks.py b/torch/_inductor/runtime/compile_tasks.py
new file mode 100644
index 0000000000..66a36703da
--- /dev/null
+++ b/torch/_inductor/runtime/compile_tasks.py
@@ -0,0 +1,68 @@
+from __future__ import annotations
+
+import functools
+import os
+import sys
+import warnings
+from types import ModuleType
+from typing import Any, Callable
+
+
+def _reload_triton_kernel_in_subproc(reload_module, kernel_name):
+ return _module_to_triton_kernel(reload_module(), kernel_name)
+
+
+def _module_to_triton_kernel(mod, kernel_name):
+ kernel = getattr(mod, kernel_name)
+ kernel._reload_in_subproc = functools.partial(
+ _reload_triton_kernel_in_subproc,
+ mod._reload_in_subproc,
+ kernel_name,
+ )
+ return kernel
+
+
+def _reload_python_module_in_subproc(key, path):
+ codecache = sys.modules.get("torch._inductor.codecache")
+ if codecache:
+ return codecache.PyCodeCache.load_by_key_path(key, path)
+ else:
+ return _reload_python_module(key, path)
+
+
+def _reload_python_module(key, path):
+ with open(path) as f:
+ try:
+ code = compile(f.read(), path, "exec")
+ except Exception as e:
+ raise RuntimeError(
+ f"Failed to import {path}\n{type(e).__name__}: {e}"
+ ) from None
+ mod = ModuleType(f"{__name__}.{key}")
+ mod.__file__ = path
+ mod.key = key # type: ignore[attr-defined]
+ exec(code, mod.__dict__, mod.__dict__)
+ sys.modules[mod.__name__] = mod
+ return mod
+
+
+@functools.lru_cache(None)
+def _set_triton_ptxas_path() -> None:
+ if os.environ.get("TRITON_PTXAS_PATH") is not None:
+ return
+ ptxas_path = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), "..", "bin", "ptxas")
+ )
+ if not os.path.exists(ptxas_path):
+ return
+ if os.path.isfile(ptxas_path) and os.access(ptxas_path, os.X_OK):
+ os.environ["TRITON_PTXAS_PATH"] = ptxas_path
+ else:
+ warnings.warn(f"{ptxas_path} exists but is not an executable")
+
+
+def _worker_compile_triton(
+ load_kernel: Callable[[], Any],
+):
+ _set_triton_ptxas_path()
+ load_kernel().precompile(warm_cache_only=True)
|
2.41.0
|
c253a777641791247f7fcc19fe5c60f24be32b9
|
Mon, 22 Apr 2024 12:12:42 -0700
|
[PATCH 0527/1000] Add support for capturing tensors with score_mod (#124444)
|
``` import torch from torch import nn import torch.nn.functional as F import torch._inductor.config as config # torch.set_default_device('cuda') import torch from torch.nn.attention._templated_attention import _templated_attention as templated_attention from triton.testing import do_bench from torch.nn.attention import SDPBackend, sdpa_kernel index = torch.ops.aten torch.manual_seed(0) B = 16 H = 16 S = 2048 D = 64 head_scale = torch.randn(H, device='cuda') def alibi(score, batch, head, token_q, token_kv): return score + torch.ops.aten.index(head_scale, [head]) * (token_q - token_kv) bias = torch.randn(H, S, S, dtype=torch.float16, device='cuda') query = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16) key = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16) value = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16) compiled = torch.compile(templated_attention) out = compiled(query, key, value, score_mod=alibi) out2 = templated_attention(query, key, value,score_mod=alibi) print((out - out2).abs().mean()) assert (out - out2).abs().mean() < 1e-3 print("Flash (no mask): ", do_bench(lambda: F.scaled_dot_product_attention(query, key, value))) print("Flash (mask): ", do_bench(lambda: F.scaled_dot_product_attention(query, key, value, attn_mask=bias))) print("flexattention: ", do_bench(lambda: compiled(query, key, value, score_mod=alibi))) ``` <img width="324" alt="image" src="https://github.com/pytorch/pytorch/assets/6355099/18c175d0-2720-4dfd-8747-85b8a8f609f5"> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124444 Approved by: https://github.com/jansel, https://github.com/drisspg
|
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index b906689af9..4c8043d9bf 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -4,7 +4,7 @@ import functools
from collections import namedtuple
from typing import Callable
-from unittest import expectedFailure, skipUnless
+from unittest import skip, skipUnless
from unittest.mock import patch
import torch
@@ -28,6 +28,8 @@ supported_platform = skipUnless(
Tolerances = namedtuple("Tolerances", ["atol", "rtol"])
torch.set_float32_matmul_precision("high")
+index = torch.ops.aten.index
+
def create_attention(score_mod):
return functools.partial(_templated_attention, score_mod=score_mod)
@@ -39,6 +41,8 @@ test_dtypes = (
else [torch.float16, torch.float32]
)
+test_dtypes_fast = [torch.float16]
+
# TODO float16 was causing ERRORs for tests on ROCm
# See https://github.com/pytorch/pytorch/issues/123531
if common_utils.TEST_WITH_ROCM:
@@ -53,13 +57,19 @@ def _causal_mod(score, b, h, token_q, token_kv):
return torch.where(token_q >= token_kv, score, float("-inf"))
+B = 4
+H = 8
+S = 2048
+D = 64
+
+
class TestTemplatedSDPA(InductorTestCase):
def run_test(self, score_mod: Callable, dtype: torch.dtype = torch.float16):
sdpa_partial = create_attention(score_mod)
compiled_sdpa = torch.compile(sdpa_partial)
- q = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
- k = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
- v = torch.randn((4, 8, 2048, 64), dtype=dtype, device="cuda")
+ q = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
+ k = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
+ v = torch.randn((B, H, S, D), dtype=dtype, device="cuda")
golden_out = sdpa_partial(
q.to(torch.float64), k.to(torch.float64), v.to(torch.float64)
)
@@ -147,23 +157,116 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(composed_score_mod, dtype)
- # TODO We are currently not capturing free variables in the closure correctly
- @expectedFailure
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
def test_captured_buffers(self, dtype: torch.dtype):
- head_offset = torch.rand(8, device="cuda", dtype=dtype)
+ head_offset = torch.rand(H, device="cuda", dtype=dtype)
def score_mod(score, b, h, m, n):
- return score + head_offset[h]
+ return score + index(head_offset, [h])
self.run_test(score_mod, dtype)
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_seq_masking(self, dtype):
+ seq_idx = torch.zeros(S, device="cuda", dtype=torch.bool)
+ seq_idx[S // 2 :] = 1
+
+ def seq_mask_mod(score, b, h, q, kv):
+ return torch.where(
+ index(seq_idx, [q]) == index(seq_idx, [kv]), score, float("-inf")
+ )
+
+ self.run_test(seq_mask_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_from_bias_seq_only(self, dtype):
+ bias = torch.randn(S, S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(bias, [q, kv])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_from_bias_seq_batch(self, dtype):
+ bias = torch.randn(B, S, S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(bias, [b, q, kv])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_from_bias_head_seq_batch(self, dtype):
+ bias = torch.randn(B, H, S, S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(bias, [b, h, q, kv])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_load_rel_bias(self, dtype):
+ rel_bias = torch.randn(2 * S, device="cuda", dtype=dtype)
+
+ def bias_mod(score, b, h, q, kv):
+ return score + index(rel_bias, [(q - kv) + S])
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @common_utils.parametrize("dtype", test_dtypes_fast)
+ def test_dependent_causal_bidirectional(self, dtype):
+ num_bidirectional = torch.randint(0, S, (B,), device="cuda", dtype=torch.int32)
+
+ def bias_mod(score, b, h, q, kv):
+ causal_attention = q >= kv
+ cur_num_bidirectional = index(num_bidirectional, (b,))
+ bidirectional_attention_on_video = (q <= cur_num_bidirectional) & (
+ kv <= cur_num_bidirectional
+ )
+ return torch.where(
+ bidirectional_attention_on_video | causal_attention,
+ score,
+ -float("inf"),
+ )
+
+ self.run_test(bias_mod, dtype)
+
+ @supported_platform
+ @skip("Triton bug ") # https://github.com/pytorch/pytorch/issues/124571
+ @common_utils.parametrize("dtype", test_dtypes)
+ def test_njt_causal(self, dtype):
+ offsets = torch.tensor(
+ [0, 1024, 1024 + 512, S], device="cuda", dtype=torch.int32
+ )
+ seq_idx = torch.zeros(S, device="cuda", dtype=torch.int32)
+ for idx in range(len(offsets) - 1):
+ seq_idx[offsets[idx] : offsets[idx + 1]] = idx
+
+ def create_njt_wrapper(orig_score_mod, offsets, seq_idx):
+ def njt_score_mod(qk, b, h, q, kv):
+ q_nested = q - index(offsets, [index(seq_idx, [q])])
+ kv_nested = kv - index(offsets, [index(seq_idx, [kv])])
+ return orig_score_mod(qk, b, h, q_nested, kv_nested)
+
+ return njt_score_mod
+
+ causal_njt = create_njt_wrapper(_causal_mod, offsets, seq_idx)
+
+ self.run_test(causal_njt, dtype)
+
@supported_platform
def test_backwards_fails(self):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=torch.float32,
device="cuda",
requires_grad=True,
@@ -177,9 +280,9 @@ class TestTemplatedSDPA(InductorTestCase):
@supported_platform
def test_mixed_dtypes_fails(self):
- query = torch.randn((1, 1, 2048, 64), dtype=torch.float32, device="cuda")
- key = torch.randn((1, 1, 2048, 64), dtype=torch.float16, device="cuda")
- value = torch.randn((1, 1, 2048, 64), dtype=torch.float16, device="cuda")
+ query = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
+ key = torch.randn((1, 1, 1024, 64), dtype=torch.float16, device="cuda")
+ value = torch.randn((1, 1, 1024, 64), dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(
ValueError, "Expected query, key, and value to have the same dtype"
):
@@ -201,6 +304,21 @@ class TestTemplatedSDPA(InductorTestCase):
self.run_test(score_mod)
+ @supported_platform
+ @patch.object(torch._inductor.config, "max_autotune", True)
+ def test_max_autotune_with_captured(self):
+ head_scale = torch.randn(H, device="cuda")
+ batch_scale = torch.randn(B, device="cuda")
+ tok_scale = torch.randn(S, device="cuda")
+
+ def bias_mod(score, batch, head, token_q, token_kv):
+ score = score + index(tok_scale, [token_q])
+ score = score + index(batch_scale, [batch])
+ score = score + index(head_scale, [head])
+ return score
+
+ self.run_test(bias_mod)
+
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
@common_utils.parametrize("score_mod", [_identity_mod, _causal_mod])
@@ -211,7 +329,7 @@ class TestTemplatedSDPA(InductorTestCase):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=dtype,
device="cuda",
requires_grad=True,
@@ -253,7 +371,7 @@ class TestTemplatedSDPA(InductorTestCase):
def test_logsumexp_only_return(self):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=torch.float32,
device="cuda",
requires_grad=True,
@@ -274,7 +392,7 @@ class TestTemplatedSDPA(InductorTestCase):
def test_logsumexp_is_not_fused(self):
make_tensor = functools.partial(
torch.randn,
- (4, 8, 2048, 64),
+ (B, H, S, D),
dtype=torch.float32,
device="cuda",
requires_grad=True,
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index 08c966f2b1..0db77f53ee 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -1535,12 +1535,10 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
) -> "VariableTracker":
from .builder import wrap_fx_proxy
- query, key, value, score_mod, *other_buffers = self.normalize_to_args(
- args, kwargs
- )
+ query, key, value, score_mod = self.normalize_to_args(args, kwargs)
p_args, p_kwargs = self.create_wrapped_node(tx, query, score_mod)
- proxied_args = [query, key, value, *other_buffers]
+ proxied_args = [query, key, value]
# Store the invocation as a call
# Norm_kwargs contains the score_function and we dont want to proxy this because
diff --git a/torch/_higher_order_ops/templated_attention.py b/torch/_higher_order_ops/templated_attention.py
index 09e10754fe..388e741837 100644
--- a/torch/_higher_order_ops/templated_attention.py
+++ b/torch/_higher_order_ops/templated_attention.py
@@ -60,7 +60,7 @@ def math_attention(
"""
assert len(other_buffers) == 0, "Other buffers are not yet supported."
- scores = query @ key.transpose(-2, -1)
+ scores = (query @ key.transpose(-2, -1)).to(dtype=torch.float32)
b = torch.arange(0, scores.size(0), device=scores.device)
h = torch.arange(0, scores.size(1), device=scores.device)
@@ -179,9 +179,11 @@ def templated_attention_functionalize(
assert isinstance(other_buffers_unwrapped, tuple)
assert all(isinstance(item, torch.Tensor) for item in other_buffers_unwrapped)
- example_vals = [torch.zeros((), dtype=query.dtype)] + [
- torch.zeros((), dtype=torch.int) for _ in range(4)
- ]
+ example_vals = (
+ [torch.zeros((), dtype=query.dtype)]
+ + [torch.zeros((), dtype=torch.int) for _ in range(4)]
+ + list(other_buffers_unwrapped)
+ )
with ctx.redispatch_to_next() as m:
functional_score_mod = ctx.functionalize(score_mod)
pre_dispatch = hasattr(ctx, "mode") and ctx.mode.pre_dispatch
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index df669d10d2..bd218ed3df 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -3412,22 +3412,14 @@ class TritonScheduling(BaseScheduling):
buffer_names.update(node.used_buffer_names())
# Get buffers objects
+
def _get_buffer(name: str) -> Union[ir.Buffer, ir.TensorBox]:
- if name in V.graph.name_to_buffer:
- return V.graph.name_to_buffer[name]
- elif name in V.graph.graph_inputs:
- return V.graph.graph_inputs[name]
- elif name in V.graph.constants:
- data = V.graph.constants[name]
- return ir.ConstantBuffer(
- name,
- ir.FixedLayout(
- data.device, data.dtype, *V.graph.static_sizes_strides(data)
- ),
- )
- raise RuntimeError(f"Failed to find buffer matching name {name}")
+ buf = V.graph.get_buffer(name)
+ if buf is None:
+ raise RuntimeError(f"Failed to find buffer matching name {name}")
+ return buf
- buffers = [_get_buffer(name) for name in buffer_names]
+ buffers = [V.graph.get_buffer(name) for name in buffer_names]
# In theory we can separately check xnumel and rnumel are <= int_max
# but some indexers do use the full linear index so we need to be
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index 97e1683120..a160055ee1 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -660,6 +660,14 @@ class GraphLowering(torch.fx.Interpreter):
return self.name_to_buffer[buffer_name]
if buffer_name in self.graph_inputs:
return self.graph_inputs[buffer_name]
+ if buffer_name in self.constants:
+ data = V.graph.constants[buffer_name]
+ return ir.ConstantBuffer(
+ buffer_name,
+ ir.FixedLayout(
+ data.device, data.dtype, *V.graph.static_sizes_strides(data)
+ ),
+ )
return None
def get_dtype(self, buffer_name: str):
diff --git a/torch/_inductor/kernel/templated_attention.py b/torch/_inductor/kernel/templated_attention.py
index 7942a367e2..4c59036fbb 100644
--- a/torch/_inductor/kernel/templated_attention.py
+++ b/torch/_inductor/kernel/templated_attention.py
@@ -3,6 +3,7 @@ import logging
from typing import Any, List
import torch
+from .. import config
from ..lowering import empty_strided, lowerings, register_lowering
from ..select_algorithm import autotune_select_algorithm, TritonTemplate
@@ -114,12 +115,14 @@ sdpa_template = TritonTemplate(
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk = tl.dot(q, k.to(MATMUL_PRECISION), acc=qk)
# ~~~~~~~~~~~~~~~~~~~ Apply score modification ~~~~~~~~~~~~~~~~~~~
+ m = offs_m[:, None]
+ n = start_n + offs_n[None, :]
{{ modification(
score="qk",
b="off_hz // H",
h="off_hz % H",
- m="offs_m[:, None]",
- n="start_n + offs_n[None, :]",
+ m="m",
+ n="n",
out="qk"
) | indent_except_first(2) }}
# TODO: In the case that score_mod is linear, this can be LICMed
@@ -170,7 +173,8 @@ sdpa_template = TritonTemplate(
)
-@register_lowering(torch.ops.higher_order.templated_attention)
+# TODO: We probably also need a layout constraint?
+@register_lowering(torch.ops.higher_order.templated_attention, type_promotion_kind=None)
def templated_attention(*args, **kwargs):
from torch._prims_common import make_contiguous_strides_for
from ..ir import (
@@ -182,7 +186,7 @@ def templated_attention(*args, **kwargs):
TensorBox,
)
- query, key, value, subgraph = args
+ query, key, value, subgraph, *other_buffers = args
def create_placeholder(name: str, dtype: torch.dtype) -> InputBuffer:
return TensorBox.create(
@@ -272,17 +276,22 @@ def templated_attention(*args, **kwargs):
configs: List[Any] = []
if query.get_dtype() == torch.float32:
configs.append((64, 64, 4, 3))
- configs += [
- (128, 64, 4, 3),
- (128, 128, 4, 3),
- (128, 128, 8, 2),
- (64, 128, 4, 3),
- ]
-
+ else:
+ configs.append((128, 64, 4, 3))
+ if config.max_autotune:
+ configs += [
+ (128, 64, 4, 3),
+ (128, 128, 4, 3),
+ (128, 128, 8, 2),
+ (64, 128, 4, 3),
+ ]
+ # Note, we don't need to pass in the captured buffers explicitly
+ # because they're implicitly added by the score_mod function
+ # We do need to explicitly pass it in for autotuning though.
for BLOCK_M, BLOCK_N, num_warps, num_stages in configs:
sdpa_template.maybe_append_choice(
choices=choices,
- input_nodes=(query, key, value, logsumexp),
+ input_nodes=[query, key, value, logsumexp],
layout=layout,
subgraphs=subgraph_buffer,
mutated_inputs=[
@@ -298,9 +307,10 @@ def templated_attention(*args, **kwargs):
ROWS_GUARANTEED_SAFE=False,
OUTPUT_LOGSUMEXP=True,
)
+ inputs_for_autotuning = [query, key, value, logsumexp] + list(other_buffers)
return (
autotune_select_algorithm(
- "sdpa", choices, [query, key, value, logsumexp], layout
+ "sdpa", choices, inputs_for_autotuning, layout
),
logsumexp,
)
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index adf7e4e50d..3a09238da3 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -37,7 +37,14 @@ from .exc import CUDACompileError
from .ir import ChoiceCaller, PrimitiveInfoType
from .runtime.hints import DeviceProperties
from .runtime.runtime_utils import do_bench
-from .utils import get_dtype_size, Placeholder, sympy_dot, sympy_product, unique
+from .utils import (
+ get_dtype_size,
+ Placeholder,
+ sympy_dot,
+ sympy_index_symbol,
+ sympy_product,
+ unique,
+)
from .virtualized import V
log = logging.getLogger(__name__)
@@ -269,20 +276,23 @@ class TritonTemplateKernel(TritonKernel):
potential multiple modifications
"""
+ def add_input(name):
+ return self.args.input(name)
+
class PlaceholderSubstitution(V.WrapperHandler): # type: ignore[name-defined]
self.name = "PlaceholderSubstitution"
def load(self, name: str, index: sympy.Expr):
if name not in fixed_inputs:
- raise AssertionError(
- f"All loads should be coming from fixed inputs - {name}"
- )
+ # If it's not a fixed input, it's a load from a captured
+ # tensor
+ var = add_input(name)
+ return f"tl.load({var} + {index})"
+
return f"({fixed_inputs[name]})"
- # TODO Doesn't work yet
def indirect_indexing(self, index_var, size, check):
- return self._inner.indirect_indexing(index_var, size, False)
- # return sympy_symbol(str(index_var))
+ return sympy_index_symbol(str(index_var))
# if self.modification_cache is None:
with V.set_ops_handler(PlaceholderSubstitution(V.ops)):
@@ -589,16 +599,25 @@ class TritonTemplate(KernelTemplate):
+ "-"
)
mod = PyCodeCache.load(code, extra)
- _, call_args, _ = kernel.args.python_argdefs()
- expected_args = list(unique(x.get_name() for x in input_nodes))
- expected_args.extend([fake_out.get_name()])
- assert list(call_args)[: len(expected_args)] == expected_args, (
- call_args,
- expected_args,
+ input_call_args = tuple(kernel.args.input_buffers.keys())
+ output_call_args = tuple(kernel.args.output_buffers.keys())
+
+ # We expect the input_buffer order to be [*input_nodes, *captured_buffers]
+ expected_input_args = tuple(unique(x.get_name() for x in input_nodes))
+ expected_output_args = (fake_out.get_name(),)
+ assert input_call_args[: len(expected_input_args)] == expected_input_args, (
+ input_call_args,
+ expected_input_args,
+ )
+ assert output_call_args == expected_output_args, (
+ output_call_args,
+ expected_output_args,
)
+
+ full_input_nodes = tuple([V.graph.get_buffer(k) for k in input_call_args])
extra_args = V.graph.sizevars.size_hints(
- map(sympy.expand, call_args[len(expected_args) :]),
+ map(sympy.expand, tuple(kernel.args.sizevars.keys())),
fallback=config.unbacked_symint_fallback,
)
@@ -636,13 +655,13 @@ class TritonTemplate(KernelTemplate):
num_stages=num_stages,
num_warps=num_warps,
matrix_instr_nonkdim=kwargs.get("matrix_instr_nonkdim", 0),
- input_tensor_meta=TensorMeta.from_irnodes(input_nodes),
+ input_tensor_meta=TensorMeta.from_irnodes(full_input_nodes),
output_tensor_meta=TensorMeta.from_irnodes(layout),
)
return TritonTemplateCaller(
kernel_hash_name,
- input_nodes,
+ full_input_nodes,
layout,
make_kernel_render,
extra.strip("-").replace("-", ", "),
|
2.41.0
|
d3a13d3d13372f27ee4adafb03c1754f1cfda1e
|
Tue, 23 Apr 2024 18:38:44 +0000
|
[PATCH 0529/1000] Conform torch.mps to device module interface (#124676)
|
Right now `torch.fork_rng()` doesn't support MPS because MPS' device module functions don't line up with the other backends'. One step of `fork_rng` is a call to `device_count()`: https://github.com/pytorch/pytorch/blob/302d7e9a6ecc0d8e162f6d4ff8d067d7ba5bf4eb/torch/random.py#L146 The MPS device count is simple to determine from whether MPS is built and available. Also, per https://github.com/pytorch/pytorch/blob/302d7e9a6ecc0d8e162f6d4ff8d067d7ba5bf4eb/torch/random.py#L168 and https://github.com/pytorch/pytorch/blob/302d7e9a6ecc0d8e162f6d4ff8d067d7ba5bf4eb/torch/random.py#L175, `get_rng_state` and `set_rng_state` are expected to accept a `device` parameter. @ezyang Pull Request resolved: https://github.com/pytorch/pytorch/pull/124676 Approved by: https://github.com/ezyang
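A short usage sketch of the conformed interface (assumes an MPS-enabled build; `fork_rng`'s `device_type` argument is used to select the MPS device module):

```python
import torch

if torch.backends.mps.is_available():
    # device_count() is what torch.fork_rng() needs; it is 1 when MPS is
    # built and available, 0 otherwise.
    assert torch.mps.device_count() == 1

    saved = torch.mps.get_rng_state()  # the device argument defaults to "mps"
    with torch.random.fork_rng(device_type="mps"):
        torch.mps.manual_seed(0)
        _ = torch.randn(4, device="mps")
    torch.mps.set_rng_state(saved, device="mps")
```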
|
diff --git a/docs/source/mps.rst b/docs/source/mps.rst
index bff15666ff..bab0d3378e 100644
--- a/docs/source/mps.rst
+++ b/docs/source/mps.rst
@@ -7,6 +7,7 @@ torch.mps
:toctree: generated
:nosignatures:
+ device_count
synchronize
get_rng_state
set_rng_state
@@ -39,4 +40,4 @@ MPS Event
.. This module needs to be documented. Adding here in the meantime
.. for tracking purposes
.. py:module:: torch.mps.event
-.. py:module:: torch.mps.profiler
\ No newline at end of file
+.. py:module:: torch.mps.profiler
diff --git a/torch/mps/__init__.py b/torch/mps/__init__.py
index 52cda4fb0c..6118c2b056 100644
--- a/torch/mps/__init__.py
+++ b/torch/mps/__init__.py
@@ -4,6 +4,8 @@ Metal is Apple's API for programming metal GPU (graphics processor unit). Using
performance can be achieved, by running work on the metal GPU(s).
See https://developer.apple.com/documentation/metalperformanceshaders for more details.
"""
+from typing import Union
+
import torch
from .. import Tensor
@@ -19,21 +21,35 @@ def _get_default_mps_generator() -> torch._C.Generator:
return _default_mps_generator
+def device_count() -> int:
+ r"""Returns the number of available MPS devices."""
+ return int(torch._C._has_mps and torch._C._mps_is_available())
+
+
def synchronize() -> None:
r"""Waits for all kernels in all streams on a MPS device to complete."""
return torch._C._mps_deviceSynchronize()
-def get_rng_state() -> Tensor:
- r"""Returns the random number generator state as a ByteTensor."""
+def get_rng_state(device: Union[int, str, torch.device] = "mps") -> Tensor:
+ r"""Returns the random number generator state as a ByteTensor.
+
+ Args:
+ device (torch.device or int, optional): The device to return the RNG state of.
+ Default: ``'mps'`` (i.e., ``torch.device('mps')``, the current MPS device).
+ """
return _get_default_mps_generator().get_state()
-def set_rng_state(new_state: Tensor) -> None:
+def set_rng_state(
+ new_state: Tensor, device: Union[int, str, torch.device] = "mps"
+) -> None:
r"""Sets the random number generator state.
Args:
new_state (torch.ByteTensor): The desired state
+ device (torch.device or int, optional): The device to set the RNG state.
+ Default: ``'mps'`` (i.e., ``torch.device('mps')``, the current MPS device).
"""
new_state_copy = new_state.clone(memory_format=torch.contiguous_format)
_get_default_mps_generator().set_state(new_state_copy)
@@ -116,6 +132,7 @@ from . import profiler
from .event import Event
__all__ = [
+ "device_count",
"get_rng_state",
"manual_seed",
"seed",
|
2.41.0
|
bdd569e412c4af5957b10ff8039511f8175986e
|
Tue, 23 Apr 2024 18:39:35 +0000
|
[PATCH 0530/1000] [easy][test_profiler.py] if tqdm is not available, pass instead of None (#124729)
|
Change the `except ImportError` branch to `pass` when tqdm cannot be imported. To address comment: https://github.com/pytorch/pytorch/pull/124409#discussion_r1576327365 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124729 Approved by: https://github.com/malfet, https://github.com/shengfukevin
|
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py
index 5c890bb672..7606dee2c8 100644
--- a/test/profiler/test_profiler.py
+++ b/test/profiler/test_profiler.py
@@ -11,7 +11,7 @@ try:
tqdm.tqdm.monitor_interval = 0
except ImportError:
- None
+ pass
import collections
import gc
|
2.41.0
|
5a448f3cbcb7d48ce9392955269cb387f6dd759
|
Tue, 23 Apr 2024 08:26:10 -0700
|
[PATCH 0531/1000] Record structured log for overall AOTAutograd backwards compilation (#124648)
|
It's sort of similar to CompilationMetrics but also not quite the same, quite open to bikeshedding. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124648 Approved by: https://github.com/bdhirsh ghstack dependencies: #124626
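A rough sketch of how the new entry surfaces, assuming the `TORCH_TRACE` environment variable used for dumping structured trace logs (the directory and exact log layout are illustrative):

```python
import os
os.environ["TORCH_TRACE"] = "/tmp/torch_trace"  # set before torch is imported

import torch

@torch.compile
def f(x):
    return torch.sin(x).sum()

# Backward compilation happens lazily on .backward(), which is when the new
# "aot_autograd_backward_compilation_metrics" record (start_time, elapsed_time,
# fail_type, fail_reason) is emitted into the structured trace log.
f(torch.randn(8, requires_grad=True)).backward()
```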
|
diff --git a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
index e5e4cfae30..5aed487080 100644
--- a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
@@ -8,6 +8,7 @@ in `runtime_wrappers`.
"""
import logging
+import time
from contextlib import nullcontext
from functools import wraps
from typing import Any, List, Optional, Sequence
@@ -928,9 +929,41 @@ Got grad_output types: {str(grad_output_types)}"""
with tracing(saved_context), compile_context(
saved_compile_context
), context(), track_graph_compiling(aot_config, "backward"):
- CompiledFunction.compiled_bw = aot_config.bw_compiler(
- bw_module, placeholder_list
- )
+ fail_type: Optional[str] = None
+ fail_reason: Optional[str] = None
+ start_time = time.time()
+ try:
+ CompiledFunction.compiled_bw = aot_config.bw_compiler(
+ bw_module, placeholder_list
+ )
+ except Exception as e:
+ fail_type = str(type(e))
+ fail_reason = str(e)
+ if saved_compile_context is not None:
+ e.compile_id = saved_compile_context.compile_id # type: ignore[attr-defined]
+ raise
+ finally:
+ # TODO: Similar to CompilationMetrics, we would
+ # like to report inductor_compile_time, but we
+ # cannot conveniently do so because these are
+ # keyed on utils.frame, and frame key is not
+ # incremented on backwards compilations. Maybe
+ # should just bump the frame key here too?
+ end_time = time.time()
+ # TODO: Put this in scuba? But CompilationMetrics
+ # is kind of not a great match, because there's no
+ # interaction with Dynamo, so a lot of Dynamo only
+ # events don't exist anymore. So we need a new
+ # scuba table. Lazy lazy...
+ trace_structured(
+ "aot_autograd_backward_compilation_metrics",
+ lambda: {
+ "start_time": start_time,
+ "elapsed_time": time.time() - start_time,
+ "fail_type": fail_type,
+ "fail_reason": fail_reason,
+ },
+ )
out = call_func_at_runtime_with_args(
CompiledFunction.compiled_bw,
|
2.41.0
|
112792a699919f7efef2bc909456c4971b86a9b
|
Tue, 23 Apr 2024 20:07:49 +0000
|
[PATCH 0532/1000] [export] refactor _AddRuntimeAssertionsForInlineConstraintsPass (#124503)
|
Summary: The current _AddRuntimeAssertionsForInlineConstraintsPass has 2 known issues caused by its use of torch.fx.Interpreter: 1. SymInt-related ops (e.g. item()) are executed, causing new unbacked SymInts to appear in the graph during the pass. 2. The graph is reconstructed, and node names/indices can differ from before, causing mismatches with `module_call_graph` and leading to issues during unflattening. This refactors the pass to use PassBase instead of _ExportPassBaseDeprecatedDoNotUse, constructing new nodes only for the assertions. Test Plan: This pass is called on all strict-mode export calls with range_constraints; test that behavior remains unchanged. Differential Revision: D56360137 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124503 Approved by: https://github.com/zhxchen17
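For context, a sketch of the kind of program that reaches this pass (module and bounds are illustrative): exporting a graph whose unbacked SymInt carries range constraints is what triggers the `_assert_async` insertion.

```python
import torch

class M(torch.nn.Module):
    def forward(self, x):
        n = x.item()              # produces an unbacked SymInt
        torch._check_is_size(n)
        torch._check(n <= 16)     # gives it a range constraint
        return torch.ones(n)

ep = torch.export.export(M(), (torch.tensor(5),))
# With range_constraints non-empty, the refactored pass inserts
# aten._assert_async.msg guards directly into the graph instead of
# re-running it through an fx.Interpreter.
print(ep.graph_module.code)
```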
|
diff --git a/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py b/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py
index 6823c3ec24..a2cbb4ebe7 100644
--- a/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py
+++ b/torch/_export/passes/add_runtime_assertions_for_constraints_pass.py
@@ -1,3 +1,4 @@
+import copy
import math
import operator
import traceback
@@ -8,10 +9,11 @@ import sympy
import torch
import torch.fx
-from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse, ProxyValue, PassResult
from torch.utils._sympy.value_ranges import ValueRanges
from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
-
+from torch.fx.passes.infra.pass_base import PassBase, PassResult
+from torch._subclasses import FakeTensor
+from torch._subclasses.fake_tensor import FakeTensorMode
__all__ = ["InputDim"]
@@ -41,7 +43,7 @@ def _convert_range_to_int(range: ValueRanges):
return min_val, max_val
-class _AddRuntimeAssertionsForInlineConstraintsPass(_ExportPassBaseDeprecatedDoNotUse):
+class _AddRuntimeAssertionsForInlineConstraintsPass(PassBase):
def __init__(
self,
range_constraints: Dict[sympy.Symbol, ValueRanges],
@@ -50,97 +52,123 @@ class _AddRuntimeAssertionsForInlineConstraintsPass(_ExportPassBaseDeprecatedDoN
self.range_constraints: Dict[sympy.Symbol, ValueRanges] = range_constraints
self._asserts_generated_unbacked_symbols: Set[sympy.Symbol] = set()
self.counter = 0
-
- def _assert_range_constraint(self, proxy, lower, upper, assert_msg):
+ self.fake_tensor_mode = FakeTensorMode(allow_non_fake_inputs=True)
+
+ def _create_metadata(self, node, original_meta, val):
+ node.meta = {
+ "stack_trace": "".join(traceback.format_stack(limit=1)),
+ "seq_nr": -1,
+ "tensor_meta": None,
+ }
+ for field in ["nn_module_stack", "source_fn_stack", "from_node", "torch_fn"]:
+ if field in original_meta:
+ node.meta[field] = copy.copy(original_meta[field])
+ node.meta["val"] = val
+
+ def _assert_range_constraint(self, node, lower, upper, assert_msg):
+ last_node = node
if lower > -math.inf:
- self._insert_assert_async(operator.ge, proxy, lower, assert_msg)
+ last_node = self._insert_assert_async(last_node, operator.ge, node, lower, assert_msg)
if upper < math.inf:
- self._insert_assert_async(operator.le, proxy, upper, assert_msg)
+ last_node = self._insert_assert_async(last_node, operator.le, node, upper, assert_msg)
- def _insert_assert_async(self, operator, lower, upper, assert_msg):
+ def _insert_assert_async(self, last_node, op, lower, upper, assert_msg):
"""
Inserts assert_async call_function nodes in the graph. This function is
called **during** the interpreter-based pass.
"""
self.counter += 1
- cmp = super().call_operator(operator, (lower, upper), {}, self._create_dummy_node_metadata())
- cmp_tensor = super().call_operator(torch.ops.aten.scalar_tensor.default, (cmp,), {}, self._create_dummy_node_metadata())
- super().call_operator(
- torch.ops.aten._assert_async.msg,
- (cmp_tensor, assert_msg),
- {},
- self._create_dummy_node_metadata(),
+ graph = last_node.graph
+ with graph.inserting_after(last_node):
+ cmp = graph.call_function(op, (lower, upper), {})
+ with graph.inserting_after(cmp):
+ cmp_tensor = graph.call_function(torch.ops.aten.scalar_tensor.default, (cmp,), {})
+ with graph.inserting_after(cmp_tensor):
+ assert_async = graph.call_function(
+ torch.ops.aten._assert_async.msg,
+ (cmp_tensor, assert_msg),
+ {},
+ )
+ # create metadata
+ val = lower.meta["val"]
+ self._create_metadata(cmp, lower.meta, val >= 0 if op == operator.ge else val <= 0)
+ self._create_metadata(cmp_tensor, lower.meta, FakeTensor(
+ self.fake_tensor_mode,
+ torch.empty((), dtype=torch.float32, device="meta"),
+ device="cpu"
+ ))
+ self._create_metadata(assert_async, lower.meta, None)
+ return assert_async
+
+ def call(self, graph_module) -> PassResult:
+ self.existing_inline_assertions = _get_existing_inline_assertions(
+ graph_module, self.range_constraints
)
- def call_operator(self, op, args, kwargs, meta) -> ProxyValue:
- ret = super().call_operator(op, args, kwargs, meta)
- if "val" not in meta:
- return ret
-
- val = meta["val"]
-
- # In general, we may have to deal the case such as: ret[1].shape[0].
- # We need first find out what symbols require assertion, then we need to follow the path
- # from ret to the symbol, construct the proxies along the way and construct the messages
- # piece-wise at the same time.
- #
- # We use post-order traversal to collect all the proxies callbacks needed, construct
- # the error message callbacks, and at the top-level traversal tree we execute all the callbacks.
- # We need the callbacks because, in order to call the function to create a proxy for shape[0], we
- # need the proxy for shape, which further requires the proxy for ret[1], etc.
- def add_assertions(val):
- call_backs: List[Callable] = []
- messages: List[str] = []
- if isinstance(val, (torch.SymInt, torch.SymFloat, torch.SymBool)):
- symbol = val.node.expr
- if symbol in self.existing_inline_assertions:
- return call_backs, messages
- if isinstance(symbol, sympy.Symbol) and free_unbacked_symbols(symbol):
- if symbol in self._asserts_generated_unbacked_symbols:
- return call_backs, messages
- # We only care about unbacked symints for these inline
- # constraints, which are prefixed with 'u'
- constraint = self.range_constraints[symbol]
- min_val, max_val = _convert_range_to_int(constraint)
- assert_msg = f" is outside of inline constraint [{min_val}, {max_val}]."
- call_backs.append(
- partial(self._assert_range_constraint, lower=min_val, upper=max_val)
- )
- messages.append(assert_msg)
- self._asserts_generated_unbacked_symbols.add(symbol)
-
- elif isinstance(val, torch.Tensor):
- for i, sym in enumerate(val.shape):
- cbs, msgs = add_assertions(sym)
- for cb, msg in zip(cbs, msgs):
- def sym_size_cb(proxy, assert_msg, dim):
- dim_proxy = super(
- _AddRuntimeAssertionsForInlineConstraintsPass,
- self
- ).call_operator(
- torch.ops.aten.sym_size.int,
- (proxy, dim),
- {},
- self._create_dummy_node_metadata(),
+ for module in graph_module.modules():
+ if not isinstance(module, torch.fx.GraphModule):
+ continue
+ for node in module.graph.nodes:
+ if node.op != "call_function":
+ continue
+ if "val" not in node.meta:
+ continue
+
+ val = node.meta["val"]
+ # In general, we may have to deal the case such as: ret[1].shape[0].
+ # We need first find out what symbols require assertion, then we need to follow the path
+ # from ret to the symbol, construct the proxies along the way and construct the messages
+ # piece-wise at the same time.
+ #
+ # We use post-order traversal to collect all the proxies callbacks needed, construct
+ # the error message callbacks, and at the top-level traversal tree we execute all the callbacks.
+ # We need the callbacks because, in order to call the function to create a proxy for shape[0], we
+ # need the proxy for shape, which further requires the proxy for ret[1], etc.
+
+ def add_assertions(val):
+ call_backs: List[Callable] = []
+ messages: List[str] = []
+ if isinstance(val, (torch.SymInt, torch.SymFloat, torch.SymBool)):
+ symbol = val.node.expr
+ if symbol in self.existing_inline_assertions:
+ return call_backs, messages
+ if isinstance(symbol, sympy.Symbol) and free_unbacked_symbols(symbol):
+ if symbol in self._asserts_generated_unbacked_symbols:
+ return call_backs, messages
+ # We only care about unbacked symints for these inline
+ # constraints, which are prefixed with 'u'
+ constraint = self.range_constraints[symbol]
+ min_val, max_val = _convert_range_to_int(constraint)
+ assert_msg = f" is outside of inline constraint [{min_val}, {max_val}]."
+ call_backs.append(
+ partial(self._assert_range_constraint, lower=min_val, upper=max_val)
)
- cb(proxy=dim_proxy, assert_msg=assert_msg)
- call_backs.append(partial(sym_size_cb, dim=i))
- messages.append(f".shape[{i}]" + msg)
- return call_backs, messages
-
- callbacks, messages = add_assertions(val)
- for cb, msg in zip(callbacks, messages):
- cb(proxy=ret, assert_msg=f"{ret.node}" + msg)
- return ret
+ messages.append(assert_msg)
+ self._asserts_generated_unbacked_symbols.add(symbol)
+
+ elif isinstance(val, torch.Tensor):
+ for i, sym in enumerate(val.shape):
+ cbs, msgs = add_assertions(sym)
+ for cb, msg in zip(cbs, msgs):
+ def sym_size_cb(node, assert_msg, dim):
+ with node.graph.inserting_after(node):
+ dim_node = module.graph.call_function(
+ torch.ops.aten.sym_size.int,
+ (node, dim),
+ {},
+ )
+ self._create_metadata(dim_node, node.meta, val.shape[dim])
+ cb(node=dim_node, assert_msg=assert_msg)
+ call_backs.append(partial(sym_size_cb, dim=i))
+ messages.append(f".shape[{i}]" + msg)
+ return call_backs, messages
- def call(self, graph_module):
- self.existing_inline_assertions = _get_existing_inline_assertions(
- graph_module, self.range_constraints
- )
+ callbacks, messages = add_assertions(val)
+ for cb, msg in zip(callbacks, messages):
+ cb(node=node, assert_msg=f"{node}" + msg)
- # Add runtime asserts for inline constraints
- val = super().call(graph_module)
+ module.recompile()
# Sometimes this pass would return a wrong graph where we have mismatched
# node names in signature. Before we fix it, let's just skip it.
@@ -148,11 +176,10 @@ class _AddRuntimeAssertionsForInlineConstraintsPass(_ExportPassBaseDeprecatedDoN
return PassResult(graph_module, False)
# Populate the stack trace with dummy vals to respect IR
- for node in val.graph_module.graph.nodes:
+ for node in graph_module.graph.nodes:
if not node.meta.get("stack_trace", None) and node.op not in ["placeholder", "output"]:
node.meta["stack_trace"] = "".join(traceback.format_stack(limit=1))
-
- return PassResult(val.graph_module, val.modified)
+ return PassResult(graph_module, True)
def _get_existing_inline_assertions(
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 01d8f5e234..85844fd55b 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -1254,6 +1254,11 @@ def _export(
assert res is not None
gm = res.graph_module
+ if len(range_constraints) > 0:
+ res = _AddRuntimeAssertionsForInlineConstraintsPass(range_constraints)(gm)
+ assert res is not None
+ gm = res.graph_module
+
assert orig_out_spec is not None
_verify_nn_module_stack(gm)
_verify_stack_trace(gm)
@@ -1275,9 +1280,4 @@ def _export(
)
log.debug("Exported program from AOTAutograd:\n%s", exported_program)
- if len(range_constraints) > 0:
- exported_program = exported_program._transform_do_not_use(
- _AddRuntimeAssertionsForInlineConstraintsPass(range_constraints)
- )
-
return exported_program
diff --git a/torch/export/exported_program.py b/torch/export/exported_program.py
index 53b3a0a2ff..64844ea20d 100644
--- a/torch/export/exported_program.py
+++ b/torch/export/exported_program.py
@@ -662,6 +662,14 @@ class ExportedProgram:
self.constants[k] = v
_replace_sym_size_ops_pass(gm)
+
+ if len(new_range_constraints) > 0:
+ res = _AddRuntimeAssertionsForInlineConstraintsPass(new_range_constraints)(
+ gm
+ )
+ assert res is not None
+ gm = res.graph_module
+
exported_program = ExportedProgram(
root=gm,
graph=gm.graph,
@@ -673,11 +681,6 @@ class ExportedProgram:
verifier=self.verifier,
constants=self.constants,
)
- if len(new_range_constraints) > 0:
- exported_program = exported_program._transform_do_not_use(
- _AddRuntimeAssertionsForInlineConstraintsPass(new_range_constraints)
- )
-
return exported_program
def _transform_do_not_use(self, *passes: PassType) -> "ExportedProgram":
|
2.41.0
|
532c7949f58bd937434f23fdcac1c1a35818c2a
|
Mon, 22 Apr 2024 19:53:34 -0700
|
[PATCH 0533/1000] Fix import error in update_failures.py (#124695)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124695 Approved by: https://github.com/zou3519
|
diff --git a/scripts/compile_tests/update_failures.py b/scripts/compile_tests/update_failures.py
index b8e9e0d697..929ed9fe20 100755
--- a/scripts/compile_tests/update_failures.py
+++ b/scripts/compile_tests/update_failures.py
@@ -5,7 +5,6 @@ import pathlib
import subprocess
from common import (
- download_reports,
get_testcases,
is_failure,
is_passing_skipped_test,
@@ -14,6 +13,8 @@ from common import (
open_test_results,
)
+from download_reports import download_reports
+
"""
Usage: update_failures.py /path/to/dynamo_test_failures.py /path/to/test commit_sha
|
2.41.0
|
7e92162ebfab30f856cec91a05de405815804a7
|
Tue, 23 Apr 2024 12:40:57 -0700
|
[PATCH 0534/1000] [inductor] Keep inductor cache for tests when TORCH_COMPILE_DEBUG is specified (#124755)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124755 Approved by: https://github.com/masnesral
|
diff --git a/torch/_inductor/test_case.py b/torch/_inductor/test_case.py
index 0412d4eea5..3933c9dbc0 100644
--- a/torch/_inductor/test_case.py
+++ b/torch/_inductor/test_case.py
@@ -24,7 +24,10 @@ class TestCase(DynamoTestCase):
super().setUp()
self._inductor_test_stack = contextlib.ExitStack()
self._inductor_test_stack.enter_context(config.patch({"fx_graph_cache": True}))
- if os.environ.get("INDUCTOR_TEST_DISABLE_FRESH_CACHE") != "1":
+ if (
+ os.environ.get("INDUCTOR_TEST_DISABLE_FRESH_CACHE") != "1"
+ and os.environ.get("TORCH_COMPILE_DEBUG") != "1"
+ ):
self._inductor_test_stack.enter_context(fresh_inductor_cache())
def tearDown(self):
|
2.41.0
|
40774f4ed4a45c70d49e66f4e1f197dfc274758
|
Tue, 23 Apr 2024 20:26:42 +0000
|
[PATCH 0536/1000] [export] Fix up nn_module_stack for nodes occurring around tracepoint ops. (#124457)
|
Summary: as title. Test Plan: hg checkout D55901896 buck run mode/opt torchrec/ir/tests:test_serializer -- --filter-regex test_serialize_deserialize_ebc Differential Revision: D56340319 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124457 Approved by: https://github.com/tugsbayasgalan
|
diff --git a/torch/_export/passes/collect_tracepoints_pass.py b/torch/_export/passes/collect_tracepoints_pass.py
index 72ccaa0d22..ca8eaf30be 100644
--- a/torch/_export/passes/collect_tracepoints_pass.py
+++ b/torch/_export/passes/collect_tracepoints_pass.py
@@ -30,6 +30,41 @@ class CollectTracepointsPass(PassBase):
else:
return ConstantArgument(name="", value=arg)
+ for module in gm.modules():
+ if not isinstance(module, torch.fx.GraphModule):
+ continue
+ nn_module_stack = None
+ for node in module.graph.nodes:
+ if node.op != "call_function":
+ continue
+ if node.target == torch.ops.higher_order._export_tracepoint:
+ kind = node.kwargs["kind"]
+ if kind == "module_call_outputs":
+ nn_module_stack = node.meta["nn_module_stack"]
+ elif kind == "module_call_inputs":
+ nn_module_stack = None
+ else:
+ raise AssertionError(f"Unknown tracepoint kind: {kind}")
+ elif node.meta["nn_module_stack"] == nn_module_stack:
+ node.meta["nn_module_stack"].popitem()
+ else:
+ nn_module_stack = None
+ nn_module_stack = None
+ for node in reversed(module.graph.nodes):
+ if node.op != "call_function":
+ continue
+ if node.target == torch.ops.higher_order._export_tracepoint:
+ kind = node.kwargs["kind"]
+ if kind == "module_call_inputs":
+ nn_module_stack = node.meta["nn_module_stack"]
+ elif kind == "module_call_outputs":
+ nn_module_stack = None
+ else:
+ raise AssertionError(f"Unknown tracepoint kind: {kind}")
+ elif node.meta["nn_module_stack"] == nn_module_stack:
+ node.meta["nn_module_stack"].popitem()
+ else:
+ nn_module_stack = None
for module in gm.modules():
if not isinstance(module, torch.fx.GraphModule):
continue
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 85844fd55b..e27bf9a016 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -1041,6 +1041,7 @@ def _export(
"kind": node.kwargs["kind"],
},
)
+ new_node.meta = node.meta
node.replace_all_uses_with(new_node)
gm.graph.erase_node(node)
diff --git a/torch/export/unflatten.py b/torch/export/unflatten.py
index 33fd10a4e9..f8e220b00d 100644
--- a/torch/export/unflatten.py
+++ b/torch/export/unflatten.py
@@ -619,6 +619,7 @@ class _ModuleFrame:
self.parent_call_module.kwargs = kwarg_nodes
def add_placeholder(self, x):
+ assert self.fqn != "", f"Cannot add placeholder {x} to root module"
assert x.graph is self.flat_graph
# x is not in subgraph, create a new placeholder for subgraph
with self.graph.inserting_before(None):
|
2.41.0
|
63dc26e5918ce03372088d34498409528f8878b
|
Tue, 23 Apr 2024 11:36:34 -0300
|
[PATCH 0538/1000] [Dynamo] Add dynamo support to torch.func.linearize (#123118)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123118 Approved by: https://github.com/zou3519
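A minimal sketch of the now-traceable API, mirroring the new test (tracing may require `torch._dynamo.config.capture_func_transforms=True`, which the tests patch in):

```python
import torch

@torch.compile(backend="eager")
def fn(x):
    # linearize evaluates torch.sin at x once and returns a cheap jvp_fn
    out, jvp_fn = torch.func.linearize(torch.sin, x)
    return out, jvp_fn(x)

x = torch.randn(2, 3)
out, jvp = fn(x)
torch.testing.assert_close(out, torch.sin(x))
torch.testing.assert_close(jvp, torch.cos(x) * x)  # jvp of sin at x with tangent x
```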
|
diff --git a/aten/src/ATen/native/AutogradComposite.cpp b/aten/src/ATen/native/AutogradComposite.cpp
index 3051a4022c..dc98c90a59 100644
--- a/aten/src/ATen/native/AutogradComposite.cpp
+++ b/aten/src/ATen/native/AutogradComposite.cpp
@@ -43,17 +43,17 @@ Tensor _new_zeros_with_same_feature_meta(
const at::Tensor& self,
const at::Tensor& other,
int64_t self_num_batch_dims) {
- auto other_sizes = other.sizes();
- auto other_strides = other.strides();
+ auto other_sizes = other.sym_sizes();
+ auto other_strides = other.sym_strides();
auto other_storage_offset = other.storage_offset();
- int64_t other_storage_numel = other.storage().nbytes() / other.itemsize();
+ auto other_storage_numel = other.storage().sym_nbytes() / other.itemsize();
if (self_num_batch_dims == 0) {
- auto new_tensor = at::zeros({other_storage_numel}, other.options());
- return new_tensor.as_strided(other_sizes, other_strides, other_storage_offset);
+ auto new_tensor = at::zeros_symint({other_storage_numel}, other.options());
+ return new_tensor.as_strided_symint(other_sizes, other_strides, other_storage_offset);
}
- auto self_sizes = self.sizes();
+ auto self_sizes = self.sym_sizes();
// NB: We don't check that the sizes of self is the same as that of other
// because this function is also used in the inplace over view case
@@ -65,14 +65,14 @@ Tensor _new_zeros_with_same_feature_meta(
// this case.
constexpr int64_t kSmallBufferSizeHint = 8;
- auto out_sizes = c10::SmallBuffer<int64_t, kSmallBufferSizeHint>(other.dim() + self_num_batch_dims);
+ auto out_sizes = c10::SmallVector<c10::SymInt, kSmallBufferSizeHint>(other.dim() + self_num_batch_dims);
std::copy(self_sizes.begin(), self_sizes.begin() + self_num_batch_dims, out_sizes.begin());
std::copy(other_sizes.begin(), other_sizes.end(), out_sizes.begin() + self_num_batch_dims);
// We use the strides of other, and tack on the strides computed with
// the batch dims of self, so that the slices are arranged contiguously
- auto out_strides = c10::SmallBuffer<int64_t, kSmallBufferSizeHint>(other.dim() + self_num_batch_dims);
- int64_t prod = other_storage_numel;
+ auto out_strides = c10::SmallVector<c10::SymInt, kSmallBufferSizeHint>(other.dim() + self_num_batch_dims);
+ auto prod = other_storage_numel;
for (int64_t i = self_num_batch_dims - 1; i >= 0; --i) {
out_strides[i] = prod;
@@ -80,11 +80,11 @@ Tensor _new_zeros_with_same_feature_meta(
}
std::copy(other_strides.begin(), other_strides.end(), out_strides.begin() + self_num_batch_dims);
- int64_t storage_numel = prod;
+ auto storage_numel = prod;
// Inherit the TensorOptions of the primal
- auto new_tensor = at::zeros({storage_numel}, other.options());
- return new_tensor.as_strided(out_sizes, out_strides, other_storage_offset);
+ auto new_tensor = at::zeros_symint({storage_numel}, other.options());
+ return new_tensor.as_strided_symint(out_sizes, out_strides, other_storage_offset);
}
bool _has_same_storage_numel(const at::Tensor& base, const at::Tensor& other) {
diff --git a/test/dynamo/test_higher_order_ops.py b/test/dynamo/test_higher_order_ops.py
index 9f1819570a..1f9fc9c960 100644
--- a/test/dynamo/test_higher_order_ops.py
+++ b/test/dynamo/test_higher_order_ops.py
@@ -2690,6 +2690,25 @@ class HigherOrderOpVmapGuardTests(LoggingTestCase):
munge_exc(record.getMessage()),
)
+ @make_logging_test(recompiles=True)
+ def test_linearize_recompiles(self, records):
+ @torch.compile(backend="eager")
+ def fn(x):
+ out, jvp_fn = torch.func.linearize(torch.sin, x)
+ return out, jvp_fn(x)
+
+ x = torch.randn(2, 3)
+ fn(x)
+ self.assertEqual(len(records), 0)
+
+ z = torch.randn(2, 3)
+ fn(z)
+ self.assertEqual(len(records), 0)
+
+ y = torch.randn(3, 4)
+ fn(y)
+ self.assertGreater(len(records), 0)
+
class FuncTorchHigherOrderOpTests(torch._dynamo.test_case.TestCase):
def tearDown(self):
@@ -5077,6 +5096,87 @@ class GraphModule(torch.nn.Module):
)
self.assertEqual(actual, expected)
+ @config.patch(capture_func_transforms=True)
+ def test_linearize_jvp_fn(self):
+ counters.clear()
+
+ def wrapper_fn(x):
+ output, jvp_fn = torch.func.linearize(torch.sin, x)
+ return output, jvp_fn(x)
+
+ x = torch.randn(3, 3, 3)
+ wrapped_gm = self._compile_check(wrapper_fn, (x,), fullgraph=False, graph_idx=0)
+
+ # Dynamic shapes produce a slightly different graph.
+ if check_dynamic_shape_capture():
+ return
+
+ actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
+ self.assertExpectedInline(
+ actual,
+ """\
+class GraphModule(torch.nn.Module):
+ def forward(self, L_self_tensor_constant0 : torch.Tensor):
+ l_self_tensor_constant0 = L_self_tensor_constant0
+
+ alias_default = torch.ops.aten.alias.default(l_self_tensor_constant0); l_self_tensor_constant0 = None
+
+ sin_default = torch.ops.aten.sin.default(alias_default)
+
+ alias_default_1 = torch.ops.aten.alias.default(alias_default)
+
+ cos_default = torch.ops.aten.cos.default(alias_default_1); alias_default_1 = None
+
+ alias_default_2 = torch.ops.aten.alias.default(sin_default)
+ return (alias_default, cos_default, sin_default)
+""",
+ )
+
+ wrapped_gm = self._compile_check(wrapper_fn, (x,), fullgraph=False, graph_idx=1)
+ actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
+ self.assertExpectedInline(
+ actual,
+ """\
+class GraphModule(torch.nn.Module):
+ def forward(self, getattr_L_self_FX_CONST_FOLDED_ATTRS_0_ : torch.nn.parameter.Parameter, getattr_L_self_FX_CONST_FOLDED_ATTRS_1_ : torch.nn.parameter.Parameter, L_flat_tangents_1_ : torch.Tensor):
+ getattr_l_self_fx_const_folded_attrs_0_ = getattr_L_self_FX_CONST_FOLDED_ATTRS_0_
+ getattr_l_self_fx_const_folded_attrs_1_ = getattr_L_self_FX_CONST_FOLDED_ATTRS_1_
+ l_flat_tangents_1_ = L_flat_tangents_1_
+
+ _new_zeros_with_same_feature_meta_default = torch.ops.aten._new_zeros_with_same_feature_meta.default(l_flat_tangents_1_, getattr_l_self_fx_const_folded_attrs_0_); getattr_l_self_fx_const_folded_attrs_0_ = None
+
+ copy__default = torch.ops.aten.copy_.default(_new_zeros_with_same_feature_meta_default, l_flat_tangents_1_); _new_zeros_with_same_feature_meta_default = l_flat_tangents_1_ = None
+
+ mul_tensor = torch.ops.aten.mul.Tensor(copy__default, getattr_l_self_fx_const_folded_attrs_1_); copy__default = getattr_l_self_fx_const_folded_attrs_1_ = None
+ return (mul_tensor,)
+""",
+ )
+
+ def test_linearize_disable_capture(self):
+ counters.clear()
+ with config.patch(capture_func_transforms=False):
+ # We have verified above that this
+ # function compiles
+ def wrapper_fn(x):
+ out, _ = torch.func.linearize(torch.sin, x)
+ return out
+
+ x = torch.randn(2, 3)
+ actual = wrapper_fn(x)
+ expected = torch.compile(wrapper_fn, backend="aot_eager", fullgraph=False)(
+ x
+ )
+ self.assertEqual(len(counters["graph_break"]), 1)
+ self.assertEqual(
+ {
+ "torch.func.linearize capture is disabled, it can be "
+ "turned on by setting `torch._dynamo.config.capture_func_transforms=True`": 1,
+ },
+ dict(counters["graph_break"]),
+ )
+ self.assertEqual(actual, expected)
+
+ @config.patch(capture_func_transforms=True)
@config.patch(error_on_recompile=True)
def test_vmap_recompile(self):
@torch.compile(backend="eager")
diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py
index cc6c2a4e88..2fb8dc7241 100644
--- a/torch/_dynamo/trace_rules.py
+++ b/torch/_dynamo/trace_rules.py
@@ -262,6 +262,8 @@ manual_torch_name_rule_map = {
"torch.autograd.forward_ad.exit_dual_level": UserFunctionVariable,
"torch.autograd.forward_ad.make_dual": UserFunctionVariable,
"torch.autograd.forward_ad.unpack_dual": UserFunctionVariable,
+ # functorch/linearize
+ "torch._functorch.eager_transforms.linearize": FunctorchHigherOrderVariable,
# functorch/jacfwd
"torch._functorch.eager_transforms.jacfwd": FunctorchHigherOrderVariable,
"torch._functorch.eager_transforms._construct_standard_basis_for": UserFunctionVariable,
@@ -2240,6 +2242,7 @@ torch_non_c_binding_in_graph_functions = dict.fromkeys(
"torch._functorch.eager_transforms._vjp_treespec_compare",
"torch._functorch.eager_transforms._set_tensor_requires_grad",
"torch._functorch.eager_transforms._jvp_treespec_compare",
+ "torch._functorch.eager_transforms._linearize_treespec_compare",
"torch._functorch.eager_transforms._is_differentiable",
"torch._functorch.eager_transforms._maybe_unwrap_functional_tensor",
"torch._functorch.eager_transforms._maybe_wrap_functional_tensor",
@@ -2248,7 +2251,6 @@ torch_non_c_binding_in_graph_functions = dict.fromkeys(
"torch._functorch.eager_transforms.assert_flat_tuple_of_tensors",
"torch._functorch.eager_transforms.functionalize",
"torch._functorch.eager_transforms.lazy_dynamo_disable",
- "torch._functorch.eager_transforms.linearize",
"torch._functorch.eager_transforms.noop",
"torch._functorch.functional_call.construct_stacked_leaf",
"torch._functorch.functional_call.functional_call",
@@ -2587,6 +2589,8 @@ torch_non_c_binding_in_graph_functions = dict.fromkeys(
"torch.functional.unravel_index",
"torch.futures.collect_all",
"torch.futures.wait_all",
+ "torch.fx.experimental.const_fold.split_const_subgraphs",
+ "torch.fx.experimental.proxy_tensor.make_fx",
"torch.get_deterministic_debug_mode",
"torch.get_float32_matmul_precision",
"torch.is_deterministic_algorithms_warn_only_enabled",
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index 0db77f53ee..a1abcb15fb 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -1191,6 +1191,7 @@ class FunctorchHigherOrderVariable(UserFunctionVariable):
"jacrev": "jacrev",
"jacfwd": "jacfwd",
"hessian": "hessian",
+ "linearize": "linearize",
}.get(name)
assert name is not None
unimplemented(
diff --git a/torch/_functorch/eager_transforms.py b/torch/_functorch/eager_transforms.py
index e158a40b88..fff6bd6783 100644
--- a/torch/_functorch/eager_transforms.py
+++ b/torch/_functorch/eager_transforms.py
@@ -47,10 +47,10 @@ from .apis import vmap
from .vmap import doesnt_support_saved_tensors_hooks, get_chunk_sizes
-def lazy_dynamo_disable(func):
+def lazy_dynamo_disallow(func):
import torch._dynamo
- return torch._dynamo.disable(func)
+ return torch._dynamo.disallow_in_graph(func)
@contextlib.contextmanager
@@ -91,6 +91,17 @@ def _jvp_treespec_compare(primals, tangents):
)
+def _linearize_treespec_compare(primals, tangents):
+ # Revert this once #116264 gets fixed
+ _, primals_argspec = tree_flatten(primals)
+ _, tangent_argspec = tree_flatten(tangents)
+ if tangent_argspec != primals_argspec:
+ raise RuntimeError(
+ f"Expected the tangents {tangent_argspec} to have "
+ f"the same argspec as the primals {primals_argspec}"
+ )
+
+
def _set_tensor_requires_grad(x):
# avoid graph-break on x.requires_grad_()
# https://github.com/pytorch/pytorch/pull/110053
@@ -1778,8 +1789,10 @@ def linearize(func: Callable, *primals) -> Tuple[Any, Callable]:
return tangents
- jvp_graph = make_fx(trace_fn)(flat_tangents)
- const_folded_jvp_graph = const_fold.split_const_subgraphs(jvp_graph)
+ jvp_graph = lazy_dynamo_disallow(make_fx)(trace_fn)(flat_tangents)
+ const_folded_jvp_graph = lazy_dynamo_disallow(const_fold.split_const_subgraphs)(
+ jvp_graph
+ )
# Hold only the meta-data regarding the primals.
flat_primals_shape = tuple(p.shape for p in flat_primals)
@@ -1817,11 +1830,7 @@ def linearize(func: Callable, *primals) -> Tuple[Any, Callable]:
# calling the folded fx graph and unflattening fx graph output
def jvp_fn(*tangents):
flat_tangents, tangent_argspec = tree_flatten(tangents)
- if tangent_argspec != primals_argspec:
- raise RuntimeError(
- f"Expected the tangents {tangent_argspec} to have "
- f"the same argspec as the primals {primals_argspec}"
- )
+ _linearize_treespec_compare(primals, tangents)
forward_ad_checks(flat_tangents)
|
2.41.0
|
ceb44c40d52c57143ab1dbc8c14373f5205e9fb
|
Tue, 23 Apr 2024 10:31:46 -0700
|
[PATCH 0539/1000] Add torch.library.opcheck (#124496)
|
This PR: - exposes torch.testing._internal.optests.opcheck as torch.library.opcheck - Adds support for CustomOpDef (aka functions decorated with torch.library.custom_op) to opcheck. Test Plan: - Updated tests - We validated opcheck's design internally. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124496 Approved by: https://github.com/williamwen42
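A quick usage sketch mirroring the updated tests (the result dict shown assumes every default check passes for this aten op):

```python
import torch

x = torch.randn(3, requires_grad=True)
results = torch.library.opcheck(torch.ops.aten.sin.default, (x,))
print(results)
# Expected with the default test_utils:
# {"test_schema": "SUCCESS", "test_autograd_registration": "SUCCESS",
#  "test_faketensor": "SUCCESS", "test_aot_dispatch_dynamic": "SUCCESS"}

# Functions decorated with torch.library.custom_op (CustomOpDef objects) can
# now be passed directly as the first argument as well.
```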
|
diff --git a/docs/source/library.rst b/docs/source/library.rst
index b2ae235414..236da45f93 100644
--- a/docs/source/library.rst
+++ b/docs/source/library.rst
@@ -4,13 +4,23 @@ torch.library
.. currentmodule:: torch.library
torch.library is a collection of APIs for extending PyTorch's core library
-of operators. It contains utilities for creating new custom operators as
-well as extending operators defined with PyTorch's C++ operator
+of operators. It contains utilities for testing custom operators, creating new
+custom operators, and extending operators defined with PyTorch's C++ operator
registration APIs (e.g. aten operators).
For a detailed guide on effectively using these APIs, please see
`this gdoc <https://docs.google.com/document/d/1W--T6wz8IY8fOI0Vm8BF44PdBgs283QvpelJZWieQWQ/edit>`_
+Testing custom ops
+------------------
+
+Use :func:`torch.library.opcheck` to test custom ops for incorrect usage of the
+Python torch.library and/or C++ TORCH_LIBRARY APIs. Also, if your operator supports
+training, use :func:`torch.autograd.gradcheck` to test that the gradients are
+mathematically correct.
+
+.. autofunction:: opcheck
+
Creating new custom ops in Python
---------------------------------
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index f4dc19eb01..caae309cf6 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -131,7 +131,7 @@ class TestCustomOpTesting(CustomOpTestCaseBase):
with self.assertRaisesRegex(
optests.OpCheckError, "Argument x is not defined as mutable but was mutated"
):
- optests.opcheck(op, (x,), {})
+ torch.library.opcheck(op, (x,), {})
def test_incorrect_schema_view(self, device):
lib = self.lib()
@@ -167,7 +167,7 @@ class TestCustomOpTesting(CustomOpTestCaseBase):
optests.OpCheckError,
"Argument x is not defined to alias output but was aliasing",
):
- optests.opcheck(op, (x,), {})
+ torch.library.opcheck(op, (x,), {})
def test_missing_abstract_impl(self, device):
lib = self.lib()
@@ -196,7 +196,7 @@ class TestCustomOpTesting(CustomOpTestCaseBase):
optests.OpCheckError,
"_test_custom_op.foo.default",
):
- optests.opcheck(op, (x,), {})
+ torch.library.opcheck(op, (x,), {})
def test_incorrect_abstract_impl(self, device):
lib = self.lib()
@@ -234,7 +234,7 @@ class TestCustomOpTesting(CustomOpTestCaseBase):
x = torch.tensor([0, 1.0], requires_grad=True)
with self.assertRaisesRegex(optests.OpCheckError, "Shapes .* are not equal"):
- optests.opcheck(op, (x,), {})
+ torch.library.opcheck(op, (x,), {})
def test_missing_functionalization(self, device):
lib = self.lib()
@@ -269,7 +269,7 @@ class TestCustomOpTesting(CustomOpTestCaseBase):
optests.OpCheckError,
"We only support functionalizing operators whose outputs do not have alias annotations",
):
- optests.opcheck(op, (y,), {})
+ torch.library.opcheck(op, (y,), {})
def test_autograd_registered_at_backend(self, device):
lib = self.lib()
@@ -295,7 +295,7 @@ class TestCustomOpTesting(CustomOpTestCaseBase):
torch.testing._internal.optests.OpCheckError,
"does not have an autograd kernel",
):
- optests.opcheck(op, (x,), {})
+ torch.library.opcheck(op, (x,), {})
# I'm not sure why this is necessary
del lib
@@ -323,7 +323,7 @@ class TestCustomOpTesting(CustomOpTestCaseBase):
with self.assertRaisesRegex(
optests.OpCheckError, "eager-mode PyTorch vs AOTAutograd"
):
- optests.opcheck(op, (x,), {})
+ torch.library.opcheck(op, (x,), {})
@ops(custom_op_db.custom_op_db, dtypes=OpDTypes.any_one)
def test_opcheck_opinfo(self, device, dtype, op):
@@ -332,7 +332,7 @@ class TestCustomOpTesting(CustomOpTestCaseBase):
):
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
- optests.opcheck(
+ torch.library.opcheck(
op.op,
args,
kwargs,
@@ -352,7 +352,7 @@ class TestCustomOpTesting(CustomOpTestCaseBase):
with self.assertRaisesRegex(
optests.OpCheckError, "Autograd has not been implemented for operator"
):
- optests.opcheck(self.get_op(f"{self.test_ns}::foo"), (x,), {})
+ torch.library.opcheck(self.get_op(f"{self.test_ns}::foo"), (x,), {})
def test_autograd_registration_check_autograd_kernel(self, device):
lib = self.lib()
@@ -2933,10 +2933,10 @@ opcheck(op, args, kwargs, test_utils="test_schema")
def test_opcheck(self):
x = torch.randn(3, requires_grad=True)
with self.assertRaisesRegex(ValueError, "OpOverload"):
- optests.opcheck(torch.sin, (x,))
+ torch.library.opcheck(torch.sin, (x,))
with self.assertRaisesRegex(ValueError, "test_utils to be subset of"):
- optests.opcheck(torch.ops.aten.sin.default, (x,), test_utils="blah")
- result = optests.opcheck(torch.ops.aten.sin.default, (x,))
+ torch.library.opcheck(torch.ops.aten.sin.default, (x,), test_utils="blah")
+ result = torch.library.opcheck(torch.ops.aten.sin.default, (x,))
self.assertEqual(
result,
@@ -2948,7 +2948,7 @@ opcheck(op, args, kwargs, test_utils="test_schema")
},
)
- result = optests.opcheck(
+ result = torch.library.opcheck(
torch.ops.aten.sin.default, (x,), test_utils="test_schema"
)
self.assertEqual(
@@ -2958,7 +2958,7 @@ opcheck(op, args, kwargs, test_utils="test_schema")
},
)
- result = optests.opcheck(
+ result = torch.library.opcheck(
torch.ops.aten.sin.default,
(x,),
test_utils=["test_schema", "test_faketensor"],
@@ -2971,6 +2971,21 @@ opcheck(op, args, kwargs, test_utils="test_schema")
},
)
+ def test_opcheck_customopdef(self):
+ sample_inputs = [
+ (torch.randn(3),),
+ (torch.randn(3, requires_grad=True),),
+ ]
+ if torch.cuda.is_available():
+ sample_inputs.extend(
+ [
+ (torch.randn(3, device="cuda"),),
+ (torch.randn(3, device="cuda", requires_grad=True),),
+ ]
+ )
+ for args in sample_inputs:
+ torch.library.opcheck(custom_op_db.numpy_cube, args)
+
def test_is_inside_opcheck_mode(self):
self.assertFalse(optests.is_inside_opcheck_mode())
with optests.generate_tests.OpCheckMode(
@@ -2982,9 +2997,9 @@ opcheck(op, args, kwargs, test_utils="test_schema")
op = op_with_incorrect_schema(self, "foo")
x = torch.randn(3)
with self.assertRaisesRegex(Exception, "is not defined to alias output"):
- optests.opcheck(op, (x,))
+ torch.library.opcheck(op, (x,))
- result = optests.opcheck(op, (x,), raise_exception=False)
+ result = torch.library.opcheck(op, (x,), raise_exception=False)
self.assertTrue(isinstance(result["test_schema"], RuntimeError))
del result["test_schema"]
self.assertEqual(
diff --git a/torch/library.py b/torch/library.py
index 6bd0bc9879..8d80bdef85 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -1,5 +1,5 @@
from ._ops import OpOverload
-from typing import Any, Optional, Set, List, Union, Callable
+from typing import Any, Optional, Set, List, Union, Callable, Tuple, Dict, Sequence
import traceback
import torch
import weakref
@@ -9,7 +9,7 @@ import re
import contextlib
import sys
import warnings
-from torch._library.custom_ops import custom_op, _maybe_get_opdef, device_types_t
+from torch._library.custom_ops import custom_op, _maybe_get_opdef, device_types_t, CustomOpDef
import torch._library as _library
@@ -743,3 +743,104 @@ def get_ctx() -> "torch._library.abstract_impl.AbstractImplCtx":
(see :func:`torch.library.register_fake` for more usage details.
"""
return torch._library.abstract_impl.global_ctx_getter()
+
+
+_OPCHECK_DEFAULT_UTILS = (
+ "test_schema",
+ "test_autograd_registration",
+ "test_faketensor",
+ "test_aot_dispatch_dynamic",
+)
+
+
+def opcheck(
+ op: Union[torch._ops.OpOverload, torch._ops.OpOverloadPacket, CustomOpDef],
+ args: Tuple[Any, ...],
+ kwargs: Optional[Dict[str, Any]] = None,
+ *,
+ test_utils: Union[str, Sequence[str]] = _OPCHECK_DEFAULT_UTILS,
+ raise_exception: bool = True,
+) -> Dict[str, str]:
+ """Given an operator and some sample arguments, tests if the operator is
+ registered correctly.
+
+ That is, when you use the torch.library/TORCH_LIBRARY APIs to create a
+    custom op, you specify metadata (e.g. mutability info) about the custom op,
+    and these APIs require that the functions you pass them satisfy certain
+    properties (e.g. no data pointer access in the fake/meta/abstract kernel).
+    ``opcheck`` tests this metadata and these properties.
+
+ Concretely, we test the following:
+ - test_schema: if the operator's schema is correct.
+ - test_autograd_registration: if autograd was registered correctly.
+ - test_faketensor: If the operator has a FakeTensor kernel
+ (and if it is correct). The FakeTensor kernel is necessary (
+ but not sufficient) for the operator to work with PyTorch compilation
+ APIs (torch.compile/export/FX).
+ - test_aot_dispatch_dynamic: If the operator has correct behavior
+ with PyTorch compilation APIs (torch.compile/export/FX).
+ This checks that the outputs (and gradients, if applicable) are the
+ same under eager-mode PyTorch and torch.compile.
+ This test is a superset of ``test_faketensor``.
+
+ For best results, please call ``opcheck`` multiple times with a
+ representative set of inputs. If your operator supports
+ autograd, please use ``opcheck`` with inputs with ``requires_grad = True``;
+ if your operator supports multiple devices (e.g. CPU and CUDA), please
+ use ``opcheck`` with inputs on all supported devices.
+
+ Args:
+ op: The operator. Must either be a function decorated with
+ :func:`torch.library.custom_op` or an OpOverload/OpOverloadPacket
+ found in torch.ops.* (e.g. torch.ops.aten.sin, torch.ops.mylib.foo)
+ args: The args to the operator
+ kwargs: The kwargs to the operator
+ test_utils: Tests that we should run. Default: all of them.
+ Example: ("test_schema", "test_faketensor")
+ raise_exception: If we should raise an exception on the first
+ error. If False, we will return a dict with information
+ on if each test passed or not.
+
+ .. warning::
+
+ opcheck and :func:`torch.autograd.gradcheck` test different things;
+ opcheck tests if your usage of torch.library APIs is correct while
+ :func:`torch.autograd.gradcheck` tests if your autograd formula is
+ mathematically correct. Use both to test custom ops that support
+ gradient computation.
+
+ Example:
+
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
+ >>> @torch.library.custom_op("mylib::numpy_mul", mutates_args=())
+    >>> def numpy_mul(x: Tensor, y: float) -> Tensor:
+    >>>     x_np = x.numpy(force=True)
+    >>>     z_np = x_np * y
+    >>>     return torch.from_numpy(z_np).to(x.device)
+    >>>
+    >>> @numpy_mul.register_fake
+    >>> def _(x, y):
+    >>>     return torch.empty_like(x)
+    >>>
+    >>> def setup_context(ctx, inputs, output):
+    >>>     y, = inputs
+    >>>     ctx.y = y
+    >>>
+    >>> def backward(ctx, grad):
+    >>>     return grad * ctx.y, None
+    >>>
+    >>> numpy_mul.register_autograd(backward, setup_context=setup_context)
+    >>>
+    >>> sample_inputs = [
+    >>>     (torch.randn(3), 3.14),
+    >>>     (torch.randn(2, 3, device='cuda'), 2.718),
+    >>>     (torch.randn(1, 10, requires_grad=True), 1.234),
+    >>>     (torch.randn(64, 64, device='cuda', requires_grad=True), 90.18),
+    >>> ]
+    >>>
+    >>> for args in sample_inputs:
+    >>>     torch.library.opcheck(numpy_mul, args)
+
+ """
+ import torch.testing._internal.optests as optests
+ return optests.opcheck(op, args, kwargs, test_utils=test_utils, raise_exception=raise_exception)
diff --git a/torch/testing/_internal/optests/generate_tests.py b/torch/testing/_internal/optests/generate_tests.py
index 098c2b4cfd..70ee482748 100644
--- a/torch/testing/_internal/optests/generate_tests.py
+++ b/torch/testing/_internal/optests/generate_tests.py
@@ -10,7 +10,7 @@ import re
import tempfile
import threading
import unittest
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import torch
@@ -18,6 +18,7 @@ import torch._dynamo
import torch.utils._pytree as pytree
from torch._dynamo.utils import clone_input
+from torch._library.custom_ops import CustomOpDef
from torch._subclasses.schema_check_mode import SchemaCheckMode
from torch._utils_internal import get_file_path_2
from torch.overrides import TorchFunctionMode
@@ -620,48 +621,19 @@ def should_print_better_repro() -> None:
def opcheck(
- op: torch._ops.OperatorBase,
+ op: Union[torch._ops.OpOverload, torch._ops.OpOverloadPacket, CustomOpDef],
args: Tuple[Any, ...],
kwargs: Optional[Dict[str, Any]] = None,
*,
- test_utils: Union[str, List[str]] = DEFAULT_TEST_UTILS,
+ test_utils: Union[str, Sequence[str]] = DEFAULT_TEST_UTILS,
raise_exception: bool = True,
) -> Dict[str, str]:
- """Given an operator and some sample arguments, tests if the operator is
- registered correctly.
-
- We test the following (which are important for correctness in eager-mode
- PyTorch and with torch.compile):
- - test_schema: if the operator's schema is correct.
- - test_autograd_registration: if autograd was registered correctly,
- i.e. to the correct DispatchKey.
- - test_faketensor: If the operator has a FakeTensor implementation
- (and if it is correct).
- - test_aot_dispatch_static: If the operator works with
- AOTAutograd/AOTDispatch, which is one of the parts in the PT2 stack.
- Checks that the outputs (and gradients, if they are computable)
- of the operator are the same under eager-mode PyTorch and torch.compile.
- - test_aot_dispatch_dynamic: Same as aot_dispatch_static, but
- tests dynamic shapes instead of static shapes.
-
- For best results, please call ``opcheck`` multiple times with a
- representative set of inputs. For example, if your operator supports
- autograd, please use ``opcheck`` with inputs that require_grad.
-
- Args:
- op: The operator. Should look like torch.ops.aten.foo
- args: The args to the operator
- kwargs: The kwargs to the operator
- test_utils: Tests that we should run. Default: all of them.
- Example: ["test_schema", "test_faketensor"]
- raise_exception: If we should raise an exception on the first
- error. If False, we will return a dict with information
- on if each test passed or not.
-
- """
+ """See torch.library.opcheck for docstring"""
if kwargs is None:
kwargs = {}
+ if isinstance(op, CustomOpDef):
+ op = op._opoverload
if isinstance(op, torch._ops.OpOverloadPacket):
op = resolve_unique_overload_or_throw(op)
if not isinstance(op, torch._ops.OpOverload):
|
2.41.0
|
c268a710d2d844b32d9f772d8d507ee36e3d64b
|
Tue, 23 Apr 2024 22:21:50 +0000
|
[PATCH 0540/1000] Revert "AOTAutograd: gate view-replay behind config, not the default (#124488)"
|
This reverts commit 47330ca13321a42d4f1e75f091e17183227ae073. Reverted https://github.com/pytorch/pytorch/pull/124488 on behalf of https://github.com/seemethere because the submodule update causes xla to start failing; see the job on the branch: https://github.com/pytorch/pytorch/actions/runs/8789091145/job/24124569508. Dr. CI incorrectly marked this as flaky and allowed the merge ([comment](https://github.com/pytorch/pytorch/pull/124488#issuecomment-2073568651))
|
diff --git a/.github/ci_commit_pins/xla.txt b/.github/ci_commit_pins/xla.txt
index 107774b06a..259a97684d 100644
--- a/.github/ci_commit_pins/xla.txt
+++ b/.github/ci_commit_pins/xla.txt
@@ -1 +1 @@
-62a2b11c8ae00bab8740b8b15f88c8596305d2e1
+58a412cb271a3f98ae2e01fd1d24bdbb66645d4e
diff --git a/test/functorch/test_aotdispatch.py b/test/functorch/test_aotdispatch.py
index 5daef5dbc9..4f7545ddf2 100644
--- a/test/functorch/test_aotdispatch.py
+++ b/test/functorch/test_aotdispatch.py
@@ -3261,7 +3261,6 @@ def forward(self, tangents_1):
return lambda f: aot_function(f, fw_compiler=lambda g, _: partial(wrapper, g))
- @patch("functorch.compile.config.view_replay_for_aliased_outputs", True)
def test_output_aliases_input_view_meta_replay(self):
@self._compile_and_erase_bases(0)
def f(a):
@@ -3275,7 +3274,6 @@ def forward(self, tangents_1):
str(out.grad_fn.__class__), """<class 'ViewBackward0'>"""
)
- @patch("functorch.compile.config.view_replay_for_aliased_outputs", True)
def test_output_aliases_intermediate_view_meta_replay(self):
@self._compile_and_erase_bases(0, 1)
def f(a):
@@ -3295,7 +3293,6 @@ def forward(self, tangents_1):
str(out2.grad_fn.__class__), """<class 'ViewBackward0'>"""
)
- @patch("functorch.compile.config.view_replay_for_aliased_outputs", True)
def test_output_aliases_output_view_meta_replay(self):
@self._compile_and_erase_bases(1)
def f(a):
diff --git a/test/inductor/test_torchinductor_opinfo.py b/test/inductor/test_torchinductor_opinfo.py
index ac4d96846a..7319656905 100644
--- a/test/inductor/test_torchinductor_opinfo.py
+++ b/test/inductor/test_torchinductor_opinfo.py
@@ -254,7 +254,6 @@ inductor_expected_failures_single_sample["cuda"] = {
# intentionally not handled
intentionally_not_handled = {
- ("as_strided", "partial_views"): {b8, f16, f32, f64, i32, i64},
"resize_": {b8, f16, f32, f64, i32, i64},
"resize_as_": {b8, f16, f32, f64, i32, i64},
}
diff --git a/torch/_functorch/_aot_autograd/functional_utils.py b/torch/_functorch/_aot_autograd/functional_utils.py
index 25197e9eea..b863f40efa 100644
--- a/torch/_functorch/_aot_autograd/functional_utils.py
+++ b/torch/_functorch/_aot_autograd/functional_utils.py
@@ -18,7 +18,6 @@ from torch.utils._python_dispatch import (
is_traceable_wrapper_subclass,
transform_subclass,
)
-from .. import config
aot_joint_log = getArtifactLogger(__name__, "aot_joint_graph")
@@ -220,7 +219,7 @@ def gen_alias_from_base(
# In summary, we use the fact that FunctionalTensorWrapper saves the view
# functions applied to itself (collected during functionalization) so as
# to replay them (view functions) on the aliased_base_tensor.
- if config.view_replay_for_aliased_outputs and target_functional_tensor is not None:
+ if target_functional_tensor is not None:
from .schemas import FunctionalTensorMetadataEq
assert isinstance(target_functional_tensor, FunctionalTensorMetadataEq)
@@ -238,10 +237,11 @@ def gen_alias_from_base(
#
# In order for this to work, we should have a way to replace those
# symbolic shapes with concrete numbers.
- aot_joint_log.info(
+ aot_joint_log.warning(
"could not reconstruct view by re-applying a ViewMeta sequence. "
+ "This error is possibly caused by dynamic shapes. "
"Fallbacking to reconstruction using as_strided. "
- "Reason: %s",
+ "Error message: %s",
str(e),
)
else:
diff --git a/torch/_functorch/config.py b/torch/_functorch/config.py
index cc580865d0..c3f34fa273 100644
--- a/torch/_functorch/config.py
+++ b/torch/_functorch/config.py
@@ -41,18 +41,6 @@ static_weight_shapes = True
# Applies CSE to the graph before partitioning
cse = True
-# When AOTAutograd regenerates aliased graph outputs,
-# attempte to use functionalization's view-replay logic
-# before falling back to the autograd engine's view replay or as_strided.
-# This can have some perf implications
-# (although for many models this will not matter).
-# (1) If you have many view ops chained together, replaying all of them
-# at runtime can have more overhead compared to a single as_strided call
-# (2) If you are doing training, AsStridedBackward is quite slow,
-# and the individual view op backward formulas will likely be faster.
-# (3) Some backends like XLA do not support as_strided
-view_replay_for_aliased_outputs = False
-
# Restricts the amount of computation AOTAutograd can do.
# NB: We have essentially disabled this heuristic now. However, this is kept
# here for now in case it's useful. Setting it low can artificially reduce the
|
2.41.0
|
8ffdf930c5bb93f547ff94a5d3af1f194cddbbc
|
Tue, 23 Apr 2024 22:24:27 +0000
|
[PATCH 0541/1000] Revert ARC jobs to run on classic infra again (#124748)
|
ARC jobs are too unstable right now. We're going to mitigate this by: 1. Reverting ARC jobs to run on the classic infra (this PR). 2. Spinning up new jobs in parallel, marked as unstable, to run on the new infra (coming soon). More details in https://github.com/pytorch/ci-infra/issues/149 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124748 Approved by: https://github.com/seemethere, https://github.com/zxiiro, https://github.com/malfet, https://github.com/jeanschmidt
|
diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
index 8f54248101..713e454ac4 100644
--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml
@@ -37,7 +37,7 @@ jobs:
linux-jammy-py3_8-gcc11-build:
name: linux-jammy-py3.8-gcc11
- uses: ./.github/workflows/_linux-build-rg.yml
+ uses: ./.github/workflows/_linux-build-label.yml
with:
build-environment: linux-jammy-py3.8-gcc11
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
@@ -74,7 +74,7 @@ jobs:
linux-jammy-py3_8-gcc11-no-ops:
name: linux-jammy-py3.8-gcc11-no-ops
- uses: ./.github/workflows/_linux-build-rg.yml
+ uses: ./.github/workflows/_linux-build-label.yml
with:
build-environment: linux-jammy-py3.8-gcc11-no-ops
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
@@ -85,7 +85,7 @@ jobs:
linux-jammy-py3_8-gcc11-pch:
name: linux-jammy-py3.8-gcc11-pch
- uses: ./.github/workflows/_linux-build-rg.yml
+ uses: ./.github/workflows/_linux-build-label.yml
with:
build-environment: linux-jammy-py3.8-gcc11-pch
docker-image-name: pytorch-linux-jammy-py3.8-gcc11
@@ -96,7 +96,7 @@ jobs:
linux-jammy-py3_10-clang15-asan-build:
name: linux-jammy-py3.10-clang15-asan
- uses: ./.github/workflows/_linux-build-rg.yml
+ uses: ./.github/workflows/_linux-build-label.yml
with:
build-environment: linux-jammy-py3.10-clang15-asan
docker-image-name: pytorch-linux-jammy-py3-clang15-asan
@@ -125,7 +125,7 @@ jobs:
linux-focal-py3_8-clang10-onnx-build:
name: linux-focal-py3.8-clang10-onnx
- uses: ./.github/workflows/_linux-build-rg.yml
+ uses: ./.github/workflows/_linux-build-label.yml
with:
build-environment: linux-focal-py3.8-clang10-onnx
docker-image-name: pytorch-linux-focal-py3-clang10-onnx
@@ -148,7 +148,7 @@ jobs:
linux-focal-py3_8-clang10-build:
name: linux-focal-py3.8-clang10
- uses: ./.github/workflows/_linux-build-rg.yml
+ uses: ./.github/workflows/_linux-build-label.yml
with:
build-environment: linux-focal-py3.8-clang10
docker-image-name: pytorch-linux-focal-py3.8-clang10
@@ -177,7 +177,7 @@ jobs:
linux-focal-py3_11-clang10-build:
name: linux-focal-py3.11-clang10
- uses: ./.github/workflows/_linux-build-rg.yml
+ uses: ./.github/workflows/_linux-build-label.yml
with:
build-environment: linux-focal-py3.11-clang10
docker-image-name: pytorch-linux-focal-py3.11-clang10
diff --git a/.github/workflows/slow.yml b/.github/workflows/slow.yml
index 56a45a507e..85e9b516aa 100644
--- a/.github/workflows/slow.yml
+++ b/.github/workflows/slow.yml
@@ -138,7 +138,7 @@ jobs:
linux-jammy-py3_10-clang15-asan-build:
name: linux-jammy-py3.10-clang15-asan
- uses: ./.github/workflows/_linux-build-rg.yml
+ uses: ./.github/workflows/_linux-build-label.yml
with:
build-environment: linux-jammy-py3.10-clang15-asan
docker-image-name: pytorch-linux-jammy-py3-clang15-asan
|
2.41.0
|
c4ad87396df479ade9214433cd85d235877cc14
|
Tue, 23 Apr 2024 23:12:17 +0000
|
[PATCH 0543/1000] [TorchElastic] Option to enable TCPStore libuv backed (#124684)
|
Summary: The libuv backend isn't enabled in PTD by default right now. Add an option to enable the libuv backend to improve scaling of the rendezvous process. This change tries not to make assumptions about the default libuv setting in TCPStore, since it may change in the next release. Test Plan: CI Differential Revision: D56435815 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124684 Approved by: https://github.com/d4l3k, https://github.com/XilunWu
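As a usage illustration, here is a minimal sketch mirroring the new unit test:
```
import socket

from torch.distributed.elastic.utils.distributed import create_c10d_store

# Explicitly opt in to the libuv backend for the rendezvous TCPStore.
# Passing use_libuv=None (the default) keeps whatever default TCPStore ships with.
store = create_c10d_store(
    is_server=True,
    server_addr=socket.gethostname(),
    server_port=0,
    timeout=2,
    world_size=1,
    wait_for_workers=False,
    use_libuv=True,
)
print(store.libuvBackend)  # True when the libuv backend is active
```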
|
diff --git a/test/distributed/elastic/utils/distributed_test.py b/test/distributed/elastic/utils/distributed_test.py
index 9e9c85e8a6..ded2c7e9d4 100644
--- a/test/distributed/elastic/utils/distributed_test.py
+++ b/test/distributed/elastic/utils/distributed_test.py
@@ -126,6 +126,33 @@ class DistributedUtilTest(TestCase):
timeout=1,
)
+ def test_create_store_with_libuv_support(self):
+ world_size = 1
+ wait_for_workers = False
+ localhost = socket.gethostname()
+
+ store = create_c10d_store(
+ is_server=True,
+ server_addr=localhost,
+ server_port=0,
+ timeout=2,
+ world_size=world_size,
+ wait_for_workers=wait_for_workers,
+ use_libuv=False,
+ )
+ self.assertFalse(store.libuvBackend)
+
+ store = create_c10d_store(
+ is_server=True,
+ server_addr=localhost,
+ server_port=0,
+ timeout=2,
+ world_size=world_size,
+ wait_for_workers=wait_for_workers,
+ use_libuv=True,
+ )
+ self.assertTrue(store.libuvBackend)
+
def test_port_already_in_use_on_server(self):
# try to create the TCPStore server twice on the same port
# the second should fail due to a port conflict
diff --git a/torch/distributed/elastic/utils/distributed.py b/torch/distributed/elastic/utils/distributed.py
index abbe114f99..808b965d50 100644
--- a/torch/distributed/elastic/utils/distributed.py
+++ b/torch/distributed/elastic/utils/distributed.py
@@ -6,8 +6,10 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
+import functools
import socket
from contextlib import closing
+from typing import Optional
import torch.distributed as dist
from torch.distributed.elastic.utils.logging import get_logger
@@ -30,6 +32,7 @@ def create_c10d_store(
timeout: float = (60 * 10), # 10 min
wait_for_workers: bool = True,
retries=3,
+ use_libuv: Optional[bool] = None,
):
if server_port == -1 and world_size > 1:
raise ValueError(
@@ -56,7 +59,8 @@ def create_c10d_store(
)
try:
- store = dist.TCPStore(
+ store_builder = functools.partial(
+ dist.TCPStore,
host_name=server_addr,
port=port,
world_size=world_size,
@@ -64,6 +68,11 @@ def create_c10d_store(
timeout=datetime.timedelta(seconds=timeout),
wait_for_workers=wait_for_workers,
)
+ if use_libuv is None:
+            # The TCPStore default backend may change; don't specify it unless we are explicitly told to do so.
+ store = store_builder()
+ else:
+ store = store_builder(use_libuv=use_libuv)
# skips full rank check when we don't have to wait for all workers
if wait_for_workers:
_check_full_rank(store, world_size)
|
2.41.0
|
8f9f37b03fe74a7bc499ee7b01467b4513fa212
|
Tue, 23 Apr 2024 12:45:26 -0700
|
[PATCH 0545/1000] [FSDP2] Added test to show rank 0 broadcast meta-device flow (#124651)
|
This PR includes two things: 1. Changes to support `load_state_dict(assign=True)` - These changes are not ideal, but until we have `DTensor` padding the local tensor and general `swap_tensors` adoption, we may need to make do. 2. An example of how to convert a full state dict on rank 0 to a sharded state dict on all ranks via broadcast - ~~To-do: check for `recordStream` from the funcol broadcast; if it is being called, remediate either via an `async_op=False` c10d broadcast or `TORCH_NCCL_AVOID_RECORD_STREAMS=1`~~ switched to using a c10d `async_op=False` broadcast - To-do: check the broadcast latency since this does not use any coalescing Pull Request resolved: https://github.com/pytorch/pytorch/pull/124651 Approved by: https://github.com/wanchaol
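A condensed sketch of the broadcast-and-shard flow exercised by the new test (the function name and signature here are illustrative; `full_sd` is the rank-0 CPU full state dict and `model` is the meta-device FSDP2 model):
```
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed._tensor import distribute_tensor

def load_full_state_dict_via_broadcast(model, full_sd, rank):
    # The meta-device model's state dict supplies each DTensor's mesh/placements.
    meta_sharded_sd = model.state_dict()
    sharded_sd = {}
    for name, meta_param in meta_sharded_sd.items():
        if rank == 0:
            full_tensor = full_sd[name].detach().cuda()
        else:
            full_tensor = torch.empty(
                meta_param.size(), device="cuda", dtype=meta_param.dtype
            )
        mesh = meta_param.device_mesh
        dist.broadcast(full_tensor, src=0, group=mesh.get_group(0))
        # Shard the broadcasted full tensor according to the meta parameter's placements.
        sharded_sd[name] = nn.Parameter(
            distribute_tensor(full_tensor, mesh, meta_param.placements)
        )
    model.load_state_dict(sharded_sd, assign=True)
```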
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_init.py b/test/distributed/_composable/fsdp/test_fully_shard_init.py
index d52b20eaf2..73b74ecefb 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_init.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_init.py
@@ -5,6 +5,7 @@ import unittest
from typing import List
import torch
+import torch.distributed as dist
import torch.nn as nn
from torch.distributed._composable import replicate
from torch.distributed._composable.fsdp import fully_shard
@@ -14,7 +15,13 @@ from torch.distributed._composable.fsdp._fsdp_init import (
)
from torch.distributed._composable.fsdp._fsdp_param import ParamModuleInfo
from torch.distributed._composable.fsdp._fsdp_param_group import _get_param_module_infos
-from torch.distributed._tensor import DeviceMesh, DTensor, Replicate, Shard
+from torch.distributed._tensor import (
+ DeviceMesh,
+ distribute_tensor,
+ DTensor,
+ Replicate,
+ Shard,
+)
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (
ColwiseParallel,
@@ -24,6 +31,11 @@ from torch.distributed.tensor.parallel import (
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP
from torch.testing._internal.common_utils import run_tests
+from torch.testing._internal.distributed._tensor.common_dtensor import (
+ ModelArgs,
+ Transformer,
+ TransformerBlock,
+)
class TestFullyShardDeviceTensor(FSDPTestMultiThread):
@@ -554,6 +566,91 @@ class TestFullyShardMetaDeviceInit(FSDPTestMultiThread):
with self.assertRaisesRegex(RuntimeError, error_regex):
model(inp)
+ @unittest.skipIf(not TEST_CUDA, "no cuda")
+ def test_rank0_broadcast_meta_device_init(self):
+ model_args = ModelArgs(dropout_p=0.0)
+ # Assume we have a CPU full state dict on rank 0
+ if self.rank == 0:
+ torch.manual_seed(42)
+ ref_model = Transformer(model_args)
+ full_sd = ref_model.state_dict()
+ for param in full_sd.values():
+ self.assertEqual(param.device, torch.device("cpu"))
+
+ # Initialize the sharded model on meta device
+ fsdp_mesh = init_device_mesh("cuda", (self.world_size,))
+ with torch.device("meta"):
+ model = Transformer(model_args)
+ for module in model.modules():
+ if isinstance(module, TransformerBlock):
+ fully_shard(module, mesh=fsdp_mesh)
+ fully_shard(model, mesh=fsdp_mesh)
+ for param in model.parameters():
+ self.assertEqual(param.device, torch.device("meta"))
+
+ # Construct a sharded state dict from the rank 0 full state dict by
+ # broadcasting and sharding
+ meta_sharded_sd = model.state_dict()
+ sharded_sd = {}
+ if self.rank == 0:
+ self.assertEqual(len(meta_sharded_sd), len(full_sd))
+ self.assertEqual(list(meta_sharded_sd.keys()), list(full_sd.keys()))
+ for (param_name, full_param), sharded_meta_param in zip(
+ full_sd.items(), meta_sharded_sd.values()
+ ):
+ full_param = full_param.detach().cuda()
+ mesh = sharded_meta_param.device_mesh
+ dist.broadcast(full_param, src=0, group=mesh.get_group(0))
+ sharded_tensor = distribute_tensor(
+ full_param, mesh, sharded_meta_param.placements
+ )
+ sharded_sd[param_name] = nn.Parameter(sharded_tensor)
+ else:
+ for param_name, sharded_meta_param in meta_sharded_sd.items():
+ full_tensor = torch.empty(
+ sharded_meta_param.size(),
+ device="cuda",
+ dtype=sharded_meta_param.dtype,
+ )
+ mesh = sharded_meta_param.device_mesh
+ dist.broadcast(full_tensor, src=0, group=mesh.get_group(0))
+ sharded_tensor = distribute_tensor(
+ full_tensor, mesh, sharded_meta_param.placements
+ )
+ sharded_sd[param_name] = nn.Parameter(sharded_tensor)
+
+ model.load_state_dict(sharded_sd, assign=True)
+ for param in model.parameters():
+ self.assertIsInstance(param, DTensor)
+ self.assertEqual(param.device.type, "cuda")
+
+ # Construct the reference model on nonzero ranks by broadcasting the
+ # unsharded model from rank 0 and sharding on all ranks
+ if self.rank != 0:
+ ref_model = Transformer(model_args)
+ for param in ref_model.parameters():
+ torch.distributed.broadcast(param.detach(), src=0)
+ for module in ref_model.modules():
+ if isinstance(module, TransformerBlock):
+ fully_shard(module, mesh=fsdp_mesh)
+ fully_shard(ref_model, mesh=fsdp_mesh)
+
+ for (param_name, param), (ref_param_name, ref_param) in zip(
+ model.named_parameters(), ref_model.named_parameters()
+ ):
+ self.assertEqual(param_name, ref_param_name)
+ self.assertEqual(param, ref_param)
+
+ # Check one forward/backward for parity
+ inp = torch.randint(0, model_args.vocab_size, (2, 16), device="cuda")
+ loss = model(inp).sum()
+ loss.backward()
+ ref_loss = ref_model(inp).sum()
+ ref_loss.backward()
+ self.assertEqual(loss, ref_loss)
+ for param, ref_param in zip(model.parameters(), ref_model.parameters()):
+ self.assertEqual(param.grad, ref_param.grad)
+
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_state_dict.py b/test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
index 2a81595894..3687667250 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_state_dict.py
@@ -148,9 +148,14 @@ class TestFullyShardStateDict(FSDPTest):
param.to_local(),
torch.ones_like(param.to_local()) * new_fill_value,
)
- self.assertEqual(
- param.to_local().data_ptr(), param_name_to_data_ptr[param_name]
- )
+ local_param = param.to_local()
+ # Only guarantee that the local tensor's data pointer does not
+ # change if the sharding was even (i.e. no padding); otherwise,
+ # FSDP may re-pad the local tensor, changing its data pointer
+ if local_param.size(0) * param.device_mesh.size() == param.size(0):
+ self.assertEqual(
+ local_param.data_ptr(), param_name_to_data_ptr[param_name]
+ )
if __name__ == "__main__":
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param.py b/torch/distributed/_composable/fsdp/_fsdp_param.py
index 4f1bc944f9..19bba5a9ba 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param.py
@@ -161,6 +161,13 @@ class FSDPParam:
self._init_extensions()
self.all_gather_outputs: List[torch.Tensor] = []
self._param_fqn: Optional[str] = None # prefixed from root module
+ # TODO: Remove this padding logic once DTensor pads the local tensor:
+ # https://github.com/pytorch/pytorch/issues/113045
+ self._post_load_hook_handle = (
+ module_info.module.register_load_state_dict_post_hook(
+ lambda *args, **kwargs: self.reset_sharded_param()
+ )
+ )
@torch.no_grad()
def _init_sharded_param(self, param: nn.Parameter, device: torch.device):
@@ -526,6 +533,31 @@ class FSDPParam:
f"Expects to be in one of {states}, not {self.sharded_state}"
)
+ def reset_sharded_param(self):
+ # For ops like `nn.Module._apply` or `load_state_dict(assign=True)`
+ # that change the sharded parameter tensor, we may need to re-pad the
+ # sharded local tensor and re-save the reference.
+ module_info = self._module_info
+ new_param = getattr(module_info.module, module_info.param_name)
+ if new_param is not self.sharded_param:
+ if torch.__future__.get_swap_module_params_on_conversion():
+ raise AssertionError(
+ f"Expects swap_tensors to preserve object but got {new_param} "
+ f"instead of {self.sharded_param}"
+ )
+ self.sharded_param = new_param
+ local_tensor = new_param._local_tensor
+ padded_sharded_size = self.padded_sharded_param_size
+ if local_tensor.size() != padded_sharded_size:
+ padded_local_tensor = local_tensor.new_zeros(padded_sharded_size)
+ padded_local_tensor[: local_tensor.size(0)].copy_(local_tensor)
+ local_tensor = padded_local_tensor
+ if self.pin_memory and not local_tensor.is_pinned():
+ local_tensor = local_tensor.cpu().pin_memory()
+ self._sharded_param_data = local_tensor.view(-1)
+ assert isinstance(self.sharded_param, DTensor) # mypy
+ self.sharded_param._local_tensor = local_tensor[: self.sharded_size[0]]
+
def alloc_storage(tensor: torch.Tensor) -> None:
size = tensor.numel() * tensor.itemsize
diff --git a/torch/distributed/_composable/fsdp/fully_shard.py b/torch/distributed/_composable/fsdp/fully_shard.py
index 11aaf758a6..a46a103b3d 100644
--- a/torch/distributed/_composable/fsdp/fully_shard.py
+++ b/torch/distributed/_composable/fsdp/fully_shard.py
@@ -5,7 +5,7 @@ import typing_extensions
import torch
import torch.nn as nn
from torch.distributed._composable import contract
-from torch.distributed._tensor import DeviceMesh, DTensor
+from torch.distributed._tensor import DeviceMesh
from ._fsdp_api import MixedPrecisionPolicy, OffloadPolicy
from ._fsdp_common import FSDPMeshInfo, HSDPMeshInfo
@@ -286,26 +286,7 @@ class FSDP:
# https://github.com/pytorch/pytorch/issues/113045
with torch.no_grad():
for fsdp_param in fsdp_param_group.fsdp_params:
- module_info = fsdp_param._module_info
- new_param = getattr(module_info.module, module_info.param_name)
- if new_param is not fsdp_param.sharded_param:
- raise AssertionError(
- "Expects swap_tensors to preserve object but got "
- f"{new_param} instead of {fsdp_param.sharded_param}"
- )
- local_tensor = new_param._local_tensor
- padded_sharded_size = fsdp_param.padded_sharded_param_size
- if local_tensor.size() != padded_sharded_size:
- padded_local_tensor = local_tensor.new_zeros(padded_sharded_size)
- padded_local_tensor[: local_tensor.size(0)].copy_(local_tensor)
- local_tensor = padded_local_tensor
- if fsdp_param.pin_memory and not local_tensor.is_pinned():
- local_tensor = local_tensor.cpu().pin_memory()
- fsdp_param._sharded_param_data = local_tensor.view(-1)
- assert isinstance(fsdp_param.sharded_param, DTensor) # mypy
- fsdp_param.sharded_param._local_tensor = local_tensor[
- : fsdp_param.sharded_size[0]
- ]
+ fsdp_param.reset_sharded_param()
return ret
|
2.41.0
|
933af270987ad816cf27d36e86a612ae3240ef5
|
Mon, 22 Apr 2024 12:48:40 -0700
|
[PATCH 0547/1000] Switch to predispatch (#123573)
|
This PR switches export IR from aot-dispatch to pre-dispatch IR. **What is pre-dispatch IR and why should you care?** Currently the default IR returned by torch.export can contain only functional ATen operators after ALL pytorch dispatcher decompositions (for example, CompositeImplicitAutograd) run. In contrast, pre-dispatch IR refers to an IR that can contain all functional ATen operators (i.e., not just from the core subset), before any decomposition happens, as well as operators that manipulate autograd state. Pre-dispatch IR closely resembles eager PyTorch computation, but is still functional and serializable by torch.export. As a result: - You can train the pre-dispatch IR in eager mode as the IR contains necessary information for the autograd engine to automatically generate a backward graph. - You can write sound graph transformations more easily as the IR is functional. - Since it is an ATen IR, it is still normalized. For example, torch.add has multiple overloads, but aten.add.Tensor is unique in this IR. If you want to get the core aten IR out of `torch.export`, you will need to: ``` ep = torch.export.export(M(), inputs) ep_for_core_aten = ep.run_decompositions() ``` Differential Revision: [D56273267](https://our.internmc.facebook.com/intern/diff/D56273267) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123573 Approved by: https://github.com/gmagogsfm
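A minimal sketch of what this means in practice (the toy module is illustrative; the behavior matches the updated tests in this diff): pre-dispatch export keeps ops such as `aten.linear` un-decomposed, while `run_decompositions()` lowers them to the core ATen form (e.g. `permute` + `addmm`).
```
import torch

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = torch.nn.Linear(10, 1)

    def forward(self, x):
        return torch.nn.functional.relu(self.lin(x))

ep = torch.export.export(M(), (torch.randn(5, 10),))
print(ep.graph_module.code)        # pre-dispatch IR: contains torch.ops.aten.linear.default
core_ep = ep.run_decompositions()
print(core_ep.graph_module.code)   # core ATen IR: linear decomposed (e.g. permute + addmm)
```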
|
diff --git a/test/distributed/_tensor/experimental/test_tp_transform.py b/test/distributed/_tensor/experimental/test_tp_transform.py
index 636870264f..fc094150cc 100644
--- a/test/distributed/_tensor/experimental/test_tp_transform.py
+++ b/test/distributed/_tensor/experimental/test_tp_transform.py
@@ -72,10 +72,10 @@ class TensorParallelTest(DTensorTestBase):
inputs = (torch.randn(7, 3, requires_grad=False).to(device=self.device_type),)
with torch.no_grad():
res = model(*inputs)
- exported_program = torch.export.export(
- model,
- inputs,
- )
+ exported_program = torch.export.export(
+ model,
+ inputs,
+ ).run_decompositions()
tp_exported_program = tensor_parallel_transformation(
exported_program,
self.rank,
@@ -110,10 +110,10 @@ class TensorParallelTest(DTensorTestBase):
with torch.inference_mode():
res = model(*inputs)
- exported_program = torch.export.export(
- model,
- inputs,
- )
+ exported_program = torch.export.export(
+ model,
+ inputs,
+ ).run_decompositions()
tp_exported_program = tensor_parallel_transformation(
exported_program,
self.rank,
@@ -146,10 +146,10 @@ class TensorParallelTest(DTensorTestBase):
with torch.inference_mode():
res = model(*inputs)
- exported_program = torch.export.export(
- model,
- inputs,
- )
+ exported_program = torch.export.export(
+ model,
+ inputs,
+ ).run_decompositions()
tp_exported_program = tensor_parallel_transformation(
exported_program,
self.rank,
diff --git a/test/export/test_experimental.py b/test/export/test_experimental.py
index 66f1a60ca9..d7d9ce7f4b 100644
--- a/test/export/test_experimental.py
+++ b/test/export/test_experimental.py
@@ -14,6 +14,8 @@ from torch.testing import FileCheck
@unittest.skipIf(not torch._dynamo.is_dynamo_supported(), "dynamo isn't supported")
class TestExperiment(TestCase):
+ # TODO AssertionError: Unknown tensor output kind: getitem_2
+ @unittest.expectedFailure
def test_with_buffer_as_submodule(self):
@_mark_strict_experimental
class B(torch.nn.Module):
diff --git a/test/export/test_export.py b/test/export/test_export.py
index f73d93a926..49cb4188e3 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -551,8 +551,6 @@ class TestExport(TestCase):
torch.allclose(ep.module()(torch.zeros(2, 3)), torch.ones(2, 3) * 21)
)
- # Predispatch has different expected results
- @testing.expectedFailureSerDerPreDispatch
def test_torch_fn(self):
class M1(torch.nn.Module):
def __init__(self):
@@ -567,7 +565,7 @@ class TestExport(TestCase):
x = x + x
return x
- ep1 = export(M1(), (torch.randn(3, 3),))
+ ep1 = export(M1(), (torch.randn(3, 3),)).run_decompositions()
expected_result = [
("linear_1", "builtin_function_or_method.linear"),
("linear_1", "builtin_function_or_method.linear"),
@@ -592,7 +590,9 @@ class TestExport(TestCase):
x = torch.add(x, x)
return x
- ep2 = export(M2(), (torch.randn(3, 3), torch.randn(3, 3), torch.randn(3)))
+ ep2 = export(
+ M2(), (torch.randn(3, 3), torch.randn(3, 3), torch.randn(3))
+ ).run_decompositions()
expected_result = [
("linear_1", "builtin_function_or_method.linear"),
("linear_1", "builtin_function_or_method.linear"),
@@ -2253,8 +2253,6 @@ def forward(self, arg_0):
)
)
- @testing.expectedFailureSerDerPreDispatch # .item call becomes aten.item in predispatch IR
- @testing.expectedFailurePreDispatchRunDecomp # assert name is still referring to item
def test_automatic_constrain_size(self):
class M(torch.nn.Module):
def forward(self, x, y):
@@ -2264,11 +2262,13 @@ def forward(self, arg_0):
ep = export(M(), (torch.tensor(1), torch.ones(4, 5)))
if is_non_strict_test(self._testMethodName):
- error_msg = r"Runtime assertion failed for _local_scalar_dense >= 0"
+ error_msg = r"Runtime assertion failed for item >= 0"
elif is_retracebility_test(self._testMethodName):
- error_msg = r"Runtime assertion failed for _local_scalar_dense_default >= 0"
+ error_msg = r"Runtime assertion failed for item_default >= 0"
else:
- error_msg = "_local_scalar_dense is outside of inline constraint \[0, 9223372036854775806\]."
+ error_msg = (
+ "item is outside of inline constraint \[0, 9223372036854775806\]."
+ )
with self.assertRaisesRegex(RuntimeError, error_msg):
_ = ep.module()(torch.tensor(-1), torch.randn(4, 5))
@@ -2316,8 +2316,6 @@ def forward(self, arg_0):
self.assertTrue(isinstance(node.meta["val"], (Tensor, int)))
@testing.expectedFailureNonStrict
- @testing.expectedFailureSerDerPreDispatch # .item() becomes aten.item in predispatch IR
- @testing.expectedFailurePreDispatchRunDecomp # Assert message is still using the old node name, so it shoudl fail
def test_export_with_inline_constraints(self):
class Module(torch.nn.Module):
def forward(self, x):
@@ -2335,7 +2333,7 @@ def forward(self, arg_0):
with self.assertRaisesRegex(
RuntimeError,
- r"_local_scalar_dense is outside of inline constraint \[4, 7\]",
+ r"item is outside of inline constraint \[4, 7\]",
) as cm:
ep.module()(torch.tensor([30]))
@@ -2894,8 +2892,6 @@ def forward(self, arg_0):
with self.assertRaisesRegex(ValueError, "Trying to flatten user inputs"):
exported_program.module()(torch.rand(2, 3), torch.rand(2, 3))
- @testing.expectedFailureSerDerPreDispatch # linear shouldn't decompose
- @testing.expectedFailurePreDispatchRunDecomp # no action needed here
def test_export_decomps_simple(self):
class M(torch.nn.Module):
def __init__(self):
@@ -2910,11 +2906,6 @@ def forward(self, arg_0):
ep = export(m, inp)
state_dict = ep.state_dict
- FileCheck().check_count("torch.ops.aten.t.default", 1, exactly=True).run(
- ep.graph_module.code
- )
- self.assertTrue(torch.allclose(ep.module()(*inp), m(*inp)))
-
core_aten_ep = ep.run_decompositions()
FileCheck().check_count("torch.ops.aten.permute.default", 1, exactly=True).run(
core_aten_ep.graph_module.code
@@ -3617,8 +3608,8 @@ def forward(self, arg_0):
inp = (torch.randn(4, 4),)
mod = Foo()
- ep_strict = torch.export.export(mod, inp)
- ep_non_strict = torch.export.export(mod, inp, strict=False)
+ ep_strict = torch.export.export(mod, inp).run_decompositions()
+ ep_non_strict = torch.export.export(mod, inp, strict=False).run_decompositions()
gm_unflat_non_strict = unflatten(ep_non_strict)
self.assertTrue(hasattr(gm_unflat_non_strict, "bar"))
@@ -3635,8 +3626,8 @@ graph():
%x : [num_users=1] = placeholder[target=x]
%weight : [num_users=1] = get_attr[target=weight]
%bias : [num_users=1] = get_attr[target=bias]
- %t : [num_users=1] = call_function[target=torch.ops.aten.t.default](args = (%weight,), kwargs = {})
- %addmm : [num_users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%bias, %x, %t), kwargs = {})
+ %permute : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%weight, [1, 0]), kwargs = {})
+ %addmm : [num_users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%bias, %x, %permute), kwargs = {})
return addmm""",
)
@@ -3707,9 +3698,8 @@ graph():
%x : [num_users=1] = placeholder[target=x]
%weight : [num_users=1] = get_attr[target=weight]
%bias : [num_users=1] = get_attr[target=bias]
- %t : [num_users=1] = call_function[target=torch.ops.aten.t.default](args = (%weight,), kwargs = {})
- %addmm : [num_users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%bias, %x, %t), kwargs = {})
- return addmm""",
+ %linear : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%x, %weight, %bias), kwargs = {})
+ return linear""",
)
self.assertExpectedInline(
str(gm_unflat_non_strict.bar_different.leaf.linear.graph).strip(),
@@ -3718,9 +3708,8 @@ graph():
%add_2 : [num_users=1] = placeholder[target=add_2]
%weight : [num_users=1] = get_attr[target=weight]
%bias : [num_users=1] = get_attr[target=bias]
- %t_1 : [num_users=1] = call_function[target=torch.ops.aten.t.default](args = (%weight,), kwargs = {})
- %addmm_1 : [num_users=1] = call_function[target=torch.ops.aten.addmm.default](args = (%bias, %add_2, %t_1), kwargs = {})
- return addmm_1""",
+ %linear_1 : [num_users=1] = call_function[target=torch.ops.aten.linear.default](args = (%add_2, %weight, %bias), kwargs = {})
+ return linear_1""",
)
gm_flat_non_strict = ep_non_strict.module()
@@ -4222,7 +4211,7 @@ def forward(self, x):
inps = (torch.ones(5),)
- ep = torch.export.export(M(), inps)
+ ep = torch.export.export(M(), inps).run_decompositions()
self.assertExpectedInline(
str(ep.graph_module.code.strip()),
"""\
@@ -4483,7 +4472,9 @@ class TestOneOffModelExportResult(TestCase):
k = torch.randn(1, 16, 16, 64, dtype=torch.bfloat16, device="cuda")
v = torch.randn(1, 16, 16, 64, dtype=torch.bfloat16, device="cuda")
- ep = torch.export.export(ScaledDotProductAttention(), (q, k, v))
+ ep = torch.export.export(
+ ScaledDotProductAttention(), (q, k, v)
+ ).run_decompositions()
self.assertExpectedInline(
ep.graph_module.code.strip(),
"""\
diff --git a/test/export/test_passes.py b/test/export/test_passes.py
index c7240ec0ee..777913deab 100644
--- a/test/export/test_passes.py
+++ b/test/export/test_passes.py
@@ -450,8 +450,7 @@ class TestPasses(TestCase):
ep = export(mod, (x,))
with self.assertRaisesRegex(
- RuntimeError,
- r"_local_scalar_dense is outside of inline constraint \[2, 5\].",
+ RuntimeError, r"item is outside of inline constraint \[2, 5\]."
):
ep.module()(torch.tensor([6]))
@@ -548,7 +547,7 @@ class TestPasses(TestCase):
with self.assertRaisesRegex(
RuntimeError,
- r"_local_scalar_dense is outside of inline constraint \[4, 7\]",
+ r"item is outside of inline constraint \[4, 7\]",
) as cm:
gm(torch.tensor([20]))
diff --git a/test/export/test_safeguard.py b/test/export/test_safeguard.py
deleted file mode 100644
index 1d4ffa030c..0000000000
--- a/test/export/test_safeguard.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Owner(s): ["oncall: export"]
-import unittest
-
-import torch
-import torch._dynamo as torchdynamo
-from torch.export import export
-from torch.testing._internal.common_utils import run_tests, TestCase
-
-
-@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo isn't support")
-class TestSafeguard(TestCase):
- # If the autograd state doesn't change, dynamo eliminates autograd state manager op and later export can succeed.
- # Otherwise, autograd can be preserved in the produced gragh, and export will fail.
- def test_global_autograd(self):
- class F1(torch.nn.Module):
- def forward(self, a):
- with torch.no_grad():
- b = a + a
- return b
-
- f1 = F1()
-
- class F2(torch.nn.Module):
- def forward(self, a):
- with torch.enable_grad():
- b = a + a
- return b
-
- f2 = F2()
-
- class F3(torch.nn.Module):
- def forward(self, a):
- with torch.set_grad_enabled(False):
- b = a + a
- return b
-
- f3 = F3()
-
- class F4(torch.nn.Module):
- def forward(self, a):
- with torch.set_grad_enabled(True):
- b = a + a
- return b
-
- f4 = F4()
-
- a = torch.randn(10)
- with torch.no_grad():
- export(f1, (a,))
- export(f2, (a,))
- export(f3, (a,))
- export(f4, (a,))
-
- with torch.enable_grad():
- export(f2, (a,))
- export(f4, (a,))
-
- with self.assertRaisesRegex(
- RuntimeError, "Encountered autograd state manager op.*"
- ):
- export(f1, (a,))
-
- with self.assertRaisesRegex(
- RuntimeError, "Encountered autograd state manager op.*"
- ):
- export(f3, (a,))
-
- def test_tensor_autograd(self):
- # dynamo errors when Tensor.requires_grad_ change the autograd state
- class F1(torch.nn.Module):
- def forward(self, a):
- a.requires_grad_(True)
- b = a + a
- return b
-
- f1 = F1()
-
- # dynamo errors when Tensor.requires_grad_ change the autograd state
- class F2(torch.nn.Module):
- def forward(self, a):
- a.requires_grad_(False)
- b = a + a
- return b
-
- f2 = F2()
-
- # dynamo always errors on Tensor.requires_grad
- class F3(torch.nn.Module):
- def forward(self, a):
- a.requires_grad = False
- b = a + a
- return b
-
- f3 = F3()
-
- export(f1, (torch.randn(10, requires_grad=True),))
- export(f2, (torch.randn(10, requires_grad=False),))
-
- with self.assertRaises(RuntimeError):
- export(f1, (torch.randn(10, requires_grad=False),))
- with self.assertRaises(RuntimeError):
- export(f2, (torch.randn(10, requires_grad=True),))
- with self.assertRaises(RuntimeError):
- export(f3, (torch.randn(10, requires_grad=False),))
-
- def test_global_autograd_exempt_predispatch(self):
- class F1(torch.nn.Module):
- def forward(self, a):
- with torch.no_grad():
- b = a + a
- return b
-
- f1 = F1()
-
- class F2(torch.nn.Module):
- def forward(self, a):
- with torch.enable_grad():
- b = a + a
- return b
-
- f2 = F2()
-
- class F3(torch.nn.Module):
- def forward(self, a):
- with torch.set_grad_enabled(False):
- b = a + a
- return b
-
- f3 = F3()
-
- class F4(torch.nn.Module):
- def forward(self, a):
- with torch.set_grad_enabled(True):
- b = a + a
- return b
-
- f4 = F4()
-
- a = torch.randn(10)
-
- from torch.export._trace import _export
-
- with torch.no_grad():
- _export(f1, (a,), pre_dispatch=True)
- _export(f2, (a,), pre_dispatch=True)
- _export(f3, (a,), pre_dispatch=True)
- _export(f4, (a,), pre_dispatch=True)
-
- with torch.enable_grad():
- _export(f1, (a,), pre_dispatch=True)
- _export(f2, (a,), pre_dispatch=True)
- _export(f3, (a,), pre_dispatch=True)
- _export(f4, (a,), pre_dispatch=True)
-
-
-if __name__ == "__main__":
- run_tests()
diff --git a/test/export/test_serialize.py b/test/export/test_serialize.py
index 60c148defb..ce077ce8e1 100644
--- a/test/export/test_serialize.py
+++ b/test/export/test_serialize.py
@@ -141,7 +141,7 @@ class TestSerialize(TestCase):
inp = (torch.ones(10),)
# Module will only be able to roundtrip if metadata
# can be correctly parsed.
- ep = export(MyModule(), inp)
+ ep = export(MyModule(), inp).run_decompositions()
buffer = io.BytesIO()
save(ep, buffer)
loaded_ep = load(buffer)
@@ -194,7 +194,7 @@ class TestSerialize(TestCase):
torch.ones([512]),
torch.ones([512]),
),
- )
+ ).run_decompositions()
serialized = ExportedProgramSerializer().serialize(exported_module)
node = serialized.exported_program.graph_module.graph.nodes[-1]
diff --git a/test/export/test_upgrade.py b/test/export/test_upgrade.py
index 3913370f9e..d659823674 100644
--- a/test/export/test_upgrade.py
+++ b/test/export/test_upgrade.py
@@ -140,7 +140,7 @@ def div__Scalar_mode_0_3(self: torch.Tensor, other: Any, *, rounding_mode: Opti
fn = Foo()
inputs = (torch.ones([2, 3]) * 4, 2.0)
- ep = export(fn, inputs)
+ ep = export(fn, inputs).run_decompositions()
compiler_opset_version = {"aten": 4}
model_opset_version = {"aten": 3}
upgrader = GraphModuleOpUpgrader(
@@ -151,7 +151,7 @@ def div__Scalar_mode_0_3(self: torch.Tensor, other: Any, *, rounding_mode: Opti
self.assertEqual(count, 1)
# upgrade: replace op (div.Scalar_mode -> div__Scalar_mode_0_3) then retrace
- upgraded_ep = upgrader.upgrade(ep)
+ upgraded_ep = upgrader.upgrade(ep).run_decompositions()
upgraded_ep.graph_module.print_readable()
# no old version of op (div__Scalar_mode_0_3) anymore.
diff --git a/test/onnx/test_fx_op_consistency.py b/test/onnx/test_fx_op_consistency.py
index 004574d3e8..75aedc9037 100644
--- a/test/onnx/test_fx_op_consistency.py
+++ b/test/onnx/test_fx_op_consistency.py
@@ -1873,7 +1873,19 @@ def _run_test_output_match(
== pytorch_test_common.TorchModelType.TORCH_EXPORT_EXPORTEDPROGRAM
):
try:
- model = torch.export.export(model, inputs)
+ # TODO (tugsbayasgalan) Migrate to pre-dispatch IR
+ # BUG1: python test/onnx/test_fx_op_consistency.py -k test_output_match_triu_cpu_int32
+ # has unexpected success, but don't know how to remove from xfail list
+ # BUG2: User output to_sparse is not in the correct order or is not found in the
+ # exported program's user_output list (https://github.com/pytorch/pytorch/issues/124328)
+ # python test/onnx/test_fx_op_consistency.py -k test_output_match_to_sparse_cpu_float32
+ # BUG3: [ShapeInferenceError] Inference error(s): (op_type:aten_view, node name: aten_view_4):
+ # [ShapeInferenceError]
+ # Inference error(s): (op_type:Reshape, node name: n1): [ShapeInferenceError] Invalid position of 0.
+ # python test/onnx/test_fx_op_consistency.py -k test_output_match_stack_cpu_int32
+ from torch.export import _trace
+
+ model = _trace._export(model, inputs, pre_dispatch=False)
except AssertionError as e:
# NOTE: avoid fake_mode detection bug in torch.export.export
pytest.xfail(
diff --git a/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py b/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
index 88a7bad396..c8ccc5eb47 100644
--- a/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
+++ b/torch/_functorch/_aot_autograd/dispatch_and_compile_graph.py
@@ -15,6 +15,7 @@ from torch._dynamo.utils import lazy_format_graph_code
from torch._logging import getArtifactLogger, trace_structured
from torch._subclasses.functional_tensor import FunctionalTensorMode
from torch.fx.experimental.proxy_tensor import make_fx
+from torch.utils._python_dispatch import _detect_infra_mode
from .. import config
from .functional_utils import (
@@ -113,9 +114,7 @@ def aot_dispatch_base_graph(
)
fake = buffer.from_functional()
# The fake tensor in turn is associated with a proxy node.
- proxy_mode = torch._C._get_dispatch_mode(
- torch._C._TorchDispatchModeKey.PROXY
- )
+ proxy_mode = _detect_infra_mode(torch._C._TorchDispatchModeKey.PROXY)
assert proxy_mode is not None
proxy = torch.fx.experimental.proxy_tensor.get_proxy_slot(
fake, proxy_mode.tracer
diff --git a/torch/_functorch/_aot_autograd/traced_function_transforms.py b/torch/_functorch/_aot_autograd/traced_function_transforms.py
index 111f7570f2..0671c21102 100644
--- a/torch/_functorch/_aot_autograd/traced_function_transforms.py
+++ b/torch/_functorch/_aot_autograd/traced_function_transforms.py
@@ -372,8 +372,8 @@ def create_functionalized_fn(
# Populate the current FunctionalTensorMode with the tokens per
# operator. See Note [FunctionalTensorMode is Stateful]
- functional_tensor_mode = (
- torch.utils._python_dispatch._detect_functional_mode()
+ functional_tensor_mode = torch.utils._python_dispatch._detect_infra_mode(
+ torch._C._TorchDispatchModeKey.FUNCTIONAL
)
assert functional_tensor_mode is not None
for i, k in enumerate(meta.tokens.keys()):
diff --git a/torch/_subclasses/functional_tensor.py b/torch/_subclasses/functional_tensor.py
index 8b74d069c6..965f203c87 100644
--- a/torch/_subclasses/functional_tensor.py
+++ b/torch/_subclasses/functional_tensor.py
@@ -8,7 +8,7 @@ import torch.utils._pytree as pytree
from torch._C import _functionalization_reapply_views_tls as _reapply_views
from torch._ops import _get_dispatch_mode_pre_dispatch
from torch.utils._python_dispatch import (
- _detect_functional_mode,
+ _detect_infra_mode,
_disable_infra_mode,
return_and_correct_aliasing,
TorchDispatchMode,
@@ -185,7 +185,7 @@ class FunctionalTensor(torch.Tensor):
# and otherwise the sym_size() call will go to the proxy mode before hitting
# FunctionalTensor.__torch_dispatch__
- functional_mode = _detect_functional_mode()
+ functional_mode = _detect_infra_mode(torch._C._TorchDispatchModeKey.FUNCTIONAL)
assert functional_mode is not None
with functional_mode:
diff --git a/torch/export/__init__.py b/torch/export/__init__.py
index 62b9ef4e40..d8422ba411 100644
--- a/torch/export/__init__.py
+++ b/torch/export/__init__.py
@@ -178,6 +178,7 @@ def export(
dynamic_shapes,
strict=strict,
preserve_module_call_signature=preserve_module_call_signature,
+ pre_dispatch=True,
)
diff --git a/torch/onnx/_internal/exporter.py b/torch/onnx/_internal/exporter.py
index 7831a362ae..5315e034ec 100644
--- a/torch/onnx/_internal/exporter.py
+++ b/torch/onnx/_internal/exporter.py
@@ -814,7 +814,7 @@ class ONNXProgram:
... ) # Mutate buffer through in-place addition
... return output
>>> inputs = (torch.rand((64, 1, 28, 28), dtype=torch.float32), torch.randn(3))
- >>> exported_program = torch.export.export(CustomModule(), args=inputs)
+ >>> exported_program = torch.export.export(CustomModule(), args=inputs).run_decompositions({})
>>> onnx_program = torch.onnx.dynamo_export(exported_program, *inputs)
>>> pprint.pprint(onnx_program.model_signature)
ExportGraphSignature(input_specs=[InputSpec(kind=<InputKind.PARAMETER: 2>,
diff --git a/torch/utils/_python_dispatch.py b/torch/utils/_python_dispatch.py
index f5f830c2f1..d13f77191f 100644
--- a/torch/utils/_python_dispatch.py
+++ b/torch/utils/_python_dispatch.py
@@ -104,24 +104,25 @@ def _get_current_dispatch_mode():
return None
-def _detect_functional_mode():
+def _detect_infra_mode(key):
+ assert key in [torch._C._TorchDispatchModeKey.FUNCTIONAL, torch._C._TorchDispatchModeKey.PROXY]
from torch._ops import _get_dispatch_mode_pre_dispatch
- pre_dispatch_functional_mode = _get_dispatch_mode_pre_dispatch(
- torch._C._TorchDispatchModeKey.FUNCTIONAL
+ pre_dispatch_mode = _get_dispatch_mode_pre_dispatch(
+ key
)
- post_dispatch_functional_mode = torch._C._get_dispatch_mode(
- torch._C._TorchDispatchModeKey.FUNCTIONAL
+ post_dispatch_mode = torch._C._get_dispatch_mode(
+ key
)
- assert (pre_dispatch_functional_mode is None) or (
- post_dispatch_functional_mode is None
+ assert (pre_dispatch_mode is None) or (
+ post_dispatch_mode is None
)
- if pre_dispatch_functional_mode is None:
- return post_dispatch_functional_mode
+ if pre_dispatch_mode is None:
+ return post_dispatch_mode
- return pre_dispatch_functional_mode
+ return pre_dispatch_mode
def _unset_infra_mode(key):
|
2.41.0
|
1ca27af62d54786bb787d2fd8fba65bb75a8353
|
Mon, 22 Apr 2024 22:12:33 -0700
|
[PATCH 0548/1000] Add the quant lift up pass in convert phase (#122777)
|
**Summary** Lift the quant nodes up before view-like nodes. This can benefit the performance of attention-like blocks. For example, we have the pattern: ``` DQ DQ LINEAR LINEAR VIEW VIEW PERMUTE PERMUTE TRANSPOSE Q Q DQ DQ Matmul DIV ADD SOFTMAX ``` We want to lift the quant nodes that feed `matmul` up before the view-like nodes, so that they sit right after the output of the Linear nodes: ``` DQ DQ LINEAR LINEAR Q Q VIEW VIEW PERMUTE PERMUTE TRANSPOSE DQ DQ Matmul DIV ADD SOFTMAX ``` This produces a `DQ->LINEAR->Q` pattern which can be fused by the backend. **Test Plan** ``` python -m pytest quantization/pt2e/test_x86inductor_quantizer.py -k test_attention_block ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/122777 Approved by: https://github.com/jerryzh168, https://github.com/jgong5 ghstack dependencies: #122776
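A minimal sketch of the quantizer setup exercised by the new test (the model and the prepare/convert steps are omitted): annotating `torch.matmul` is what creates the Q/DQ nodes around the attention matmuls that this pass then lifts above the view/permute nodes.
```
import torch
import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer

quantizer = X86InductorQuantizer()
quantizer.set_global(xiq.get_default_x86_inductor_quantization_config())
# Opt in to quantizing torch.matmul so the attention block gets Q/DQ nodes.
quantizer.set_function_type_qconfig(
    torch.matmul, quantizer.get_global_quantization_config()
)
# The quantizer is then passed to the usual prepare_pt2e / convert_pt2e flow.
```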
|
diff --git a/test/inductor/test_mkldnn_pattern_matcher.py b/test/inductor/test_mkldnn_pattern_matcher.py
index e44f103571..195d02d379 100644
--- a/test/inductor/test_mkldnn_pattern_matcher.py
+++ b/test/inductor/test_mkldnn_pattern_matcher.py
@@ -73,6 +73,16 @@ quantization_inplace_add_fn_list = [
]
+def get_default_quantizer(is_qat, is_dynamic):
+ quantizer = X86InductorQuantizer()
+ quantizer.set_global(
+ xiq.get_default_x86_inductor_quantization_config(
+ is_qat=is_qat, is_dynamic=is_dynamic
+ )
+ )
+ return quantizer
+
+
@config.patch({"freezing": True})
class TestPatternMatcherBase(TestCase):
def _check_unary_is_decomposed(self, unary_fn):
@@ -90,7 +100,7 @@ class TestPatternMatcherBase(TestCase):
return tuple(clone(x) for x in inputs)
def _generate_qdq_quantized_model(
- self, mod, inputs, is_qat=False, is_dynamic=False
+ self, mod, inputs, is_qat=False, is_dynamic=False, quantizer=None
):
maybe_no_grad = contextlib.nullcontext() if is_qat else torch.no_grad()
with maybe_no_grad:
@@ -98,11 +108,8 @@ class TestPatternMatcherBase(TestCase):
mod,
inputs,
)
- quantizer = X86InductorQuantizer()
- quantizer.set_global(
- xiq.get_default_x86_inductor_quantization_config(
- is_qat=is_qat, is_dynamic=is_dynamic
- )
+ quantizer = (
+ quantizer if quantizer else get_default_quantizer(is_qat, is_dynamic)
)
prepare_model = (
prepare_qat_pt2e(export_model, quantizer)
@@ -128,6 +135,7 @@ class TestPatternMatcherBase(TestCase):
matcher_check_fn=None,
dtype=None,
is_dynamic=False,
+ quantizer=None,
):
counters.clear()
torch._dynamo.reset()
@@ -152,7 +160,7 @@ class TestPatternMatcherBase(TestCase):
if check_quantization:
convert_model = self._generate_qdq_quantized_model(
- mod, inputs, is_qat, is_dynamic
+ mod, inputs, is_qat, is_dynamic, quantizer
)
with torch.no_grad(), maybe_autocast:
_ = torch.compile(convert_model)(*inputs)
@@ -2416,6 +2424,85 @@ class TestDynamicPatternMatcher(TestPatternMatcherBase):
matcher_check_fn=matcher_check_fn,
)
+ @skipIfNoDynamoSupport
+ @skipIfNoONEDNN
+ @skipIfRocm
+ def test_q_attention_block(self):
+ class SelfAttnLikeModule(torch.nn.Module):
+ def __init__(
+ self,
+ input_dim,
+ transpose_for_score=False,
+ num_attention_heads=None,
+ attention_head_size=None,
+ ) -> None:
+ super().__init__()
+ self.input_dim = input_dim
+ self.q_proj = torch.nn.Linear(input_dim, input_dim, bias=False)
+ self.k_proj = torch.nn.Linear(input_dim, input_dim, bias=False)
+ self.v_proj = torch.nn.Linear(input_dim, input_dim, bias=False)
+ self.softmax = torch.nn.Softmax(dim=-1)
+ self.transpose_for_score = transpose_for_score
+ if self.transpose_for_score:
+ assert num_attention_heads is not None
+ assert attention_head_size is not None
+ self.num_attention_heads = num_attention_heads
+ self.attention_head_size = attention_head_size
+
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
+ new_x_shape = x.size()[:-1] + (
+ self.num_attention_heads,
+ self.attention_head_size,
+ )
+ x = x.view(new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(self, x):
+ q = self.q_proj(x)
+ k = self.k_proj(x)
+ v = self.v_proj(x)
+ if self.transpose_for_score:
+ q = self.transpose_for_scores(q)
+ k = self.transpose_for_scores(k)
+ v = self.transpose_for_scores(v)
+ scores = torch.matmul(q, k.transpose(-1, -2)) / (self.input_dim**0.5)
+ attention = self.softmax(scores)
+ weighted = torch.matmul(attention, v)
+ return weighted
+
+ for annotate_matmul in [False, True]:
+ mod = SelfAttnLikeModule(
+ input_dim=64 * 16,
+ transpose_for_score=True,
+ num_attention_heads=16,
+ attention_head_size=64,
+ ).eval()
+ v = torch.randn(2, 384, 1024)
+
+ def matcher_check_fn():
+ self.assertEqual(
+ counters["inductor"]["qlinear_weight_prepack_matcher_count"], 3
+ )
+ self.assertEqual(
+ counters["inductor"]["qlinear_unary_matcher_count"],
+ 3 if annotate_matmul else 0,
+ )
+
+ quantizer = X86InductorQuantizer()
+ quantizer.set_global(xiq.get_default_x86_inductor_quantization_config())
+ if annotate_matmul:
+ quantizer.set_function_type_qconfig(
+ torch.matmul, quantizer.get_global_quantization_config()
+ )
+
+ self._test_common(
+ mod,
+ (v,),
+ check_quantization=True,
+ matcher_check_fn=matcher_check_fn,
+ quantizer=quantizer,
+ )
+
if __name__ == "__main__":
if IS_LINUX and HAS_CPU and torch.backends.mkldnn.is_available():
diff --git a/torch/_inductor/fx_passes/pre_grad.py b/torch/_inductor/fx_passes/pre_grad.py
index b494c26f37..091de51c2d 100644
--- a/torch/_inductor/fx_passes/pre_grad.py
+++ b/torch/_inductor/fx_passes/pre_grad.py
@@ -218,6 +218,11 @@ def pre_grad_passes(gm: torch.fx.GraphModule, example_inputs=None):
if config.pre_grad_custom_pass is not None:
config.pre_grad_custom_pass(gm.graph)
stable_topological_sort(gm.graph)
+
+ from .quantization import quant_lift_up
+
+ quant_lift_up(gm)
+
gm.graph.lint()
gm.recompile()
optimus_scuba_log["after_recompile_pre_grad"] = upload_graph(gm.graph)
diff --git a/torch/_inductor/fx_passes/quantization.py b/torch/_inductor/fx_passes/quantization.py
index d9223f6332..bbff59c8ae 100644
--- a/torch/_inductor/fx_passes/quantization.py
+++ b/torch/_inductor/fx_passes/quantization.py
@@ -8,6 +8,7 @@ from typing import Any, Tuple
import torch
from torch._dynamo.utils import counters
from torch.fx.experimental.symbolic_shapes import has_free_symbols
+from torch.fx.node import map_arg
from ..lowering import lowerings as L, require_channels_last
from ..pattern_matcher import Arg, CallFunction, filter_nodes, KeywordArg, ListOf, Match
from ..utils import pad_listlike
@@ -19,6 +20,18 @@ prims = torch.ops.prims
quantized_decomposed = torch.ops.quantized_decomposed
quantized = torch.ops.quantized
+# Only for per tensor quant since permute may changes the channel idx
+_PER_TENSOR_QUANTIZE_OPS = [
+ quantized_decomposed.quantize_per_tensor.default,
+ quantized_decomposed.quantize_per_tensor.tensor,
+]
+
+_VIEW_OPS = [
+ aten.transpose.int,
+ aten.permute.default,
+ aten.view.default,
+]
+
"""
The quantization.py file primarily incorporates passes related to quantization fusion
in inductor, includes:
@@ -2163,3 +2176,94 @@ def _register_quantization_weight_pack_pass():
# Step 3: QLinear weight prepack
_register_qlinear_weight_prepack()
+
+
+def quant_lift_up(graph_module: torch.fx.GraphModule):
+ """
+ Lift up the quant node before view like nodes. It can benefit performance
+ of Attention like block. For example, we have the pattern as:
+
+ DQ
+ DQ LINEAR
+ LINEAR VIEW
+ VIEW PERMUTE
+ PERMUTE TRANSPOSE
+ Q Q
+ DQ DQ
+ Matmul
+ DIV
+ ADD
+ SOFTMAX
+
+ We want to lift up the the quant nodes from matmul before view like nodes
+ as the output of Linear node.
+
+ DQ
+ DQ LINEAR
+ LINEAR Q
+ Q VIEW
+ VIEW PERMUTE
+ PERMUTE TRANSPOSE
+ DQ DQ
+ Matmul
+ DIV
+ ADD
+ SOFTMAX
+
+ It produces a DQ->LINEAR->Q pattern which can be fused by backend.
+ """
+
+ def is_view_op(node):
+ return node.op == "call_function" and node.target in _VIEW_OPS
+
+ for node in graph_module.graph.nodes:
+ # <TODO> Leslie: Here we verify that the quant node has exactly
+ # one input FX node, with constant scalar value for scale and zero point.
+ # For the case input of quant node has more than one input FX nodes,
+ # extend the implementation to lift up all the connected nodes
+ # before the view nodes to keep the topological order.
+ if (
+ node.op == "call_function"
+ and node.target in _PER_TENSOR_QUANTIZE_OPS
+ and len(node.all_input_nodes) == 1
+ and is_view_op(node.all_input_nodes[0])
+ ):
+ quant_node = node
+ input_node_of_quant = quant_node.args[0]
+
+ # Check the nodes along lift up path has only 1 user node
+ # Propagate view like node to find where to insert the new quant node
+ could_lift_up = True
+ current_node = quant_node
+ input_node = current_node.args[0]
+ while is_view_op(input_node):
+ if len(input_node.users) != 1:
+ could_lift_up = False
+ break
+ current_node = input_node
+ input_node = current_node.args[0]
+
+ # Further check the input node of the first view node has only 1 user node
+ if could_lift_up and len(input_node.users) == 1:
+ # Replace dequant's input from quant to quant's input
+ quant_node.replace_all_uses_with(input_node_of_quant)
+ # Insert the new quant node
+ with graph_module.graph.inserting_before(current_node):
+ new_quant_node = graph_module.graph.node_copy(quant_node)
+ input_node.replace_all_uses_with(new_quant_node)
+
+ # Update inputs of new_quant_node
+ def maybe_replace_node(n: torch.fx.Node) -> torch.fx.Node:
+ if n == input_node_of_quant:
+ return input_node
+ else:
+ return n
+
+ new_args = map_arg(new_quant_node.args, maybe_replace_node)
+ new_kwargs = map_arg(new_quant_node.kwargs, maybe_replace_node)
+ new_quant_node.args = new_args
+ new_quant_node.kwargs = new_kwargs
+ graph_module.graph.erase_node(quant_node)
+
+ graph_module.graph.lint()
+ graph_module.recompile()
|
2.41.0
|
efb28c90025ea3d979b720942cd97a274fac6da
|
Tue, 23 Apr 2024 14:13:01 -0700
|
[PATCH 0550/1000] [quant][pt2e] Move batch norm op between eval/train for cuda (#123957)
|
Summary: Previously, `move_exported_model_to_train/eval` only switched the CPU versions of the batch norm op. This commit adds support for the CUDA versions of the op too. Note that this fix is temporary; we won't have to differentiate between these two cases once we have batch norm consolidation. Test Plan: python test/test_quantization.py -k test_move_exported_model_bn Reviewers: jerryzh168 Subscribers: jerryzh168, leslie-fang-intel, supriyar Differential Revision: [D56070054](https://our.internmc.facebook.com/intern/diff/D56070054) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123957 Approved by: https://github.com/jerryzh168
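A minimal sketch of the flow the updated test exercises (assumes a CUDA device is available; module definition mirrors the test below):
```python
# Minimal sketch: export a batch-norm module on CUDA, then swap the batch norm op
# between its train and eval aten variants.
import torch
from torch._export import capture_pre_autograd_graph

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.bn = torch.nn.BatchNorm2d(3)

    def forward(self, x):
        return self.bn(x)

m = M().train().cuda()
example_inputs = (torch.randn(1, 3, 3, 3).cuda(),)
m = capture_pre_autograd_graph(m, example_inputs)

# On CUDA the graph now contains aten.cudnn_batch_norm; these calls flip its
# training flag instead of only handling the CPU batch norm op.
torch.ao.quantization.move_exported_model_to_eval(m)
torch.ao.quantization.move_exported_model_to_train(m)
```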
|
diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py
index 3c759fc65c..0b9ad6a9a6 100644
--- a/test/quantization/pt2e/test_quantize_pt2e.py
+++ b/test/quantization/pt2e/test_quantize_pt2e.py
@@ -1826,6 +1826,18 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def test_move_exported_model_dropout_inplace(self):
self._test_move_exported_model_dropout(inplace=True)
+ def _get_bn_train_eval_ops(self, is_cuda: bool):
+ if is_cuda:
+ return (
+ torch.ops.aten.cudnn_batch_norm.default,
+ torch.ops.aten.cudnn_batch_norm.default,
+ )
+ else:
+ return (
+ torch.ops.aten._native_batch_norm_legit.default,
+ torch.ops.aten._native_batch_norm_legit_no_training.default,
+ )
+
def test_move_exported_model_bn(self):
"""
Test switching batch_norm behavior between train and eval modes using
@@ -1840,12 +1852,18 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def forward(self, x):
return self.bn(x)
- example_inputs = (torch.randn(1, 3, 3, 3),)
- m = M().train()
+ is_cuda = torch.cuda.is_available()
+ if is_cuda:
+ m = M().train().cuda()
+ example_inputs = (torch.randn(1, 3, 3, 3).cuda(),)
+ else:
+ m = M().train()
+ example_inputs = (torch.randn(1, 3, 3, 3),)
+ bn_train_op, bn_eval_op = self._get_bn_train_eval_ops(is_cuda)
m = capture_pre_autograd_graph(m, example_inputs)
# Assert that batch norm op exists and is in train mode
- bn_node = self._get_node(m, torch.ops.aten._native_batch_norm_legit.default)
+ bn_node = self._get_node(m, bn_train_op)
self.assertTrue(bn_node is not None)
self.assertTrue(bn_node.args[5])
@@ -1853,16 +1871,14 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
torch.ao.quantization.move_exported_model_to_eval(m)
# Assert that batch norm op is now in eval mode
- bn_node = self._get_node(
- m, torch.ops.aten._native_batch_norm_legit_no_training.default
- )
+ bn_node = self._get_node(m, bn_eval_op)
self.assertTrue(bn_node is not None)
# Move to train
torch.ao.quantization.move_exported_model_to_train(m)
# Assert that batch norm op is now in train mode again
- bn_node = self._get_node(m, torch.ops.aten._native_batch_norm_legit.default)
+ bn_node = self._get_node(m, bn_train_op)
self.assertTrue(bn_node is not None)
self.assertTrue(bn_node.args[5])
@@ -1908,22 +1924,25 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
x = self.dropout(x)
return x
- example_inputs = (torch.randn(1, 3, 3, 3),)
- m = M().train()
+ is_cuda = torch.cuda.is_available()
+ if is_cuda:
+ m = M().train().cuda()
+ example_inputs = (torch.randn(1, 3, 3, 3).cuda(),)
+ else:
+ m = M().train()
+ example_inputs = (torch.randn(1, 3, 3, 3),)
+ bn_train_op, bn_eval_op = self._get_bn_train_eval_ops(is_cuda)
m = capture_pre_autograd_graph(m, example_inputs)
def _assert_ops_are_correct(m: torch.fx.GraphModule, train: bool):
targets = [n.target for n in m.graph.nodes]
- bn_train_target = torch.ops.aten._native_batch_norm_legit.default
- bn_eval_target = torch.ops.aten._native_batch_norm_legit_no_training.default
- if train:
- self.assertTrue(bn_train_target in targets)
- self.assertTrue(bn_eval_target not in targets)
- else:
- self.assertTrue(bn_eval_target in targets)
- self.assertTrue(bn_train_target not in targets)
+ bn_op = bn_train_op if train else bn_eval_op
+ bn_node = self._get_node(m, bn_op)
+ self.assertTrue(bn_node is not None)
+ if is_cuda:
+ self.assertEqual(bn_node.args[5], train)
dropout_node = self._get_node(m, torch.ops.aten.dropout.default)
- self.assertTrue(dropout_node.args[2] == train)
+ self.assertEqual(dropout_node.args[2], train)
# Before wrapping: this is not OK
with self.assertRaises(NotImplementedError):
diff --git a/torch/ao/quantization/fx/utils.py b/torch/ao/quantization/fx/utils.py
index 21a1034739..be26332b24 100644
--- a/torch/ao/quantization/fx/utils.py
+++ b/torch/ao/quantization/fx/utils.py
@@ -23,6 +23,7 @@ from torch.ao.quantization.qconfig import (
)
from torch.ao.quantization.stubs import DeQuantStub
from torch.ao.quantization.utils import (
+ _assert_and_get_unique_device,
activation_is_statically_quantized,
)
from torch.ao.quantization.observer import _is_activation_post_process
@@ -222,26 +223,13 @@ def graph_module_from_producer_nodes(
graph_module = GraphModule(root, graph)
return graph_module
+# TODO: delete
def assert_and_get_unique_device(module: torch.nn.Module) -> Any:
"""
Returns the unique device for a module, or None if no device is found.
Throws an error if multiple devices are detected.
"""
- devices = {p.device for p in module.parameters()} | \
- {p.device for p in module.buffers()}
- """
- As a temp workaround for AIMP HHC publish we added CPU check.remove it later. T163614564
- """
- if {torch.device("cpu"), torch.device("meta")} == devices:
- warnings.warn("Both 'meta' and 'cpu' are present in the list of devices. Module can have one device. We Select 'cpu'.")
- devices = {torch.device("cpu")}
- ""
- assert len(devices) <= 1, (
- "prepare only works with cpu or single-device CUDA modules, "
- f"but got devices {devices}"
- )
- device = next(iter(devices)) if len(devices) > 0 else None
- return device
+ return _assert_and_get_unique_device(module)
def create_getattr_from_value(module: torch.nn.Module, graph: Graph, prefix: str, value: Any) -> Node:
"""
diff --git a/torch/ao/quantization/pt2e/export_utils.py b/torch/ao/quantization/pt2e/export_utils.py
index dae8baad8d..2e7b9e380d 100644
--- a/torch/ao/quantization/pt2e/export_utils.py
+++ b/torch/ao/quantization/pt2e/export_utils.py
@@ -3,6 +3,8 @@ import types
import torch
import torch.nn.functional as F
+from torch.ao.quantization.utils import _assert_and_get_unique_device
+
__all__ = [
"model_is_exported",
@@ -136,20 +138,26 @@ def _replace_batchnorm(m: torch.fx.GraphModule, train_to_eval: bool):
torch.randn(1), # bn_running_mean
torch.randn(1), # bn_running_var
)
+
+ device = _assert_and_get_unique_device(m)
+ is_cuda = device is not None and device.type == "cuda"
+ bn_train_aten = _get_aten_graph_module_for_pattern(
+ _WrapperModule(bn_train),
+ example_inputs,
+ is_cuda,
+ )
+ bn_eval_aten = _get_aten_graph_module_for_pattern(
+ _WrapperModule(bn_eval),
+ example_inputs,
+ is_cuda,
+ )
+
if train_to_eval:
- match_pattern = _get_aten_graph_module_for_pattern(
- _WrapperModule(bn_train), example_inputs
- )
- replacement_pattern = _get_aten_graph_module_for_pattern(
- _WrapperModule(bn_eval), example_inputs
- )
+ match_pattern = bn_train_aten
+ replacement_pattern = bn_eval_aten
else:
- match_pattern = _get_aten_graph_module_for_pattern(
- _WrapperModule(bn_eval), example_inputs
- )
- replacement_pattern = _get_aten_graph_module_for_pattern(
- _WrapperModule(bn_train), example_inputs
- )
+ match_pattern = bn_eval_aten
+ replacement_pattern = bn_train_aten
from torch.fx.subgraph_rewriter import replace_pattern_with_filters
diff --git a/torch/ao/quantization/utils.py b/torch/ao/quantization/utils.py
index 70b45b92fb..d0de50bbeb 100644
--- a/torch/ao/quantization/utils.py
+++ b/torch/ao/quantization/utils.py
@@ -688,6 +688,27 @@ def get_fqn_to_example_inputs(
torch.nn.Module.__call__ = orig_module_call # type: ignore[method-assign]
return fqn_to_example_inputs
+def _assert_and_get_unique_device(module: torch.nn.Module) -> Any:
+ """
+ Returns the unique device for a module, or None if no device is found.
+ Throws an error if multiple devices are detected.
+ """
+ devices = {p.device for p in module.parameters()} | \
+ {p.device for p in module.buffers()}
+ """
+ As a temp workaround for AIMP HHC publish we added CPU check.remove it later. T163614564
+ """
+ if {torch.device("cpu"), torch.device("meta")} == devices:
+ warnings.warn("Both 'meta' and 'cpu' are present in the list of devices. Module can have one device. We Select 'cpu'.")
+ devices = {torch.device("cpu")}
+ ""
+ assert len(devices) <= 1, (
+ "prepare only works with cpu or single-device CUDA modules, "
+ f"but got devices {devices}"
+ )
+ device = next(iter(devices)) if len(devices) > 0 else None
+ return device
+
__all__ = [
"NodePattern",
"Pattern",
|
2.41.0
|
ab0b3c9f8d60c534264302f48a122168fe11af7
|
Wed, 24 Apr 2024 01:09:04 +0000
|
[PATCH 0551/1000] [ROCm] avoid heap buffer overflow in hiprtc failure logs (#121865)
|
hiprtc doesn't seem to include the null byte automatically in the failure logs, resulting in a heap buffer overflow. Initializing the log string with the null byte avoids the problem. Found by the ROCm address sanitizer. Pull Request resolved: https://github.com/pytorch/pytorch/pull/121865 Approved by: https://github.com/malfet
|
diff --git a/aten/src/ATen/native/cuda/jit_utils.cpp b/aten/src/ATen/native/cuda/jit_utils.cpp
index cb938886d6..6e804efe5f 100644
--- a/aten/src/ATen/native/cuda/jit_utils.cpp
+++ b/aten/src/ATen/native/cuda/jit_utils.cpp
@@ -1569,11 +1569,9 @@ NvrtcFunction jit_pwise_function(
if (compilation_result != NVRTC_SUCCESS) {
size_t logsize;
AT_CUDA_NVRTC_CHECK(nvrtc.nvrtcGetProgramLogSize(program, &logsize));
- std::vector<char> log(logsize);
- AT_CUDA_NVRTC_CHECK(nvrtc.nvrtcGetProgramLog(program, log.data()));
- std::stringstream cu;
- cu << log.data();
- throw std::runtime_error(code + cu.str());
+ std::string log(logsize, '\0');
+ AT_CUDA_NVRTC_CHECK(nvrtc.nvrtcGetProgramLog(program, &log[0]));
+ throw std::runtime_error(code + log);
}
size_t ptx_size = 0;
diff --git a/caffe2/cuda_rtc/common_rtc.h b/caffe2/cuda_rtc/common_rtc.h
index 9d9582d34b..0fa6bad7a0 100644
--- a/caffe2/cuda_rtc/common_rtc.h
+++ b/caffe2/cuda_rtc/common_rtc.h
@@ -50,11 +50,11 @@ class CudaRTCFunction {
if (compile_result != NVRTC_SUCCESS) {
size_t log_size;
NVRTC_CHECK(nvrtcGetProgramLogSize(prog, &log_size));
- vector<char> nvrtc_log(log_size);
- NVRTC_CHECK(nvrtcGetProgramLog(prog, nvrtc_log.data()));
+ std::string nvrtc_log(log_size, '\0');
+ NVRTC_CHECK(nvrtcGetProgramLog(prog, &nvrtc_log[0]));
LOG(FATAL) << "Compilation failure for nvrtc("
<< nvrtcGetErrorString(compile_result) << "): \n"
- << nvrtc_log.data();
+ << nvrtc_log;
}
size_t ptx_size;
NVRTC_CHECK(nvrtcGetPTXSize(prog, &ptx_size));
|
2.41.0
|
7a12d9d0f71879a83e731e67982385d1d534498
|
Wed, 24 Apr 2024 01:24:01 +0000
|
[PATCH 0552/1000] Add Half support to torch.sparse.addmm for CPU (#124694)
|
This PR adds Half support to torch.sparse.addmm for CPU. It is a feature requested for the DCRNN model with the Half data type. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124694 Approved by: https://github.com/pearu
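A minimal sketch of the newly supported path (shapes are illustrative, not from the patch):
```python
# Minimal sketch: Half-precision sparse @ dense addmm on CPU, enabled by this patch.
import torch

dense_in = torch.randn(3, 4, dtype=torch.float16)            # added to the result
sparse = torch.randn(3, 2, dtype=torch.float16).to_sparse()  # COO sparse matrix
dense = torch.randn(2, 4, dtype=torch.float16)

# Previously this dispatch did not cover float16 on CPU.
out = torch.sparse.addmm(dense_in, sparse, dense)
```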
|
diff --git a/aten/src/ATen/native/sparse/SparseTensorMath.cpp b/aten/src/ATen/native/sparse/SparseTensorMath.cpp
index 79392d6b83..4b3691a7af 100644
--- a/aten/src/ATen/native/sparse/SparseTensorMath.cpp
+++ b/aten/src/ATen/native/sparse/SparseTensorMath.cpp
@@ -1311,7 +1311,7 @@ static Tensor& s_addmm_out_sparse_dense_cpu(
Tensor indices = sparse_._indices();
Tensor values = sparse_._values();
- AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(kBFloat16,
+ AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf,
values.scalar_type(), "addmm_sparse_dense", [&] {
s_addmm_out_sparse_dense_worker<scalar_t>(nnz, dim_i, dim_j, dim_k, r, beta, t, alpha, indices, values, dense);
}
diff --git a/test/test_sparse.py b/test/test_sparse.py
index 2f86be0210..da2e08d769 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -1551,11 +1551,12 @@ class TestSparse(TestSparseBase):
self.assertEqual(self.safeToDense(res), self.safeToDense(true_result))
@coalescedonoff
- @precisionOverride({torch.bfloat16: 5e-2})
- @dtypes(torch.double, torch.cdouble, torch.bfloat16)
+ @precisionOverride({torch.bfloat16: 5e-2, torch.float16: 5e-2})
+ @dtypes(torch.double, torch.cdouble, torch.bfloat16, torch.float16)
def test_sparse_addmm(self, device, dtype, coalesced):
- if dtype is torch.bfloat16 and device.startswith("cuda"):
- self.skipTest('addmm_sparse_cuda is not implemented for BFloat16')
+ if (dtype is torch.bfloat16 or dtype is torch.float16) and device.startswith("cuda"):
+ self.skipTest('addmm_sparse_cuda is not implemented for BFloat16 and Half')
+
def test_shape(m, n, p, nnz, broadcast, alpha_beta=None):
if alpha_beta is None:
|
2.41.0
|
716e77cf7e92e728decdea487ae27302c962aa3
|
Wed, 24 Apr 2024 01:25:07 +0000
|
[PATCH 0553/1000] [FSDP1][2D] Fix FSDP1 2D state_dict to use run_check=False (#123802)
|
`from_local` with replicate placement would run mesh_broadcast if `run_check=True`, and `from_local` defaults to `run_check=True`. For the FSDP state_dict case (FSDP + TP) we know for sure that these tensors are already replicated on the dp dimension, so we don't need to check/force-check it. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123802 Approved by: https://github.com/wanchaol
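For context, a minimal sketch of the call this changes, outside the FSDP internals (assumes an initialized process group with 4 ranks and a 2D device mesh; names are illustrative):
```python
# Minimal sketch: wrap an already-replicated local tensor as a DTensor without the
# replication check/broadcast, as the FSDP state_dict path now does.
import torch
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed._tensor import DTensor, Replicate

mesh = init_device_mesh("cuda", (2, 2), mesh_dim_names=("dp", "tp"))
local = torch.randn(16, 16, device="cuda")
dt = DTensor.from_local(local, mesh, [Replicate(), Replicate()], run_check=False)
```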
|
diff --git a/torch/distributed/tensor/parallel/fsdp.py b/torch/distributed/tensor/parallel/fsdp.py
index 7b8d0af39b..d7eae93a72 100644
--- a/torch/distributed/tensor/parallel/fsdp.py
+++ b/torch/distributed/tensor/parallel/fsdp.py
@@ -255,7 +255,7 @@ def _chunk_dtensor(
shard_placements[0] = DShard(0) # type: ignore[call-overload]
return DTensor.from_local(
- tensor, parent_mesh, replicate_placements
+ tensor, parent_mesh, replicate_placements, run_check=False
).redistribute(
device_mesh=parent_mesh,
placements=shard_placements,
@@ -279,7 +279,7 @@ def _chunk_dtensor(
shard_placements[-1] = tp_placement # type: ignore[call-overload]
return DTensor.from_local(
- tensor, parent_mesh, replicate_placements
+ tensor, parent_mesh, replicate_placements, run_check=False
).redistribute(
device_mesh=parent_mesh,
placements=shard_placements,
|
2.41.0
|
3fa2421dc25c86d95d14a031b72342855635894
|
Wed, 24 Apr 2024 01:34:47 +0000
|
[PATCH 0554/1000] Get ARC jobs to run on both classic and ARC infra (#124753)
|
ARC jobs are too unstable right now. We're going to mitigate this by:
- Reverting ARC jobs to run on the classic infra (https://github.com/pytorch/pytorch/pull/124748)
- Spinning up new jobs in parallel to run on the new infra (this PR)
- Marking these ARC jobs as unstable (will be done before merging this PR)

More details in https://github.com/pytorch/ci-infra/issues/149 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124753 Approved by: https://github.com/zxiiro, https://github.com/seemethere
|
diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
index 713e454ac4..21d29d4ba4 100644
--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml
@@ -94,6 +94,7 @@ jobs:
{ config: "default", shard: 1, num_shards: 1 },
]}
+
linux-jammy-py3_10-clang15-asan-build:
name: linux-jammy-py3.10-clang15-asan
uses: ./.github/workflows/_linux-build-label.yml
@@ -111,6 +112,7 @@ jobs:
]}
sync-tag: asan-build
+
linux-jammy-py3_10-clang15-asan-test:
name: linux-jammy-py3.10-clang15-asan
uses: ./.github/workflows/_linux-test.yml
@@ -163,7 +165,6 @@ jobs:
{ config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
{ config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
]}
-
linux-focal-py3_8-clang10-test:
name: linux-focal-py3.8-clang10
uses: ./.github/workflows/_linux-test.yml
@@ -193,6 +194,7 @@ jobs:
{ config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
]}
+
linux-focal-py3_11-clang10-test:
name: linux-focal-py3.11-clang10
uses: ./.github/workflows/_linux-test.yml
diff --git a/.github/workflows/unstable.yml b/.github/workflows/unstable.yml
index a2c4a45bd8..c2076ef6e0 100644
--- a/.github/workflows/unstable.yml
+++ b/.github/workflows/unstable.yml
@@ -32,3 +32,117 @@ jobs:
echo
echo "Once the jobs are deemed stable enough (% red signal < 5% and TTS < 3h),"
echo " they can graduate and move back to pull or trunk."
+
+ #
+ # Experimental ARC jobs
+ #
+
+ linux-jammy-py3_8-gcc11-build:
+ name: linux-jammy-py3.8-gcc11
+ uses: ./.github/workflows/_linux-build-rg.yml
+ with:
+ build-environment: linux-jammy-py3.8-gcc11
+ docker-image-name: pytorch-linux-jammy-py3.8-gcc11
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "docs_test", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+ { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+ { config: "backwards_compat", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+ { config: "distributed", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
+ { config: "distributed", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
+ ]}
+
+
+ linux-jammy-py3_8-gcc11-no-ops:
+ name: linux-jammy-py3.8-gcc11-no-ops
+ uses: ./.github/workflows/_linux-build-rg.yml
+ with:
+ build-environment: linux-jammy-py3.8-gcc11-no-ops
+ docker-image-name: pytorch-linux-jammy-py3.8-gcc11
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 1 },
+ ]}
+
+ linux-jammy-py3_8-gcc11-pch:
+ name: linux-jammy-py3.8-gcc11-pch
+ uses: ./.github/workflows/_linux-build-rg.yml
+ with:
+ build-environment: linux-jammy-py3.8-gcc11-pch
+ docker-image-name: pytorch-linux-jammy-py3.8-gcc11
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 1 },
+ ]}
+
+ linux-focal-py3_8-clang10-onnx-build:
+ name: linux-focal-py3.8-clang10-onnx
+ uses: ./.github/workflows/_linux-build-rg.yml
+ with:
+ build-environment: linux-focal-py3.8-clang10-onnx
+ docker-image-name: pytorch-linux-focal-py3-clang10-onnx
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
+ { config: "default", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
+ ]}
+
+ linux-jammy-py3_10-clang15-asan-build:
+ name: linux-jammy-py3.10-clang15-asan
+ uses: ./.github/workflows/_linux-build-rg.yml
+ with:
+ build-environment: linux-jammy-py3.10-clang15-asan
+ docker-image-name: pytorch-linux-jammy-py3-clang15-asan
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 6, runner: "linux.4xlarge" },
+ { config: "default", shard: 2, num_shards: 6, runner: "linux.4xlarge" },
+ { config: "default", shard: 3, num_shards: 6, runner: "linux.4xlarge" },
+ { config: "default", shard: 4, num_shards: 6, runner: "linux.4xlarge" },
+ { config: "default", shard: 5, num_shards: 6, runner: "linux.4xlarge" },
+ { config: "default", shard: 6, num_shards: 6, runner: "linux.4xlarge" },
+ ]}
+ sync-tag: asan-build-arc
+
+ linux-focal-py3_8-clang10-build:
+ name: linux-focal-py3.8-clang10
+ uses: ./.github/workflows/_linux-build-rg.yml
+ with:
+ build-environment: linux-focal-py3.8-clang10
+ docker-image-name: pytorch-linux-focal-py3.8-clang10
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "crossref", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
+ { config: "crossref", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ ]}
+
+ linux-focal-py3_11-clang10-build:
+ name: linux-focal-py3.11-clang10
+ uses: ./.github/workflows/_linux-build-rg.yml
+ with:
+ build-environment: linux-focal-py3.11-clang10
+ docker-image-name: pytorch-linux-focal-py3.11-clang10
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "crossref", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
+ { config: "crossref", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ ]}
+
+ #
+ # End of Experimental ARC jobs
+ #
\ No newline at end of file
|
2.41.0
|
52a00eda797c42bba577497ea3741b8dba2a756
|
Wed, 24 Apr 2024 01:44:37 +0000
|
[PATCH 0555/1000] torchelastic: change monitor_interval default to 0.1 (#124692)
|
This reduces the default monitor_interval for torchelastic to 0.1s, as testing shows negligible load for common use cases. Even at the extreme of 100k processes, it is only 45.4% CPU util of a single core. Torchelastic monitor_interval only monitors the processes on a single worker, so under typical loads even for huge jobs we expect ~8 subprocesses per machine, one per GPU. As an external datapoint, Python's wait polls every 50usec-50ms (https://github.com/python/cpython/blob/main/Lib/subprocess.py#L2035).

## Motivation

This setting is used to control how frequently we poll for failed processes in elastic.
* For some jobs of note we run elastic 3 times per try, so with the default timeout of 5 seconds we should save ~15 seconds per retry.
* @kiukchung's use case: apparently this is annoying in notebooks etc. since it adds delay to shutdown when testing things.

## Results

This is measured in cores (100% is a single core under full load).

| monitor_interval (s) | nproc-per-node | CPU util (highest observed) |
| -------------------- | -------------- | --------------------------- |
| 1.0 | 10 | 0.2% |
| 0.1 | 1 | 0.4% |
| 0.1 | 10 | 0.4% |
| 0.01 | 10 | 0.9% |
| 0.001 | 10 | 4.0% |
| 0.1 | 100 | 0.5% |
| 0.1 | 1000 | 2.2% |
| 0.1 | 10000 | 15.7% |
| 0.1 | 100000 | 45.4% |

## Methodology

```sh
# run command
$ LOGLEVEL=INFO torchrun --nnodes 1 --nproc-per-node 10 --monitor-interval 0.1 ~/wait.py

# wait a few seconds for all processes to start and reach steady state, then run;
# wait ~30s or 3 prints and take the highest
$ top -b -d 10 -c | rg 'torchrun.*wait'
```

wait.py
```py
import time
time.sleep(10*60)
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124692 Approved by: https://github.com/kiukchung, https://github.com/kurman
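For reference, a minimal sketch of setting the interval explicitly from Python if the old behavior is wanted (field names as in the diff below; other LaunchConfig values are illustrative):
```python
# Minimal sketch: restore the previous 5s polling interval via LaunchConfig.
from torch.distributed.launcher.api import LaunchConfig, elastic_launch

config = LaunchConfig(
    min_nodes=1,
    max_nodes=1,
    nproc_per_node=8,
    monitor_interval=5.0,  # default is now 0.1 after this change
)
# elastic_launch(config, entrypoint)(*args) would then use the slower polling loop.
```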
|
diff --git a/test/distributed/elastic/agent/server/test/api_test.py b/test/distributed/elastic/agent/server/test/api_test.py
index b0d64f9853..e57b7b9fcb 100644
--- a/test/distributed/elastic/agent/server/test/api_test.py
+++ b/test/distributed/elastic/agent/server/test/api_test.py
@@ -54,7 +54,7 @@ class WorkerGroupTest(unittest.TestCase):
args=(),
rdzv_handler=None,
max_restarts=50,
- monitor_interval=1,
+ monitor_interval=0.1,
)
worker_group = WorkerGroup(spec)
@@ -157,7 +157,7 @@ class SimpleElasticAgentTest(unittest.TestCase):
def _get_worker_spec(
self,
max_restarts=1,
- monitor_interval=1.0,
+ monitor_interval=0.1,
role="test_trainer",
local_world_size=8,
local_addr=None,
diff --git a/test/distributed/launcher/api_test.py b/test/distributed/launcher/api_test.py
index 81e9320d1f..38e3bc305f 100644
--- a/test/distributed/launcher/api_test.py
+++ b/test/distributed/launcher/api_test.py
@@ -70,7 +70,7 @@ def get_test_launch_config(
nproc_per_node=nproc_per_node,
run_id=run_id,
rdzv_endpoint=rdzv_endpoint,
- monitor_interval=1,
+ monitor_interval=0.1,
rdzv_backend=rdzv_backend,
start_method="spawn",
max_restarts=0,
diff --git a/torch/distributed/elastic/agent/server/api.py b/torch/distributed/elastic/agent/server/api.py
index 4ebfc59523..dd20703ced 100644
--- a/torch/distributed/elastic/agent/server/api.py
+++ b/torch/distributed/elastic/agent/server/api.py
@@ -85,7 +85,7 @@ class WorkerSpec:
entrypoint: Union[Callable, str, None] = None
args: Tuple = ()
max_restarts: int = 3
- monitor_interval: float = 30.0
+ monitor_interval: float = 0.1
master_port: Optional[int] = None
master_addr: Optional[str] = None
local_addr: Optional[str] = None
diff --git a/torch/distributed/launcher/api.py b/torch/distributed/launcher/api.py
index f2b4aca644..20de0a0327 100644
--- a/torch/distributed/launcher/api.py
+++ b/torch/distributed/launcher/api.py
@@ -75,7 +75,7 @@ class LaunchConfig:
rdzv_configs: Dict[str, Any] = field(default_factory=dict)
rdzv_timeout: int = -1
max_restarts: int = 3
- monitor_interval: float = 30
+ monitor_interval: float = 0.1
start_method: str = "spawn"
log_line_prefix_template: Optional[str] = None
metrics_cfg: Dict[str, str] = field(default_factory=dict)
diff --git a/torch/distributed/run.py b/torch/distributed/run.py
index 3352111068..98917a667e 100644
--- a/torch/distributed/run.py
+++ b/torch/distributed/run.py
@@ -499,7 +499,7 @@ def get_args_parser() -> ArgumentParser:
"--monitor_interval",
action=env,
type=float,
- default=5,
+ default=0.1,
help="Interval, in seconds, to monitor the state of workers.",
)
parser.add_argument(
|
2.41.0
|
0f7452e315c712c15f4d21925122a717a2c32a2
|
Wed, 24 Apr 2024 02:18:14 +0000
|
[PATCH 0556/1000] Do not propogate (#124769)
|
Fix occurrences of the "propogate" typo (should be "propagate"). Pull Request resolved: https://github.com/pytorch/pytorch/pull/124769 Approved by: https://github.com/Skylion007
|
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 20cf8be304..7a99b1f31e 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -8612,7 +8612,7 @@ class CommonTemplate:
self.common(fn, (torch.ones(1, 1, 13, dtype=dtype),))
@unittest.skipIf(not HAS_CPU or not RUN_CPU, "requires C++ compiler")
- def test_data_type_propogation(self):
+ def test_data_type_propagation(self):
from torch._dynamo.utils import detect_fake_mode
from torch._inductor.codegen.common import boolean_ops
from torch._inductor.compile_fx import _shape_env_from_inputs
diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index 674c131d42..256e16b68d 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -229,12 +229,12 @@ class DataTypePropagation:
if len(input_nodes) == 0:
return None
- all_input_nodes_propogated = all(
+ all_input_nodes_propagated = all(
OptimizationContext.key in n.meta
and n.meta[OptimizationContext.key].dtype is not None
for n in input_nodes
)
- if not all_input_nodes_propogated:
+ if not all_input_nodes_propagated:
return None
return functools.reduce(
diff --git a/torch/ao/pruning/_experimental/pruner/prune_functions.py b/torch/ao/pruning/_experimental/pruner/prune_functions.py
index a75c09cc30..2b16d4b327 100644
--- a/torch/ao/pruning/_experimental/pruner/prune_functions.py
+++ b/torch/ao/pruning/_experimental/pruner/prune_functions.py
@@ -84,7 +84,7 @@ def _prune_module_bias(module: nn.Module, mask: Tensor) -> None:
delattr(module, "_bias")
-def _propogate_module_bias(module: nn.Module, mask: Tensor) -> Optional[Tensor]:
+def _propagate_module_bias(module: nn.Module, mask: Tensor) -> Optional[Tensor]:
r"""
In the case that we need to propagate biases, this function will return the biases we need
"""
@@ -143,7 +143,7 @@ def prune_linear_activation_linear(
if getattr(linear1, "prune_bias", False):
_prune_module_bias(linear1, mask)
else:
- pruned_biases = _propogate_module_bias(linear1, mask)
+ pruned_biases = _propagate_module_bias(linear1, mask)
if pruned_biases is not None:
if activation:
pruned_biases = activation(pruned_biases)
@@ -251,7 +251,7 @@ def prune_conv2d_activation_conv2d(
if prune_bias:
_prune_module_bias(conv2d_1, mask)
else:
- pruned_biases = _propogate_module_bias(conv2d_1, mask)
+ pruned_biases = _propagate_module_bias(conv2d_1, mask)
if pruned_biases is not None:
if activation:
pruned_biases = activation(pruned_biases)
@@ -335,7 +335,7 @@ def prune_conv2d_pool_flatten_linear(
if getattr(conv2d, "prune_bias", False):
_prune_module_bias(conv2d, mask)
else:
- pruned_biases = cast(Tensor, _propogate_module_bias(conv2d, mask))
+ pruned_biases = cast(Tensor, _propagate_module_bias(conv2d, mask))
flattened_pruned_biases = torch.tensor(
[[bias] * flatten_scale for bias in pruned_biases], device=mask.device
).flatten()
diff --git a/torch/csrc/jit/frontend/ir_emitter.cpp b/torch/csrc/jit/frontend/ir_emitter.cpp
index 855744ff60..989a6eaf2d 100644
--- a/torch/csrc/jit/frontend/ir_emitter.cpp
+++ b/torch/csrc/jit/frontend/ir_emitter.cpp
@@ -5685,7 +5685,7 @@ void runCleanupPasses(std::shared_ptr<Graph>& to_clean) {
// successive runs of immutable constant prop does not change the graph
ConstantPropagationImmutableTypes(to_clean);
- // Constant Pooling pass must be after ConstantPropogation, which can create
+ // Constant Pooling pass must be after ConstantPropagation, which can create
// new constants that needs to be pooled.
ConstantPooling(to_clean);
diff --git a/torch/distributed/_spmd/distribute.py b/torch/distributed/_spmd/distribute.py
index 771b064b57..d0d7aba314 100644
--- a/torch/distributed/_spmd/distribute.py
+++ b/torch/distributed/_spmd/distribute.py
@@ -350,7 +350,7 @@ FACTORY_SYM_INT_CONSUMERS: Dict[torch._ops.OpOverload, Callable] = {
}
-# Dispatch override for factory ops, as DTensor cannot propogate sharding spec
+# Dispatch override for factory ops, as DTensor cannot propagate sharding spec
# without DTensor inputs.
FACTORY_OPS: Dict[torch._ops.OpOverload, Callable] = {
aten.scalar_tensor.default: default_factory_op_rule,
|
2.41.0
|
e7b8ff116c10d4d3cdf201e62802675042fda5f
|
Wed, 24 Apr 2024 02:29:28 +0000
|
[PATCH 0557/1000] [ROCm] Fix Int_mm() Integration with hipblasLT (#122431)
|
The PR
- fixes int_mm()/int8_gemm() integration with the hipblasLT backend (requires ROCm 6.0).
- enables/fixes the following tests on ROCm:
  - test__int_mm_k_16_n_16_use_transpose_a_False_use_transpose_b_False_cuda
  - test__int_mm_k_16_n_16_use_transpose_a_False_use_transpose_b_True_cuda
  - test__int_mm_k_16_n_16_use_transpose_a_True_use_transpose_b_False_cuda
  - test__int_mm_k_16_n_16_use_transpose_a_True_use_transpose_b_True_cuda
  - test__int_mm_k_16_n_32_use_transpose_a_False_use_transpose_b_False_cuda
  - test__int_mm_k_16_n_32_use_transpose_a_False_use_transpose_b_True_cuda
  - test__int_mm_k_16_n_32_use_transpose_a_True_use_transpose_b_False_cuda
  - test__int_mm_k_16_n_32_use_transpose_a_True_use_transpose_b_True_cuda
  - test__int_mm_k_32_n_16_use_transpose_a_False_use_transpose_b_False_cuda
  - test__int_mm_k_32_n_16_use_transpose_a_False_use_transpose_b_True_cuda
  - test__int_mm_k_32_n_16_use_transpose_a_True_use_transpose_b_False_cuda
  - test__int_mm_k_32_n_16_use_transpose_a_True_use_transpose_b_True_cuda
  - test__int_mm_k_32_n_32_use_transpose_a_False_use_transpose_b_False_cuda
  - test__int_mm_k_32_n_32_use_transpose_a_False_use_transpose_b_True_cuda
  - test__int_mm_k_32_n_32_use_transpose_a_True_use_transpose_b_False_cuda
  - test__int_mm_k_32_n_32_use_transpose_a_True_use_transpose_b_True_cuda

Pull Request resolved: https://github.com/pytorch/pytorch/pull/122431 Approved by: https://github.com/pruthvistony, https://github.com/jithunnair-amd, https://github.com/malfet, https://github.com/atalman
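A minimal sketch of the op these tests cover (shapes follow the m=17, k=32, n=16 case in the test; assumes a ROCm >= 6.0 or recent CUDA build):
```python
# Minimal sketch: int8 x int8 -> int32 GEMM via torch._int_mm, now routed through
# hipblasLt on ROCm >= 6.0.
import torch

a = torch.randint(-128, 127, (17, 32), dtype=torch.int8, device="cuda")
b = torch.randint(-128, 127, (32, 16), dtype=torch.int8, device="cuda")
c = torch._int_mm(a, b)  # int32 result

# Reference computed on CPU, where integer matmul is supported directly.
ref = a.cpu().to(torch.int32) @ b.cpu().to(torch.int32)
assert torch.equal(c.cpu(), ref)
```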
|
diff --git a/aten/src/ATen/cuda/CUDABlas.cpp b/aten/src/ATen/cuda/CUDABlas.cpp
index f9ac77b53e..c211092c49 100644
--- a/aten/src/ATen/cuda/CUDABlas.cpp
+++ b/aten/src/ATen/cuda/CUDABlas.cpp
@@ -1655,11 +1655,34 @@ void int8_gemm(
CuBlasLtMatrixLayout Bdesc(abType, k, n, mat2_ld, transpose_mat2);
CuBlasLtMatrixLayout Cdesc(cType, m, n, result_ld);
- cublasLtHandle_t ltHandle = at::cuda::getCurrentCUDABlasLtHandle();
-
// cublas team: alpha and beta need to be the same dtype as of scaleType
at::opmath_type<int32_t> alpha_val = 1;
int32_t beta_val = 0;
+ cublasLtHandle_t ltHandle = at::cuda::getCurrentCUDABlasLtHandle();
+
+#ifdef USE_ROCM
+ CuBlasLtMatmulPreference preference;
+ size_t workspaceSize = _getWorkspaceSize();
+ preference.setAttribute(CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, workspaceSize);
+ auto& allocator = *::c10::cuda::CUDACachingAllocator::get();
+ auto workspace = allocator.allocate(workspaceSize);
+ cublasLtMatmulHeuristicResult_t heuristicResult = {};
+ int returnedResult = 0;
+ TORCH_CUDABLAS_CHECK(cublasLtMatmulAlgoGetHeuristic(
+ ltHandle,
+ computeDesc.descriptor(),
+ Adesc.descriptor(),
+ Bdesc.descriptor(),
+ Cdesc.descriptor(),
+ Cdesc.descriptor(),
+ preference.descriptor(),
+ 1,
+ &heuristicResult,
+ &returnedResult));
+ if (returnedResult == 0) {
+ TORCH_CUDABLAS_CHECK(CUBLAS_STATUS_NOT_SUPPORTED);
+ }
+#endif
cublasStatus_t cublasStatus = cublasLtMatmul(
ltHandle,
@@ -1674,9 +1697,21 @@ void int8_gemm(
Cdesc.descriptor(),
result_ptr,
Cdesc.descriptor(),
+#ifdef USE_ROCM
+ &heuristicResult.algo,
+#else
nullptr, // Heuristics don't seem to work for int8
+#endif
+#ifdef USE_ROCM
+ workspace.mutable_get(),
+#else
nullptr, // Non-zero workspace doesn't seem to work.
+#endif
+#ifdef USE_ROCM
+ workspaceSize,
+#else
0,
+#endif
at::cuda::getCurrentCUDAStream());
TORCH_CHECK(
cublasStatus == CUBLAS_STATUS_SUCCESS,
diff --git a/aten/src/ATen/native/cuda/Blas.cpp b/aten/src/ATen/native/cuda/Blas.cpp
index 7195f939f7..060eb7408b 100644
--- a/aten/src/ATen/native/cuda/Blas.cpp
+++ b/aten/src/ATen/native/cuda/Blas.cpp
@@ -747,7 +747,7 @@ Tensor& _int_mm_out_cuda(const Tensor& self, const Tensor& mat2, Tensor& result)
TORCH_CHECK(result.is_contiguous(), "Expected result to be contiguous.");
-#if !defined(USE_ROCM) && !defined(_MSC_VER) && defined(CUDA_VERSION) && CUDA_VERSION >= 11070
+#if (!defined(USE_ROCM) && !defined(_MSC_VER) && defined(CUDA_VERSION) && CUDA_VERSION >= 11070) || (defined(USE_ROCM) && ROCM_VERSION >= 60000)
cublasCommonArgs args(self, mat2, result);
at::cuda::blas::int8_gemm(
diff --git a/test/test_linalg.py b/test/test_linalg.py
index 5053b5e8fe..e22dabcf56 100644
--- a/test/test_linalg.py
+++ b/test/test_linalg.py
@@ -5794,7 +5794,7 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
self.assertEqual(c, cpu_result)
@unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
- @unittest.skipIf(SM90OrLater, "Expected failure on sm90")
+ @unittest.skipIf(SM90OrLater and not TEST_WITH_ROCM, "Expected failure on sm90")
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
@onlyCUDA
@parametrize("k", [16, 32])
@@ -5802,9 +5802,6 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
@parametrize("use_transpose_a", [True, False])
@parametrize("use_transpose_b", [True, False])
def test__int_mm(self, device, k, n, use_transpose_a, use_transpose_b):
- if TEST_WITH_ROCM:
- self.skipTest("_int_mm not compiled for ROCM")
-
def genf_int_float(x, y, use_transpose):
if use_transpose:
x, y = y, x
@@ -5837,7 +5834,10 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
SM80OrLater = torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0)
SM70 = torch.cuda.is_available() and torch.cuda.get_device_capability() == (7, 0)
SM75 = torch.cuda.is_available() and torch.cuda.get_device_capability() == (7, 5)
- if version >= (11, 7):
+
+ if TEST_WITH_ROCM:
+ _test(17, k, n, use_transpose_a, use_transpose_b, True)
+ elif version >= (11, 7):
if not use_transpose_a and use_transpose_b:
if SM80OrLater or (version >= (12, 3) and (SM70 or SM75)):
_test(17, k, n, use_transpose_a, use_transpose_b, version > (11, 7))
|
2.41.0
|
dded148d0766d186df0bcaecc9bec7b889a9031
|
Wed, 24 Apr 2024 02:55:12 +0000
|
[PATCH 0558/1000] Fix test_extension_backend on non-AVX systems (#117272)
|
The test checks for the substring "loadu" in generated code. On AVX systems that line is:
> auto tmp0 = at::vec::Vectorized<float>::loadu(in_ptr0 + static_cast<long>(i0))

however on non-AVX systems it is:
> auto tmp0 = in_ptr0[static_cast<long>(i0)];

The difference depends on `codecache.valid_vec_isa_list()` being non-empty; see torch/_inductor/codegen/cpp.py:2639. Modify the test to account for that. Pull Request resolved: https://github.com/pytorch/pytorch/pull/117272 Approved by: https://github.com/jgong5, https://github.com/jansel
|
diff --git a/test/inductor/test_extension_backend.py b/test/inductor/test_extension_backend.py
index f453284a23..7bb531d980 100644
--- a/test/inductor/test_extension_backend.py
+++ b/test/inductor/test_extension_backend.py
@@ -23,7 +23,7 @@ except ImportError:
)
import torch._inductor.config as config
-from torch._inductor import metrics
+from torch._inductor import codecache, metrics
from torch._inductor.codegen import cpp
from torch._inductor.codegen.common import (
get_scheduling_for_device,
@@ -146,9 +146,13 @@ class ExtensionBackendTests(TestCase):
metrics.reset()
opt_fn = torch.compile()(fn)
_, code = run_and_get_cpp_code(opt_fn, x, y, z)
- FileCheck().check("void").check("loadu").check("extension_device").run(
- code
- )
+ if codecache.valid_vec_isa_list():
+ load_expr = "loadu"
+ else:
+ load_expr = " = in_ptr0[static_cast<long>(i0)];"
+ FileCheck().check("void").check(load_expr).check(
+ "extension_device"
+ ).run(code)
opt_fn(x, y, z)
res = opt_fn(x, y, z)
self.assertEqual(ref, res.to(device="cpu"))
|
2.41.0
|
ee924d173c9bc48ac640507b8dfd2c3af481a3d
|
Wed, 24 Apr 2024 03:13:38 +0000
|
[PATCH 0559/1000] Enable test config selection when doing workflow dispatch (#124795)
|
Fixes https://github.com/pytorch/test-infra/issues/4468

This is done by updating the filter config script to accept a list of test configs coming from workflow dispatch. For example, having `inductor_huggingface_perf,inductor_timm_perf,inductor_torchbench_perf` will benchmark all 3 datasets, while having `inductor_torchbench_perf` will only run TorchBench. This is exposed via a new string workflow dispatch parameter called `benchmark_configs`. Note that GH limits the maximum number of workflow dispatch parameters to 10, so I need to consolidate `training` and `inference` into `training_and_inference` to squeeze the new parameter into the list.

### Testing

Run the script manually and confirm that the filtered list of test configs is correct. Also manually dispatch the job with the new parameter https://github.com/pytorch/pytorch/actions/runs/8808159905 and only the selected `inductor_huggingface_perf` is kept https://github.com/pytorch/pytorch/actions/runs/8808159905/job/24176683708#step:11:128

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124795 Approved by: https://github.com/clee2000
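A minimal sketch of what the new filter keeps (mirrors the unit test added below; assumes the script's directory, .github/scripts, is on the import path):
```python
# Minimal sketch: keep only the test configs selected via workflow dispatch.
from filter_test_configs import filter_selected_test_configs

test_matrix = {
    "include": [
        {"config": "inductor_huggingface_perf"},
        {"config": "inductor_timm_perf"},
        {"config": "inductor_torchbench_perf"},
    ]
}
selected = {"inductor_torchbench_perf"}

filtered = filter_selected_test_configs(test_matrix, selected)
# filtered == {"include": [{"config": "inductor_torchbench_perf"}]}
# An empty `selected` set would keep the whole matrix unchanged.
```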
|
diff --git a/.github/actions/filter-test-configs/action.yml b/.github/actions/filter-test-configs/action.yml
index 77a0c50c11..e1f2067d58 100644
--- a/.github/actions/filter-test-configs/action.yml
+++ b/.github/actions/filter-test-configs/action.yml
@@ -13,6 +13,13 @@ inputs:
required: true
type: string
description: JSON description of what test configs to run.
+ selected-test-configs:
+ required: false
+ type: string
+ description: |
+ A comma-separated list of test configurations from the test matrix to keep,
+ The empty list means we are going to keep every configurations by defaults
+ default: ""
job-name:
type: string
required: false
@@ -126,6 +133,7 @@ runs:
--workflow "${GITHUB_WORKFLOW}" \
--job-name "${JOB_NAME}" \
--test-matrix "${{ inputs.test-matrix }}" \
+ --selected-test-configs "${{ inputs.selected-test-configs }}" \
--pr-number "${PR_NUMBER}" \
--tag "${TAG}" \
--event-name "${EVENT_NAME}" \
diff --git a/.github/scripts/filter_test_configs.py b/.github/scripts/filter_test_configs.py
index ebeccaeb16..c2e45bac81 100755
--- a/.github/scripts/filter_test_configs.py
+++ b/.github/scripts/filter_test_configs.py
@@ -66,6 +66,12 @@ def parse_args() -> Any:
parser.add_argument(
"--test-matrix", type=str, required=True, help="the original test matrix"
)
+ parser.add_argument(
+ "--selected-test-configs",
+ type=str,
+ default="",
+ help="a comma-separated list of test configurations from the test matrix to keep",
+ )
parser.add_argument(
"--workflow", type=str, help="the name of the current workflow, i.e. pull"
)
@@ -177,6 +183,28 @@ def filter(test_matrix: Dict[str, List[Any]], labels: Set[str]) -> Dict[str, Lis
return filtered_test_matrix
+def filter_selected_test_configs(
+ test_matrix: Dict[str, List[Any]], selected_test_configs: Set[str]
+) -> Dict[str, List[Any]]:
+ """
+ Keep only the selected configs if the list if not empty. Otherwise, keep all test configs.
+ This filter is used when the workflow is dispatched manually.
+ """
+ if not selected_test_configs:
+ return test_matrix
+
+ filtered_test_matrix: Dict[str, List[Any]] = {"include": []}
+ for entry in test_matrix.get("include", []):
+ config_name = entry.get("config", "")
+ if not config_name:
+ continue
+
+ if config_name in selected_test_configs:
+ filtered_test_matrix["include"].append(entry)
+
+ return filtered_test_matrix
+
+
def set_periodic_modes(
test_matrix: Dict[str, List[Any]], job_name: Optional[str]
) -> Dict[str, List[Any]]:
@@ -558,6 +586,16 @@ def main() -> None:
# No PR number, no tag, we can just return the test matrix as it is
filtered_test_matrix = test_matrix
+ if args.selected_test_configs:
+ selected_test_configs = {
+ v.strip().lower()
+ for v in args.selected_test_configs.split(",")
+ if v.strip()
+ }
+ filtered_test_matrix = filter_selected_test_configs(
+ filtered_test_matrix, selected_test_configs
+ )
+
if args.event_name == "schedule" and args.schedule == "29 8 * * *":
# we don't want to run the mem leak check or disabled tests on normal
# periodically scheduled jobs, only the ones at this time
diff --git a/.github/scripts/test_filter_test_configs.py b/.github/scripts/test_filter_test_configs.py
index 163c84795d..2f73d022c3 100755
--- a/.github/scripts/test_filter_test_configs.py
+++ b/.github/scripts/test_filter_test_configs.py
@@ -9,6 +9,7 @@ from unittest import main, mock, TestCase
import yaml
from filter_test_configs import (
filter,
+ filter_selected_test_configs,
get_labels,
mark_unstable_jobs,
parse_reenabled_issues,
@@ -315,6 +316,51 @@ class TestConfigFilter(TestCase):
)
self.assertEqual(case["expected"], json.dumps(filtered_test_matrix))
+ def test_filter_selected_test_configs(self) -> None:
+ testcases = [
+ {
+ "test_matrix": '{include: [{config: "default"}]}',
+ "selected_test_configs": "",
+ "expected": '{"include": [{"config": "default"}]}',
+ "description": "No selected test configs",
+ },
+ {
+ "test_matrix": '{include: [{config: "default"}]}',
+ "selected_test_configs": "foo",
+ "expected": '{"include": []}',
+ "description": "A different test config is selected",
+ },
+ {
+ "test_matrix": '{include: [{config: "default"}]}',
+ "selected_test_configs": "foo, bar",
+ "expected": '{"include": []}',
+ "description": "A different set of test configs is selected",
+ },
+ {
+ "test_matrix": '{include: [{config: "default"}]}',
+ "selected_test_configs": "foo, bar,default",
+ "expected": '{"include": [{"config": "default"}]}',
+ "description": "One of the test config is selected",
+ },
+ {
+ "test_matrix": '{include: [{config: "default"}, {config: "bar"}]}',
+ "selected_test_configs": "foo, bar,Default",
+ "expected": '{"include": [{"config": "default"}, {"config": "bar"}]}',
+ "description": "Several test configs are selected",
+ },
+ ]
+
+ for case in testcases:
+ selected_test_configs = {
+ v.strip().lower()
+ for v in case["selected_test_configs"].split(",")
+ if v.strip()
+ }
+ filtered_test_matrix = filter_selected_test_configs(
+ yaml.safe_load(case["test_matrix"]), selected_test_configs
+ )
+ self.assertEqual(case["expected"], json.dumps(filtered_test_matrix))
+
def test_set_periodic_modes(self) -> None:
testcases: List[Dict[str, str]] = [
{
diff --git a/.github/workflows/_linux-build.yml b/.github/workflows/_linux-build.yml
index 7edda580f5..c3bcb0d888 100644
--- a/.github/workflows/_linux-build.yml
+++ b/.github/workflows/_linux-build.yml
@@ -47,13 +47,20 @@ on:
An option JSON description of what test configs to run later on. This
is moved here from the Linux test workflow so that we can apply filter
logic using test-config labels earlier and skip unnecessary builds
+ selected-test-configs:
+ description: |
+ A comma-separated list of test configurations from the test matrix to keep,
+ The empty list means we are going to keep every configurations by defaults
+ required: false
+ type: string
+ default: ""
s3-bucket:
description: S3 bucket to download artifact
required: false
type: string
default: "gha-artifacts"
aws-role-to-assume:
- description: role to assume for downloading artifacts
+ description: Role to assume for downloading artifacts
required: false
type: string
default: ""
@@ -143,6 +150,7 @@ jobs:
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
test-matrix: ${{ inputs.test-matrix }}
+ selected-test-configs: ${{ inputs.selected-test-configs }}
job-name: ${{ steps.get-job-id.outputs.job-name }}
- name: Download pytest cache
diff --git a/.github/workflows/inductor-perf-test-nightly.yml b/.github/workflows/inductor-perf-test-nightly.yml
index 417646d5f7..16dd92f553 100644
--- a/.github/workflows/inductor-perf-test-nightly.yml
+++ b/.github/workflows/inductor-perf-test-nightly.yml
@@ -8,16 +8,11 @@ on:
# out, let try to run torchao cudagraphs_low_precision as part of cudagraphs
workflow_dispatch:
inputs:
- training:
- description: Run training?
+ training_and_inference:
+ description: Run training and inference?
required: false
- type: boolean
- default: true
- inference:
- description: Run inference?
- required: false
- type: boolean
- default: false
+ type: string
+ default: training-true-inference-false
default:
description: Run inductor_default?
required: false
@@ -58,6 +53,11 @@ on:
required: false
type: boolean
default: false
+ benchmark_configs:
+ description: The list of configs used the benchmark
+ required: false
+ type: string
+ default: inductor_huggingface_perf,inductor_timm_perf,inductor_torchbench_perf
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
@@ -88,6 +88,7 @@ jobs:
{ config: "inductor_torchbench_perf", shard: 3, num_shards: 4, runner: "linux.gcp.a100.large" },
{ config: "inductor_torchbench_perf", shard: 4, num_shards: 4, runner: "linux.gcp.a100.large" },
]}
+ selected-test-configs: ${{ inputs.benchmark_configs }}
secrets:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
@@ -128,7 +129,7 @@ jobs:
if: github.event_name == 'workflow_dispatch'
with:
build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- dashboard-tag: training-${{ inputs.training }}-inference-${{ inputs.inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-${{ inputs.cppwrapper }}-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }}
+ dashboard-tag: ${{ inputs.training_and_inference }}-default-${{ inputs.default }}-dynamic-${{ inputs.dynamic }}-cudagraphs-${{ inputs.cudagraphs }}-cppwrapper-${{ inputs.cppwrapper }}-aotinductor-${{ inputs.aotinductor }}-maxautotune-${{ inputs.maxautotune }}-freezing_cudagraphs-${{ inputs.freezing_cudagraphs }}-cudagraphs_low_precision-${{ inputs.cudagraphs }}
docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build.outputs.test-matrix }}
use-gha: anything-non-empty-to-use-gha
|
2.41.0
|
91f83f18139a6ed2626c30979a38e533d4c7d7c
|
Wed, 24 Apr 2024 04:23:53 +0000
|
[PATCH 0560/1000] [cudagraph] add config for cudagraph managed input mutation support (#124754)
|
Summary: [#123231](https://github.com/pytorch/pytorch/pull/123231) adds cudagraph support for more types of functions (i.e., cudagraph-managed input mutation). These newly supported functions may have mutated static inputs, leading to assertion errors in some workloads which previously skipped cudagraphs. This diff adds a config to opt in to the new feature. Test Plan: ci Differential Revision: D56481353 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124754 Approved by: https://github.com/eellison
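A minimal sketch of opting in (config names as used in the tests below; the compiled function is illustrative):
```python
# Minimal sketch: enable cudagraph support for mutated cudagraph-managed inputs,
# which stays off by default after this change.
import torch

torch._dynamo.config.cudagraph_backend_keep_input_mutation = True
torch._dynamo.config.cudagraph_backend_support_input_mutation = True
torch._inductor.config.triton.cudagraph_support_input_mutation = True

@torch.compile(mode="reduce-overhead")  # reduce-overhead turns on cudagraph trees
def mut(x):
    x.add_(2)  # mutation on a cudagraph-managed input, no longer a skip reason
    return x
```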
|
diff --git a/test/inductor/test_cudagraph_trees.py b/test/inductor/test_cudagraph_trees.py
index cc701337c7..f80c610829 100644
--- a/test/inductor/test_cudagraph_trees.py
+++ b/test/inductor/test_cudagraph_trees.py
@@ -294,6 +294,8 @@ if HAS_CUDA and not TEST_WITH_ASAN:
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
+ @torch._dynamo.config.patch("cudagraph_backend_support_input_mutation", True)
+ @torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
def test_mutation_on_inp(self, backend):
def foo(x):
x.add_(2)
@@ -339,6 +341,38 @@ if HAS_CUDA and not TEST_WITH_ASAN:
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
+ @torch._dynamo.config.patch("cudagraph_backend_support_input_mutation", False)
+ @torch._inductor.config.patch("triton.cudagraph_support_input_mutation", False)
+ def test_mutation_cudagraph_managed_tensors_config(self, backend):
+ def foo(x):
+ return x + 1
+
+ def mut(x):
+ x.add_(2)
+ return x
+
+ def non_mut(x):
+ return x.add(2)
+
+ mut = get_compile_fn(backend)(mut)
+ foo = get_compile_fn(backend)(foo)
+
+ with capture_stderr() as captured_output:
+ for i in range(3):
+ torch.compiler.cudagraph_mark_step_begin()
+ inp = torch.rand([4], device="cuda")
+
+ tmp = foo(inp)
+ mut_out = mut(tmp)
+ self.assertEqual(mut_out, non_mut(foo(inp)))
+ FileCheck().check_count(
+ "skipping cudagraphs due to mutation on input.", 1, exactly=True
+ ).run(captured_output[0])
+
+ @parametrize("backend", ("inductor", "cudagraphs"))
+ @torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
+ @torch._dynamo.config.patch("cudagraph_backend_support_input_mutation", True)
+ @torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
def test_mutation_cudagraph_managed_tensors(self, backend):
def foo(x):
return x + 1
@@ -380,6 +414,8 @@ if HAS_CUDA and not TEST_WITH_ASAN:
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
+ @torch._dynamo.config.patch("cudagraph_backend_support_input_mutation", True)
+ @torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
def test_mutation_cudagraph_managed_tensor_warn(self, backend):
def foo(x):
return x.add_(1)
@@ -403,6 +439,8 @@ if HAS_CUDA and not TEST_WITH_ASAN:
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
+ @torch._dynamo.config.patch("cudagraph_backend_support_input_mutation", True)
+ @torch._inductor.config.patch("triton.cudagraph_support_input_mutation", True)
def test_mutation_cudagraph_managed_tensor_warn_only_once(self, backend):
def foo(x):
return x + 1
diff --git a/torch/_dynamo/backends/cudagraphs.py b/torch/_dynamo/backends/cudagraphs.py
index 41de419dc8..ee89b79690 100644
--- a/torch/_dynamo/backends/cudagraphs.py
+++ b/torch/_dynamo/backends/cudagraphs.py
@@ -6,11 +6,13 @@ from collections import defaultdict
from typing import Dict, List, Optional
import torch
+from torch._dynamo import config
from torch._dynamo.backends.common import aot_autograd
from torch._dynamo.backends.debugging import boxed_nop
from torch._inductor.cudagraph_utils import (
BoxedDeviceIndex,
check_multiple_devices_or_any_cpu_nodes,
+ get_mutation_stack_trace,
get_placeholders,
)
from torch._inductor.utils import (
@@ -74,7 +76,24 @@ def get_device_node_mapping(gm: torch.fx.GraphModule):
return device_node_mapping
+def check_for_mutation_ignore_cuda_graph_managed_tensor(
+ aot_model: torch.fx.GraphModule, num_fixed
+) -> Optional[str]:
+ mutation_indices = find_input_mutations(aot_model.graph) - set(range(num_fixed))
+ if not mutation_indices:
+ return None
+
+ placeholders = [node for node in aot_model.graph.nodes if node.op == "placeholder"]
+ return get_mutation_stack_trace(placeholders, mutation_indices)
+
+
def check_for_skip(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]:
+ if not config.cudagraph_backend_support_input_mutation:
+ if mut_skip := check_for_mutation_ignore_cuda_graph_managed_tensor(
+ aot_model, num_fixed
+ ):
+ return mut_skip
+
if skip := check_multiple_devices_or_any_cpu_nodes(
get_device_node_mapping(aot_model)
):
diff --git a/torch/_dynamo/config.py b/torch/_dynamo/config.py
index 9482cfabcc..1360782db1 100644
--- a/torch/_dynamo/config.py
+++ b/torch/_dynamo/config.py
@@ -384,6 +384,9 @@ _save_config_ignore = {
# can prevent cudagraphing.
cudagraph_backend_keep_input_mutation = False
+# enable cudagraph support for mutated inputs from prior cudagraph pool
+cudagraph_backend_support_input_mutation = False
+
# When True, only ops that have the torch.Tag.pt2_compliant tag
# will be allowed into the graph; all other ops will be disallowed
# and will fall back to eager-mode PyTorch. Useful to ensure
diff --git a/torch/_inductor/compile_fx.py b/torch/_inductor/compile_fx.py
index a95ed08450..c99d15a86f 100644
--- a/torch/_inductor/compile_fx.py
+++ b/torch/_inductor/compile_fx.py
@@ -513,7 +513,25 @@ def compile_fx_inner(
if isinstance(t, torch.Tensor)
)
+ if not config.triton.cudagraph_support_input_mutation:
+ # Skip supports for cudagraph-managed tensors
+ from torch._inductor.cudagraph_utils import (
+ check_for_mutation_ignore_cuda_graph_managed_tensor,
+ )
+
+ has_mutation_str = check_for_mutation_ignore_cuda_graph_managed_tensor(
+ gm, compiled_graph, num_fixed
+ )
+ has_mutation = has_mutation_str is not None
+
+ if has_mutation:
+ compiled_graph.disabled_cudagraphs_reason = has_mutation_str
+ else:
+ # Check mutation later to support cudagraph-managed tensors
+ has_mutation = None
+
cudagraph_tests = [
+ (not has_mutation, "mutated inputs"),
(not has_incompatible_cudagraph_ops(gm), "incompatible ops"),
(not complex_memory_overlap_inputs, "complex memory overlap"),
(
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index f31a03a365..b26f6448b1 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -603,6 +603,9 @@ class triton:
# TODO - need to debug why this prevents cleanup
cudagraph_trees_history_recording = False
+ # Enable cudagraph support for mutated inputs from prior cudagraph pool
+ cudagraph_support_input_mutation = False
+
# synchronize after cudagraph invocation
force_cudagraph_sync = False
diff --git a/torch/_inductor/cudagraph_utils.py b/torch/_inductor/cudagraph_utils.py
index 0d79b88b40..e897096f4e 100644
--- a/torch/_inductor/cudagraph_utils.py
+++ b/torch/_inductor/cudagraph_utils.py
@@ -134,3 +134,25 @@ class BoxedDeviceIndex:
def set(self, device_idx: Optional[int]):
assert device_idx is None or isinstance(device_idx, int)
self.value = device_idx
+
+
+def check_for_mutation_ignore_cuda_graph_managed_tensor(
+ gm: torch.fx.GraphModule, compiled_graph, num_fixed: int
+) -> Optional[str]:
+ default_msg = format_default_skip_message("mutated inputs")
+
+ # doesnt work for non-trees because the warmup run would apply mutation twice
+ if torch._inductor.config.triton.cudagraph_trees:
+ # checking if mutation is only on parameters/static inputs
+ mutation_indices = [
+ idx for idx in compiled_graph.mutated_input_idxs if idx >= num_fixed
+ ]
+ has_mutation = len(mutation_indices) != 0
+ if not has_mutation:
+ return None
+ placeholders = [node for node in gm.graph.nodes if node.op == "placeholder"]
+ return get_mutation_stack_trace(placeholders, mutation_indices)
+
+ else:
+ has_mutation = len(compiled_graph.mutated_inputs) != 0
+ return None if not has_mutation else default_msg
|
2.41.0
|
da94f3a08e3ac0dbec1c832eb1ee30dd617a900
|
Tue, 23 Apr 2024 16:00:34 -0700
|
[PATCH 0561/1000] [device_mesh] add a private init backend option (#124780)
|
This PR adds a private init backend option to tackle an issue with sub-mesh creation: during device mesh slicing we don't want to create process groups again, so it's useful to be able to explicitly turn group creation off. There may also be more sub-mesh creation functionality in the future, so having this flag ensures that no new groups are created. Differential Revision: [D56497780](https://our.internmc.facebook.com/intern/diff/D56497780) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124780 Approved by: https://github.com/awgu
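A rough sketch of the slicing behavior this enables (illustrative 2x2 mesh; assumes the script is launched with 4 ranks, e.g. via torchrun, so a default process group can be initialized):

from torch.distributed.device_mesh import init_device_mesh

# Sub-mesh slices are built with _init_backend=False internally, so they
# reuse the parent mesh's process groups instead of creating new ones.
mesh_2d = init_device_mesh("cuda", (2, 2), mesh_dim_names=("dp", "tp"))
dp_mesh = mesh_2d["dp"]  # no new process group created
tp_mesh = mesh_2d["tp"]  # also reuses the parent mesh's groups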
|
diff --git a/test/distributed/test_device_mesh.py b/test/distributed/test_device_mesh.py
index c6b219fb16..1a74347add 100644
--- a/test/distributed/test_device_mesh.py
+++ b/test/distributed/test_device_mesh.py
@@ -143,6 +143,17 @@ class DeviceMeshTest(DTensorTestBase):
)
self.assertEqual(global_ranks, current_rank_expected_group_ranks)
+ @with_comms
+ def test_device_mesh_init_backend(self):
+ mesh = DeviceMesh(self.device_type, [1], _init_backend=False)
+
+ with self.assertRaisesRegex(RuntimeError, "process groups not initialized!"):
+ mesh.get_group()
+
+ # coordinates should always been populated when init_backend is False, as whenever
+ # we call init_backend we should make sure the default pg already created
+ mesh.get_coordinate()
+
def test_fake_pg_device_mesh(self):
fake_store = FakeStore()
init_process_group("fake", store=fake_store, rank=0, world_size=self.world_size)
@@ -350,10 +361,10 @@ class TestDeviceMeshGetItem(DTensorTestBase):
dp_mesh_2 = mesh["dp"]
self.assertEqual(ref_pg_count, _world.group_count)
- # When we call the "tp" slice, it should create a new pg, as the "tp" slice is called
- # for the first time.
+ # When we call the "tp" slice, it should not create a new pg, as the "tp" slice would
+ # just reuse the parent mesh pg.
tp_mesh = mesh["tp"]
- self.assertTrue(_world.group_count > ref_pg_count)
+ self.assertEqual(_world.group_count, ref_pg_count)
class TestMeshEnv(DTensorTestBase):
diff --git a/torch/distributed/device_mesh.py b/torch/distributed/device_mesh.py
index 1c1decfb2c..38416ed456 100644
--- a/torch/distributed/device_mesh.py
+++ b/torch/distributed/device_mesh.py
@@ -89,6 +89,7 @@ else:
device_mesh.device_type,
mesh_1d,
mesh_dim_names=(mesh_dim_name,),
+ _init_backend=False,
)
if cur_rank in mesh_1d:
res_sub_mesh = sub_mesh
@@ -207,6 +208,7 @@ else:
mesh: Union[torch.Tensor, "ArrayLike"],
*,
mesh_dim_names: Optional[Tuple[str, ...]] = None,
+ _init_backend: bool = True,
) -> None:
self.device_type = device_type
if isinstance(mesh, torch.Tensor) and mesh.device.type != "cpu":
@@ -222,14 +224,22 @@ else:
self._flatten_mesh_list = tuple(self.mesh.flatten().tolist())
self._hash = hash((self._flatten_mesh_list, self.mesh.shape, id(self)))
- # Skip process group initialization if xla device.
+ # Skip process group initialization if xla device or init backend is False
# TODO(yeounoh) implement DeviceMesh backend and register XLA backend.
if device_type != "xla":
# always try to create default (world) pg, even if it is not initialized
# already. The world pg is used for device mesh identity (rank) on each
# process (we need to know if the current global rank is in the mesh or not).
- self._get_or_create_default_group()
- self._init_process_groups()
+ if _init_backend:
+ self._get_or_create_default_group()
+ self._init_process_groups()
+
+ # calculate the coordinates of the current global rank on the mesh
+ rank_coords = (self.mesh == get_rank()).nonzero()
+ assert rank_coords.size(0) in (0, 1)
+ self._coordinate_on_dim: Optional[List[int]] = (
+ rank_coords[0].tolist() if rank_coords.size(0) > 0 else None
+ )
def _get_or_create_default_group(self):
default_initialized = is_initialized()
@@ -258,12 +268,6 @@ else:
)
device_handle.set_device(get_rank() % num_devices_per_host)
- # calculate the coordinates of the current global rank on the mesh
- rank_coords = (self.mesh == get_rank()).nonzero()
- assert rank_coords.size(0) in (0, 1)
- self._coordinate_on_dim: Optional[List[int]] = (
- rank_coords[0].tolist() if rank_coords.size(0) > 0 else None
- )
return _get_default_group()
def _init_process_groups(self):
|
2.41.0
|
4f468e66f4b8a3f9a7866af13a57422c7c5a49e
|
Wed, 24 Apr 2024 04:57:44 +0000
|
[PATCH 0562/1000] remove the redundent '* 1000' to timestamp (#124374)
|
activity->timestamp() is already in nanosecond granularity, so there is no need to multiply by 1000. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124374 Approved by: https://github.com/aaronenyeshi
|
diff --git a/torch/csrc/profiler/collection.cpp b/torch/csrc/profiler/collection.cpp
index d64df0bd04..2fa89bbb82 100644
--- a/torch/csrc/profiler/collection.cpp
+++ b/torch/csrc/profiler/collection.cpp
@@ -956,7 +956,7 @@ class TransferEvents {
static_cast<int32_t>(activity->resourceId())};
auto event = Result::create(
- activity->timestamp() * 1000,
+ activity->timestamp(),
noTID, // Placeholder
device_and_resource,
ExtraFields<EventType::Kineto>{
|
2.41.0
|
1740fd1f6fcd70c6ba4812c1289fe7efcc82908
|
Wed, 24 Apr 2024 05:31:26 +0000
|
[PATCH 0563/1000] [DCP] minor readability fix: make param name consistent with overriden function (#124770)
|
Summary: This diff has no logic changes. It updates the variable names to be in sync with the name used in prepare_global_plan in StorageWriter. Pasting the function signature for easy reference: @abc.abstractmethod def prepare_global_plan(self, plans: List[SavePlan]) -> List[SavePlan]: Differential Revision: D56480396 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124770 Approved by: https://github.com/fegin
|
diff --git a/torch/distributed/checkpoint/filesystem.py b/torch/distributed/checkpoint/filesystem.py
index e268ad057f..6768843292 100644
--- a/torch/distributed/checkpoint/filesystem.py
+++ b/torch/distributed/checkpoint/filesystem.py
@@ -445,10 +445,10 @@ class FileSystemWriter(StorageWriter):
self.fs.mkdir(self.path)
return plan
- def prepare_global_plan(self, global_plan: List[SavePlan]) -> List[SavePlan]:
+ def prepare_global_plan(self, plans: List[SavePlan]) -> List[SavePlan]:
new_plans = [
dataclasses.replace(plan, storage_data=_StoragePrefix(f"__{i}_"))
- for i, plan in enumerate(global_plan)
+ for i, plan in enumerate(plans)
]
return new_plans
@@ -617,8 +617,8 @@ class FileSystemReader(StorageReader):
def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan:
return plan
- def prepare_global_plan(self, global_plan: List[LoadPlan]) -> List[LoadPlan]:
- return global_plan
+ def prepare_global_plan(self, plans: List[LoadPlan]) -> List[LoadPlan]:
+ return plans
@property
def checkpoint_id(self) -> Union[str, os.PathLike]:
|
2.41.0
|
90bfbe01f465bc47024804a57a547db632bca21
|
Tue, 23 Apr 2024 13:26:25 -0700
|
[PATCH 0564/1000] [DDP][PT2D] Lazy Initialization of DDP Module for Replicate API (#123424)
|
In order to make replicate work with meta tensors, we need to do lazy initialization for the replicate API. This PR implements the lazy initialization and ensures that replicate still works with the new DDP compilation. Differential Revision: [D55787340](https://our.internmc.facebook.com/intern/diff/D55787340/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123424 Approved by: https://github.com/yf225 ghstack dependencies: #124421, #124422
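A hedged usage sketch (module, shapes, and device are illustrative; assumes a default process group has already been initialized, e.g. via torchrun): the DDP wrapper is now only constructed lazily on the first forward, and gradient sync can be toggled through the set_requires_gradient_sync method added here.

import torch
import torch.nn as nn
from torch.distributed._composable import replicate

model = replicate(nn.Linear(8, 8).cuda())      # DDP is no longer constructed here
compiled = torch.compile(model, fullgraph=False)

model.set_requires_gradient_sync(False)        # accumulate gradients locally
compiled(torch.randn(1, 8, device="cuda")).sum().backward()
model.set_requires_gradient_sync(True)         # all-reduce on the next backward
compiled(torch.randn(1, 8, device="cuda")).sum().backward()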
|
diff --git a/test/distributed/_composable/test_replicate_with_compiler.py b/test/distributed/_composable/test_replicate_with_compiler.py
index 381610ef57..c3b188c747 100644
--- a/test/distributed/_composable/test_replicate_with_compiler.py
+++ b/test/distributed/_composable/test_replicate_with_compiler.py
@@ -117,7 +117,7 @@ class ReplicateTest(MultiProcessTestCase):
input = torch.randn([1, DIM], device=device)
compiled_replicate_model = torch.compile(
- replicate(deepcopy(model)), fullgraph=True
+ replicate(deepcopy(model)), fullgraph=False
)
compiled_replicate_optim = torch.optim.Adam(
compiled_replicate_model.parameters()
@@ -133,8 +133,8 @@ class ReplicateTest(MultiProcessTestCase):
models = [model, compiled_replicate_model, compiled_ddp_model]
optims = [optim, compiled_replicate_optim, compiled_ddp_optim]
sync_contexts = [
- replicate.state(model)._ddp.no_sync(),
- replicate.state(compiled_replicate_model._orig_mod)._ddp.no_sync(),
+ contextlib.nullcontext(),
+ contextlib.nullcontext(),
compiled_ddp_model.no_sync(),
]
@@ -149,8 +149,13 @@ class ReplicateTest(MultiProcessTestCase):
for model_idx in range(3):
if no_sync and i % 2 == 0:
context = sync_contexts[model_idx]
+ if model_idx <= 1:
+ models[model_idx].set_requires_gradient_sync(False)
else:
context = contextlib.nullcontext()
+ if model_idx <= 1:
+ models[model_idx].set_requires_gradient_sync(True)
+ context = contextlib.nullcontext()
with context:
bwd_context = (
@@ -208,13 +213,9 @@ class ReplicateTest(MultiProcessTestCase):
@skip_if_lt_x_gpu(2)
def test_compile_bf16(self):
def setup(model, compiled_replicate_model, compiled_ddp_model) -> None:
- replicate.state(model)._ddp.register_comm_hook(
- None, ddp_default_hooks.bf16_compress_hook
- )
+ model.register_comm_hook(None, ddp_default_hooks.bf16_compress_hook)
compiled_m = compiled_replicate_model._orig_mod
- replicate.state(compiled_m)._ddp.register_comm_hook(
- None, ddp_default_hooks.bf16_compress_hook
- )
+ compiled_m.register_comm_hook(None, ddp_default_hooks.bf16_compress_hook)
compiled_ddp_model.register_comm_hook(
None, ddp_default_hooks.bf16_compress_hook
)
@@ -226,13 +227,9 @@ class ReplicateTest(MultiProcessTestCase):
@skip_if_lt_x_gpu(2)
def test_compile_fp16(self):
def setup(model, compiled_replicate_model, compiled_ddp_model) -> None:
- replicate.state(model)._ddp.register_comm_hook(
- None, ddp_default_hooks.fp16_compress_hook
- )
+ model.register_comm_hook(None, ddp_default_hooks.fp16_compress_hook)
compiled_m = compiled_replicate_model._orig_mod
- replicate.state(compiled_m)._ddp.register_comm_hook(
- None, ddp_default_hooks.fp16_compress_hook
- )
+ compiled_m.register_comm_hook(None, ddp_default_hooks.fp16_compress_hook)
compiled_ddp_model.register_comm_hook(
None, ddp_default_hooks.fp16_compress_hook
)
@@ -260,7 +257,7 @@ class ReplicateTest(MultiProcessTestCase):
input = torch.randn([1, DIM])
torch._dynamo.config.optimize_ddp = "python_reducer"
compiled_replicate_model = torch.compile(
- replicate(deepcopy(model)), fullgraph=True
+ replicate(deepcopy(model)), fullgraph=False
)
def bwd(loss):
diff --git a/torch/distributed/_composable/replicate.py b/torch/distributed/_composable/replicate.py
index 202d1068ed..6e925c4f22 100644
--- a/torch/distributed/_composable/replicate.py
+++ b/torch/distributed/_composable/replicate.py
@@ -1,5 +1,7 @@
import weakref
-from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
+from typing import Any, cast, Dict, Iterable, List, Optional, Set, Tuple
+
+import typing_extensions
import torch
import torch.nn as nn
@@ -19,7 +21,12 @@ class _ReplicateState(_State):
self._param_list: nn.ParameterList = nn.ParameterList()
# TODO(@fegin): this variable is originally create for testing, we
# should remove this if possible.
+ self._orig_module = self.module
self._param_names: List[str] = []
+ self._no_sync: bool = False
+ self._init_args: Optional[Tuple[Any, ...]] = None
+ self._init_kwargs: Dict[str, Any] = {}
+ self._comm_hook_args: List[Any] = []
def _collect_params(
self,
@@ -53,44 +60,32 @@ class _ReplicateState(_State):
prefix=f"{recurse_prefix}{name}",
)
+ @torch._dynamo.disable(recursive=True)
+ def lazy_init(self) -> None:
+ self.init(*self._init_args, **self._init_kwargs)
+ self.register_comm_hook()
+ self._init_args = tuple()
+ self._init_kwargs = {}
+
+ @torch._dynamo.disable(recursive=True)
def init(
self,
module: nn.Module,
ignored_modules: Set[nn.Module],
**kwargs,
) -> None:
- if _is_fully_sharded(module):
- raise RuntimeError(
- "Cannot apply `replicate()` on a Module already managed by `fully_shard`"
- )
-
if self.has_initialized:
return
self.has_initialized = True
device_mesh = kwargs.get("device_mesh", None)
- if device_mesh is not None:
- from torch.distributed.device_mesh import _mesh_resources
-
- if _mesh_resources.get_parent_mesh(device_mesh) is not None:
- # TODO: This is a temporary work around to enable DDP + TP.
- # We should do the logic in DDP so that the 2D implementation is
- # sound and the state_dict works out of the box.
- #
- # This won't conflict with what is done in DDP class as the module
- # replicate is going to pass is NOT the original module.
- from torch.distributed.tensor.parallel.ddp import (
- _pre_dp_module_transform,
- )
-
- _pre_dp_module_transform(module)
-
self.module = module
ignored_params = {p for m in ignored_modules for p in m.parameters()}
+ from torch.distributed.tensor.parallel.ddp import _localize_dtensor
+
+ _localize_dtensor(module)
self._collect_params(module, ignored_modules, ignored_params)
- module.register_forward_pre_hook(self.forward_pre_hook, with_kwargs=True)
- module.register_forward_hook(self.forward_post_hook) # type: ignore[arg-type]
if "device_id" in kwargs:
# replicate() supports a small usability enhancement where
@@ -114,9 +109,22 @@ class _ReplicateState(_State):
# Weakref to the DDP instance is currently only used for testing.
replicate.state(self.module)._ddp_weakref = weakref.ref(self._ddp)
+ @torch._dynamo.disable(recursive=True)
+ def register_comm_hook(self) -> None:
+ for comm_args, comm_kwargs in self._comm_hook_args:
+ self._ddp.register_comm_hook(*comm_args, **comm_kwargs)
+ self._comm_hook_args.clear()
+
+ def record_init_args(self, *args, **kwargs) -> None:
+ self._init_args = args
+ self._init_kwargs = kwargs
+
def forward_pre_hook(
self, module: nn.Module, args: Tuple[Any, ...], kwargs: Dict[str, Any]
) -> Any:
+ if self._init_args or self._init_kwargs:
+ self.lazy_init()
+ self._ddp.require_backward_grad_sync = not self._no_sync
return self._ddp._pre_forward(*args, **kwargs)
def forward_post_hook(
@@ -128,6 +136,39 @@ class _ReplicateState(_State):
return self._ddp._post_forward(output)
+def unimplemented_deepcopy(*args: Any, **kwargs: Any) -> typing_extensions.Never:
+ raise AssertionError(
+ "DDP does not support deepcopy. Please use state dict for serialization."
+ )
+
+
+# Follow the same pattern as FSDP/fully_shard
+class DDP:
+ def __new__(cls, *args, **kwargs):
+ """
+ Override ``__new__`` to remove the DDP class and directly construct
+ the original class for cases like indexing into a container module.
+ """
+ # Use index 2 since 0 is the dynamically constructed `DDP<...>` class
+ # and index 1 is the `DDP` class itself
+ orig_cls = cls.__mro__[2]
+ return orig_cls.__new__(orig_cls, *args, **kwargs)
+
+ def set_requires_gradient_sync(self, requires_gradient_sync: bool) -> None:
+ """
+ Sets if the module should sync gradients. This can be used to implement
+ gradient accumulation without communication.
+
+ Args:
+ requires_gradient_sync (bool): Whether to reduce gradients for the
+ module's parameters.
+ """
+ replicate.state(self)._no_sync = not requires_gradient_sync
+
+ def register_comm_hook(self, *args, **kwargs) -> None:
+ replicate.state(self)._comm_hook_args.append((args, kwargs))
+
+
@contract(state_cls=_ReplicateState)
def replicate(
module: nn.Module,
@@ -155,12 +196,46 @@ def replicate(
f"but got {type(kwargs['device_id'])}"
)
+ if _is_fully_sharded(module):
+ raise RuntimeError(
+ "Cannot apply `replicate()` on a Module already managed by `fully_shard`"
+ )
+
if ignored_modules is None:
ignored_modules = {}
else:
ignored_modules = set(ignored_modules)
- replicate.state(module).init(module, ignored_modules, **kwargs)
+ state = cast(_ReplicateState, replicate.state(module))
+ module.register_forward_pre_hook(state.forward_pre_hook, with_kwargs=True)
+ device_mesh = kwargs.get("device_mesh", None)
+ if device_mesh is not None:
+ from torch.distributed.device_mesh import _mesh_resources
+
+ if _mesh_resources.get_parent_mesh(device_mesh) is not None:
+ # TODO: This is a temporary work around to enable DDP + TP.
+ # We should do the logic in DDP so that the 2D implementation is
+ # sound and the state_dict works out of the box.
+ #
+ # This won't conflict with what is done in DDP class as the module
+ # replicate is going to pass is NOT the original module.
+ from torch.distributed.tensor.parallel.ddp import (
+ _localize_dtensor,
+ _reconstruct_dtensor,
+ )
+
+ module.register_forward_pre_hook(_reconstruct_dtensor)
+ module.register_forward_hook(_localize_dtensor)
+
+ module.register_forward_hook(state.forward_post_hook) # type: ignore[arg-type]
+
+ state.record_init_args(module, ignored_modules, **kwargs)
+
+ # Place DDP leftmost for highest priority in the method resolution order
+ cls = module.__class__
+ dct = {"__deepcopy__": unimplemented_deepcopy}
+ new_cls = type(f"DDP{cls.__name__}", (DDP, cls), dct)
+ module.__class__ = new_cls
return module
|
2.41.0
|
848051844181a7ac816b555f84caf16f20f0256
|
Wed, 24 Apr 2024 06:48:19 +0000
|
[PATCH 0565/1000] Migrate linux-test Job to ARC (#124386)
|
Migrate linux-test Job to ARC * Separated the `_linux-test-label.yml` workflow to use the runner `label`; * Separated the `_linux-test-rg.yml` workflow to use the runner `group`; Pull Request resolved: https://github.com/pytorch/pytorch/pull/124386 Approved by: https://github.com/zxiiro, https://github.com/jeanschmidt
|
diff --git a/.github/actions/download-build-artifacts/action.yml b/.github/actions/download-build-artifacts/action.yml
index 9defe3ee97..2deeda7280 100644
--- a/.github/actions/download-build-artifacts/action.yml
+++ b/.github/actions/download-build-artifacts/action.yml
@@ -25,7 +25,7 @@ runs:
s3-bucket: ${{ inputs.s3-bucket }}
- name: Download PyTorch Build Artifacts from GHA
- if: inputs.use-gha
+ if: ${{ inputs.use-gha }}
uses: actions/download-artifact@v3
with:
name: ${{ inputs.name }}
diff --git a/.github/actions/linux-test/action.yml b/.github/actions/linux-test/action.yml
new file mode 100644
index 0000000000..6c8e761444
--- /dev/null
+++ b/.github/actions/linux-test/action.yml
@@ -0,0 +1,384 @@
+name: linux-test
+
+inputs:
+ build-environment:
+ required: true
+ type: string
+ description: Top-level label for what's being built/tested.
+ test-matrix:
+ required: true
+ type: string
+ description: JSON description of what test configs to run.
+ docker-image:
+ required: true
+ type: string
+ description: Docker image to run in.
+ sync-tag:
+ required: false
+ type: string
+ default: ""
+ description: |
+ If this is set, our linter will use this to make sure that every other
+ job with the same `sync-tag` is identical.
+ use-gha:
+ required: false
+ type: string
+ default: ""
+ description: If set to any value, upload to GHA. Otherwise upload to S3.
+ dashboard-tag:
+ required: false
+ type: string
+ default: ""
+ s3-bucket:
+ description: S3 bucket to download artifact
+ required: false
+ type: string
+ default: "gha-artifacts"
+ aws-role-to-assume:
+ description: role to assume for downloading artifacts
+ required: false
+ type: string
+ default: ""
+ HUGGING_FACE_HUB_TOKEN:
+ description: |
+ HF Auth token to avoid rate limits when downloading models or datasets from hub
+ required: false
+ default: ""
+ GITHUB_TOKEN:
+ description: GitHub token
+ required: true
+
+#env:
+# GIT_DEFAULT_BRANCH: ${{ inputs.default_branch }}
+
+runs:
+ using: composite
+ steps:
+ - name: Setup Linux
+ uses: ./.github/actions/setup-linux
+
+ - name: configure aws credentials
+ if : ${{ inputs.aws-role-to-assume != '' }}
+ uses: aws-actions/configure-aws-credentials@v3
+ with:
+ role-to-assume: ${{ inputs.aws-role-to-assume }}
+ role-session-name: gha-linux-test
+ aws-region: us-east-1
+
+ - name: Calculate docker image
+ id: calculate-docker-image
+ uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
+ with:
+ docker-image-name: ${{ inputs.docker-image }}
+
+ - name: Use following to pull public copy of the image
+ id: print-ghcr-mirror
+ env:
+ ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
+ shell: bash
+ run: |
+ tag=${ECR_DOCKER_IMAGE##*/}
+ echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
+
+ - name: Pull docker image
+ uses: pytorch/test-infra/.github/actions/pull-docker-image@main
+ with:
+ docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
+
+ - name: Check if in a ARC runner
+ shell: bash
+ id: check_arc_runner
+ run: echo "IN_ARC_RUNNER=$([ -f /.inarc ] && echo true || echo false)" >> "$GITHUB_OUTPUT"
+
+ - name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
+ id: install-nvidia-driver
+ uses: pytorch/test-infra/.github/actions/setup-nvidia@main
+ if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_arc_runner.outputs.IN_ARC_RUNNER == 'false' }}
+
+ - name: Lock NVIDIA A100 40GB Frequency
+ shell: bash
+ run: |
+ sudo nvidia-smi -pm 1
+ sudo nvidia-smi -ac 1215,1410
+ nvidia-smi
+ if: contains(matrix.runner, 'a100')
+
+ - name: Start monitoring script
+ id: monitor-script
+ shell: bash
+ continue-on-error: true
+ run: |
+ python3 -m pip install psutil==5.9.1 nvidia-ml-py==11.525.84
+ python3 -m tools.stats.monitor > usage_log.txt 2>&1 &
+ echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
+
+ - name: Download build artifacts
+ uses: ./.github/actions/download-build-artifacts
+ with:
+ name: ${{ inputs.build-environment }}
+ s3-bucket: ${{ inputs.s3-bucket }}
+
+ - name: Download TD artifacts
+ continue-on-error: true
+ uses: ./.github/actions/download-td-artifacts
+
+ - name: Parse ref
+ id: parse-ref
+ shell: bash
+ run: .github/scripts/parse_ref.py
+
+ - name: Get workflow job id
+ id: get-job-id
+ uses: ./.github/actions/get-workflow-job-id
+ if: always()
+ with:
+ github-token: ${{ inputs.GITHUB_TOKEN }}
+
+ - name: Check for keep-going label and re-enabled test issues
+ # This uses the filter-test-configs action because it conviniently
+ # checks for labels and re-enabled test issues. It does not actually do
+ # any filtering. All filtering is done in the build step.
+ id: keep-going
+ uses: ./.github/actions/filter-test-configs
+ with:
+ github-token: ${{ inputs.GITHUB_TOKEN }}
+ test-matrix: ${{ inputs.test-matrix }}
+ job-name: ${{ steps.get-job-id.outputs.job-name }}
+
+ - name: Test
+ id: test
+ env:
+ BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
+ PR_NUMBER: ${{ github.event.pull_request.number }}
+ GITHUB_REPOSITORY: ${{ github.repository }}
+ GITHUB_WORKFLOW: ${{ github.workflow }}
+ GITHUB_JOB: ${{ github.job }}
+ GITHUB_RUN_ID: ${{ github.run_id }}
+ GITHUB_RUN_NUMBER: ${{ github.run_number }}
+ GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
+ JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
+ JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
+ BRANCH: ${{ steps.parse-ref.outputs.branch }}
+ SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
+ BASE_SHA: ${{ github.event.pull_request.base.sha || github.sha }}
+ TEST_CONFIG: ${{ matrix.config }}
+ SHARD_NUMBER: ${{ matrix.shard }}
+ NUM_TEST_SHARDS: ${{ matrix.num_shards }}
+ REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }}
+ CONTINUE_THROUGH_ERROR: ${{ steps.keep-going.outputs.keep-going }}
+ VERBOSE_TEST_LOGS: ${{ steps.keep-going.outputs.ci-verbose-test-logs }}
+ NO_TEST_TIMEOUT: ${{ steps.keep-going.outputs.ci-no-test-timeout }}
+ NO_TD: ${{ steps.keep-going.outputs.ci-no-td }}
+ TD_DISTRIBUTED: ${{ steps.keep-going.outputs.ci-td-distributed }}
+ SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
+ SCCACHE_S3_KEY_PREFIX: ${{ github.workflow }}
+ SHM_SIZE: ${{ contains(inputs.build-environment, 'cuda') && '2g' || '1g' }}
+ DOCKER_IMAGE: ${{ inputs.docker-image }}
+ XLA_CUDA: ${{ contains(inputs.build-environment, 'xla') && '0' || '' }}
+ XLA_CLANG_CACHE_S3_BUCKET_NAME: ossci-compiler-clang-cache-circleci-xla
+ PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
+ PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }}
+ DASHBOARD_TAG: ${{ inputs.dashboard-tag }}
+ HUGGING_FACE_HUB_TOKEN: ${{ inputs.HUGGING_FACE_HUB_TOKEN }}
+ shell: bash
+ run: |
+ set -x
+
+ if [[ $TEST_CONFIG == 'multigpu' ]]; then
+ TEST_COMMAND=.ci/pytorch/multigpu-test.sh
+ elif [[ $BUILD_ENVIRONMENT == *onnx* ]]; then
+ TEST_COMMAND=.ci/onnx/test.sh
+ else
+ TEST_COMMAND=.ci/pytorch/test.sh
+ fi
+
+ # detached container should get cleaned up by teardown_ec2_linux
+ # TODO: Stop building test binaries as part of the build phase
+ # Used for GPU_FLAG since that doesn't play nice
+ # shellcheck disable=SC2086,SC2090
+ container_name=$(docker run \
+ ${GPU_FLAG:-} \
+ -e BUILD_ENVIRONMENT \
+ -e PR_NUMBER \
+ -e GITHUB_ACTIONS \
+ -e GITHUB_REPOSITORY \
+ -e GITHUB_WORKFLOW \
+ -e GITHUB_JOB \
+ -e GITHUB_RUN_ID \
+ -e GITHUB_RUN_NUMBER \
+ -e GITHUB_RUN_ATTEMPT \
+ -e JOB_ID \
+ -e JOB_NAME \
+ -e BASE_SHA \
+ -e BRANCH \
+ -e SHA1 \
+ -e AWS_DEFAULT_REGION \
+ -e IN_WHEEL_TEST \
+ -e SHARD_NUMBER \
+ -e TEST_CONFIG \
+ -e NUM_TEST_SHARDS \
+ -e REENABLED_ISSUES \
+ -e CONTINUE_THROUGH_ERROR \
+ -e VERBOSE_TEST_LOGS \
+ -e NO_TEST_TIMEOUT \
+ -e NO_TD \
+ -e TD_DISTRIBUTED \
+ -e PR_LABELS \
+ -e MAX_JOBS="$(nproc --ignore=2)" \
+ -e SCCACHE_BUCKET \
+ -e SCCACHE_S3_KEY_PREFIX \
+ -e XLA_CUDA \
+ -e XLA_CLANG_CACHE_S3_BUCKET_NAME \
+ -e PYTORCH_TEST_CUDA_MEM_LEAK_CHECK \
+ -e PYTORCH_TEST_RERUN_DISABLED_TESTS \
+ -e SKIP_SCCACHE_INITIALIZATION=1 \
+ -e HUGGING_FACE_HUB_TOKEN \
+ -e DASHBOARD_TAG \
+ --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
+ --security-opt seccomp=unconfined \
+ --cap-add=SYS_PTRACE \
+ --ipc=host \
+ --shm-size="${SHM_SIZE}" \
+ --tty \
+ --detach \
+ --name="${container_name}" \
+ --user jenkins \
+ -v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
+ -w /var/lib/jenkins/workspace \
+ "${DOCKER_IMAGE}"
+ )
+ # Propagate download.pytorch.org IP to container
+ grep download.pytorch.org /etc/hosts | docker exec -i "${container_name}" sudo bash -c "/bin/cat >> /etc/hosts"
+ echo "DOCKER_CONTAINER_ID=${container_name}" >> "${GITHUB_ENV}"
+ docker exec -t "${container_name}" sh -c "pip install $(echo dist/*.whl)[opt-einsum] && ${TEST_COMMAND}"
+
+ - name: Upload pytest cache if tests failed
+ uses: ./.github/actions/pytest-cache-upload
+ continue-on-error: true
+ if: failure() && steps.test.conclusion && steps.test.conclusion == 'failure'
+ with:
+ cache_dir: .pytest_cache
+ shard: ${{ matrix.shard }}
+ sha: ${{ github.event.pull_request.head.sha || github.sha }}
+ test_config: ${{ matrix.config }}
+ job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }}
+
+ - name: Print remaining test logs
+ shell: bash
+ if: always() && steps.test.conclusion
+ run: |
+ cat test/**/*_toprint.log || true
+
+ - name: Stop monitoring script
+ if: always() && steps.monitor-script.outputs.monitor-script-pid
+ shell: bash
+ continue-on-error: true
+ env:
+ MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
+ run: |
+ kill "$MONITOR_SCRIPT_PID"
+
+ - name: Upload test artifacts
+ uses: ./.github/actions/upload-test-artifacts
+ if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'
+ with:
+ file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
+ use-gha: ${{ inputs.use-gha }}
+ s3-bucket: ${{ inputs.s3-bucket }}
+
+ - name: Collect backtraces from coredumps (if any)
+ if: always()
+ shell: bash
+ run: |
+ # shellcheck disable=SC2156
+ find . -iname "core.[1-9]*" -exec docker exec "${DOCKER_CONTAINER_ID}" sh -c "gdb python {} -ex 'bt' -ex 'q'" \;
+
+ - name: Store Core dumps on S3
+ uses: seemethere/upload-artifact-s3@v5
+ if: failure()
+ with:
+ name: coredumps-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}
+ retention-days: 14
+ if-no-files-found: ignore
+ path: ./**/core.[1-9]*
+
+ - name: Teardown Linux
+ uses: pytorch/test-infra/.github/actions/teardown-linux@main
+ if: always()
+
+ # NB: We are currently having an intermittent GPU-related issue on G5 runners with
+ # A10G GPU. Once this happens, trying to reset the GPU as done in setup-nvidia does
+ # not seem to help. Here are some symptoms:
+ # * Calling nvidia-smi timeouts after 60 second
+ # * Fail to run nvidia-smi with an unable to determine the device handle for GPU
+ # unknown error
+ # * Test fails with a missing CUDA GPU error when initializing CUDA in PyTorch
+ # * Run docker --gpus all fails with error response from daemon
+ #
+ # As both the root cause and recovery path are unclear, let's take the runner out of
+ # service so that it doesn't get any more jobs
+ - name: Check NVIDIA driver installation step
+ if: failure() && steps.install-nvidia-driver.outcome && steps.install-nvidia-driver.outcome != 'skipped'
+ shell: bash
+ env:
+ RUNNER_WORKSPACE: ${{ runner.workspace }}
+ run: |
+ set +e
+ set -x
+
+ nvidia-smi
+ # NB: Surprisingly, nvidia-smi command returns successfully with return code 0 even in
+ # the case where the driver has already crashed as it still can get the driver version
+ # and some basic information like the bus ID. However, the rest of the information
+ # would be missing (ERR!), for example:
+ #
+ # +-----------------------------------------------------------------------------+
+ # | NVIDIA-SMI 525.89.02 Driver Version: 525.89.02 CUDA Version: 12.0 |
+ # |-------------------------------+----------------------+----------------------+
+ # | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
+ # | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
+ # | | | MIG M. |
+ # |===============================+======================+======================|
+ # | 0 ERR! Off | 00000000:00:1E.0 Off | ERR! |
+ # |ERR! ERR! ERR! ERR! / ERR! | 4184MiB / 23028MiB | ERR! Default |
+ # | | | ERR! |
+ # +-------------------------------+----------------------+----------------------+
+ #
+ # +-----------------------------------------------------------------------------+
+ # | Processes: |
+ # | GPU GI CI PID Type Process name GPU Memory |
+ # | ID ID Usage |
+ # |=============================================================================|
+ # +-----------------------------------------------------------------------------+
+ #
+ # This should be reported as a failure instead as it will guarantee to fail when
+ # Docker tries to run with --gpus all
+ #
+ # So, the correct check here is to query one of the missing piece of info like
+ # GPU name, so that the command can fail accordingly
+ nvidia-smi --query-gpu=gpu_name --format=csv,noheader --id=0
+ NVIDIA_SMI_STATUS=$?
+
+ # These are acceptable return code from nvidia-smi as copied from setup-nvidia GitHub action
+ if [ "$NVIDIA_SMI_STATUS" -ne 0 ] && [ "$NVIDIA_SMI_STATUS" -ne 14 ]; then
+ echo "NVIDIA driver installation has failed, shutting down the runner..."
+ .github/scripts/stop_runner_service.sh
+ fi
+
+ # For runner with multiple GPUs, we also want to confirm that the number of GPUs are the
+ # power of 2, i.e. 1, 2, 4, or 8. This is to avoid flaky test issue when one GPU fails
+ # https://github.com/pytorch/test-infra/issues/4000
+ GPU_COUNT=$(nvidia-smi --list-gpus | wc -l)
+ NVIDIA_SMI_STATUS=$?
+
+ # These are acceptable return code from nvidia-smi as copied from setup-nvidia GitHub action
+ if [ "$NVIDIA_SMI_STATUS" -ne 0 ] && [ "$NVIDIA_SMI_STATUS" -ne 14 ]; then
+ echo "NVIDIA driver installation has failed, shutting down the runner..."
+ .github/scripts/stop_runner_service.sh
+ fi
+
+ # Check the GPU count to be a power of 2
+ if [ "$GPU_COUNT" -le 8 ] && [ "$GPU_COUNT" -ne 1 ] && [ "$GPU_COUNT" -ne 2 ] && [ "$GPU_COUNT" -ne 4 ] && [ "$GPU_COUNT" -ne 8 ]; then
+ echo "NVIDIA driver detects $GPU_COUNT GPUs. The runner has a broken GPU, shutting it down..."
+ .github/scripts/stop_runner_service.sh
+ fi
diff --git a/.github/workflows/_linux-test-label.yml b/.github/workflows/_linux-test-label.yml
new file mode 100644
index 0000000000..7056c0168a
--- /dev/null
+++ b/.github/workflows/_linux-test-label.yml
@@ -0,0 +1,85 @@
+name: linux-test-rg
+
+on:
+ workflow_call:
+ inputs:
+ build-environment:
+ required: true
+ type: string
+ description: Top-level label for what's being built/tested.
+ test-matrix:
+ required: true
+ type: string
+ description: JSON description of what test configs to run.
+ docker-image:
+ required: true
+ type: string
+ description: Docker image to run in.
+ sync-tag:
+ required: false
+ type: string
+ default: ""
+ description: |
+ If this is set, our linter will use this to make sure that every other
+ job with the same `sync-tag` is identical.
+ timeout-minutes:
+ required: false
+ type: number
+ default: 240
+ description: |
+ Set the maximum (in minutes) how long the workflow should take to finish
+ use-gha:
+ required: false
+ type: string
+ default: ""
+ description: If set to any value, upload to GHA. Otherwise upload to S3.
+ dashboard-tag:
+ required: false
+ type: string
+ default: ""
+ s3-bucket:
+ description: S3 bucket to download artifact
+ required: false
+ type: string
+ default: "gha-artifacts"
+ aws-role-to-assume:
+ description: role to assume for downloading artifacts
+ required: false
+ type: string
+ default: ""
+ secrets:
+ HUGGING_FACE_HUB_TOKEN:
+ required: false
+ description: |
+ HF Auth token to avoid rate limits when downloading models or datasets from hub
+
+env:
+ GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
+
+jobs:
+ test:
+ # Don't run on forked repos or empty test matrix
+ if: github.repository_owner == 'pytorch' && toJSON(fromJSON(inputs.test-matrix).include) != '[]'
+ strategy:
+ matrix: ${{ fromJSON(inputs.test-matrix) }}
+ fail-fast: false
+ runs-on: ${{ matrix.runner }}
+ timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
+ steps:
+ - name: Checkout PyTorch
+ uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+
+ - name: Linux Test
+ id: linux-test
+ uses: ./.github/actions/linux-test
+ with:
+ build-environment: ${{ inputs.build-environment }}
+ test-matrix: ${{ inputs.test-matrix }}
+ docker-image: ${{ inputs.docker-image }}
+ sync-tag: ${{ inputs.sync-tag }}
+ use-gha: ${{ inputs.use-gha }}
+ dashboard-tag: ${{ inputs.dashboard-tag }}
+ s3-bucket: ${{ inputs.s3-bucket }}
+ aws-role-to-assume: ${{ inputs.aws-role-to-assume }}
+ HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/_linux-test-rg.yml b/.github/workflows/_linux-test-rg.yml
new file mode 100644
index 0000000000..6dc2f6c63b
--- /dev/null
+++ b/.github/workflows/_linux-test-rg.yml
@@ -0,0 +1,86 @@
+name: linux-test-label
+
+on:
+ workflow_call:
+ inputs:
+ build-environment:
+ required: true
+ type: string
+ description: Top-level label for what's being built/tested.
+ test-matrix:
+ required: true
+ type: string
+ description: JSON description of what test configs to run.
+ docker-image:
+ required: true
+ type: string
+ description: Docker image to run in.
+ sync-tag:
+ required: false
+ type: string
+ default: ""
+ description: |
+ If this is set, our linter will use this to make sure that every other
+ job with the same `sync-tag` is identical.
+ timeout-minutes:
+ required: false
+ type: number
+ default: 240
+ description: |
+ Set the maximum (in minutes) how long the workflow should take to finish
+ use-gha:
+ required: false
+ type: string
+ default: ""
+ description: If set to any value, upload to GHA. Otherwise upload to S3.
+ dashboard-tag:
+ required: false
+ type: string
+ default: ""
+ s3-bucket:
+ description: S3 bucket to download artifact
+ required: false
+ type: string
+ default: "gha-artifacts"
+ aws-role-to-assume:
+ description: role to assume for downloading artifacts
+ required: false
+ type: string
+ default: ""
+ secrets:
+ HUGGING_FACE_HUB_TOKEN:
+ required: false
+ description: |
+ HF Auth token to avoid rate limits when downloading models or datasets from hub
+
+env:
+ GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
+
+jobs:
+ test:
+ # Don't run on forked repos or empty test matrix
+ if: github.repository_owner == 'pytorch' && toJSON(fromJSON(inputs.test-matrix).include) != '[]'
+ strategy:
+ matrix: ${{ fromJSON(inputs.test-matrix) }}
+ fail-fast: false
+ runs-on:
+ group: ${{ matrix.runner }}
+ timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
+ steps:
+ - name: Checkout PyTorch
+ uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+
+ - name: Linux Test
+ id: linux-test
+ uses: ./.github/actions/linux-test
+ with:
+ build-environment: ${{ inputs.build-environment }}
+ test-matrix: ${{ inputs.test-matrix }}
+ docker-image: ${{ inputs.docker-image }}
+ sync-tag: ${{ inputs.sync-tag }}
+ use-gha: ${{ inputs.use-gha }}
+ dashboard-tag: ${{ inputs.dashboard-tag }}
+ s3-bucket: ${{ inputs.s3-bucket }}
+ aws-role-to-assume: ${{ inputs.aws-role-to-assume }}
+ HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
2.41.0
|
04dca1502bfed06502fcb0c4b24d063a98df597
|
Tue, 23 Apr 2024 17:03:29 -0700
|
[PATCH 0566/1000] Add pending_fresh_unbacked_symbols, populate unbacked_bindings for Dynamo (#124290)
|
The important comment: ``` # Whenever we allocate a fresh unbacked Symbol, we add it to this # pending list. Unbacked symbol allocation can occur at unpredictable # points during meta tensor propagation, but at some point, the we # have to know what the binding site for an unbacked symbol is, and # this is computed when we actually place the node in the graph. The # important thing is that we always actually handle every unaccounted # for unbacked symbol, so this list helps us keep track of them and # then make sure they are all accounted for. # # We could potentially give rise to errors earlier by lexically # scoping when we do propagation, and only allowing unbacked symbols # to be allocated at this point in time. However this is inconvenient # to do in Dynamo, because fake tensor propagation is far from when we # analyze binding sites (set_example_value), so we do it in a more # mutatey way. # # NB: fresh unbacked symbols NEVER get substitutions applied to them, # they are binding sites! ``` The compute_unbacked_bindings is the other half of the equation: the thing that actually consumes the pending_fresh_unbacked_symbols and does something with them. Important comment: ``` After having run fake tensor propagation and producing example_value result, traverse example_value looking for freshly bound unbacked symbols and record their paths for later. It is an error if we have allocated an unbacked SymInt but it cannot be found in example_value. (NB: this means if you have a multi-output function, you must call this on the tuple of tensor output, you cannot wait!) ``` For example, if I return a tensor with size `[u0, u1]`, and u1 is a fresh unbacked SymInt, then I'll have `{u1: KeyPath(".size(1)")}`, telling me I can get u1 by running `size(1)` on the result of this node. u0 is not fresh (it probably flowed in as an argument), so I don't generate a binding for it. I eventually intend to propagate this information all the way to Inductor lowering, where extra metadata about unbacked symbol binding will be canonically used for codegen, instead of trying to infer it from defs/uses. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124290 Approved by: https://github.com/lezcano
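As a concrete, hedged sketch of where these fresh unbacked symbols arise (adapted from the tests in this stack): under capture_dynamic_output_shape_ops, nonzero() allocates a fresh unbacked SymInt for its first dimension, and the node producing the result below would now carry an unbacked_bindings entry whose key path walks through the output's .size().

import torch

torch._dynamo.config.capture_dynamic_output_shape_ops = True

@torch.compile(backend="aot_eager")
def f(x):
    # Data-dependent output shape -> a fresh unbacked SymInt (e.g. u0) is
    # allocated and bound at this node's output size.
    return torch.nonzero(x)

f(torch.tensor([1, 0, 3, 2, 0]))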
|
diff --git a/docs/source/fx.experimental.rst b/docs/source/fx.experimental.rst
index 93974c0819..4d4d40aaf2 100644
--- a/docs/source/fx.experimental.rst
+++ b/docs/source/fx.experimental.rst
@@ -26,6 +26,8 @@ torch.fx.experimental.symbolic_shapes
SubclassSymbolicContext
DimConstraints
ShapeEnvSettings
+ ConvertIntKey
+ CallMethodKey
hint_int
is_concrete_int
@@ -43,3 +45,4 @@ torch.fx.experimental.symbolic_shapes
statically_known_true
lru_cache
check_consistent
+ compute_unbacked_bindings
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index 2b9be139ec..f4f9bc93bb 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -1153,7 +1153,11 @@ def set_example_value(node, example_value):
# this to accurately reflect what the state of the value was at the time
# the program was traced).
node.meta["example_value"] = example_value
- assert TracingContext.try_get() is not None
+ shape_env = TracingContext.get().fake_mode.shape_env
+ if symbol_to_path := torch.fx.experimental.symbolic_shapes.compute_unbacked_bindings(
+ shape_env, example_value
+ ):
+ node.meta["unbacked_bindings"] = symbol_to_path
def _get_fake_tensor(vt):
diff --git a/torch/csrc/jit/python/pybind_utils.cpp b/torch/csrc/jit/python/pybind_utils.cpp
index ba0135a024..23107d91d9 100644
--- a/torch/csrc/jit/python/pybind_utils.cpp
+++ b/torch/csrc/jit/python/pybind_utils.cpp
@@ -114,6 +114,16 @@ IValue toIValue(py::handle obj, const TypePtr& type, c10::optional<int32_t> N) {
if (torch::is_symfloat(py::handle(obj))) {
return py::cast<c10::SymFloat>(obj).guard_float(__FILE__, __LINE__);
}
+ if (THPVariable_Check(obj.ptr())) {
+ auto var = py::cast<autograd::Variable>(obj);
+ // NB: We carefully test if the storage is meta, because that is
+ // always accurate even if you have a fake tensor (which is the
+ // primary case we are trying to detect here)
+ if (var.storage().device_type() == c10::kMeta) {
+ throw py::cast_error(
+ "cannot extract float from tensor with meta storage");
+ }
+ }
return py::cast<double>(obj);
case TypeKind::ComplexType: {
auto c_obj = py::cast<std::complex<double>>(obj.ptr());
@@ -145,6 +155,13 @@ IValue toIValue(py::handle obj, const TypePtr& type, c10::optional<int32_t> N) {
if (torch::is_symint(py::handle(obj))) {
return py::cast<c10::SymInt>(obj).guard_int(__FILE__, __LINE__);
}
+ if (THPVariable_Check(obj.ptr())) {
+ auto var = py::cast<autograd::Variable>(obj);
+ if (var.storage().device_type() == c10::kMeta) {
+ throw py::cast_error(
+ "cannot extract int from tensor with meta storage");
+ }
+ }
return py::cast<int64_t>(obj);
case TypeKind::LayoutType: {
if (THPLayout_Check(obj.ptr())) {
@@ -195,6 +212,13 @@ IValue toIValue(py::handle obj, const TypePtr& type, c10::optional<int32_t> N) {
if (torch::is_symbool(obj.ptr())) {
return py::cast<c10::SymBool>(obj).guard_bool(__FILE__, __LINE__);
}
+ if (THPVariable_Check(obj.ptr())) {
+ auto var = py::cast<autograd::Variable>(obj);
+ if (var.storage().device_type() == c10::kMeta) {
+ throw py::cast_error(
+ "cannot extract bool from tensor with meta storage");
+ }
+ }
return py::cast<bool>(obj);
case TypeKind::TupleType: {
py::tuple tuple = py::cast<py::tuple>(obj);
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index ed19b7bc8e..ef37737459 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -67,6 +67,7 @@ from torch.utils._sympy.singleton_int import SingletonInt
from torch.utils._traceback import format_frame, CapturedTraceback
from torch._utils_internal import signpost_event
from torch._subclasses.meta_utils import is_sparse_any
+import torch.utils._pytree as pytree
from torch._logging import LazyString
@@ -95,6 +96,7 @@ __all__ = [
"has_free_symbols", "sym_eq", "SymbolicContext", "StatelessSymbolicContext",
"StatefulSymbolicContext", "SubclassSymbolicContext", "statically_known_true",
"guard_size_oblivious", "check_consistent",
+ "compute_unbacked_bindings", "ConvertIntKey",
]
# FX node metadata keys for symbolic shape FX graph.
@@ -399,6 +401,101 @@ def find_symbol_binding_fx_nodes(graph):
if is_symbol_binding_fx_node(node)
}
+
+# Analogous to ConvertIntSource
+@dataclass(frozen=True)
+class ConvertIntKey:
+ def __str__(self) -> str:
+ return ".__int__()"
+
+ def get(self, b: bool) -> int:
+ """Get the int value from bool"""
+ return int(b)
+
+
+@dataclass(frozen=True)
+class CallMethodKey:
+ name: str
+
+ def __str__(self) -> str:
+ return f".{self.name}()"
+
+ def get(self, o: Any) -> Any:
+ """Call the method on object"""
+ return getattr(o, self.name)()
+
+
+def compute_unbacked_bindings(shape_env, example_value):
+ """
+ After having run fake tensor propagation and producing example_value
+ result, traverse example_value looking for freshly bound unbacked
+ symbols and record their paths for later. It is an error if
+ we have allocated an unbacked SymInt but it cannot be found in
+ example_value. (NB: this means if you have a multi-output
+ function, you must call this on the tuple of tensor output, you
+ cannot wait!)
+ """
+ pending = set(shape_env.pending_fresh_unbacked_symbols)
+ if pending:
+ shape_env.pending_fresh_unbacked_symbols.clear()
+
+ def free_unbacked_symbols_with_path(
+ a, path
+ ) -> Dict[sympy.Symbol, pytree.KeyPath]:
+ r = {}
+ if isinstance(a, (tuple, list)):
+ for i in range(len(a)):
+ r.update(
+ free_unbacked_symbols_with_path(
+ a[i], path + (pytree.SequenceKey(i),)
+ )
+ )
+ elif isinstance(a, torch.Tensor):
+ r.update(
+ free_unbacked_symbols_with_path(
+ a.size(), path + (CallMethodKey("size"),)
+ )
+ )
+ r.update(
+ free_unbacked_symbols_with_path(
+ a.stride(), path + (CallMethodKey("stride"),)
+ )
+ )
+ r.update(
+ free_unbacked_symbols_with_path(
+ a.storage_offset(), path + (CallMethodKey("storage_offset"),)
+ )
+ )
+
+ # NB: Intentionally access _expr, not expr, do not want
+ # simplification!
+ elif (
+ isinstance(a, (torch.SymInt, torch.SymFloat))
+ and isinstance(s := a.node._expr, sympy.Symbol)
+ and s in pending
+ ):
+ r[s] = path
+ pending.remove(s)
+ # The annoyance here arises from the fact that SymBool is
+ # allocated by allocating a SymInt and then testing if it's equal
+ # to one. So you have a complicated binding site logic for this.
+ elif (
+ isinstance(a, torch.SymBool)
+ and isinstance(s := a.node._expr, sympy.Eq)
+ # This must match create_unbacked_symbool EXACTLY
+ and isinstance(s.lhs, sympy.Symbol)
+ and s.rhs == 1
+ and s.lhs in pending
+ ):
+ r[s.lhs] = path + (ConvertIntKey(),)
+ pending.remove(s.lhs)
+
+ return r
+
+ symbol_to_path = free_unbacked_symbols_with_path(example_value, ())
+ assert not pending, f"pending {pending} not in {example_value}"
+ return symbol_to_path
+
def definitely_true(a):
"""
Returns True only if we can tell that a is True, possibly introducing
@@ -2072,6 +2169,26 @@ class ShapeEnv:
# signpost_event
self.co_fields = co_fields if co_fields else {}
+ # Whenever we allocate a fresh unbacked Symbol, we add it to this
+ # pending list. Unbacked symbol allocation can occur at unpredictable
+ # points during meta tensor propagation, but at some point, the we
+ # have to know what the binding site for an unbacked symbol is, and
+ # this is computed when we actually place the node in the graph. The
+ # important thing is that we always actually handle every unaccounted
+ # for unbacked symbol, so this list helps us keep track of them and
+ # then make sure they are all accounted for.
+ #
+ # We could potentially give rise to errors earlier by lexically
+ # scoping when we do propagation, and only allowing unbacked symbols
+ # to be allocated at this point in time. However this is inconvenient
+ # to do in Dynamo, because fake tensor propagation is far from when we
+ # analyze binding sites (set_example_value), so we do it in a more
+ # mutatey way.
+ #
+ # NB: fresh unbacked symbols NEVER get substitutions applied to them,
+ # they are binding sites!
+ self.pending_fresh_unbacked_symbols: List[sympy.Symbol] = []
+
# Version counter used to invalidate cached values
self._prev_cache_key = self._get_key()
self._version_counter = 0
@@ -2180,7 +2297,7 @@ class ShapeEnv:
elif key == "name_to_node":
# Compare just the set of keys is the same.
return set(value.keys())
- elif key == "symbol_guard_counter":
+ elif key in ["symbol_guard_counter", "pending_fresh_unbacked_symbols"]:
# Skip this for comparisons
return None
return value
@@ -2643,6 +2760,7 @@ class ShapeEnv:
"""
symbol: sympy.Symbol = sympy.Symbol(f"f{next(self.unbacked_symfloat_counter)}")
self.counter["create_unbacked_symbol"] += 1
+ self.pending_fresh_unbacked_symbols.append(symbol)
self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
vr = self.var_to_range[symbol] = ValueRanges.unknown()
@@ -2658,6 +2776,7 @@ class ShapeEnv:
"""Create a symbolic integer without a hint value
"""
symbol: sympy.Symbol = sympy.Symbol(f"u{next(self.unbacked_symint_counter)}", integer=True)
+ self.pending_fresh_unbacked_symbols.append(symbol)
self.counter["create_unbacked_symbol"] += 1
self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
vr = self.var_to_range[symbol] = self._default_unspecified_value_range()
@@ -2680,6 +2799,7 @@ class ShapeEnv:
"""Create a symbolic boolean without a hint value
"""
symbol: sympy.Symbol = sympy.Symbol(f"u{next(self.unbacked_symint_counter)}", integer=True)
+ self.pending_fresh_unbacked_symbols.append(symbol)
self.counter["create_unbacked_symbol"] += 1
self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
vr = self.var_to_range[symbol] = ValueRanges(0, 1)
|
2.41.0
|
0e2d897ed519e65b097fa811abb5db04e6576b2
|
Tue, 23 Apr 2024 17:03:29 -0700
|
[PATCH 0567/1000] Handle Tensor returns in PropagateUnbackedSymInts (#124297)
|
This subsumes https://github.com/pytorch/pytorch/pull/124069. In the original PR, my idea was that when we run PropagateUnbackedSymInts, we check that the sizes before and after are exactly the same. This ended up turning up lots of bugs that I didn't feel like fixing. Separately, Ivan let me know that this pass was quite expensive in terms of compile time, since we spent a lot of time thinking about the equalities. To kill two birds with one stone, we now only check for equality precisely when an unbacked SymInt was bound (thanks to the previous PR in this stack, we now have this information). Specifically, we look to see if `meta["unbacked_bindings"]` is set on the old node, and if it is, we assert the old value is equal to the new value from the repropagation. Note that the pytree key is used to actually extract the new value from the example value, as it may be nested inside, e.g., a tensor size. We do something a bit naughty at the end: we use `defer_runtime_assert` to actually teach ShapeEnv about the equality. This is implementationally equivalent to what we used to do, but we're going to change this soon. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124297 Approved by: https://github.com/lezcano ghstack dependencies: #124290
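A minimal repro of what now works, mirroring the test added in this diff: the split size flows through aot_autograd, where the unbacked symbol from nonzero() is rebound via its recorded key path rather than checked with a blanket equality on every SymInt return.

import torch

torch._dynamo.config.capture_dynamic_output_shape_ops = True

@torch.compile(fullgraph=True, dynamic=True)
def f(x, y):
    z = x.nonzero()  # binds a fresh unbacked SymInt for z.size(0)
    return torch.split(z, [y.size(0)])

f(torch.tensor([1, 0, 1, 1, 0, 1, 0]), torch.randn(4))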
|
diff --git a/docs/source/fx.experimental.rst b/docs/source/fx.experimental.rst
index 4d4d40aaf2..76cb96337b 100644
--- a/docs/source/fx.experimental.rst
+++ b/docs/source/fx.experimental.rst
@@ -46,3 +46,4 @@ torch.fx.experimental.symbolic_shapes
lru_cache
check_consistent
compute_unbacked_bindings
+ rebind_unbacked
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index 2f27f16889..f2c6515242 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -8514,6 +8514,16 @@ def ___make_guard_fn():
RuntimeError, lambda: fn(torch.randn(2, 3), torch.tensor([1]))
)
+ @torch._dynamo.config.patch(
+ capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
+ )
+ def test_aot_autograd_propagate_unbacked_symints_shape(self):
+ @torch.compile(backend="aot_eager")
+ def f(x):
+ return torch.nonzero(x)
+
+ f(torch.tensor([1, 0, 3, 2, 0]))
+
def test_simple_set_usage(self):
def foo(x, y):
setty = {x, y}
diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py
index 3e7e296d4d..12ff856f28 100644
--- a/test/inductor/test_torchinductor_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_dynamic_shapes.py
@@ -218,6 +218,15 @@ class TestInductorDynamic(TestCase):
opt_r = opt_f(x, b)
self.assertEqual(r, opt_r)
+ @torch._dynamo.config.patch(capture_dynamic_output_shape_ops=True)
+ def test_nonzero_no_realloc(self, device):
+ @torch.compile(fullgraph=True, dynamic=True)
+ def f(x, y):
+ z = x.nonzero()
+ return torch.split(z, [y.size(0)])
+
+ f(torch.tensor([1, 0, 1, 1, 0, 1, 0]), torch.randn(4))
+
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_item_nobreak(self, device):
@torch.compile(fullgraph=True)
diff --git a/torch/_functorch/_aot_autograd/traced_function_transforms.py b/torch/_functorch/_aot_autograd/traced_function_transforms.py
index 0671c21102..aced6c63be 100644
--- a/torch/_functorch/_aot_autograd/traced_function_transforms.py
+++ b/torch/_functorch/_aot_autograd/traced_function_transforms.py
@@ -23,7 +23,11 @@ from torch import Tensor
from torch._decomp.decompositions_for_rng import PhiloxStateTracker
from torch._guards import detect_fake_mode
from torch._prims_common import CUDARngStateHelper
-from torch.fx.experimental.symbolic_shapes import definitely_false, sym_eq
+from torch.fx.experimental.symbolic_shapes import (
+ definitely_false,
+ rebind_unbacked,
+ sym_eq,
+)
from torch.nn.utils import stateless
from .. import config
@@ -675,16 +679,8 @@ def aot_dispatch_subclass(
class PropagateUnbackedSymInts(torch.fx.Interpreter):
def run_node(self, n: torch.fx.Node):
- import sympy
-
result = super().run_node(n)
- # TODO: handle Tensor returns
- if "example_value" in n.meta:
- if isinstance(result, torch.SymInt) and isinstance(
- result.node.expr, sympy.Symbol
- ):
- torch._check(result == n.meta["example_value"])
-
+ rebind_unbacked(detect_fake_mode().shape_env, n, result)
return result
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index ef37737459..95fd7c7de4 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -97,6 +97,7 @@ __all__ = [
"StatefulSymbolicContext", "SubclassSymbolicContext", "statically_known_true",
"guard_size_oblivious", "check_consistent",
"compute_unbacked_bindings", "ConvertIntKey",
+ "rebind_unbacked",
]
# FX node metadata keys for symbolic shape FX graph.
@@ -265,6 +266,29 @@ def check_consistent(new, old) -> None:
assert isinstance(old, scalar_types) and not isinstance(old, bool), f"{old} != {new}"
torch._check(old == new, lambda: f"{old} != {new} (old != new)")
+def rebind_unbacked(shape_env, n: torch.fx.Node, result):
+ """
+ Suppose we are retracing a pre-existing FX graph that previously had
+ fake tensor propagation (and therefore unbacked SymInts). When we retrace,
+ we re-propagate fake tensors, which results in new unbacked SymInts.
+ When this happens, we need to tell the shape environment about the equivalence
+ of the old and new unbacked SymInts. Pass us the old torch.fx.Node (which
+ has the old binding information) and the new result (which we can extract the
+ new unbacked SymInts out from).
+ """
+ if bindings := n.meta.get("unbacked_bindings"):
+ for raw_u0, path in bindings.items():
+ u1 = pytree.key_get(result, path)
+ # We should never have bindings for raw bools; instead they should
+ # have been converted to ints via ConvertIntKey
+ assert type(u1) is not bool
+ if isinstance(u1, (int, float)):
+ raw_u1 = sympy.sympify(u1)
+ else:
+ raw_u1 = u1.node.expr
+ # TODO: replace with rename unbacked to
+ shape_env.defer_runtime_assert(sympy.Eq(raw_u0, raw_u1), "")
+
def canonicalize_bool_expr(expr: SympyBoolean) -> SympyBoolean:
r""" Canonicalize a boolean expression by transforming it into a lt / le
inequality and moving all the non-constant terms to the rhs.
@@ -406,11 +430,11 @@ def find_symbol_binding_fx_nodes(graph):
@dataclass(frozen=True)
class ConvertIntKey:
def __str__(self) -> str:
- return ".__int__()"
+ return ".cast_symbool_to_symint_guardless()"
def get(self, b: bool) -> int:
"""Get the int value from bool"""
- return int(b)
+ return cast_symbool_to_symint_guardless(b)
@dataclass(frozen=True)
@@ -1229,7 +1253,7 @@ def _eval_is_non_overlapping_and_dense(sizes, strides):
def cast_symbool_to_symint_guardless(symbool: torch.SymBool) -> torch.SymInt:
int_sym = sympy.Piecewise((1, symbool.node.expr), (0, True))
- return symbool.node.shape_env.create_symintnode(int_sym, hint=int(symbool.node.require_hint()))
+ return symbool.node.shape_env.create_symintnode(int_sym, hint=int(symbool.node.require_hint()) if has_hint(symbool) else None)
SYMPY_INTERP = {
'Abs': operator.abs,
|
2.41.0
|
37aebc99fbc02cde91a8cdad8471cb82b44a525
|
Tue, 23 Apr 2024 20:41:38 +0200
|
[PATCH 0570/1000] [Inductor cutlass backend] Add bmm support (#121734)
|
Add support for bmm (batch matrix multiply) op through the Cutlass backend. Test Plan: * CI * Added test in test_cutlass_backend.py Pull Request resolved: https://github.com/pytorch/pytorch/pull/121734 Approved by: https://github.com/eellison ghstack dependencies: #121497, #123930, #123932
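A hedged usage sketch, following the knobs patched in test_cutlass_backend.py (max_autotune plus a CUTLASS-enabled max_autotune_gemm_backends) and the fp16 shapes used there; it assumes a CUDA build with a CUTLASS checkout visible to inductor, and whether a CUTLASS kernel actually wins the autotune depends on the local setup.
```
import torch
import torch._inductor.config as inductor_config

def bmm(a, b):
    return torch.bmm(a, b)

# Shapes/dtype follow the new bmm test (batch_size=10, 256x32 @ 32x256, fp16),
# which keeps the GEMM dims aligned for CUTLASS 3.x ops.
a = torch.randn(10, 256, 32, device="cuda", dtype=torch.float16)
b = torch.randn(10, 32, 256, device="cuda", dtype=torch.float16)

# Put CUTLASS in the candidate backend list; ATen stays available as a fallback.
with inductor_config.patch(
    max_autotune=True,
    max_autotune_gemm_backends="CUTLASS,ATen",
):
    compiled_bmm = torch.compile(bmm)
    out = compiled_bmm(a, b)
```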
|
diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py
index d3b0c42d7c..24d2ba8e2a 100644
--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py
@@ -2733,7 +2733,6 @@ def fail_non_abi_compatible_cuda(is_skip=False):
# test_failures, xfail by default, set is_skip=True to skip
CPU_TEST_FAILURES = {
"test_add_complex": fail_stack_allocation(is_skip=True),
- "test_bmm_multiple_dynamic": fail_with_and_without_stack_allocation(),
# FIXME: failed with Segfault while exiting the Python runtime
"test_duplicate_constant_folding": fail_with_and_without_stack_allocation(
is_skip=True
@@ -2976,7 +2975,6 @@ copy_tests(
"non_abi_compatible_cpu",
# test_failures, xfail by default, set is_skip=True to skip
{
- "test_bmm_multiple_dynamic": TestFailure(("non_abi_compatible_cpu",)),
"test_duplicate_constant_folding": TestFailure(
("non_abi_compatible_cpu",), is_skip=True
),
diff --git a/test/inductor/test_cutlass_backend.py b/test/inductor/test_cutlass_backend.py
index 5dc93b7325..f5e734560c 100644
--- a/test/inductor/test_cutlass_backend.py
+++ b/test/inductor/test_cutlass_backend.py
@@ -2,7 +2,7 @@
import logging
import os
import unittest
-from typing import Callable, List
+from typing import Callable, List, Optional
from unittest import mock
import torch
@@ -175,6 +175,7 @@ class TestCutlassBackend(TestCase):
fp16=True,
expected_fuse_count=1,
mm: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,
+ batch_size: Optional[int] = None,
):
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = (
mixed_precision
@@ -185,8 +186,12 @@ class TestCutlassBackend(TestCase):
# so if these shapes don't all align to at least 8 elements
# it can happen that no Cutlass 3.x op is available
# that allows fusions
- a = torch.randn(256, 32).cuda()
- b = torch.randn(32, 256).cuda()
+ if batch_size is None:
+ a = torch.randn(256, 32).cuda()
+ b = torch.randn(32, 256).cuda()
+ else:
+ a = torch.randn(batch_size, 256, 32).cuda()
+ b = torch.randn(batch_size, 32, 256).cuda()
if fp16:
a = a.half()
b = b.half()
@@ -304,6 +309,18 @@ class TestCutlassBackend(TestCase):
mixed_precision=True, fp16=True, expected_fuse_count=0, mm=mm
)
+ def test_max_autotune_cutlass_backend_simple_bmm(self):
+ def bmm(a, b):
+ return torch.bmm(a, b)
+
+ self._test_max_autotune_cutlass_backend_epilogue_fusion( # test bmm
+ mixed_precision=False,
+ fp16=True,
+ expected_fuse_count=0,
+ mm=bmm,
+ batch_size=10,
+ )
+
@unittest.skipIf(not SM90OrLater, "need sm_90")
@unittest.skipIf(torch.version.hip, "HIP not supported")
@unittest.skipIf(config.is_fbcode(), "fbcode requires different CUTLASS path setup")
diff --git a/torch/_inductor/kernel/bmm.py b/torch/_inductor/kernel/bmm.py
index 1bb8c9d820..f19a54c19f 100644
--- a/torch/_inductor/kernel/bmm.py
+++ b/torch/_inductor/kernel/bmm.py
@@ -1,3 +1,5 @@
+import logging
+
import torch
from .. import ir
@@ -7,11 +9,19 @@ from ..select_algorithm import (
ExternKernelChoice,
TritonTemplate,
)
-from ..utils import ceildiv as cdiv, use_aten_gemm_kernels, use_triton_template
+from ..utils import (
+ ceildiv as cdiv,
+ use_aten_gemm_kernels,
+ use_cutlass_template,
+ use_triton_template,
+)
from ..virtualized import V
+from .mm import _is_static_problem
+
from .mm_common import addmm_epilogue, mm_args, mm_configs, mm_options
+log = logging.getLogger(__name__)
aten = torch.ops.aten
@@ -133,6 +143,15 @@ def tuned_bmm(mat1, mat2, *, layout=None):
layout=layout,
**mm_options(config, m, n, k, layout),
)
+ static_shape, is_nonzero = _is_static_problem([mat1, mat2], layout)
+ if static_shape and is_nonzero and use_cutlass_template(layout, m, n, k):
+ from ..codegen.cuda.gemm_template import CUTLASSGemmTemplate
+
+ CUTLASSGemmTemplate.add_cutlass_gemm_choices(choices, layout, [mat1, mat2])
+
+ if len(choices) == 0:
+ log.warning("No choices for GEMM, using ATen backend as fallback")
+ choices.append(aten_bmm.bind((mat1, mat2), layout))
return autotune_select_algorithm("bmm", choices, [mat1, mat2], layout)
|
2.41.0
|
76b5e3cc8b7a9085c39f25111e852cf668f7d1b
|
Tue, 23 Apr 2024 20:41:38 +0200
|
[PATCH 0571/1000] [Inductor Cutlass backend] Disable epilogue fusions (#124107)
|
This diff disables Cutlass backend EVT epilogue fusions. It does not yet contain the removal of most of the underlying implementation. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124107 Approved by: https://github.com/jansel ghstack dependencies: #121497, #123930, #123932, #121734
|
diff --git a/test/inductor/test_cutlass_backend.py b/test/inductor/test_cutlass_backend.py
index f5e734560c..58630d8180 100644
--- a/test/inductor/test_cutlass_backend.py
+++ b/test/inductor/test_cutlass_backend.py
@@ -173,7 +173,7 @@ class TestCutlassBackend(TestCase):
max_autotune_gemm_backends: str = "CUTLASS",
mixed_precision=False,
fp16=True,
- expected_fuse_count=1,
+ expected_fuse_count=0,
mm: Callable[[torch.Tensor, torch.Tensor], torch.Tensor] = None,
batch_size: Optional[int] = None,
):
@@ -225,7 +225,7 @@ class TestCutlassBackend(TestCase):
# The pointwise ops seem to be pre-fused into a single Pointwise
self._test_max_autotune_cutlass_backend_epilogue_fusion(
- mixed_precision=False, fp16=True, expected_fuse_count=1, mm=mm
+ mixed_precision=False, fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@@ -236,7 +236,7 @@ class TestCutlassBackend(TestCase):
return (a @ b) * 3.0
self._test_max_autotune_cutlass_backend_epilogue_fusion(
- mixed_precision=True, fp16=True, expected_fuse_count=1, mm=mm
+ mixed_precision=True, fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@@ -248,7 +248,7 @@ class TestCutlassBackend(TestCase):
# The pointwise ops seem to be pre-fused into a single Pointwise
self._test_max_autotune_cutlass_backend_epilogue_fusion(
- mixed_precision=False, fp16=True, expected_fuse_count=1, mm=mm
+ mixed_precision=False, fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@@ -259,7 +259,7 @@ class TestCutlassBackend(TestCase):
return (a @ b) * 3.3 - 1.234
self._test_max_autotune_cutlass_backend_epilogue_fusion(
- mixed_precision=True, fp16=True, expected_fuse_count=1, mm=mm
+ mixed_precision=True, fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@@ -270,7 +270,7 @@ class TestCutlassBackend(TestCase):
return torch.nn.functional.relu((a @ b) * 3.3 - 1.234)
self._test_max_autotune_cutlass_backend_epilogue_fusion(
- mixed_precision=False, fp16=True, expected_fuse_count=1, mm=mm
+ mixed_precision=False, fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@@ -282,7 +282,7 @@ class TestCutlassBackend(TestCase):
# The pointwise ops seem to be pre-fused into a single Pointwise
self._test_max_autotune_cutlass_backend_epilogue_fusion(
- mixed_precision=True, fp16=True, expected_fuse_count=1, mm=mm
+ mixed_precision=True, fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@@ -294,7 +294,7 @@ class TestCutlassBackend(TestCase):
# The pointwise ops seem to be pre-fused into a single Pointwise
self._test_max_autotune_cutlass_backend_epilogue_fusion(
- mixed_precision=True, fp16=True, expected_fuse_count=1, mm=mm
+ mixed_precision=True, fp16=True, expected_fuse_count=0, mm=mm
)
@unittest.skipIf(not SM90OrLater, "need sm_90")
@@ -329,7 +329,7 @@ class TestCutlassBackend(TestCase):
return (a @ b) / b.size(1)
self._test_max_autotune_cutlass_backend_epilogue_fusion(
- mixed_precision=True, fp16=True, expected_fuse_count=1, mm=mm
+ mixed_precision=True, fp16=True, expected_fuse_count=0, mm=mm
)
# TODO: Enable dynamic test cases when dynamic support is added.
diff --git a/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py b/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py
index 2e9dc2d8a7..8227924410 100644
--- a/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py
+++ b/torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py
@@ -5,19 +5,12 @@ from ...._dynamo.utils import counters
from ... import config, ir
from ...codecache import code_hash, get_path
-from ...ir import ComputedBuffer, CUDATemplateBuffer, Pointwise
-from ...scheduler import (
- BaseSchedulerNode,
- BaseScheduling,
- FusedSchedulerNode,
- Scheduler,
- SchedulerNode,
-)
+from ...ir import CUDATemplateBuffer
+from ...scheduler import BaseSchedulerNode, BaseScheduling, Scheduler, SchedulerNode
from ...utils import get_fused_kernel_name, get_kernel_metadata, sympy_product
from ...virtualized import V
from ..common import IndentedBuffer
-from .cutlass_epilogue_gen import CUTLASSEVTOpNotImplementedError
log = logging.getLogger(__name__)
@@ -38,116 +31,15 @@ class CUDACPPScheduling(BaseScheduling):
def group_fn(self, sizes):
return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes)
- def is_cuda_cpp_template(self, node: BaseSchedulerNode) -> bool:
+ @staticmethod
+ def is_cuda_cpp_template(node: BaseSchedulerNode) -> bool:
return isinstance(node, SchedulerNode) and isinstance(
node.node, CUDATemplateBuffer
)
- def is_cuda_cpp_fused_template(self, node: BaseSchedulerNode) -> bool:
- return isinstance(node, FusedSchedulerNode) and self.is_cuda_cpp_template(
- node.get_template_node()
- )
-
- def _can_fuse_epilogue_impl(
- self,
- cuda_template_buffer: CUDATemplateBuffer,
- epilogue_nodes: List[ir.IRNode],
- additional_node: ir.IRNode,
- ) -> bool:
- """
- Check if the given node can be fused with the epilogue. At the moment, Kernels
- support fusion with Pointwise operations, wrapped in (named) ComputedBuffer nodes.
-
- Args:
- cuda_template_buffer : A CUDATemplateBuffer object representing the CUDA template and it's result buffer
- epilogue_nodes : List[ir.Buffer]: The list of already fused epilogue nodes.
- additional_node: The ir.Buffer node to be checked if it can be fused with the epilogue.
- Returns:
- - bool: True if the given node can be fused with the epilogue, False otherwise.
-
- """
- if not isinstance(cuda_template_buffer, CUDATemplateBuffer):
- return False
- if not cuda_template_buffer.template.can_fuse_epilogue:
- # The used GEMM op does not support fusing epilogues
- return False
- if not isinstance(additional_node, ComputedBuffer):
- return False
- if not isinstance(additional_node.data, Pointwise):
- return False
- # We can fuse a Pointwise op that depends on the last fused epilogue node
- # if any. If there is no epilogue node yet, it needs to depend on the template
- # node
- node_name = additional_node.get_computed_buffer_name()
- if node_name is None:
- return False
-
- if len(epilogue_nodes) == 0:
- if cuda_template_buffer.name not in additional_node.get_read_names():
- return False
- else:
- last_epilogue_node = epilogue_nodes[-1]
- assert isinstance(last_epilogue_node, ir.ComputedBuffer) # for mypy
- last_epilogue_name = (
- last_epilogue_node.name
- if last_epilogue_node.name is not None
- else last_epilogue_node.data.name # type: ignore[attr-defined]
- )
- if last_epilogue_name not in additional_node.get_read_names():
- return False
- if additional_node.layout != cuda_template_buffer.layout:
- return False
- try:
- from torch._inductor.codegen.cuda.cutlass_epilogue_gen import (
- CutlassEVTEpilogueArgumentFormatter,
- CutlassEVTEpilogueTypeFormatter,
- )
-
- CutlassEVTEpilogueTypeFormatter.ir_to_evt_string(
- cast(str, cuda_template_buffer.name), "anything", [additional_node]
- )
- CutlassEVTEpilogueArgumentFormatter.ir_to_evt_argument_string(
- cast(str, cuda_template_buffer.name), [additional_node]
- )
- except CUTLASSEVTOpNotImplementedError as e:
- not_implemented_op = str(e)
- if not_implemented_op.startswith("_op_"):
- not_implemented_op = not_implemented_op[4:]
- log.warning(
- f"Cannot fuse epilogue node {additional_node} into {cuda_template_buffer.name}, likely due to unsupported operation: {not_implemented_op}" # noqa: G004, B950
- )
- return False
- else:
- # Likely due to unsupported dtype.
- log.warning(
- f"Cannot fuse epilogue node {additional_node} into {cuda_template_buffer.name}. Reason: {not_implemented_op}" # noqa: G004, B950
- )
- return False
- return True
-
- @staticmethod
- def _unwrap_epilogue_nodes(fused_node: FusedSchedulerNode) -> List[ir.IRNode]:
- nodes = fused_node.get_nodes()
- template_node = fused_node.get_template_node()
- nodes.remove(template_node)
- return [n.node for n in nodes]
-
def can_fuse_vertical(
self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> bool:
- if self.is_cuda_cpp_template(node1) and isinstance(node2, SchedulerNode):
- return self._can_fuse_epilogue_impl(
- cast(CUDATemplateBuffer, node1.node), [], node2.node
- )
- elif self.is_cuda_cpp_fused_template(node1) and isinstance(
- node2, SchedulerNode
- ):
- fnode1 = cast(FusedSchedulerNode, node1)
- return self._can_fuse_epilogue_impl(
- fnode1.get_template_node().node,
- self._unwrap_epilogue_nodes(fnode1),
- node2.node,
- )
return False
def define_kernel(self, src_code: str, node_schedule) -> str:
diff --git a/torch/_inductor/codegen/cuda_combined_scheduling.py b/torch/_inductor/codegen/cuda_combined_scheduling.py
index 587187b717..eceadeb4c7 100644
--- a/torch/_inductor/codegen/cuda_combined_scheduling.py
+++ b/torch/_inductor/codegen/cuda_combined_scheduling.py
@@ -29,9 +29,7 @@ class CUDACombinedScheduling(BaseScheduling):
self._cuda_cpp_scheduling = CUDACPPScheduling(scheduler)
def choose_node_backend(self, node: BaseSchedulerNode) -> BaseScheduling:
- if self._cuda_cpp_scheduling.is_cuda_cpp_template(
- node
- ) or self._cuda_cpp_scheduling.is_cuda_cpp_fused_template(node):
+ if self._cuda_cpp_scheduling.is_cuda_cpp_template(node):
return self._cuda_cpp_scheduling
return self._triton_scheduling
@@ -42,9 +40,7 @@ class CUDACombinedScheduling(BaseScheduling):
def can_fuse_horizontal(self, node1: BaseSchedulerNode, node2: BaseSchedulerNode):
for node in (node1, node2):
- if self._cuda_cpp_scheduling.is_cuda_cpp_template(
- node
- ) or self._cuda_cpp_scheduling.is_cuda_cpp_fused_template(node):
+ if self._cuda_cpp_scheduling.is_cuda_cpp_template(node):
return self._cuda_cpp_scheduling.can_fuse_horizontal(
node1, node2
) # always False at the moment
|
2.41.0
|
47f4253ab9c9be89293be13c89a2455e3f58385
|
Tue, 23 Apr 2024 20:41:39 +0200
|
[PATCH 0572/1000] [Inductor Cutlass backend] Set INDUCTOR_TEST_DISABLE_FRESH_CACHE in test setup (#124574)
|
The diff https://github.com/pytorch/pytorch/pull/122661 introduces a new automatic cache refresh mechanism during all inductor-derived test cases. But this refresh mechanism seems not to work properly across process boundaries, specifically when using autotune_in_subproc, which many tests in test_cutlass_backend.py rely on. Solution: Set the env var INDUCTOR_TEST_DISABLE_FRESH_CACHE=1 early during test setup within test_cutlass_backend.py Test Plan: This is a change to unit tests only. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124574 Approved by: https://github.com/aakhundov ghstack dependencies: #121497, #123930, #123932, #121734, #124107
|
diff --git a/test/inductor/test_cutlass_backend.py b/test/inductor/test_cutlass_backend.py
index 58630d8180..5d2addab7f 100644
--- a/test/inductor/test_cutlass_backend.py
+++ b/test/inductor/test_cutlass_backend.py
@@ -44,7 +44,21 @@ def _get_path_without_sccache() -> str:
@instantiate_parametrized_tests
class TestCutlassBackend(TestCase):
def setUp(self):
- super().setUp()
+ # The new inductor cache refresh mechanism
+ # introduced with https://github.com/pytorch/pytorch/pull/122661
+ # interacts badly with persistent subprocesses during
+ # autotuning. So we need to disable automatic cache refresh
+ # before calling setUp() on the parent class.
+ old_disable_fresh_cache_envvar = os.environ.get(
+ "INDUCTOR_TEST_DISABLE_FRESH_CACHE", ""
+ )
+ try:
+ os.environ["INDUCTOR_TEST_DISABLE_FRESH_CACHE"] = "1"
+ super().setUp()
+ finally:
+ os.environ[
+ "INDUCTOR_TEST_DISABLE_FRESH_CACHE"
+ ] = old_disable_fresh_cache_envvar
torch.random.manual_seed(1234)
@unittest.skipIf(not SM75OrLater, "need sm_75")
|
2.41.0
|
9f0d127fb4389dc1d5bad260396c560634e4889
|
Thu, 18 Apr 2024 11:32:39 +0300
|
[PATCH 0573/1000] Fix a bug in retrieving approximate bsr_dense_addmm kernel meta data (#124371)
|
Fixes #124333 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124371 Approved by: https://github.com/eqy, https://github.com/lezcano
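The message above does not describe the bug, but the diff does: the old loop iterated sorted(matching_meta) while unpacking two names, and iterating a dict yields only its keys. A toy illustration of the pitfall (the table below is hypothetical; the real keys are tuples of problem-shape parameters):
```
# Hypothetical stand-in for the approximate-meta lookup table.
matching_meta = {
    (16, 64, 2): {"SPLIT_N": 8},
    (16, 128, 2): {"SPLIT_N": 4},
}

# Buggy shape of the old loop: sorted(dict) yields keys only, so this tries
# to unpack each key tuple into (mkey, meta_) instead of (key, value) pairs.
# for mkey, meta_ in sorted(matching_meta or {}):
#     ...

# Fixed shape, as in the patch: iterate keys, then index back into the dict.
for mkey in sorted(matching_meta or {}):
    meta_ = matching_meta[mkey]
    print(mkey, meta_["SPLIT_N"])
```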
|
diff --git a/torch/sparse/_triton_ops.py b/torch/sparse/_triton_ops.py
index a8d818392b..a22b5c8077 100644
--- a/torch/sparse/_triton_ops.py
+++ b/torch/sparse/_triton_ops.py
@@ -571,10 +571,12 @@ def bsr_dense_addmm_meta(M, K, N, Ms, Ks, beta, alpha,
device_name, version=(0, dtype, 0.5))
if meta is None:
# find approximate meta such that N % SPLIT_N == 0.
- for mkey, meta_ in sorted(get_meta(
- 'bsr_dense_addmm',
- (*key[:2], '*', *key[3:]),
- device_name, version=(0, dtype, 0.5)) or {}):
+ matching_meta = get_meta(
+ 'bsr_dense_addmm',
+ (*key[:2], '*', *key[3:]),
+ device_name, version=(0, dtype, 0.5))
+ for mkey in sorted(matching_meta or {}):
+ meta_ = matching_meta[mkey]
if N % meta_['SPLIT_N'] == 0 and mkey[2] <= N:
meta = meta_
if meta is not None:
diff --git a/torch/sparse/_triton_ops_meta.py b/torch/sparse/_triton_ops_meta.py
index 3355fd6241..e6fc1329e8 100644
--- a/torch/sparse/_triton_ops_meta.py
+++ b/torch/sparse/_triton_ops_meta.py
@@ -160,9 +160,8 @@ def get_meta(op, key, device_name=None, version=(0, torch.float16, 0.5), exact=F
values = op_data.get(key)
if values is not None:
matching_data[key] = values
-
matching_meta = {}
- for key, values in matching_data.items():
+ for op_key, values in matching_data.items():
if op == "scatter_mm":
names = (
"GROUP_SIZE",
@@ -182,7 +181,7 @@ def get_meta(op, key, device_name=None, version=(0, torch.float16, 0.5), exact=F
if "*" not in key:
return meta
- matching_meta[key] = meta
+ matching_meta[op_key] = meta
if "*" in key:
return matching_meta
|
2.41.0
|
d94f52a8a2eafebd1ab5231699e1f9e19a674af
|
Tue, 23 Apr 2024 20:41:40 +0200
|
[PATCH 0574/1000] [Inductor Cutlass backend] clean up CUTLASSGemmTemplate.add_cutlass_gemm_choices (#124575)
|
Clean up CUTLASSGemmTemplate.add_cutlass_gemm_choices, removing code that became unnecessary once EVT-based epilogue fusion was removed. Test Plan: Already covered by test_cutlass_backend.py Pull Request resolved: https://github.com/pytorch/pytorch/pull/124575 Approved by: https://github.com/jansel ghstack dependencies: #121497, #123930, #123932, #121734, #124107, #124574
|
diff --git a/torch/_inductor/codegen/cuda/gemm_template.py b/torch/_inductor/codegen/cuda/gemm_template.py
index b527b4a513..5ec1e314b1 100644
--- a/torch/_inductor/codegen/cuda/gemm_template.py
+++ b/torch/_inductor/codegen/cuda/gemm_template.py
@@ -1,10 +1,11 @@
import copy
import logging
import re
-from typing import cast, Dict, List, Optional, Tuple
+from typing import cast, Dict, List, Optional, Tuple, Union
+from ... import ir
from ...config import cuda as inductor_cuda_config
-from ...ir import Buffer, CUDATemplateBuffer, FixedLayout, IRNode, Layout
+from ...ir import Buffer, ChoiceCaller, CUDATemplateBuffer, FixedLayout, IRNode, Layout
from ..common import IndentedBuffer
from . import cutlass_utils
@@ -186,60 +187,52 @@ class CUTLASSGemmTemplate(CUTLASSTemplate):
@staticmethod
def add_cutlass_gemm_choices(
- choices,
- layout,
- input_nodes,
- alpha=1,
- beta=0,
- input_reorder=None,
- fuseable=True,
- non_fuseable=True,
- ):
- if non_fuseable:
- if fuseable:
- # list both fuseable and non-fuseable ops, and treat them all as non-fuseable
- can_fuse_epilogue = False
- else:
- can_fuse_epilogue = None
-
- cutlass_template = CUTLASSGemmTemplate(
- input_nodes,
- layout,
- alpha=alpha,
- beta=beta,
- input_reorder=input_reorder,
- can_fuse_epilogue=can_fuse_epilogue,
- )
- ops = cutlass_template.gen_ops()
- for op in ops:
- cutlass_template.maybe_append_choice(
- choices,
- op=op,
- )
- else:
- ops = []
- if fuseable:
- cutlass_template_evt = CUTLASSGemmTemplate(
- input_nodes,
- layout,
- alpha=alpha,
- beta=beta,
- input_reorder=input_reorder,
- can_fuse_epilogue=True,
+ choices: List[ChoiceCaller],
+ layout: ir.Layout,
+ input_nodes: List[ir.IRNode],
+ alpha: Union[float, int] = 1,
+ beta: Union[float, int] = 0,
+ input_reorder: Optional[List[int]] = None,
+ **extra_kwargs,
+ ) -> None:
+ """
+ Adds Cutlass GEMM configurations choices to the auto-tuning list.
+
+ This function mutates the passed list of choices by appending the choices for Cutlass GEMM configs to it.
+
+ Args:
+ choices (list): The list to which choices are appended.
+ layout (ir.Layout): The layout configuration.
+ input_nodes (list): The list of input nodes.
+ alpha (float,int): Scaling factor, defaults to 1.
+ beta (float,int): Offset, defaults to 0.
+ input_reorder (list, optional): Order of the inputs, defaults to None.
+ **extra_kwargs: Additional keyword arguments.
+
+ """
+
+ cutlass_template = CUTLASSGemmTemplate(
+ input_nodes, # type: ignore[arg-type]
+ layout,
+ alpha=alpha,
+ beta=beta,
+ input_reorder=input_reorder,
+ )
+ ops = cutlass_template.gen_ops()
+ for op in ops:
+ cutlass_template.maybe_append_choice(
+ choices,
+ op=op,
)
- # This will list only ops capable of EVT fusion
- ops_evt = cutlass_template_evt.gen_ops()
- for op in ops_evt:
- cutlass_template_evt.maybe_append_choice(
- choices,
- op=op,
- )
- else:
- ops_evt = []
+ if len(ops) == 0:
+ input_layouts = [node.get_layout() for node in input_nodes]
+ input_strides = [node.get_stride() for node in input_nodes]
+ output_layout = layout
+ warning_msg = f"No suitable Cutlass GEMM configs found, fallbacks used ( {len(ops)=}, {output_layout=}, {input_layouts=}, {input_strides=} )" # noqa: B950
+ log.warning(warning_msg)
log.debug(
- "Added %d cutlass gemm configs and %d fuseable gemm configs.",
+ "Added %d Cutlass gemm configs.",
len(ops),
- len(ops_evt),
)
def header(self) -> IndentedBuffer:
|
2.41.0
|
2295fbacddf01f6949b3d463f080fef331b6773
|
Wed, 24 Apr 2024 14:41:26 +0000
|
[PATCH 0575/1000] Revert "Verify types in custom op schemas (#124520)"
|
This reverts commit 5b98d43488bed0836b4da5996a50bafd0dd2c11c. Reverted https://github.com/pytorch/pytorch/pull/124520 on behalf of https://github.com/zou3519 due to broke static runtime tests ([comment](https://github.com/pytorch/pytorch/pull/124520#issuecomment-2075111935))
|
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index caae309cf6..9e748083ce 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -1740,17 +1740,6 @@ dynamic shape operator: _torch_testing.numpy_nonzero.default
res = torch._library.utils.is_functional_schema(schema)
self.assertEqual(res, expected)
- def test_incorrect_schema_types(self):
- with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
- with self.assertRaisesRegex(RuntimeError, "unknown type specifier"):
- lib.define("foo12(Tensor a) -> asdfasdf")
- with self.assertRaisesRegex(RuntimeError, "unknown type specifier"):
- lib.define("foo12(asdf a) -> Tensor")
- with self.assertRaisesRegex(RuntimeError, "Use `SymInt` or `int`"):
- lib.define("foo12(int64_t a) -> Tensor")
- with self.assertRaisesRegex(RuntimeError, "Use `float`"):
- lib.define("foo12(double a) -> Tensor")
-
def test_is_tensorlist_like_type(self):
tensorlists = [
# Tensor[]
diff --git a/torch/csrc/jit/frontend/function_schema_parser.cpp b/torch/csrc/jit/frontend/function_schema_parser.cpp
index 94b477676d..4b681055bd 100644
--- a/torch/csrc/jit/frontend/function_schema_parser.cpp
+++ b/torch/csrc/jit/frontend/function_schema_parser.cpp
@@ -23,14 +23,14 @@ namespace torch::jit {
namespace {
struct SchemaParser {
- explicit SchemaParser(const std::string& str, bool allow_typevars)
+ explicit SchemaParser(const std::string& str)
: L(std::make_shared<Source>(
c10::string_view(str),
c10::nullopt,
0,
nullptr,
Source::DONT_COPY)),
- type_parser(L, /*parse_complete_tensor_types*/ false, allow_typevars) {}
+ type_parser(L, /*parse_complete_tensor_types*/ false) {}
std::variant<OperatorName, FunctionSchema> parseDeclaration() {
OperatorName name = parseName();
@@ -361,19 +361,16 @@ struct SchemaParser {
}
Lexer L;
SchemaTypeParser type_parser;
- bool allow_typevars_;
};
} // namespace
std::variant<OperatorName, FunctionSchema> parseSchemaOrName(
- const std::string& schemaOrName,
- bool allow_typevars) {
- return SchemaParser(schemaOrName, allow_typevars)
- .parseExactlyOneDeclaration();
+ const std::string& schemaOrName) {
+ return SchemaParser(schemaOrName).parseExactlyOneDeclaration();
}
-FunctionSchema parseSchema(const std::string& schema, bool allow_typevars) {
- auto parsed = parseSchemaOrName(schema, allow_typevars);
+FunctionSchema parseSchema(const std::string& schema) {
+ auto parsed = parseSchemaOrName(schema);
TORCH_CHECK(
std::holds_alternative<FunctionSchema>(parsed),
"Tried to parse a function schema but only the operator name was given");
diff --git a/torch/csrc/jit/frontend/function_schema_parser.h b/torch/csrc/jit/frontend/function_schema_parser.h
index a60215704f..a01ca7ad0b 100644
--- a/torch/csrc/jit/frontend/function_schema_parser.h
+++ b/torch/csrc/jit/frontend/function_schema_parser.h
@@ -8,15 +8,9 @@
namespace torch {
namespace jit {
-// allow_typevars: If true, we assume that lowercase types that we don't
-// understand are type variables. This is only needed for TorchScript (and not
-// not needed for custom ops).
TORCH_API std::variant<c10::OperatorName, c10::FunctionSchema> parseSchemaOrName(
- const std::string& schemaOrName,
- bool allow_typevars = true);
-TORCH_API c10::FunctionSchema parseSchema(
- const std::string& schema,
- bool allow_typevars = true);
+ const std::string& schemaOrName);
+TORCH_API c10::FunctionSchema parseSchema(const std::string& schema);
TORCH_API c10::OperatorName parseName(const std::string& name);
} // namespace jit
diff --git a/torch/csrc/jit/frontend/schema_type_parser.cpp b/torch/csrc/jit/frontend/schema_type_parser.cpp
index 68e6f7a02b..7c4b8ba0ca 100644
--- a/torch/csrc/jit/frontend/schema_type_parser.cpp
+++ b/torch/csrc/jit/frontend/schema_type_parser.cpp
@@ -82,27 +82,12 @@ TypePtr SchemaTypeParser::parseBaseType() {
auto it = type_map.find(text);
if (it == type_map.end()) {
- if (allow_typevars_ && !text.empty() && islower(text[0])) {
+ if (!text.empty() && islower(text[0])) {
// lower case identifiers that are not otherwise valid types
// are treated as type variables
return c10::TypeFactory::createNamed<VarType>(text);
}
- if (text == "double") {
- throw ErrorReport(tok.range)
- << "Use `float` instead of `double` in an operator's schema string. "
- "`float` in schema corresponds to the double type in C++";
- }
- if (text == "int64_t") {
- throw ErrorReport(tok.range)
- << "Use `SymInt` or `int` instead of `int64_t` in an operator's schema string. "
- "`SymInt` corresponds to c10::SymInt in C++ while `int` in schema corresponds "
- "to the int64_t type in C++.";
- }
- throw ErrorReport(tok.range)
- << "unknown type specifier. Common valid schema types include "
- "Tensor, SymInt, int, float, bool, Scalar; "
- "for a full list, please see "
- "https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/README.md#func ";
+ throw ErrorReport(tok.range) << "unknown type specifier";
}
return it->second;
}
diff --git a/torch/csrc/jit/frontend/schema_type_parser.h b/torch/csrc/jit/frontend/schema_type_parser.h
index e43a24beb5..c43e4363da 100644
--- a/torch/csrc/jit/frontend/schema_type_parser.h
+++ b/torch/csrc/jit/frontend/schema_type_parser.h
@@ -20,13 +20,8 @@ struct TORCH_API SchemaTypeParser {
c10::optional<at::ScalarType> parseTensorDType(const std::string& dtype);
TypePtr parseRefinedTensor();
- SchemaTypeParser(
- Lexer& L,
- bool parse_complete_tensor_types,
- bool allow_typevars)
- : complete_tensor_types(parse_complete_tensor_types),
- L(L),
- allow_typevars_(allow_typevars) {}
+ SchemaTypeParser(Lexer& L, bool parse_complete_tensor_types)
+ : complete_tensor_types(parse_complete_tensor_types), L(L) {}
private:
c10::optional<bool> tryToParseRequiresGrad();
@@ -40,7 +35,6 @@ struct TORCH_API SchemaTypeParser {
bool complete_tensor_types;
Lexer& L;
size_t next_id = 0;
- bool allow_typevars_;
};
} // namespace jit
} // namespace torch
diff --git a/torch/csrc/jit/ir/irparser.cpp b/torch/csrc/jit/ir/irparser.cpp
index 30cb5ad9eb..c37988e322 100644
--- a/torch/csrc/jit/ir/irparser.cpp
+++ b/torch/csrc/jit/ir/irparser.cpp
@@ -35,10 +35,7 @@ class IRParser {
: L(std::make_shared<Source>(str)),
g(graph),
vmap(vmap),
- type_parser(
- L,
- /*parse_complete_tensor_types*/ true,
- /*allow_type_vars*/ true),
+ type_parser(L, /*parse_complete_tensor_types*/ true),
parse_tensor_constants_(parse_tensor_constants) {}
std::string parseVar();
diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp
index 2023ec27ba..5eb4851089 100644
--- a/torch/csrc/jit/python/init.cpp
+++ b/torch/csrc/jit/python/init.cpp
@@ -1765,11 +1765,7 @@ void initJITBindings(PyObject* module) {
},
py::arg("input"),
py::arg("parse_tensor_constants") = false);
- m.def(
- "parse_schema",
- &parseSchema,
- py::arg("schema"),
- py::arg("allow_typevars") = true);
+ m.def("parse_schema", parseSchema);
m.def("unify_type_list", [](const std::vector<TypePtr>& types) {
std::ostringstream s;
auto type = unifyTypeList(types, s);
diff --git a/torch/csrc/jit/runtime/static/passes.cpp b/torch/csrc/jit/runtime/static/passes.cpp
index 68fd8a270c..fcd2b79e39 100644
--- a/torch/csrc/jit/runtime/static/passes.cpp
+++ b/torch/csrc/jit/runtime/static/passes.cpp
@@ -1347,8 +1347,7 @@ bool isNoOpSlice(Node* node) {
void EliminateNoOpSlice(std::shared_ptr<Graph>& graph) {
DepthFirstGraphNodeIterator it(graph);
auto schema = torch::schema(
- "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]",
- /*allow_typevars*/ true);
+ "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]");
Node* node = nullptr;
std::vector<Node*> to_delete;
while ((node = it.next()) != nullptr) {
diff --git a/torch/library.h b/torch/library.h
index 035cc17597..c38179a6ee 100644
--- a/torch/library.h
+++ b/torch/library.h
@@ -406,8 +406,8 @@ inline CppFunction dispatch(c10::DeviceType type, Func&& raw_f) {
/// ```
///
/// \ingroup torch-schema-overloads
-inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k, bool allow_typevars=false) {
- c10::FunctionSchema s = torch::jit::parseSchema(str, /*allow_typevars*/allow_typevars);
+inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k) {
+ c10::FunctionSchema s = torch::jit::parseSchema(str);
s.setAliasAnalysis(k);
return s;
}
@@ -415,8 +415,8 @@ inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k, boo
/// Function schemas can be directly constructed from string literals.
///
/// \ingroup torch-schema-overloads
-inline c10::FunctionSchema schema(const char* s, bool allow_typevars=false) {
- return schema(s, c10::AliasAnalysisKind::FROM_SCHEMA, allow_typevars);
+inline c10::FunctionSchema schema(const char* s) {
+ return schema(s, c10::AliasAnalysisKind::FROM_SCHEMA);
}
/// \private
|
2.41.0
|
739a2d59e3391116f2ee68622e2a9c131c1edf6
|
Wed, 24 Apr 2024 15:02:11 +0000
|
[PATCH 0576/1000] Revert "[quant][pt2e] Move batch norm op between eval/train for cuda (#123957)"
|
This reverts commit 4efb28c90025ea3d979b720942cd97a274fac6da. Reverted https://github.com/pytorch/pytorch/pull/123957 on behalf of https://github.com/jeanschmidt due to reverting to check if it will fix rocm jobs on main ([comment](https://github.com/pytorch/pytorch/pull/123957#issuecomment-2075158146))
|
diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py
index 0b9ad6a9a6..3c759fc65c 100644
--- a/test/quantization/pt2e/test_quantize_pt2e.py
+++ b/test/quantization/pt2e/test_quantize_pt2e.py
@@ -1826,18 +1826,6 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def test_move_exported_model_dropout_inplace(self):
self._test_move_exported_model_dropout(inplace=True)
- def _get_bn_train_eval_ops(self, is_cuda: bool):
- if is_cuda:
- return (
- torch.ops.aten.cudnn_batch_norm.default,
- torch.ops.aten.cudnn_batch_norm.default,
- )
- else:
- return (
- torch.ops.aten._native_batch_norm_legit.default,
- torch.ops.aten._native_batch_norm_legit_no_training.default,
- )
-
def test_move_exported_model_bn(self):
"""
Test switching batch_norm behavior between train and eval modes using
@@ -1852,18 +1840,12 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def forward(self, x):
return self.bn(x)
- is_cuda = torch.cuda.is_available()
- if is_cuda:
- m = M().train().cuda()
- example_inputs = (torch.randn(1, 3, 3, 3).cuda(),)
- else:
- m = M().train()
- example_inputs = (torch.randn(1, 3, 3, 3),)
- bn_train_op, bn_eval_op = self._get_bn_train_eval_ops(is_cuda)
+ example_inputs = (torch.randn(1, 3, 3, 3),)
+ m = M().train()
m = capture_pre_autograd_graph(m, example_inputs)
# Assert that batch norm op exists and is in train mode
- bn_node = self._get_node(m, bn_train_op)
+ bn_node = self._get_node(m, torch.ops.aten._native_batch_norm_legit.default)
self.assertTrue(bn_node is not None)
self.assertTrue(bn_node.args[5])
@@ -1871,14 +1853,16 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
torch.ao.quantization.move_exported_model_to_eval(m)
# Assert that batch norm op is now in eval mode
- bn_node = self._get_node(m, bn_eval_op)
+ bn_node = self._get_node(
+ m, torch.ops.aten._native_batch_norm_legit_no_training.default
+ )
self.assertTrue(bn_node is not None)
# Move to train
torch.ao.quantization.move_exported_model_to_train(m)
# Assert that batch norm op is now in train mode again
- bn_node = self._get_node(m, bn_train_op)
+ bn_node = self._get_node(m, torch.ops.aten._native_batch_norm_legit.default)
self.assertTrue(bn_node is not None)
self.assertTrue(bn_node.args[5])
@@ -1924,25 +1908,22 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
x = self.dropout(x)
return x
- is_cuda = torch.cuda.is_available()
- if is_cuda:
- m = M().train().cuda()
- example_inputs = (torch.randn(1, 3, 3, 3).cuda(),)
- else:
- m = M().train()
- example_inputs = (torch.randn(1, 3, 3, 3),)
- bn_train_op, bn_eval_op = self._get_bn_train_eval_ops(is_cuda)
+ example_inputs = (torch.randn(1, 3, 3, 3),)
+ m = M().train()
m = capture_pre_autograd_graph(m, example_inputs)
def _assert_ops_are_correct(m: torch.fx.GraphModule, train: bool):
targets = [n.target for n in m.graph.nodes]
- bn_op = bn_train_op if train else bn_eval_op
- bn_node = self._get_node(m, bn_op)
- self.assertTrue(bn_node is not None)
- if is_cuda:
- self.assertEqual(bn_node.args[5], train)
+ bn_train_target = torch.ops.aten._native_batch_norm_legit.default
+ bn_eval_target = torch.ops.aten._native_batch_norm_legit_no_training.default
+ if train:
+ self.assertTrue(bn_train_target in targets)
+ self.assertTrue(bn_eval_target not in targets)
+ else:
+ self.assertTrue(bn_eval_target in targets)
+ self.assertTrue(bn_train_target not in targets)
dropout_node = self._get_node(m, torch.ops.aten.dropout.default)
- self.assertEqual(dropout_node.args[2], train)
+ self.assertTrue(dropout_node.args[2] == train)
# Before wrapping: this is not OK
with self.assertRaises(NotImplementedError):
diff --git a/torch/ao/quantization/fx/utils.py b/torch/ao/quantization/fx/utils.py
index be26332b24..21a1034739 100644
--- a/torch/ao/quantization/fx/utils.py
+++ b/torch/ao/quantization/fx/utils.py
@@ -23,7 +23,6 @@ from torch.ao.quantization.qconfig import (
)
from torch.ao.quantization.stubs import DeQuantStub
from torch.ao.quantization.utils import (
- _assert_and_get_unique_device,
activation_is_statically_quantized,
)
from torch.ao.quantization.observer import _is_activation_post_process
@@ -223,13 +222,26 @@ def graph_module_from_producer_nodes(
graph_module = GraphModule(root, graph)
return graph_module
-# TODO: delete
def assert_and_get_unique_device(module: torch.nn.Module) -> Any:
"""
Returns the unique device for a module, or None if no device is found.
Throws an error if multiple devices are detected.
"""
- return _assert_and_get_unique_device(module)
+ devices = {p.device for p in module.parameters()} | \
+ {p.device for p in module.buffers()}
+ """
+ As a temp workaround for AIMP HHC publish we added CPU check.remove it later. T163614564
+ """
+ if {torch.device("cpu"), torch.device("meta")} == devices:
+ warnings.warn("Both 'meta' and 'cpu' are present in the list of devices. Module can have one device. We Select 'cpu'.")
+ devices = {torch.device("cpu")}
+ ""
+ assert len(devices) <= 1, (
+ "prepare only works with cpu or single-device CUDA modules, "
+ f"but got devices {devices}"
+ )
+ device = next(iter(devices)) if len(devices) > 0 else None
+ return device
def create_getattr_from_value(module: torch.nn.Module, graph: Graph, prefix: str, value: Any) -> Node:
"""
diff --git a/torch/ao/quantization/pt2e/export_utils.py b/torch/ao/quantization/pt2e/export_utils.py
index 2e7b9e380d..dae8baad8d 100644
--- a/torch/ao/quantization/pt2e/export_utils.py
+++ b/torch/ao/quantization/pt2e/export_utils.py
@@ -3,8 +3,6 @@ import types
import torch
import torch.nn.functional as F
-from torch.ao.quantization.utils import _assert_and_get_unique_device
-
__all__ = [
"model_is_exported",
@@ -138,26 +136,20 @@ def _replace_batchnorm(m: torch.fx.GraphModule, train_to_eval: bool):
torch.randn(1), # bn_running_mean
torch.randn(1), # bn_running_var
)
-
- device = _assert_and_get_unique_device(m)
- is_cuda = device is not None and device.type == "cuda"
- bn_train_aten = _get_aten_graph_module_for_pattern(
- _WrapperModule(bn_train),
- example_inputs,
- is_cuda,
- )
- bn_eval_aten = _get_aten_graph_module_for_pattern(
- _WrapperModule(bn_eval),
- example_inputs,
- is_cuda,
- )
-
if train_to_eval:
- match_pattern = bn_train_aten
- replacement_pattern = bn_eval_aten
+ match_pattern = _get_aten_graph_module_for_pattern(
+ _WrapperModule(bn_train), example_inputs
+ )
+ replacement_pattern = _get_aten_graph_module_for_pattern(
+ _WrapperModule(bn_eval), example_inputs
+ )
else:
- match_pattern = bn_eval_aten
- replacement_pattern = bn_train_aten
+ match_pattern = _get_aten_graph_module_for_pattern(
+ _WrapperModule(bn_eval), example_inputs
+ )
+ replacement_pattern = _get_aten_graph_module_for_pattern(
+ _WrapperModule(bn_train), example_inputs
+ )
from torch.fx.subgraph_rewriter import replace_pattern_with_filters
diff --git a/torch/ao/quantization/utils.py b/torch/ao/quantization/utils.py
index d0de50bbeb..70b45b92fb 100644
--- a/torch/ao/quantization/utils.py
+++ b/torch/ao/quantization/utils.py
@@ -688,27 +688,6 @@ def get_fqn_to_example_inputs(
torch.nn.Module.__call__ = orig_module_call # type: ignore[method-assign]
return fqn_to_example_inputs
-def _assert_and_get_unique_device(module: torch.nn.Module) -> Any:
- """
- Returns the unique device for a module, or None if no device is found.
- Throws an error if multiple devices are detected.
- """
- devices = {p.device for p in module.parameters()} | \
- {p.device for p in module.buffers()}
- """
- As a temp workaround for AIMP HHC publish we added CPU check.remove it later. T163614564
- """
- if {torch.device("cpu"), torch.device("meta")} == devices:
- warnings.warn("Both 'meta' and 'cpu' are present in the list of devices. Module can have one device. We Select 'cpu'.")
- devices = {torch.device("cpu")}
- ""
- assert len(devices) <= 1, (
- "prepare only works with cpu or single-device CUDA modules, "
- f"but got devices {devices}"
- )
- device = next(iter(devices)) if len(devices) > 0 else None
- return device
-
__all__ = [
"NodePattern",
"Pattern",
|
2.41.0
|
d120b08c4828c39f116cfe1fb39195c844be485
|
Wed, 24 Apr 2024 17:04:36 +0000
|
[PATCH 0579/1000] Add commonly used score_mod functions for templated attention (#124670)
|
Fixes #ISSUE_NUMBER Pull Request resolved: https://github.com/pytorch/pytorch/pull/124670 Approved by: https://github.com/Chillee
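A hedged usage sketch of the new built-in score_mod helpers with _templated_attention; shapes and dtype follow the conventions in test_templated_attention.py, an sm80+ CUDA device is assumed, and the underscore-prefixed API is private and may change.
```
import torch
from torch.nn.attention._templated_attention import (
    _generate_alibi_bias,
    _rel_causal,
    _templated_attention,
)

# B/H/S/D mirror the constants used in the test file.
B, H, S, D = 4, 8, 2048, 64
q = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16)
k = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16)
v = torch.randn(B, H, S, D, device="cuda", dtype=torch.float16)

compiled = torch.compile(_templated_attention)
# ALiBi-style bias generated for H heads, and a relative causal mask.
out_alibi = compiled(q, k, v, score_mod=_generate_alibi_bias(H))
out_rel_causal = compiled(q, k, v, score_mod=_rel_causal)
```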
|
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index 4c8043d9bf..c2cf3b295e 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -13,7 +13,15 @@ from torch._higher_order_ops.templated_attention import (
)
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_code
-from torch.nn.attention._templated_attention import _compose, _templated_attention
+from torch.nn.attention._templated_attention import (
+ _causal,
+ _compose,
+ _generate_alibi_bias,
+ _identity,
+ _rel_bias,
+ _rel_causal,
+ _templated_attention,
+)
from torch.testing import FileCheck
from torch.testing._internal import common_utils
from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_BF16
@@ -48,9 +56,13 @@ test_dtypes_fast = [torch.float16]
if common_utils.TEST_WITH_ROCM:
test_dtypes = [torch.float32]
-
-def _identity_mod(score, b, h, m, n):
- return score
+test_score_mods = [
+ _identity,
+ _causal,
+ _rel_bias,
+ _rel_causal,
+ _generate_alibi_bias(8),
+]
def _causal_mod(score, b, h, token_q, token_kv):
@@ -90,58 +102,8 @@ class TestTemplatedSDPA(InductorTestCase):
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
- def test_identity(self, dtype: torch.dtype):
- def score_mod(score, b, h, m, n):
- return score
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_causal_mask(self, dtype: torch.dtype):
- def score_mod(score, b, h, token_q, token_kv):
- return torch.where(token_q >= token_kv, score, float("-inf"))
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_rel_bias(self, dtype: torch.dtype):
- def score_mod(score, b, h, m, n):
- return score + (m - n)
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_alibi_bias(self, dtype: torch.dtype):
- def score_mod(score, b, h, m, n):
- return score + (m - n) * h
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_rel_causal(self, dtype: torch.dtype):
- def score_mod(score, b, h, m, n):
- return torch.where(m <= n, score + (m - n), float("-inf"))
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_skip_odd_keys(self, dtype: torch.dtype):
- def score_mod(score, b, h, q, kv):
- return torch.where(kv % 2 == 0, score, float("-inf"))
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_alibi_causal(self, dtype: torch.dtype):
- def score_mod(score, b, h, m, n):
- return torch.where(m <= n, score + (m - n) * h, float("-inf"))
-
+ @common_utils.parametrize("score_mod", test_score_mods)
+ def test_builtin_score_mods(self, dtype: torch.dtype, score_mod: Callable):
self.run_test(score_mod, dtype)
@supported_platform
@@ -272,7 +234,7 @@ class TestTemplatedSDPA(InductorTestCase):
requires_grad=True,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
- out = _templated_attention(q, k, v, _identity_mod)
+ out = _templated_attention(q, k, v, _identity)
with self.assertRaisesRegex(
RuntimeError, "Autograd not implemented for templated_attention"
):
@@ -286,7 +248,7 @@ class TestTemplatedSDPA(InductorTestCase):
with self.assertRaisesRegex(
ValueError, "Expected query, key, and value to have the same dtype"
):
- _templated_attention(query, key, value, _identity_mod)
+ _templated_attention(query, key, value, _identity)
@supported_platform
def test_different_sequence_length_fails(self):
@@ -294,7 +256,7 @@ class TestTemplatedSDPA(InductorTestCase):
key = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
value = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
with self.assertRaisesRegex(ValueError, "NYI: The target sequence length"):
- _templated_attention(query, key, value, _identity_mod)
+ _templated_attention(query, key, value, _identity)
@supported_platform
@patch.object(torch._inductor.config, "max_autotune", True)
@@ -321,7 +283,7 @@ class TestTemplatedSDPA(InductorTestCase):
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
- @common_utils.parametrize("score_mod", [_identity_mod, _causal_mod])
+ @common_utils.parametrize("score_mod", [_identity, _causal])
def test_logsumexp_correctness(self, dtype, score_mod):
@torch.compile
def sdpa_hop(q, k, v, score_mod):
@@ -384,7 +346,7 @@ class TestTemplatedSDPA(InductorTestCase):
lse_2 = lse * 2
return lse_2
- _, code = run_and_get_code(func, q, k, v, _identity_mod)
+ _, code = run_and_get_code(func, q, k, v, _identity)
# Ensure that two kernels are generated
FileCheck().check_count(".run(", 2, True).run(code[0])
@@ -405,7 +367,7 @@ class TestTemplatedSDPA(InductorTestCase):
lse_2 = lse * 2
return out, lse_2
- _, code = run_and_get_code(func, q, k, v, _identity_mod)
+ _, code = run_and_get_code(func, q, k, v, _identity)
# Ensure that two kernels are generated
FileCheck().check_count(".run(", 2, True).run(code[0])
diff --git a/torch/nn/attention/_templated_attention.py b/torch/nn/attention/_templated_attention.py
index 0e614c8a4e..00183a2e31 100644
--- a/torch/nn/attention/_templated_attention.py
+++ b/torch/nn/attention/_templated_attention.py
@@ -90,3 +90,60 @@ def _templated_attention(
# Drop the logsumexp value since this is only needed for backwards
return out
+
+
+"""Some common used score_mod functions for templated attention in PyTorch."""
+
+
+def _identity(
+ score: torch.Tensor,
+ batch: torch.Tensor,
+ head: torch.Tensor,
+ token_q: torch.Tensor,
+ token_kv: torch.Tensor,
+) -> torch.Tensor:
+ return score
+
+
+def _causal(
+ score: torch.Tensor,
+ batch: torch.Tensor,
+ head: torch.Tensor,
+ token_q: torch.Tensor,
+ token_kv: torch.Tensor,
+) -> torch.Tensor:
+ return torch.where(token_q >= token_kv, score, float("-inf"))
+
+
+def _rel_bias(
+ score: torch.Tensor,
+ batch: torch.Tensor,
+ head: torch.Tensor,
+ token_q: torch.Tensor,
+ token_kv: torch.Tensor,
+) -> torch.Tensor:
+ return score + (token_q - token_kv)
+
+
+def _rel_causal(
+ score: torch.Tensor,
+ batch: torch.Tensor,
+ head: torch.Tensor,
+ token_q: torch.Tensor,
+ token_kv: torch.Tensor,
+) -> torch.Tensor:
+ return torch.where(token_q <= token_kv, score + (token_q - token_kv), float("-inf"))
+
+
+def _generate_alibi_bias(num_heads: int):
+ def _alibi_bias(
+ score: torch.Tensor,
+ batch: torch.Tensor,
+ head: torch.Tensor,
+ token_q: torch.Tensor,
+ token_kv: torch.Tensor,
+ ) -> torch.Tensor:
+ scale = torch.exp2(-((head + 1) * 8.0 / num_heads))
+ return score + (token_kv - token_q) * scale
+
+ return _alibi_bias
|
2.41.0
|
888d7495ece6b6df3b7334fc7c2a9d869359250
|
Wed, 24 Apr 2024 17:28:12 +0000
|
[PATCH 0580/1000] [ROCm] Triton upstream AMD backend integration (#121801)
|
Update ROCm-triton to use the AMD backend from https://github.com/openai/triton Note: `test__int_mm` can be enabled after https://github.com/pytorch/pytorch/pull/122431 is landed Co-authored-by: Pruthvi Madugundu <pruthvigithub@gmail.com> Co-authored-by: Nikita Shulga <2453524+malfet@users.noreply.github.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/121801 Approved by: https://github.com/nmacchioni, https://github.com/malfet
|
diff --git a/.ci/docker/ci_commit_pins/triton-rocm.txt b/.ci/docker/ci_commit_pins/triton-rocm.txt
index e2eb3bdf28..2df035af1f 100644
--- a/.ci/docker/ci_commit_pins/triton-rocm.txt
+++ b/.ci/docker/ci_commit_pins/triton-rocm.txt
@@ -1 +1 @@
-0a22a91d04c2b4a029a69a198eac390089c3e891
+bbe6246e37d8aa791c67daaf9d9d61b26c9ccfdc
diff --git a/.ci/docker/common/install_triton.sh b/.ci/docker/common/install_triton.sh
index c7e60d7d9e..de009c1a3a 100755
--- a/.ci/docker/common/install_triton.sh
+++ b/.ci/docker/common/install_triton.sh
@@ -13,7 +13,7 @@ conda_reinstall() {
}
if [ -n "${ROCM_VERSION}" ]; then
- TRITON_REPO="https://github.com/ROCmSoftwarePlatform/triton"
+ TRITON_REPO="https://github.com/openai/triton"
TRITON_TEXT_FILE="triton-rocm"
elif [ -n "${BASEKIT_VERSION}" ]; then
TRITON_REPO="https://github.com/intel/intel-xpu-backend-for-triton"
diff --git a/.github/scripts/amd/package_triton_wheel.sh b/.github/scripts/amd/package_triton_wheel.sh
new file mode 100755
index 0000000000..4295a97a34
--- /dev/null
+++ b/.github/scripts/amd/package_triton_wheel.sh
@@ -0,0 +1,99 @@
+set -ex
+
+# Set ROCM_HOME isn't available, use ROCM_PATH if set or /opt/rocm
+ROCM_HOME="${ROCM_HOME:-${ROCM_PATH:-/opt/rocm}}"
+
+# Find rocm_version.h header file for ROCm version extract
+rocm_version_h="${ROCM_HOME}/include/rocm-core/rocm_version.h"
+if [ ! -f "$rocm_version_h" ]; then
+ rocm_version_h="${ROCM_HOME}/include/rocm_version.h"
+fi
+
+# Error out if rocm_version.h not found
+if [ ! -f "$rocm_version_h" ]; then
+ echo "Error: rocm_version.h not found in expected locations." >&2
+ exit 1
+fi
+
+# Extract major, minor and patch ROCm version numbers
+MAJOR_VERSION=$(grep 'ROCM_VERSION_MAJOR' "$rocm_version_h" | awk '{print $3}')
+MINOR_VERSION=$(grep 'ROCM_VERSION_MINOR' "$rocm_version_h" | awk '{print $3}')
+PATCH_VERSION=$(grep 'ROCM_VERSION_PATCH' "$rocm_version_h" | awk '{print $3}')
+ROCM_INT=$(($MAJOR_VERSION * 10000 + $MINOR_VERSION * 100 + $PATCH_VERSION))
+echo "ROCm version: $ROCM_INT"
+
+# Check TRITON_ROCM_DIR is set
+if [[ -z "${TRITON_ROCM_DIR}" ]]; then
+ export TRITON_ROCM_DIR=third_party/amd/backend
+fi
+
+# Remove packaged libs and headers
+rm -rf $TRITON_ROCM_DIR/include/*
+
+LIBTINFO_PATH="/usr/lib64/libtinfo.so.5"
+LIBNUMA_PATH="/usr/lib64/libnuma.so.1"
+LIBELF_PATH="/usr/lib64/libelf.so.1"
+
+OS_SO_PATHS=(
+ $LIBELF_PATH
+ $LIBNUMA_PATH
+ $LIBTINFO_PATH
+)
+
+for lib in "${OS_SO_PATHS[@]}"
+do
+ cp $lib $TRITON_ROCM_DIR/lib/
+done
+
+# Required ROCm libraries
+if [[ "${MAJOR_VERSION}" == "6" ]]; then
+ libamdhip="libamdhip64.so.6"
+else
+ libamdhip="libamdhip64.so.5"
+fi
+
+# Required ROCm libraries - ROCm 6.0
+ROCM_SO=(
+ "${libamdhip}"
+ "libhsa-runtime64.so.1"
+ "libamd_comgr.so.2"
+ "libdrm.so.2"
+ "libdrm_amdgpu.so.1"
+)
+
+if [[ $ROCM_INT -ge 60100 ]]; then
+ ROCM_SO+=("librocprofiler-register.so.0")
+fi
+
+for lib in "${ROCM_SO[@]}"
+do
+ file_path=($(find $ROCM_HOME/lib/ -name "$lib")) # First search in lib
+ if [[ -z $file_path ]]; then
+ if [ -d "$ROCM_HOME/lib64/" ]; then
+ file_path=($(find $ROCM_HOME/lib64/ -name "$lib")) # Then search in lib64
+ fi
+ fi
+ if [[ -z $file_path ]]; then
+ file_path=($(find $ROCM_HOME/ -name "$lib")) # Then search in ROCM_HOME
+ fi
+ if [[ -z $file_path ]]; then
+ file_path=($(find /opt/ -name "$lib")) # Then search in /opt
+ fi
+ if [[ -z $file_path ]]; then
+ echo "Error: Library file $lib is not found." >&2
+ exit 1
+ fi
+
+ cp $file_path $TRITON_ROCM_DIR/lib
+ # When running locally, and not building a wheel, we need to satisfy shared objects requests that don't look for versions
+ LINKNAME=$(echo $lib | sed -e 's/\.so.*/.so/g')
+ ln -sf $lib $TRITON_ROCM_DIR/lib/$LINKNAME
+
+done
+
+# Copy Include Files
+cp -r $ROCM_HOME/include/hip $TRITON_ROCM_DIR/include
+
+# Copy linker
+mkdir -p $TRITON_ROCM_DIR/llvm/bin
+cp $ROCM_HOME/llvm/bin/ld.lld $TRITON_ROCM_DIR/llvm/bin/
diff --git a/.github/scripts/amd/patch_triton_wheel.sh b/.github/scripts/amd/patch_triton_wheel.sh
new file mode 100755
index 0000000000..d95ca023ff
--- /dev/null
+++ b/.github/scripts/amd/patch_triton_wheel.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+set -x
+
+WHEELHOUSE_DIR=/artifacts
+PATCHELF_BIN=patchelf
+ROCM_LIB=backends/amd/lib
+ROCM_LD=backends/amd/llvm/bin
+PREFIX=triton
+fname_without_so_number() {
+ LINKNAME=$(echo $1 | sed -e 's/\.so.*/.so/g')
+ echo "$LINKNAME"
+}
+
+replace_needed_sofiles() {
+ find $1 -name '*.so*' -o -name 'ld.lld' | while read sofile; do
+ origname=$2
+ patchedname=$3
+ if [[ "$origname" != "$patchedname" ]]; then
+ set +e
+ origname=$($PATCHELF_BIN --print-needed $sofile | grep "$origname.*")
+ ERRCODE=$?
+ set -e
+ if [ "$ERRCODE" -eq "0" ]; then
+ echo "patching $sofile entry $origname to $patchedname"
+ $PATCHELF_BIN --replace-needed $origname $patchedname $sofile
+ fi
+ fi
+ done
+}
+
+mkdir -p "/tmp_dir"
+pushd /tmp_dir
+for pkg in /$WHEELHOUSE_DIR/*triton*.whl; do
+ echo "Modifying $pkg"
+ rm -rf tmp
+ mkdir -p tmp
+ cd tmp
+ cp $pkg .
+ unzip -q $(basename $pkg)
+ rm -f $(basename $pkg)
+ $PATCHELF_BIN --set-rpath ${LD_SO_RPATH:-'$ORIGIN:$ORIGIN/../../lib'} $PREFIX/$ROCM_LD/ld.lld
+ $PATCHELF_BIN --print-rpath $PREFIX/$ROCM_LD/ld.lld
+ # Modify libtriton.so as it sits in _C directory apart from its dependencies
+ find $PREFIX/_C -type f -name "*.so*" | while read sofile; do
+ echo "Setting rpath of $sofile"
+ $PATCHELF_BIN --set-rpath ${C_SO_RPATH:-'$ORIGIN:$ORIGIN/'../$ROCM_LIB} ${FORCE_RPATH:-} $sofile
+ $PATCHELF_BIN --print-rpath $sofile
+ done
+
+ # All included dependencies are included in a single lib directory
+ deps=()
+ deps_soname=()
+ while read sofile; do
+ echo "Setting rpath of $sofile to ${LIB_SO_RPATH:-'$ORIGIN'}"
+ $PATCHELF_BIN --set-rpath ${LIB_SO_RPATH:-'$ORIGIN'} ${FORCE_RPATH:-} $sofile
+ $PATCHELF_BIN --print-rpath $sofile
+ deps+=("$sofile")
+ deps_soname+=("$(basename $sofile)")
+ done < <(find $PREFIX/$ROCM_LIB -type f -name "*.so*")
+
+ patched=()
+ for filepath in "${deps[@]}"; do
+ filename=$(basename $filepath)
+ destpath=$PREFIX/$ROCM_LIB/$filename
+ if [[ "$filepath" != "$destpath" ]]; then
+ cp $filepath $destpath
+ fi
+ patchedpath=$(fname_without_so_number $destpath)
+ patchedname=$(basename $patchedpath)
+ if [[ "$destpath" != "$patchedpath" ]]; then
+ mv $destpath $patchedpath
+ fi
+ patched+=("$patchedname")
+ echo "Copied $filepath to $patchedpath"
+ done
+
+ # Go through all required shared objects and see if any of our other objects are dependants. If so, replace so.ver with so
+ for ((i=0;i<${#deps[@]};++i)); do
+ echo "replacing "${deps_soname[i]} ${patched[i]}
+ replace_needed_sofiles $PREFIX/$ROCM_LIB ${deps_soname[i]} ${patched[i]}
+ replace_needed_sofiles $PREFIX/_C ${deps_soname[i]} ${patched[i]}
+ replace_needed_sofiles $PREFIX/$ROCM_LD ${deps_soname[i]} ${patched[i]}
+ done
+
+ # Re-bundle whl with so adjustments
+ zip -rqy $(basename $pkg) *
+
+ if [[ -z "${MANYLINUX_VERSION}" ]]; then
+ newpkg=$pkg
+ else
+ newpkg=$(echo $pkg | sed -e "s/\linux_x86_64/${MANYLINUX_VERSION}/g")
+ fi
+
+ # Remove original whl
+ rm -f $pkg
+
+ # Move rebuilt whl to original location with new name.
+ mv $(basename $pkg) $newpkg
+done
diff --git a/.github/scripts/build_triton_wheel.py b/.github/scripts/build_triton_wheel.py
index 624de58d93..33a49d788a 100644
--- a/.github/scripts/build_triton_wheel.py
+++ b/.github/scripts/build_triton_wheel.py
@@ -10,9 +10,6 @@ from typing import Optional
SCRIPT_DIR = Path(__file__).parent
REPO_DIR = SCRIPT_DIR.parent.parent
-# TODO: Remove me once Triton version is again in sync for vanilla and ROCm
-ROCM_TRITION_VERSION = "2.1.0"
-
def read_triton_pin(rocm_hash: bool = False) -> str:
triton_file = "triton.txt" if not rocm_hash else "triton-rocm.txt"
@@ -32,27 +29,6 @@ def check_and_replace(inp: str, src: str, dst: str) -> str:
return inp.replace(src, dst)
-def patch_setup_py(
- path: Path,
- *,
- version: str,
- name: str = "triton",
- expected_version: Optional[str] = None,
-) -> None:
- with open(path) as f:
- orig = f.read()
- # Replace name
- orig = check_and_replace(orig, 'name="triton",', f'name="{name}",')
- # Replace version
- if not expected_version:
- expected_version = read_triton_version()
- orig = check_and_replace(
- orig, f'version="{expected_version}",', f'version="{version}",'
- )
- with open(path, "w") as f:
- f.write(orig)
-
-
def patch_init_py(
path: Path, *, version: str, expected_version: Optional[str] = None
) -> None:
@@ -92,11 +68,10 @@ def build_triton(
with TemporaryDirectory() as tmpdir:
triton_basedir = Path(tmpdir) / "triton"
triton_pythondir = triton_basedir / "python"
+ triton_repo = "https://github.com/openai/triton"
if build_rocm:
- triton_repo = "https://github.com/ROCmSoftwarePlatform/triton"
triton_pkg_name = "pytorch-triton-rocm"
else:
- triton_repo = "https://github.com/openai/triton"
triton_pkg_name = "pytorch-triton"
check_call(["git", "clone", triton_repo], cwd=tmpdir)
if release:
@@ -162,18 +137,15 @@ def build_triton(
patch_init_py(
triton_pythondir / "triton" / "__init__.py",
version=f"{version}",
- expected_version=ROCM_TRITION_VERSION if build_rocm else None,
+ expected_version=None,
)
if build_rocm:
- # TODO: Remove me when ROCM triton is updated
- patch_setup_py(
- triton_pythondir / "setup.py",
- name=triton_pkg_name,
- version=f"{version}",
- expected_version=ROCM_TRITION_VERSION,
+ check_call(
+ [f"{SCRIPT_DIR}/amd/package_triton_wheel.sh"],
+ cwd=triton_basedir,
+ shell=True,
)
- check_call("scripts/amd/setup_rocm_libs.sh", cwd=triton_basedir, shell=True)
print("ROCm libraries setup for triton installation...")
check_call(
@@ -184,8 +156,11 @@ def build_triton(
shutil.copy(whl_path, Path.cwd())
if build_rocm:
- check_call("scripts/amd/fix_so.sh", cwd=triton_basedir, shell=True)
-
+ check_call(
+ [f"{SCRIPT_DIR}/amd/patch_triton_wheel.sh"],
+ cwd=triton_basedir,
+ shell=True,
+ )
return Path.cwd() / whl_path.name
diff --git a/test/inductor/test_select_algorithm.py b/test/inductor/test_select_algorithm.py
index 48713bb63e..ca5b99f02c 100644
--- a/test/inductor/test_select_algorithm.py
+++ b/test/inductor/test_select_algorithm.py
@@ -109,6 +109,8 @@ class TestSelectAlgorithm(TestCase):
)
self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1)
+ # FIXME: Investigate why _int_mm_out_cuda is not compiled on ROCm
+ @skipIfRocm
@patches
def test__int_mm(self):
@torch.compile
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 7a99b1f31e..9614eae4f3 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -9354,6 +9354,8 @@ class CommonTemplate:
b = torch.randn(65, 2**24, device=self.device)
fn(a, b)
+ # Skipped on ROCm until https://github.com/ROCm/triton/issues/443 resolved
+ @skipIfRocm
def test_fuse_large_params(self):
def pt2_optimizer_step(optimizer):
@torch.compile()
diff --git a/torch/_dynamo/device_interface.py b/torch/_dynamo/device_interface.py
index d2944bc9c0..d93a265466 100644
--- a/torch/_dynamo/device_interface.py
+++ b/torch/_dynamo/device_interface.py
@@ -202,8 +202,11 @@ class CudaInterface(DeviceInterface):
@staticmethod
def get_compute_capability(device: _device_t = None):
- major, min = torch.cuda.get_device_capability(device)
- return major * 10 + min
+ if torch.version.hip is None:
+ major, min = torch.cuda.get_device_capability(device)
+ return major * 10 + min
+ else:
+ return torch.cuda.get_device_properties(device).gcnArchName.split(":", 1)[0]
get_xpu_stream: Optional[Callable[[int], int]]
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index 85587c2093..60a18e416f 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -332,7 +332,18 @@ class CachingAutotuner(KernelInterface):
),
)
- target = (compile_meta["device_type"], compile_meta["cc"])
+ cc_str = str(compile_meta["cc"])
+ if "gfx10" in cc_str or "gfx11" in cc_str:
+ rocm_warp_size = 32
+ else:
+ rocm_warp_size = 64
+
+ target = (
+ (compile_meta["device_type"], compile_meta["cc"])
+ if not torch.version.hip
+ else [compile_meta["device_type"], compile_meta["cc"], rocm_warp_size]
+ )
+
options = {
"num_warps": compile_meta["num_warps"],
"num_stages": compile_meta["num_stages"],
@@ -694,18 +705,12 @@ class CachingAutotuner(KernelInterface):
from torch._inductor.codecache import CudaKernelParamCache
- if self.device_props.type != "hip":
- CudaKernelParamCache.set(key, params, launcher.bin.asm["cubin"])
- else:
- # There is some divergence between CUDA and ROCm here.
- # On ROCm's triton we only have the the path to the binary, not the binary itself.
- # For ROCm we will copy the binary to the new location instead of writing to file
- import pathlib
-
- launcher.bin.asm["hsaco"] = pathlib.Path(
- launcher.bin.asm["hsaco_path"]
- ).read_bytes()
- CudaKernelParamCache.set(key, params, launcher.bin.asm["hsaco"])
+ binary = (
+ launcher.bin.asm["cubin"]
+ if torch.version.hip is None
+ else launcher.bin.asm["hsaco"]
+ )
+ CudaKernelParamCache.set(key, params, binary)
self.cuda_kernel_saved = True
diff --git a/torch/_utils_internal.py b/torch/_utils_internal.py
index ae6fd7086b..fe7d30fc6b 100644
--- a/torch/_utils_internal.py
+++ b/torch/_utils_internal.py
@@ -151,9 +151,29 @@ def justknobs_getval_int(name: str) -> int:
@functools.lru_cache(None)
def max_clock_rate():
- from triton.testing import nvsmi
+ if not torch.version.hip:
+ from triton.testing import nvsmi
- return nvsmi(["clocks.max.sm"])[0]
+ return nvsmi(["clocks.max.sm"])[0]
+ else:
+ # Manually set max-clock speeds on ROCm until equivalent nvmsi
+ # functionality in triton.testing or via pyamdsmi enablement. Required
+ # for test_snode_runtime unit tests.
+ gcn_arch = str(torch.cuda.get_device_properties(0).gcnArchName.split(":", 1)[0])
+ if "gfx94" in gcn_arch:
+ return 1700
+ elif "gfx90a" in gcn_arch:
+ return 1700
+ elif "gfx908" in gcn_arch:
+ return 1502
+ elif "gfx11" in gcn_arch:
+ return 1700
+ elif "gfx103" in gcn_arch:
+ return 1967
+ elif "gfx101" in gcn_arch:
+ return 1144
+ else:
+ return 1100
TEST_MASTER_ADDR = "127.0.0.1"
|
2.41.0
|
8312a7fc360a35ce8fad6721903c48677cd191a
|
Tue, 23 Apr 2024 12:50:35 -0700
|
[PATCH 0584/1000] [DeviceMesh] Removed unneeded `.to(cpu)` (#124768)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124768 Approved by: https://github.com/wz337 ghstack dependencies: #124651, #124741, #124767
|
diff --git a/torch/distributed/device_mesh.py b/torch/distributed/device_mesh.py
index d46d4db651..a1bb6de987 100644
--- a/torch/distributed/device_mesh.py
+++ b/torch/distributed/device_mesh.py
@@ -214,7 +214,7 @@ else:
if isinstance(mesh, torch.Tensor) and mesh.device.type != "cpu":
raise ValueError(f"`mesh` must be a CPU tensor, got {mesh}")
self.mesh = (
- mesh.detach().to(device="cpu", dtype=torch.int)
+ mesh.detach().to(dtype=torch.int)
if isinstance(mesh, torch.Tensor)
else torch.tensor(mesh, dtype=torch.int)
)
|
2.41.0
|
0cf38fd1599d24fdf24b0e589879a8f1929a71d
|
Wed, 24 Apr 2024 18:44:02 +0000
|
[PATCH 0585/1000] [BE]: Apply ruff rule FURB192 (#124742)
|
Apply RUFF rule FURB192 to remove unnecessary sorts and replace them with min / max. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124742 Approved by: https://github.com/albanD, https://github.com/malfet
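For reference, a minimal standalone sketch of the rewrite this rule performs (the dict and variable names below are illustrative, not taken from the patched files):

```python
# Before: sorting the whole mapping just to take the entry with the smallest
# key is O(n log n) and builds a throwaway list.
groups = {3: ["c"], 1: ["a"], 2: ["b"]}
first = sorted(groups.items(), key=lambda kv: kv[0])[0][1]

# After (FURB192): min() finds the same entry in O(n) without sorting.
first = min(groups.items(), key=lambda kv: kv[0])[1]

assert first == ["a"]
```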
|
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index 2783e3edfb..375d65d99f 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -2272,10 +2272,10 @@ class Scheduler:
possible_fusions_group_by_priority[fusion_pair_priority].append(
(node1, node2)
)
- # Sorted by fusion_pair_priority and return the possible fusions with highest priority
- possible_fusions_with_highest_priority = sorted(
+ # return the possible fusions with highest priority
+ possible_fusions_with_highest_priority = min(
possible_fusions_group_by_priority.items(), key=lambda item: item[0]
- )[0][1]
+ )[1]
assert len(possible_fusions_with_highest_priority) > 0
return possible_fusions_with_highest_priority
diff --git a/torch/fx/passes/runtime_assert.py b/torch/fx/passes/runtime_assert.py
index acf05a358e..0de728dd00 100644
--- a/torch/fx/passes/runtime_assert.py
+++ b/torch/fx/passes/runtime_assert.py
@@ -91,7 +91,7 @@ def insert_deferred_runtime_asserts(
fvs = free_symbols(ra.expr)
missing = fvs - symbol_to_proxy.keys()
if missing:
- i1 = sorted(missing, key=lambda x: str(x))[0]
+ i1 = min(missing, key=str)
# TODO: Remove relaxing assert on unbacked_symint https://github.com/pytorch/pytorch/issues/119689
# assert shape_env.is_unbacked_symint(i1), i1
ras_by_symbol.setdefault(i1, []).append(ra)
|
2.41.0
|
07b6227e633946ec7b98d5dcc354cae6556d38a
|
Tue, 23 Apr 2024 13:41:22 -0700
|
[PATCH 0586/1000] Initial add of torch.distributed.pipelining (#124776)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124776 Approved by: https://github.com/wconstab
|
diff --git a/torch/distributed/pipelining/README.md b/torch/distributed/pipelining/README.md
new file mode 100644
index 0000000000..46a05a22c8
--- /dev/null
+++ b/torch/distributed/pipelining/README.md
@@ -0,0 +1,178 @@
+# Pipeline Parallelism for PyTorch
+
+> [!NOTE]
+> `torch.distributed.pipelining` is a package migrated from the [PiPPy](https://github.com/pytorch/PiPPy) project. It is currently in alpha state and under extensive development. If you need examples that work with our APIs, please refer to PiPPy's [examples](https://github.com/pytorch/PiPPy/tree/main/examples) directory.
+
+[**Why Pipeline Parallel?**](#why-pipeline-parallel)
+| [**What is `torch.distributed.pipelining`?**](#what-is-torchdistributedpipelining)
+| [**Examples**](#examples)
+| [**Techniques Explained**](#techniques-explained)
+
+# Why Pipeline Parallel?
+
+One of the most important techniques for advancing the state of the art in deep learning is scaling. Common techniques for scaling neural networks include _data parallelism_, _tensor/operation parallelism_, and _pipeline parallelism_. In many cases, pipeline parallelism in particular can be an effective technique for scaling; however, it is often difficult to implement, requiring intrusive changes to model code and hard-to-write runtime orchestration code. `torch.distributed.pipelining` aims to provide a toolkit that automates these steps to allow high-productivity scaling of models.
+
+# What is `torch.distributed.pipelining`?
+
+`torch.distributed.pipelining` consists of a compiler and runtime stack for automated pipelining of PyTorch models. Pipelining, or _pipeline parallelism_, is a technique in which the _code_ of the model is partitioned and multiple _micro-batches_ execute different parts of the model code concurrently. To learn more about pipeline parallelism, see [this article](https://www.deepspeed.ai/tutorials/pipeline/).
+
+
+
+Figure: Pipeline parallel. "F", "B" and "U" denote forward, backward and weight update, respectively. Different colors represent different micro-batches.
+
+`torch.distributed.pipelining` provides the following features that make pipeline parallelism easier:
+
+* Automatic splitting of model code based on your specification. The goal is for the user to provide model code as-is to the system for parallelization, without having to make heavyweight modifications to make parallelism work. The specification is also simple.
+* Support for rich pipeline scheduling paradigms, including GPipe, 1F1B, Interleaved 1F1B and Looped BFS. More schedules will be added and it will be easy to customize your own schedule under `torch.distributed.pipelining`'s framework.
+* First-class support for cross-host pipeline parallelism, as this is where PP is typically used (over slower interconnects).
+* Composability with other PyTorch parallel schemes such as data parallelism (DDP, FSDP) or tensor parallelism (overall, known as "3d parallelism").
+
+# Examples
+
+In the [PiPPy](https://github.com/pytorch/PiPPy) repo where this package is migrated from, we provide rich examples based on realistic models. In particular, we show how to apply pipelining without any model code change. You can refer to the [HuggingFace examples directory](https://github.com/pytorch/PiPPy/tree/main/examples/huggingface). Popular examples include: [GPT2](https://github.com/pytorch/PiPPy/tree/main/examples/huggingface/pippy_gpt2.py), and [LLaMA](https://github.com/pytorch/PiPPy/tree/main/examples/llama).
+
+# Techniques Explained
+
+`torch.distributed.pipelining` consists of two parts: a _compiler_ and a _runtime_. The compiler takes your model code, splits it up, and transforms it into a `Pipe`, a wrapper that describes the pipeline stages and their data-flow relationships. The runtime executes the `PipelineStage`s in parallel, handling micro-batch splitting, scheduling, communication, gradient propagation, and so on. We will cover the APIs for these concepts in this section.
+
+## Splitting a Model with `pipeline`
+
+To see how we can split a model into a pipeline, let's first take an example trivial neural network:
+
+```python
+import torch
+
+class MyNetworkBlock(torch.nn.Module):
+ def __init__(self, in_dim, out_dim):
+ super().__init__()
+ self.lin = torch.nn.Linear(in_dim, out_dim)
+
+ def forward(self, x):
+ x = self.lin(x)
+ x = torch.relu(x)
+ return x
+
+
+class MyNetwork(torch.nn.Module):
+ def __init__(self, in_dim, layer_dims):
+ super().__init__()
+
+ prev_dim = in_dim
+ for i, dim in enumerate(layer_dims):
+ setattr(self, f'layer{i}', MyNetworkBlock(prev_dim, dim))
+ prev_dim = dim
+
+ self.num_layers = len(layer_dims)
+ # 10 output classes
+ self.output_proj = torch.nn.Linear(layer_dims[-1], 10)
+
+ def forward(self, x):
+ for i in range(self.num_layers):
+ x = getattr(self, f'layer{i}')(x)
+
+ return self.output_proj(x)
+
+
+in_dim = 512
+layer_dims = [512, 1024, 256]
+mn = MyNetwork(in_dim, layer_dims).to(device)
+```
+
+This network is written as free-form Python code; it has not been modified for any specific parallelism technique.
+
+Let us see our first usage of the `torch.distributed.pipelining` interfaces:
+
+```python
+from torch.distributed.pipelining import annotate_split_points, pipeline, Pipe, SplitPoint
+
+annotate_split_points(mn, {'layer0': SplitPoint.END,
+ 'layer1': SplitPoint.END})
+
+batch_size = 32
+example_input = torch.randn(batch_size, in_dim, device=device)
+chunks = 4
+
+pipe = pipeline(mn, chunks, example_args=(example_input,))
+print(pipe)
+
+"""
+************************************* pipe *************************************
+GraphModule(
+ (submod_0): GraphModule(
+ (layer0): InterpreterModule(
+ (lin): InterpreterModule()
+ )
+ )
+ (submod_1): GraphModule(
+ (layer1): InterpreterModule(
+ (lin): InterpreterModule()
+ )
+ )
+ (submod_2): GraphModule(
+ (layer2): InterpreterModule(
+ (lin): InterpreterModule()
+ )
+ (output_proj): InterpreterModule()
+ )
+)
+
+def forward(self, arg8_1):
+ submod_0 = self.submod_0(arg8_1); arg8_1 = None
+ submod_1 = self.submod_1(submod_0); submod_0 = None
+ submod_2 = self.submod_2(submod_1); submod_1 = None
+ return (submod_2,)
+"""
+```
+
+So what's going on here? First, `pipeline` turns our model into a directed acyclic graph (DAG) by tracing the model. Then, it groups together the operations and parameters into _pipeline stages_. Stages are represented as `submod_N` submodules, where `N` is a natural number.
+
+We used `annotate_split_points` to specify that the code should be split at the end of `layer0` and `layer1`. Our code has thus been split into _three_ pipeline stages. Our library also provides `SplitPoint.BEGINNING` if a user wants to split before a certain annotation point.
+
+While the `annotate_split_points` API gives users a way to specify the split points without modifying the model, our library also provides an API for in-model annotation: `pipe_split()`. For details, you can read [this example](https://github.com/pytorch/PiPPy/blob/main/test/test_pipe.py).
+
+This covers the basic usage of the `Pipe` API. For more information, please see the documentation.
+
+<!-- (TODO: link to docs when live) -->
+
+## Using PipelineStage for Pipelined Execution
+
+Given the above `Pipe` object, we can use one of the `PipelineStage` classes to execute our model in a pipelined fashion. First off, let us instantiate a `PipelineStage` instance:
+
+```python
+# We are using `torchrun` to run this example with multiple processes.
+# `torchrun` defines two environment variables: `RANK` and `WORLD_SIZE`.
+rank = int(os.environ["RANK"])
+world_size = int(os.environ["WORLD_SIZE"])
+
+# Initialize distributed environment
+import torch.distributed as dist
+dist.init_process_group(rank=rank, world_size=world_size)
+
+# Pipeline stage is our main pipeline runtime. It takes in the pipe object,
+# the rank of this process, and the device.
+from torch.distributed.pipelining import PipelineStage
+stage = PipelineStage(pipe, rank, device)
+```
+
+We can now run the pipeline by attaching the `PipelineStage` to a pipeline schedule, GPipe for example:
+
+```python
+from torch.distributed.pipelining import ScheduleGPipe
+schedule = ScheduleGPipe(stage, chunks)
+
+# Input data
+x = torch.randn(batch_size, in_dim, device=device)
+
+# Run the pipeline with input `x`. Divide the batch into 4 micro-batches
+# and run them in parallel on the pipeline
+if rank == 0:
+ schedule.step(x)
+else:
+ output = schedule.step()
+```
+
+Note that since we split our model into three stages, we must run this script with three workers. For this example, we will use `torchrun` to run multiple processes within a single machine for demonstration purposes. We can collect up all of the code blocks above into a file named [example.py](https://github.com/pytorch/PiPPy/tree/main/examples/basic) and then run it with `torchrun` like so:
+
+```
+torchrun --nproc_per_node=3 example.py
+```
|
2.41.0
|
ad6dc2cf3acabe0f7bc25ffe43791706a4a3b22
|
Wed, 24 Apr 2024 18:52:06 +0000
|
[PATCH 0587/1000] [Profiler][PrivateUse1] Profiler support PrivateUse1 key (#124818)
|
Summary: 1. Package kineto's public headers when USE_KINETO is set so that they can be used by PrivateUse1 users. 2. Add a PrivateUse1 key to ActivityType. 3. Support the PrivateUse1 key in deviceTypeFromActivity and _supported_activities. 4. Fix some bugs when processing profiler results. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124818 Approved by: https://github.com/aaronenyeshi
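A hedged usage sketch of the Python-visible part: once an out-of-tree backend is registered under the PrivateUse1 dispatch key (no such backend ships with PyTorch itself, so that registration is an assumption here), the new activity can be requested like any other; without one, the check below degrades to a CPU-only profile.

```python
import torch
from torch.profiler import profile, ProfilerActivity

activities = [ProfilerActivity.CPU]
# Only present when a PrivateUse1 backend is registered.
if ProfilerActivity.PrivateUse1 in torch.profiler.supported_activities():
    activities.append(ProfilerActivity.PrivateUse1)

with profile(activities=activities) as prof:
    torch.randn(128, 128).mm(torch.randn(128, 128))

print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=5))
```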
|
diff --git a/setup.py b/setup.py
index d8d5e230c4..d35240683c 100644
--- a/setup.py
+++ b/setup.py
@@ -1386,6 +1386,12 @@ def main():
"include/tensorpipe/transport/uv/*.h",
]
)
+ if get_cmake_cache_vars()["USE_KINETO"]:
+ torch_package_data.extend(
+ [
+ "include/kineto/*.h",
+ ]
+ )
torchgen_package_data = [
"packaged/**/*.cpp",
"packaged/**/*.h",
diff --git a/torch/autograd/profiler.py b/torch/autograd/profiler.py
index 5aa607a8e6..b360794455 100644
--- a/torch/autograd/profiler.py
+++ b/torch/autograd/profiler.py
@@ -283,7 +283,6 @@ class profile:
self.profiler_kind = ProfilerState.KINETO_PRIVATEUSE1_FALLBACK
else:
self.kineto_activities.add(ProfilerActivity.PrivateUse1)
- self.profiler_kind = ProfilerState.KINETO_PRIVATEUSE1
assert (
len(self.kineto_activities) > 0
@@ -328,10 +327,10 @@ class profile:
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
- if self.use_device == "cuda":
- torch.cuda.synchronize()
- elif self.use_device == "xpu":
- torch.xpu.synchronize()
+ if self.use_device and hasattr(torch, self.use_device):
+ device_module = getattr(torch, self.use_device)
+ if hasattr(device_module, "synchronize"):
+ device_module.synchronize()
t0 = perf_counter_ns()
self.kineto_results = _disable_profiler()
@@ -558,7 +557,10 @@ class profile:
and fe.id in device_corr_map
):
for f_evt in device_corr_map[fe.id]:
- if f_evt.device_type == DeviceType.CUDA:
+ if (
+ f_evt.device_type == DeviceType.CUDA
+ or f_evt.device_type == DeviceType.PrivateUse1
+ ):
fe.append_kernel(
f_evt.name,
f_evt.device_index,
diff --git a/torch/autograd/profiler_util.py b/torch/autograd/profiler_util.py
index 4c889678ad..92a33e4fb5 100644
--- a/torch/autograd/profiler_util.py
+++ b/torch/autograd/profiler_util.py
@@ -859,7 +859,7 @@ def _build_table(
"Self CPU Mem",
]
)
- if has_device_mem:
+ if use_device and has_device_mem:
headers.extend(
[
f"{device_name} Mem",
@@ -1012,7 +1012,7 @@ def _build_table(
_format_memory(evt.self_cpu_memory_usage),
]
)
- if has_device_mem:
+ if use_device and has_device_mem:
row_values.extend(
[
# Device Mem Total
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index 5fedfb9be4..81fc50f100 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -332,6 +332,9 @@ PyObject* THPAutograd_initExtension(PyObject* _unused, PyObject* unused) {
if (at::hasMTIA()) {
activities.insert(torch::profiler::impl::ActivityType::MTIA);
}
+ if (c10::get_privateuse1_backend() != "privateuseone") {
+ activities.insert(torch::profiler::impl::ActivityType::PrivateUse1);
+ }
#endif
return activities;
});
diff --git a/torch/csrc/autograd/profiler_kineto.cpp b/torch/csrc/autograd/profiler_kineto.cpp
index e30aba2d84..0c73c8b7a7 100644
--- a/torch/csrc/autograd/profiler_kineto.cpp
+++ b/torch/csrc/autograd/profiler_kineto.cpp
@@ -555,7 +555,9 @@ void prepareProfiler(
config.state == ProfilerState::KINETO_PRIVATEUSE1_FALLBACK,
"Supported only in Kineto profiler");
torch::profiler::impl::kineto::prepareTrace(
- /*cpuOnly=*/!(at::hasCUDA() || at::hasXPU() || at::hasMTIA()),
+ /*cpuOnly=*/!(
+ at::hasCUDA() || at::hasXPU() || at::hasMTIA() ||
+ c10::get_privateuse1_backend() != "privateuseone"),
activities,
config.experimental_config);
diff --git a/torch/csrc/profiler/kineto_shim.cpp b/torch/csrc/profiler/kineto_shim.cpp
index 41561c6f3e..6d6cb46e42 100644
--- a/torch/csrc/profiler/kineto_shim.cpp
+++ b/torch/csrc/profiler/kineto_shim.cpp
@@ -25,6 +25,8 @@ const std::set<libkineto::ActivityType> kCpuTypes{
libkineto::ActivityType::CUDA_RUNTIME,
libkineto::ActivityType::CUDA_DRIVER,
libkineto::ActivityType::PYTHON_FUNCTION,
+ libkineto::ActivityType::PRIVATEUSE1_RUNTIME,
+ libkineto::ActivityType::PRIVATEUSE1_DRIVER,
};
const std::set<libkineto::ActivityType> kCudaTypes = {
@@ -47,6 +49,15 @@ const std::set<libkineto::ActivityType> kMtiaTypes = {
libkineto::ActivityType::MTIA_CCP_EVENTS,
libkineto::ActivityType::MTIA_RUNTIME,
};
+const std::set<libkineto::ActivityType> kPrivateUse1Types = {
+ libkineto::ActivityType::GPU_MEMCPY,
+ libkineto::ActivityType::GPU_MEMSET,
+ libkineto::ActivityType::GPU_USER_ANNOTATION,
+ libkineto::ActivityType::CONCURRENT_KERNEL,
+ // PRIVATEUSE1_RUNTIME appears in both kCpuTypes and kPrivateUse1Types.
+ libkineto::ActivityType::PRIVATEUSE1_RUNTIME,
+ libkineto::ActivityType::PRIVATEUSE1_DRIVER,
+};
} // namespace
#endif // USE_KINETO
@@ -248,6 +259,9 @@ void prepareTrace(
if (collectivesProfilerExists()) {
k_activities.insert(libkineto::ActivityType::COLLECTIVE_COMM);
}
+ if (activities.count(torch::autograd::profiler::ActivityType::PrivateUse1)) {
+ k_activities.insert(kPrivateUse1Types.begin(), kPrivateUse1Types.end());
+ }
ExperimentalConfigWrapper configWrap(config);
@@ -336,8 +350,18 @@ c10::DeviceType deviceTypeFromActivity(libkineto::ActivityType activity_type) {
case libkineto::ActivityType::GPU_USER_ANNOTATION:
case libkineto::ActivityType::CUDA_PROFILER_RANGE:
// TODO: T151322015
- case libkineto::ActivityType::MTIA_CCP_EVENTS:
- return c10::DeviceType::CUDA;
+ case libkineto::ActivityType::MTIA_CCP_EVENTS: {
+ // The PrivateUse1 Kineto backend reuses the ActivityTypes above.
+ // If a PrivateUse1 backend is enabled, this should return
+ // c10::DeviceType::PrivateUse1.
+ c10::DeviceType device_type = []() {
+ if (c10::get_privateuse1_backend() != "privateuseone") {
+ return c10::DeviceType::PrivateUse1;
+ }
+ return c10::DeviceType::CUDA;
+ }();
+ return device_type;
+ }
case libkineto::ActivityType::CPU_OP:
case libkineto::ActivityType::USER_ANNOTATION:
case libkineto::ActivityType::EXTERNAL_CORRELATION:
@@ -348,6 +372,8 @@ c10::DeviceType deviceTypeFromActivity(libkineto::ActivityType activity_type) {
case libkineto::ActivityType::MTIA_RUNTIME:
case libkineto::ActivityType::PYTHON_FUNCTION:
case libkineto::ActivityType::CUDA_DRIVER:
+ case libkineto::ActivityType::PRIVATEUSE1_RUNTIME:
+ case libkineto::ActivityType::PRIVATEUSE1_DRIVER:
return c10::DeviceType::CPU;
default: {
TORCH_WARN(
diff --git a/torch/csrc/profiler/orchestration/observer.h b/torch/csrc/profiler/orchestration/observer.h
index da675e0f3d..4230851607 100644
--- a/torch/csrc/profiler/orchestration/observer.h
+++ b/torch/csrc/profiler/orchestration/observer.h
@@ -17,6 +17,7 @@ enum class C10_API_ENUM ActivityType {
XPU, // XPU kernels, runtime
CUDA, // CUDA kernels, runtime
MTIA, // MTIA kernels, runtime
+ PrivateUse1, // PrivateUse1 kernels, runtime
NUM_KINETO_ACTIVITIES, // must be the last one
};
diff --git a/torch/csrc/profiler/python/init.cpp b/torch/csrc/profiler/python/init.cpp
index fe8bb92df1..bfaeed1398 100644
--- a/torch/csrc/profiler/python/init.cpp
+++ b/torch/csrc/profiler/python/init.cpp
@@ -322,7 +322,8 @@ void initPythonBindings(PyObject* module) {
.value("CPU", ActivityType::CPU)
.value("XPU", ActivityType::XPU)
.value("MTIA", ActivityType::MTIA)
- .value("CUDA", ActivityType::CUDA);
+ .value("CUDA", ActivityType::CUDA)
+ .value("PrivateUse1", ActivityType::PrivateUse1);
py::class_<ExperimentalConfig>(m, "_ExperimentalConfig")
.def(
diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py
index 20094e5814..82daffcdcb 100644
--- a/torch/profiler/profiler.py
+++ b/torch/profiler/profiler.py
@@ -12,6 +12,7 @@ from typing_extensions import Self
import torch
import torch.autograd.profiler as prof
+from torch._C import _get_privateuse1_backend_name
from torch._C._profiler import (
_add_execution_trace_observer,
_disable_execution_trace_observer,
@@ -130,8 +131,8 @@ class _KinetoProfile:
self.use_device = "cuda"
elif ProfilerActivity.XPU in self.activities:
self.use_device = "xpu"
- else:
- self.use_device = "privateuseone"
+ elif ProfilerActivity.PrivateUse1 in self.activities:
+ self.use_device = _get_privateuse1_backend_name()
# user-defined metadata to be amended to the trace
self.preset_metadata: Dict[str, str] = dict()
|
2.41.0
|
72eeb0d7deebb58915289756d8c786f68630547
|
Tue, 23 Apr 2024 17:53:58 -0700
|
[PATCH 0588/1000] Refresh OpOverloadPacket if a new OpOverload gets added (#124654)
|
If a user accesses an OpOverloadPacket, then creates a new OpOverload, then uses the OpOverloadPacket, the new OpOverload never gets hit. This is because OpOverloadPacket caches OpOverloads when it is constructed. This PR fixes the problem by "refreshing" the OpOverloadPacket if a new OpOverload gets constructed and the OpOverloadPacket exists. Test Plan: - new tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/124654 Approved by: https://github.com/albanD
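A condensed sketch of the failure mode, modeled on the `test_overloading` test added in this PR (the `mylib` namespace and function names are placeholders):

```python
import torch
from torch import Tensor

# Touching torch.ops.mylib.f materializes an OpOverloadPacket that caches the
# overloads known at that moment.
@torch.library.custom_op("mylib::f", mutates_args=())
def f(x: Tensor) -> Tensor:
    return x.clone()

x = torch.randn(2, 3)
torch.ops.mylib.f(x)

# A second overload registered afterwards; before this change the cached
# packet never learned about it, so the two-argument call below could not
# resolve. With the refresh it dispatches to the new overload.
@torch.library.custom_op("mylib::f.overload", mutates_args=())
def f_two(x: Tensor, y: Tensor) -> Tensor:
    return x.clone()

torch.ops.mylib.f(x, x)
```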
|
diff --git a/test/jit/test_list_dict.py b/test/jit/test_list_dict.py
index f3d314dbac..90fa24e435 100644
--- a/test/jit/test_list_dict.py
+++ b/test/jit/test_list_dict.py
@@ -5,7 +5,7 @@ import os
import sys
import types
import unittest
-from collections import OrderedDict
+from collections import defaultdict, OrderedDict
from textwrap import dedent
from typing import Any, Dict, List, NamedTuple, Optional, Tuple
@@ -2966,3 +2966,32 @@ class TestScriptList(JitTestCase):
self.assertEqual(len(l), 3)
self.assertTrue(3 in l)
self.assertEqual(l[2], 3)
+
+ def test_defaultdict(self):
+ def get_dict():
+ test_dict = defaultdict(list)
+ return test_dict
+
+ class Test(torch.nn.Module):
+ segments_groupby_col: Dict[str, List[str]]
+
+ def __init__(self):
+ super().__init__()
+ self.segments_groupby_col = get_dict()
+ self.col1 = "a"
+ self.col2 = "b"
+
+ def forward(self):
+ if self.col1 in self.segments_groupby_col.keys():
+ return 1
+ else:
+ return 2
+
+ test = Test()
+ test_script = torch.jit.script(test)
+ test_script.segments_groupby_col
+
+ # Smoketest for flakiness. Takes around 2s.
+ for i in range(300):
+ test = Test()
+ test_script = torch.jit.script(test)
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 9e748083ce..9c367624d5 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -2720,6 +2720,30 @@ Please use `add.register_fake` to add an fake impl.""",
y = f(x)
self.assertEqual(y, x.sin())
+ @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
+ def test_overloading(self):
+ called_f = 0
+ called_f1 = 0
+
+ @torch.library.custom_op("_torch_testing::f", mutates_args=())
+ def f(x: Tensor) -> Tensor:
+ nonlocal called_f
+ called_f += 1
+ return x.clone()
+
+ x = torch.randn(2, 3)
+ torch.ops._torch_testing.f(x)
+ self.assertEqual(called_f, 1)
+
+ @torch.library.custom_op("_torch_testing::f.overload", mutates_args=())
+ def f1(x: Tensor, y: Tensor) -> Tensor:
+ nonlocal called_f1
+ called_f1 += 1
+ return x.clone()
+
+ torch.ops._torch_testing.f(x, x)
+ self.assertEqual(called_f1, 1)
+
def test_disallows_output_aliasing(self):
@torch.library.custom_op("_torch_testing::f", mutates_args=())
def f(x: Tensor) -> Tensor:
diff --git a/torch/_ops.py b/torch/_ops.py
index 9ada11cd9f..774b6753c9 100644
--- a/torch/_ops.py
+++ b/torch/_ops.py
@@ -1119,8 +1119,10 @@ class _OpNamespace(types.ModuleType):
# for overloads and raise an exception if there are more than one.
namespace_name = self.name
qualified_op_name = f"{namespace_name}::{op_name}"
+ module_name = self.__module__ + "." + namespace_name
+
try:
- op, overload_names = torch._C._jit_get_operation(qualified_op_name)
+ op, overload_names = _get_packet(qualified_op_name, module_name)
if op is None:
raise AttributeError(
f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
@@ -1132,10 +1134,7 @@ class _OpNamespace(types.ModuleType):
f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
) from e
- # let the script frontend know that op is identical to the builtin op
- # with qualified_op_name
- torch.jit._builtins._register_builtin(op, qualified_op_name)
- op.__module__ = self.__module__ + "." + namespace_name
+ op.__module__ = module_name
opoverloadpacket = OpOverloadPacket(
qualified_op_name, op_name, op, overload_names
)
@@ -1147,6 +1146,23 @@ class _OpNamespace(types.ModuleType):
return opoverloadpacket
+def _get_packet(qualname, op_module):
+ op, overload_names = torch._C._jit_get_operation(qualname)
+ if op is not None:
+ # let the script frontend know that op is identical to the builtin op
+ # with qualified_op_name
+ torch.jit._builtins._register_builtin(op, qualname)
+ op.__module__ = op_module
+ return op, overload_names
+
+
+def _refresh_packet(packet):
+ op, overload_names = _get_packet(packet._qualified_op_name, packet._op.__module__)
+ assert op is not None
+ packet._op = op
+ packet._overload_names = overload_names
+
+
class _PyOpNamespace(_OpNamespace):
def __init__(self, name, ops):
super().__init__(name)
diff --git a/torch/library.py b/torch/library.py
index 8d80bdef85..2a0ea68baf 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -109,8 +109,23 @@ class Library:
assert self.m is not None
if isinstance(tags, torch.Tag):
tags = (tags,)
+
+ name = schema.split("(")[0]
+ packet_name = name.split(".")[0] if "." in name else name
+ has_preexisting_packet = hasattr(torch.ops, self.ns) and hasattr(getattr(torch.ops, self.ns), packet_name)
+
result = self.m.define(schema, alias_analysis, tuple(tags))
- qualname = self.ns + "::" + schema.split("(")[0]
+ name = schema.split("(")[0]
+ qualname = self.ns + "::" + name
+
+ # If the OpOverloadPacket exists already, then this means we're adding a
+ # new OpOverload for it. Refresh the packet to include the new OpOverload.
+ if has_preexisting_packet:
+ ns = getattr(torch.ops, self.ns)
+ packet = getattr(ns, packet_name)
+ print("refreshing", ns, packet_name)
+ torch._ops._refresh_packet(packet)
+
self._op_defs.add(qualname)
_defs.add(qualname)
return result
|
2.41.0
|
55309e58f88dd37e41e80425fd84a71d4b51548
|
Wed, 24 Apr 2024 19:39:31 +0000
|
[PATCH 0589/1000] OSS: Capture triton kernel in ET (#124775)
|
This diff captures Triton kernels in the execution trace. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124775 Approved by: https://github.com/briancoutinho
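A hedged usage sketch, condensed from the updated `test_execution_trace_with_pt2` test (requires CUDA and Triton; the temp-file handling is illustrative): attach an `ExecutionTraceObserver` to the profiler, run a compiled function, and the Triton kernels it launches appear as trace nodes carrying `kernel_backend`/`kernel_file` attributes.

```python
import tempfile

import torch
from torch.profiler import ExecutionTraceObserver, profile

@torch.compile
def fn(a, b, c):
    return (torch.nn.functional.linear(a, b) + c).cos()

a, b, c = (torch.randn(4, 4, device="cuda") for _ in range(3))
fn(a, b, c)  # compile outside the profiled region

trace_file = tempfile.NamedTemporaryFile("w+t", suffix="_et.json", delete=False)
trace_file.close()

with profile(
    activities=torch.profiler.supported_activities(),
    execution_trace_observer=ExecutionTraceObserver().register_callback(trace_file.name),
):
    fn(a, b, c)

# trace_file.name now holds the execution trace; nodes for Triton kernels carry
# "kernel_backend" == "triton" and a non-empty "kernel_file" attribute.
```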
|
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py
index 7606dee2c8..8fb64587e0 100644
--- a/test/profiler/test_profiler.py
+++ b/test/profiler/test_profiler.py
@@ -31,6 +31,7 @@ import weakref
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
from unittest.mock import patch
+from warnings import warn
import expecttest
import torch
@@ -38,6 +39,7 @@ import torch.nn as nn
import torch.optim
import torch.utils.data
import torch.utils.data.datapipes as dp
+from torch import _dynamo as torchdynamo
from torch._C._profiler import _TensorMetadata
from torch.autograd import (
_record_function_with_args_enter,
@@ -69,7 +71,9 @@ from torch.profiler._pattern_matcher import (
report_all_anti_patterns,
SynchronizedDataLoaderPattern,
)
-from torch.testing._internal.common_cuda import TEST_MULTIGPU
+
+from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
+
from torch.testing._internal.common_device_type import skipCUDAVersionIn
from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
@@ -89,6 +93,8 @@ from torch.testing._internal.common_utils import (
TestCase,
)
+from torch.utils._triton import has_triton
+
Json = Dict[str, Any]
try:
@@ -532,42 +538,54 @@ class TestExecutionTrace(TestCase):
assert loop_count == expected_loop_events
@unittest.skipIf(IS_WINDOWS, "torch.compile does not support WINDOWS")
+ @unittest.skipIf(
+ sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
+ )
+ @unittest.skipIf(not TEST_CUDA or not has_triton(), "need CUDA and triton to run")
def test_execution_trace_with_pt2(self):
- class ConvAndRelu(nn.Module):
- def __init__(self) -> None:
- super().__init__()
- self.linear = nn.Linear(4096, 4096)
- self.relu = nn.ReLU(inplace=True)
+ @torchdynamo.optimize("inductor")
+ def fn(a, b, c):
+ x = torch.nn.functional.linear(a, b)
+ x = x + c
+ return x.cos()
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.linear(x)
- x = self.relu(x)
- return x
+ a, b, c = (torch.randn(4, 4, requires_grad=True).to("cuda") for _ in range(3))
+
+ inputs = [a, b, c]
+ with torch._inductor.config.patch(compile_threads=1):
+ fn(*inputs)
# Create a temp file to save execution trace data.
fp = tempfile.NamedTemporaryFile("w+t", suffix="_et.json", delete=False)
fp.close()
- with torch._inductor.config.patch(compile_threads=1):
- test_module = torch.compile(ConvAndRelu())
-
- x = torch.rand(128, 4096)
- et = ExecutionTraceObserver().register_callback(fp.name)
- et.start()
- test_module.forward(x)
- et.stop()
+ with profile(
+ activities=torch.profiler.supported_activities(),
+ record_shapes=True,
+ schedule=torch.profiler.schedule(
+ skip_first=3, wait=1, warmup=1, active=2, repeat=1
+ ),
+ execution_trace_observer=(
+ ExecutionTraceObserver().register_callback(fp.name)
+ ),
+ ) as p:
+ for idx in range(10):
+ with record_function(f"## LOOP {idx} ##"):
+ fn(*inputs)
+ p.step()
- assert fp.name == et.get_output_file_path()
- et.unregister_callback()
nodes = self.get_execution_trace_root(fp.name)
-
- found_root_node = False
+ found_captured_triton_kernel_node = False
for n in nodes:
assert "name" in n
- if "[pytorch|profiler|execution_trace|process]" in n["name"]:
- found_root_node = True
-
- assert found_root_node
+ if "triton_" in n["name"]:
+ for attr in n["attrs"]:
+ if attr["name"] == "kernel_file" and attr["value"] != "":
+ found_captured_triton_kernel_node = True
+ assert len(n["inputs"]["values"]) > 0
+ assert len(n["outputs"]["values"]) == 0
+ if not found_captured_triton_kernel_node:
+ warn("triton kernels not found")
def test_execution_trace_start_stop(self):
use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index 60a18e416f..49508e9214 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -806,7 +806,7 @@ class CachingAutotuner(KernelInterface):
args,
{
"kernel_file": self.filename,
- "kernel_type": "triton",
+ "kernel_backend": "triton",
"grid": grid_info,
"stream": stream,
},
diff --git a/torch/csrc/profiler/standalone/execution_trace_observer.cpp b/torch/csrc/profiler/standalone/execution_trace_observer.cpp
index 763f449c23..346dd0718d 100644
--- a/torch/csrc/profiler/standalone/execution_trace_observer.cpp
+++ b/torch/csrc/profiler/standalone/execution_trace_observer.cpp
@@ -236,6 +236,8 @@ const ExecutionTraceObserver::ID root_id{1};
struct FunctionCallContext : public ObserverContext {
std::string name;
+ std::string kernel_backend;
+ std::string kernel_file;
ExecutionTraceObserver::ID op_id{uninitialized_id};
ExecutionTraceObserver::ID parent_id{uninitialized_id};
ExecutionTraceObserver::ID fw_parent_id{uninitialized_id};
@@ -273,14 +275,24 @@ static void writeJsonNode(
const std::string& outputs = "[]",
const std::string& output_shapes = "[]",
const std::string& output_types = "[]",
- const std::string& operator_schema = "") {
+ const std::string& operator_schema = "",
+ const std::string& kernel_backend = "",
+ const std::string& kernel_file = "") {
out << fmt::format(
R"JSON(
{{
"id": {}, "name": "{}", "ctrl_deps": {},
"inputs": {{"values": {}, "shapes": {}, "types": {}}},
"outputs": {{"values": {}, "shapes": {}, "types": {}}},
- "attrs": [{{"name": "rf_id", "type": "uint64", "value": {}}}, {{"name": "fw_parent", "type": "uint64", "value": {}}}, {{"name": "seq_id", "type": "int64", "value": {}}}, {{"name": "scope", "type": "uint64", "value": {}}}, {{"name": "tid", "type": "uint64", "value": {}}}, {{"name": "fw_tid", "type": "uint64", "value": {}}}, {{"name": "op_schema", "type": "string", "value": "{}"}}]
+ "attrs": [{{"name": "rf_id", "type": "uint64", "value": {}}},
+ {{"name": "fw_parent", "type": "uint64", "value": {}}},
+ {{"name": "seq_id", "type": "int64", "value": {}}},
+ {{"name": "scope", "type": "uint64", "value": {}}},
+ {{"name": "tid", "type": "uint64", "value": {}}},
+ {{"name": "fw_tid", "type": "uint64", "value": {}}},
+ {{"name": "op_schema", "type": "string", "value": "{}"}},
+ {{"name": "kernel_backend", "type": "string", "value": "{}"}},
+ {{"name": "kernel_file", "type": "string", "value": "{}"}}]
}})JSON",
id,
name,
@@ -297,7 +309,9 @@ static void writeJsonNode(
scope,
tid,
fw_tid,
- operator_schema);
+ operator_schema,
+ kernel_backend,
+ kernel_file);
}
inline std::string timeString(const std::time_t timepoint) {
@@ -326,7 +340,7 @@ static bool initExecutionTraceStart(ExecutionTraceObserver& ob) {
ob.out << fmt::format(
R"JSON({{
- "schema": "1.0.3-chakra.0.0.4", "pid": {}, "time": "{}", "start_ts": {},
+ "schema": "1.0.4-chakra.0.0.4", "pid": {}, "time": "{}", "start_ts": {},
"nodes": [)JSON",
ob.pid,
ob.record_time,
@@ -442,6 +456,44 @@ inline void appendValueInfo(
shapes.push_back(getValueShape(val));
}
+inline void handleKernelBackendInfo(
+ FunctionCallContext& fc,
+ const RecordFunction& fn) {
+ // Triton-kernel-related information is in kwinputs
+ const auto& kwinputs = fn.kwinputs();
+ if (kwinputs.find("kernel_backend") != kwinputs.end()) {
+ fc.kernel_backend = kwinputs.at("kernel_backend").toStringRef();
+ if (fc.kernel_backend == "triton") {
+ fc.kernel_file = kwinputs.at("kernel_file").toStringRef();
+ TORCH_INTERNAL_ASSERT(
+ kwinputs.find("kernel_file") != kwinputs.end(),
+ "kernel file is missing in triton kernel");
+ // Remove the path of the file name
+ if (fc.kernel_file.find_last_of('/') != std::string::npos)
+ fc.kernel_file =
+ fc.kernel_file.substr(fc.kernel_file.find_last_of('/') + 1);
+
+ // get grid information
+ TORCH_INTERNAL_ASSERT(
+ kwinputs.find("grid") != kwinputs.end(),
+ "grid is missing in triton kernel");
+ fc.input_values.emplace_back(
+ "\"" + kwinputs.at("grid").toStringRef() + "\"");
+ fc.input_types.emplace_back("\"String\"");
+ fc.input_shapes.emplace_back("[]");
+
+ // get stream information
+ TORCH_INTERNAL_ASSERT(
+ kwinputs.find("stream") != kwinputs.end(),
+ "stream is missing in triton kernel");
+ fc.input_values.emplace_back(
+ std::to_string(kwinputs.at("stream").toInt()));
+ fc.input_types.emplace_back("\"Int\"");
+ fc.input_shapes.emplace_back("[]");
+ }
+ }
+}
+
static void recordOperatorStart(
ExecutionTraceObserver& ob,
FunctionCallContext& fc,
@@ -491,6 +543,9 @@ static void recordOperatorStart(
appendValueInfo(
ob, inputs[i], fc.input_values, fc.input_types, fc.input_shapes);
}
+
+ handleKernelBackendInfo(fc, fn);
+
fc.parent_id = ob.op_stack[tid].top();
// get parent id from the forward stack, this can be different for
// autograd ops, which may execute on a different thread than the original
@@ -615,7 +670,9 @@ static void onFunctionExit(const RecordFunction& fn, ObserverContext* ctx_ptr) {
vectorToString(output_values),
vectorToString(output_shapes),
vectorToString(output_types),
- op_schema_str);
+ op_schema_str,
+ fc.kernel_backend,
+ fc.kernel_file);
ob->out << ",";
} catch (const std::exception& e) {
LOG(WARNING) << "Exception in execution trace observer: [" << fc.name
diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py
index 82daffcdcb..81f1d2c2f1 100644
--- a/torch/profiler/profiler.py
+++ b/torch/profiler/profiler.py
@@ -1,6 +1,7 @@
import gzip
import json
import os
+import shutil
import tempfile
from abc import ABC, abstractmethod
from enum import Enum
@@ -792,8 +793,36 @@ class ExecutionTraceObserver(_ITraceObserver):
"""
Removes ET observer from record function callbacks.
"""
+
+ def _save_triton_kernels():
+ # Save the kernel paths for the generated kernels
+ from torch._inductor.codecache import PyCodeCache as PyCodeCache
+
+ kernel_files = [
+ v.__file__
+ for v in PyCodeCache.cache.values()
+ if getattr(v, "__file__", None) is not None
+ ]
+ work_dir, file_name = os.path.split(self._output_file_path)
+ resource_dir = os.path.join(
+ work_dir, os.path.splitext(file_name)[0] + "_resources"
+ )
+ if not os.path.exists(resource_dir):
+ os.mkdir(resource_dir)
+
+ for kernel_file in kernel_files:
+ if kernel_file is None:
+ continue
+ path, name = os.path.split(kernel_file)
+ dst = os.path.join(resource_dir, name)
+ shutil.copyfile(kernel_file, dst)
+
if self._registered:
self.stop()
+ try:
+ _save_triton_kernels()
+ except Exception as e:
+ warn(f"Execution trace failed to save kernels: {e}")
_remove_execution_trace_observer()
self._registered = False
|
2.41.0
|
fe0b8b6a8df5191b4c440e5e4db577c6401d075
|
Wed, 24 Apr 2024 19:44:51 +0000
|
[PATCH 0590/1000] No CPP or xdist process level reruns (#124798)
|
xdist doesn't play well with current process level rerun scheme Pull Request resolved: https://github.com/pytorch/pytorch/pull/124798 Approved by: https://github.com/huydhn
|
diff --git a/test/run_test.py b/test/run_test.py
index c029a96566..406fe0ecb2 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -487,7 +487,12 @@ def run_test(
os.close(log_fd)
command = (launcher_cmd or []) + executable + argv
- should_retry = "--subprocess" not in command and not RERUN_DISABLED_TESTS
+ should_retry = (
+ "--subprocess" not in command
+ and not RERUN_DISABLED_TESTS
+ and not is_cpp_test
+ and "-n" not in command
+ )
is_slow = "slow" in os.environ.get("TEST_CONFIG", "") or "slow" in os.environ.get(
"BUILD_ENVRIONMENT", ""
)
|
2.41.0
|
1d92bace2b9ff6431976cda69c83df668d078f0
|
Wed, 24 Apr 2024 19:47:13 +0000
|
[PATCH 0591/1000] [CUDA] Fix 64-bit indexing in `vol2col` in conv3d (#124650)
|
Similar to #118005, this fixes sometimes-silent illegal memory accesses (IMAs) that can occur. CC @atalman @malfet Pull Request resolved: https://github.com/pytorch/pytorch/pull/124650 Approved by: https://github.com/soulitzer
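For reference, a sketch of the kind of workload that tripped the 32-bit indexing, modeled on the `test_conv3d_large_batch_1` unit test added here (the input alone is about 2**31 elements, so it needs on the order of 20 GB of GPU memory and is illustrative rather than something to run casually):

```python
import torch

# This single conv3d works on roughly 2**31 elements; before the fix the
# vol2col kernel could overflow its 32-bit index and silently corrupt results
# on CUDA.
x = torch.rand(1, 32, 512, 512, 256)
m = torch.nn.Conv3d(32, 1, kernel_size=1, padding=0, stride=1, bias=False)

y_ref = m(x)                      # CPU reference
y = m.to("cuda")(x.to("cuda"))    # previously affected path
torch.testing.assert_close(y_ref, y.cpu())
```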
|
diff --git a/aten/src/ATen/native/cuda/vol2col.cuh b/aten/src/ATen/native/cuda/vol2col.cuh
index 51dbe1c744..98ec2c3522 100644
--- a/aten/src/ATen/native/cuda/vol2col.cuh
+++ b/aten/src/ATen/native/cuda/vol2col.cuh
@@ -36,7 +36,7 @@ __global__ void vol2col_kernel(
const int height_col,
const int width_col,
T* data_col) {
- CUDA_KERNEL_LOOP(index, n) {
+ CUDA_KERNEL_LOOP_TYPE(index, n, int64_t) {
auto w_out = index % width_col;
index /= width_col;
auto h_out = index % height_col;
diff --git a/test/nn/test_convolution.py b/test/nn/test_convolution.py
index 0bf6065a18..acf83107d2 100644
--- a/test/nn/test_convolution.py
+++ b/test/nn/test_convolution.py
@@ -3183,6 +3183,16 @@ class TestConvolutionNNDeviceType(NNTestCase):
output_cpu = model(input_tensor.float().cpu())
self.assertEqual(output.cpu().float(), output_cpu, atol=1e-3, rtol=1e-3)
+ @onlyCUDA
+ @largeTensorTest("24GB", "cpu")
+ @largeTensorTest("20GB", "cuda")
+ def test_conv3d_large_batch_1(self, device):
+ x = torch.rand(1, 32, 512, 512, 256)
+ m = torch.nn.Conv3d(32, 1, kernel_size=1, padding=0, stride=1, bias=False)
+ yref = m(x)
+ y = m.to(device=device)(x.to(device=device))
+ self.assertEqual(yref, y.cpu())
+
@onlyCUDA
@skipCUDAIfNoCudnn
def test_contig_wrong_stride_cudnn(self, device):
|
2.41.0
|
dbf62cd0a75042feb2e4b3cbb6f91804212b5d7
|
Tue, 23 Apr 2024 15:05:50 -0700
|
[PATCH 0592/1000] Fix layer norm in static runtime when input is non-contiguous (#124789)
|
Test: the added unit test fails before this fix and passes after it. The fix comes from @swolchok in D56087067. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124789 Approved by: https://github.com/davidberard98
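The static runtime path itself is only exercised from the C++ test; as a hedged illustration, the snippet below just shows the input pattern the new test case feeds in (a transposed, hence non-contiguous, tensor), while the fix itself is simply to hand the contiguous copy `*X` rather than the raw input to `layer_norm_cpu_out`.

```python
import torch

x = torch.rand(1, 2, 2, 2)
weight = torch.ones(2, 2)
bias = torch.zeros(2, 2)

# transpose() returns a non-contiguous view; this is the case the static
# runtime layer_norm operator previously mishandled.
y = torch.layer_norm(x.transpose(1, 2), [2, 2], weight, bias, 1e-5, False)
```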
|
diff --git a/benchmarks/static_runtime/test_static_runtime.cc b/benchmarks/static_runtime/test_static_runtime.cc
index a2b8922a4a..8fe1e88d37 100644
--- a/benchmarks/static_runtime/test_static_runtime.cc
+++ b/benchmarks/static_runtime/test_static_runtime.cc
@@ -673,6 +673,12 @@ TEST(StaticRuntime, LayerNorm) {
return torch.layer_norm(input, normalized_shape, None, None, 1e-05, False).clone()
)JIT";
+ const std::string layer_norm_with_noncontiguous_input = R"JIT(
+ def forward(self, input: Tensor, normalized_shape: List[int], weight: Tensor, bias: Tensor):
+ input = torch.transpose(input, 1, 2)
+ return torch.layer_norm(input, normalized_shape, weight, bias, 1e-05, False).clone()
+ )JIT";
+
const auto a = torch::rand({1, 2, 2, 2});
const auto b = torch::rand({3, 2, 2, 2});
for (int normalized_size : {2, 3}) {
@@ -684,6 +690,7 @@ TEST(StaticRuntime, LayerNorm) {
std::vector<IValue> args1{b, normalized_shape, weight, bias};
testStaticRuntime(layer_norm_with_weights, args);
testStaticRuntime(layer_norm_with_weights, args, args1);
+ testStaticRuntime(layer_norm_with_noncontiguous_input, args);
args = {a, normalized_shape};
testStaticRuntime(layer_norm_without_weights, args);
diff --git a/torch/csrc/jit/runtime/static/ops.cpp b/torch/csrc/jit/runtime/static/ops.cpp
index 0d67ea4196..b4f4c38c2a 100644
--- a/torch/csrc/jit/runtime/static/ops.cpp
+++ b/torch/csrc/jit/runtime/static/ops.cpp
@@ -2122,7 +2122,7 @@ REGISTER_OPERATOR_FUNCTOR(aten::layer_norm, aten_layer_norm, [](Node* n) -> SROp
p_node->Output(0).toTensor(), X->sizes(), c10::nullopt);
}
at::Tensor& output = p_node->Output(0).toTensor();
- at::native::layer_norm_cpu_out(output, input, *gamma, *beta, eps, M, N);
+ at::native::layer_norm_cpu_out(output, *X, *gamma, *beta, eps, M, N);
};
});
|
2.41.0
|
d58aeb73a742b30de21b64e10a1c05143d77efc
|
Wed, 24 Apr 2024 16:29:37 -0400
|
[PATCH 0594/1000] Handle size/etc accessors in FakeTensor, support accessing symbolic types from toInt/etc in IValue (#124760)
|
Fixes https://github.com/pytorch/pytorch/issues/122772 Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124760 Approved by: https://github.com/albanD, https://github.com/eellison
|
diff --git a/aten/src/ATen/core/ivalue.h b/aten/src/ATen/core/ivalue.h
index 24506f0756..07e85677c3 100644
--- a/aten/src/ATen/core/ivalue.h
+++ b/aten/src/ATen/core/ivalue.h
@@ -532,8 +532,13 @@ struct TORCH_API IValue final {
return Tag::Double == tag;
}
double toDouble() const {
- AT_ASSERT(isDouble());
- return payload.u.as_double;
+ if (isDouble()) {
+ return payload.u.as_double;
+ } else if (isSymFloat()) {
+ return toSymFloat().guard_float(__FILE__, __LINE__);
+ } else {
+ TORCH_INTERNAL_ASSERT(0, "expected double");
+ }
}
// ComplexDouble
@@ -639,8 +644,13 @@ struct TORCH_API IValue final {
}
int64_t toInt() const {
- AT_ASSERT(isInt());
- return payload.u.as_int;
+ if (isInt()) {
+ return payload.u.as_int;
+ } else if (isSymInt()) {
+ return toSymInt().guard_int(__FILE__, __LINE__);
+ } else {
+ TORCH_INTERNAL_ASSERT(0, "expected int");
+ }
}
// Bool
@@ -658,8 +668,13 @@ struct TORCH_API IValue final {
return Tag::Bool == tag;
}
bool toBool() const {
- AT_ASSERT(isBool());
- return payload.u.as_bool;
+ if (isBool()) {
+ return payload.u.as_bool;
+ } else if (isSymBool()) {
+ return toSymBool().guard_bool(__FILE__, __LINE__);
+ } else {
+ TORCH_INTERNAL_ASSERT(0, "expected bool");
+ }
}
// IntList
diff --git a/test/dynamo_expected_failures/TestLinalgCPU.test_inverse_cpu_complex128 b/test/dynamo_expected_failures/TestLinalgCPU.test_inverse_cpu_complex128
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestLinalgCPU.test_inverse_cpu_complex64 b/test/dynamo_expected_failures/TestLinalgCPU.test_inverse_cpu_complex64
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestLinalgCPU.test_inverse_cpu_float32 b/test/dynamo_expected_failures/TestLinalgCPU.test_inverse_cpu_float32
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestLinalgCPU.test_inverse_cpu_float64 b/test/dynamo_expected_failures/TestLinalgCPU.test_inverse_cpu_float64
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestVmapOperators.test_chunk b/test/dynamo_expected_failures/TestVmapOperators.test_chunk
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/functorch/test_aotdispatch.py b/test/functorch/test_aotdispatch.py
index 4f7545ddf2..73b32ee0f0 100644
--- a/test/functorch/test_aotdispatch.py
+++ b/test/functorch/test_aotdispatch.py
@@ -5404,9 +5404,6 @@ symbolic_aot_autograd_failures = {
xfail(
"nn.functional.embedding_bag", ""
), # Cannot call sizes() on tensor with symbolic sizes/strides
- xfail(
- "nn.functional.fractional_max_pool2d", ""
- ), # rand() received an invalid combination of arguments - g...
xfail(
"nn.functional.fractional_max_pool3d", ""
), # rand() received an invalid combination of arguments - g...
@@ -5608,7 +5605,6 @@ symbolic_aot_autograd_module_failures = {
torch.nn.GaussianNLLLoss, # NotImplementedError: local_scalar_dense/item NYI for torch.bool
torch.nn.GroupNorm, # in native_group_norm_backward cpg, _rem = divmod(C, group)
# TypeError: unsupported operand type(s) for divmod(): 'SymInt' and 'int'
- torch.nn.FractionalMaxPool2d, # int() argument must be a string, a bytes-like object or a number, not 'SymFloat'
torch.nn.FractionalMaxPool3d, # int() argument must be a string, a bytes-like object or a number, not 'SymFloat'
torch.nn.BCELoss, # new_size = _infer_size(target.size(), weight.size())
# RuntimeError: expected int at position 0, but got: SymInt
diff --git a/test/test_jit.py b/test/test_jit.py
index 6f24f07a50..6f79267a63 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -5598,7 +5598,7 @@ a")
g = parse_ir(graph_str)
m = self.createFunctionFromGraph(g)
self.getExportImportCopy(m)
- with self.assertRaisesRegex(RuntimeError, "isInt"):
+ with self.assertRaisesRegex(RuntimeError, "expected int"):
m()
diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py
index fd2cc6d304..b6a4626925 100644
--- a/test/test_proxy_tensor.py
+++ b/test/test_proxy_tensor.py
@@ -1905,9 +1905,7 @@ symbolic_tensor_failures = {
xfail('nn.functional.binary_cross_entropy', ''), # aten.new_empty.default - couldn't find symbolic meta function/decom...
xfail('nn.functional.cross_entropy', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.ctc_loss'), # aten._ctc_loss.Tensor - couldn't find symbolic meta function/decomposition
- xfail('nn.functional.fractional_max_pool3d', ''), # argument 'size' must be tuple of ints, but found element of t...
xfail('quantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend.
- xfail('resize_as_', ''), # aten.clone.default - couldn't find symbolic meta function/decomposition
xfail('unique_consecutive', ''), # aten.unique_consecutive.default - couldn't find symbolic meta function/decomposition
xfail('unique', ''), # aten._unique2.default - couldn't find symbolic meta function/decomposition
@@ -1958,29 +1956,17 @@ out_symbolic_tensor_failures = {
xfail('angle', ''),
xfail('argmax', ''),
xfail('argmin', ''),
- xfail('bmm', ''),
xfail('fft.fft2', ''),
xfail('fft.fftn', ''),
xfail('fft.ifft2', ''),
xfail('fft.ifftn', ''),
xfail('gather', ''),
- xfail('linalg.cholesky', ''),
- xfail('linalg.cholesky_ex', ''),
- xfail('linalg.det', ''),
- xfail('linalg.det', 'singular'),
- xfail('linalg.inv', ''),
- xfail('linalg.inv_ex', ''),
xfail('linalg.pinv', ''),
xfail('linalg.pinv', 'hermitian'),
- xfail('linalg.svdvals', ''),
xfail('lu', ''),
- xfail('max', 'reduction_with_dim'),
- xfail('min', 'reduction_with_dim'),
- xfail('nn.functional.avg_pool2d', ''),
xfail('scatter_add', ''),
xfail('scatter', ''),
xfail('take_along_dim', ''),
- xfail('topk', ''),
xfail('triangular_solve', ''),
xfail('view_copy', ''),
diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py
index f29f4ed524..df4674e4e6 100644
--- a/torch/_subclasses/fake_tensor.py
+++ b/torch/_subclasses/fake_tensor.py
@@ -540,6 +540,18 @@ class FakeTensor(torch.Tensor):
else:
return args[0].fake_device
+ # this handler must be done inside FakeTensor subclass, not mode, because
+ # we can end up dispatching here when we have a fake tensor with
+ # symbolic sizes running under in_kernel_invocation_manager.
+ # The subclass is asked to handle this query because size (not
+ # sym_size) was called, but we are unable to serve it directly because
+ # there are symbolic sizes in the class. The use of
+ # in_kernel_invocation_manager means it's incorrect to activate a
+ # mode to actually handle this (this caused
+ # https://github.com/pytorch/pytorch/issues/122772).
+ if handler := _DISPATCH_META_HANDLERS.get(func):
+ return handler(args)
+
# Because fake mode can return NotImplemented (if it sees a subclass
# it doesn't know how to deal with), this test here is important
# because the next dispatch after a fake mode will attempt to use
@@ -1468,6 +1480,9 @@ class FakeTensorMode(TorchDispatchMode):
r = func(*args, **kwargs)
except NotImplementedError as not_implemented_error:
return maybe_run_unsafe_fallback(not_implemented_error)
+ except Exception:
+ log.exception("failed while attempting to run meta for %s", func)
+ raise
return self.wrap_meta_outputs_with_default_device_logic(
r, func, flat_args, device=kwargs.get("device")
|
2.41.0
|
22847a9cb4dde38bb51e5300b9cc9f586e63459
|
Wed, 24 Apr 2024 16:29:37 -0400
|
[PATCH 0595/1000] We should not be in kernel invocation before we restore fake mode (#124762)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124762 Approved by: https://github.com/eellison ghstack dependencies: #124760
|
diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py
index df4674e4e6..dcd559f836 100644
--- a/torch/_subclasses/fake_tensor.py
+++ b/torch/_subclasses/fake_tensor.py
@@ -592,6 +592,8 @@ class FakeTensor(torch.Tensor):
)
return NotImplemented
+ assert not fake_mode.in_kernel_invocation
+
with fake_mode: # type: ignore[attr-defined]
return func(*args, **kwargs)
|
2.41.0
|
08aa0182cf1bdab9ae657f4462743736ae09aa6
|
Tue, 23 Apr 2024 13:17:37 -0700
|
[PATCH 0596/1000] Build device generic torch.Stream and torch.Event based on c10::Stream/Event (#123611)
|
This diff intends to build device-generic torch.Stream and torch.Event for newly added accelerators in PyTorch. ------------ **torch.Stream APIs** ``` # Defined in torch/csrc/Stream.cpp class Stream(_StreamBase): stream_id: _int # Stream id device_index: _int device_type: _int device: _device # The device of the stream @overload def __new__(self, device: Optional[DeviceLikeType] = None, priority: _int = 0) -> Stream: ... @overload def __new__(self, stream_id: _int, device_index: _int, device_type: _int, priority: _int = 0) -> Stream: ... def wait_event(self, event: Event) -> None: ... def wait_stream(self, other: Stream) -> None: ... def record_event(self, event: Optional[Event] = None) -> Event: ... def query(self) -> None: ... def synchronize(self) -> None: ... def __hash__(self) -> _int: ... def __repr__(self) -> str: ... def __eq__(self, other: object) -> _bool: ... ``` ------------------ **torch.Event APIs**: - IPC-related APIs are not implemented, since many device backends don't support them, but we leave the interfaces there for future adaptation of torch.cuda.Stream. - Currently only enable_timing is supported, since it is the most common flag used in other device backends. We have to refactor the event flag system in PyTorch to support fancier flags. - An elapsedTime API is added to c10::Event ``` # Defined in torch/csrc/Event.cpp class Event(_EventBase): device: _device # The device of the Event event_id: _int # The raw event created by device backend def __new__(self, device: Optional[DeviceLikeType] = None, enable_timing: _bool = False, blocking: _bool = False, interprocess: _bool = False) -> Event: ... @classmethod def from_ipc_handle(self, device: DeviceLikeType, ipc_handle: bytes) -> Event: ... def record(self, stream: Optional[Stream] = None) -> None: ... def wait(self, stream: Optional[Stream] = None) -> None: ... def query(self) -> _bool: ... def elapsed_time(self, other: Event) -> _float: ... def synchronize(self) -> None: ... def ipc_handle(self) -> bytes: ... def __repr__(self) -> str: ... ``` ----------- c10::Event provides new APIs: - calculate **elapsedTime** - get the raw event id - synchronize the event ``` double elapsedTime(const Event& event) const { return impl_.elapsedTime(event.impl_); } void* eventId() const { return impl_.eventId(); } void synchronize() const { return impl_.synchronize(); } ``` ---------- TODO: need to find a good way to test them in PyTorch with API mocks. Differential Revision: [D56443357](https://our.internmc.facebook.com/intern/diff/D56443357) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123611 Approved by: https://github.com/albanD, https://github.com/jeffdaily
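A minimal usage sketch of the API described above, assuming a CUDA build purely for illustration; any backend that implements the new device hooks is expected to expose the same surface, and whether timing actually works depends on the backend's event-flag mapping.

```
import torch

# Create a fresh stream from the backend's pool (the device string is illustrative).
s = torch.Stream(device="cuda")

start = torch.Event(enable_timing=True)
end = torch.Event(enable_timing=True)

start.record(s)
# ... enqueue work on `s` here ...
end.record(s)
end.synchronize()                 # block until the event has completed

print(start.elapsed_time(end))    # time between the two records (ms on CUDA-like backends)
print(s.query())                  # non-blocking completion check

other = torch.Stream(device="cuda")
other.wait_stream(s)              # order `other` after the work already queued on `s`
evt = s.record_event()            # record a new Event on `s` and return it
```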
|
diff --git a/aten/src/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h b/aten/src/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h
index e34e210cd5..a0fc211e4c 100644
--- a/aten/src/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h
+++ b/aten/src/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h
@@ -88,6 +88,9 @@ struct HIPGuardImplMasqueradingAsCUDA final : public c10::impl::DeviceGuardImplI
Stream getDefaultStream(Device d) const override {
return getDefaultHIPStreamMasqueradingAsCUDA(d.index());
}
+ Stream getNewStream(Device d, int priority = 0) const override {
+ return getStreamFromPoolMasqueradingAsCUDA(priority, d.index());
+ }
Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false) const override {
return getStreamFromPoolMasqueradingAsCUDA(isHighPriority, d.index());
}
diff --git a/aten/src/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h b/aten/src/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h
index 2b30018b4a..fb13ada5ad 100644
--- a/aten/src/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h
+++ b/aten/src/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h
@@ -96,6 +96,11 @@ inline getStreamFromPoolMasqueradingAsCUDA(const bool isHighPriority = false, De
return HIPStreamMasqueradingAsCUDA(getStreamFromPool(isHighPriority, device));
}
+HIPStreamMasqueradingAsCUDA
+inline getStreamFromPoolMasqueradingAsCUDA(const int priority, DeviceIndex device = -1) {
+ return HIPStreamMasqueradingAsCUDA(getStreamFromPool(priority, device));
+}
+
HIPStreamMasqueradingAsCUDA
inline getStreamFromExternalMasqueradingAsCUDA(hipStream_t ext_stream, DeviceIndex device) {
return HIPStreamMasqueradingAsCUDA(getStreamFromExternal(ext_stream, device));
diff --git a/build_variables.bzl b/build_variables.bzl
index 22e36a4d8b..cebda39f4b 100644
--- a/build_variables.bzl
+++ b/build_variables.bzl
@@ -795,6 +795,7 @@ libtorch_python_core_sources = [
"torch/csrc/StorageMethods.cpp",
"torch/csrc/StorageSharing.cpp",
"torch/csrc/Stream.cpp",
+ "torch/csrc/Event.cpp",
"torch/csrc/TypeInfo.cpp",
"torch/csrc/api/src/python/init.cpp",
"torch/csrc/autograd/functions/init.cpp",
diff --git a/c10/core/Event.h b/c10/core/Event.h
index 2cbaf18022..b94db9f4f2 100644
--- a/c10/core/Event.h
+++ b/c10/core/Event.h
@@ -118,6 +118,18 @@ struct Event final {
return impl_.query();
}
+ double elapsedTime(const Event& event) const {
+ return impl_.elapsedTime(event.impl_);
+ }
+
+ void* eventId() const {
+ return impl_.eventId();
+ }
+
+ void synchronize() const {
+ return impl_.synchronize();
+ }
+
private:
impl::InlineEvent<impl::VirtualGuardImpl> impl_;
};
diff --git a/c10/core/impl/DeviceGuardImplInterface.h b/c10/core/impl/DeviceGuardImplInterface.h
index 1b168f7821..59210a92d6 100644
--- a/c10/core/impl/DeviceGuardImplInterface.h
+++ b/c10/core/impl/DeviceGuardImplInterface.h
@@ -122,6 +122,16 @@ struct C10_API DeviceGuardImplInterface {
TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.")
}
+ /**
+ * Return a new stream for a given device and priority. The stream will be
+   * copied and shared around, so the device backend should be able to correctly
+   * handle the lifetime of the stream.
+ */
+ virtual Stream getNewStream(Device, int priority = 0) const {
+ (void)priority;
+    TORCH_CHECK(false, "Backend doesn't support creating a new Stream.")
+ }
+
/**
* Set a stream to be the thread local current stream for its device.
* Return the previous stream for that device. You are NOT required
@@ -194,6 +204,14 @@ struct C10_API DeviceGuardImplInterface {
TORCH_CHECK(false, "Backend doesn't support synchronizing streams.");
}
+ /**
+ * Wait (by blocking the calling thread) until all the work previously
+ * recorded on the event has completed running on the device.
+ */
+ virtual void synchronizeEvent(void* /*event*/) const {
+ TORCH_CHECK(false, "Backend doesn't support synchronizing events.");
+ }
+
/**
* Ensure the caching allocator (if any) is aware that the given DataPtr is
* being used on the given stream, and that it should thus avoid recycling the
@@ -202,6 +220,13 @@ struct C10_API DeviceGuardImplInterface {
virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const {
}
+ /**
+ * Fetch the elapsed time between two recorded events.
+ */
+ virtual double elapsedTime(void* /*event1*/, void* /*event2*/) const {
+ TORCH_CHECK(false, "Backend doesn't support elapsedTime.");
+ }
+
/**
* Intended use of this class is to leak the DeviceGuardImpl at program end.
* So you better not call the destructor, buster!
@@ -234,6 +259,13 @@ struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface {
// no-op
return Stream(Stream::DEFAULT, Device(D, -1));
}
+
+ Stream getNewStream(Device, int priority = 0) const override {
+ // no-op
+ (void)priority;
+ return Stream(Stream::DEFAULT, Device(D, -1));
+ }
+
// NB: These do NOT set the current device
Stream exchangeStream(Stream) const noexcept override {
// no-op
diff --git a/c10/core/impl/InlineEvent.h b/c10/core/impl/InlineEvent.h
index ef1e2c6d6f..3485da37c9 100644
--- a/c10/core/impl/InlineEvent.h
+++ b/c10/core/impl/InlineEvent.h
@@ -101,6 +101,32 @@ struct InlineEvent final {
return backend_.queryEvent(event_);
}
+ void* eventId() const {
+ return event_;
+ }
+
+ double elapsedTime(const InlineEvent& other) const {
+ TORCH_CHECK(
+ other.was_marked_for_recording(),
+ "other was not marked for recording.");
+ TORCH_CHECK(
+ was_marked_for_recording(), "self was not marked for recording.");
+ TORCH_CHECK(
+ other.device_type() == device_type_,
+ "Event device type ",
+ DeviceTypeName(device_type_),
+ " does not match other's device type ",
+ DeviceTypeName(other.device_type()),
+ ".");
+ return backend_.elapsedTime(event_, other.event_);
+ }
+
+ void synchronize() const {
+ if (!was_marked_for_recording_)
+ return;
+ backend_.synchronizeEvent(event_);
+ }
+
private:
void* event_ = nullptr;
T backend_;
diff --git a/c10/core/impl/VirtualGuardImpl.h b/c10/core/impl/VirtualGuardImpl.h
index ce32411d3b..2065150535 100644
--- a/c10/core/impl/VirtualGuardImpl.h
+++ b/c10/core/impl/VirtualGuardImpl.h
@@ -39,6 +39,9 @@ class VirtualGuardImpl final : public DeviceGuardImplInterface {
Stream getStream(Device d) const noexcept override {
return impl_->getStream(d);
}
+ Stream getNewStream(Device d, int priority = 0) const override {
+ return impl_->getNewStream(d, priority);
+ }
Stream getDefaultStream(Device d) const override {
return impl_->getDefaultStream(d);
}
@@ -84,6 +87,14 @@ class VirtualGuardImpl final : public DeviceGuardImplInterface {
impl_->recordDataPtrOnStream(data_ptr, stream);
}
+ double elapsedTime(void* event1, void* event2) const override {
+ return impl_->elapsedTime(event1, event2);
+ }
+
+ void synchronizeEvent(void* event) const override {
+ return impl_->synchronizeEvent(event);
+ }
+
private:
const DeviceGuardImplInterface* impl_ = nullptr;
};
diff --git a/c10/cuda/impl/CUDAGuardImpl.h b/c10/cuda/impl/CUDAGuardImpl.h
index 7c0ea21b12..2d983beaf8 100644
--- a/c10/cuda/impl/CUDAGuardImpl.h
+++ b/c10/cuda/impl/CUDAGuardImpl.h
@@ -62,6 +62,9 @@ struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface {
Stream getDefaultStream(Device d) const override {
return getDefaultCUDAStream(d.index());
}
+ Stream getNewStream(Device d, int priority = 0) const override {
+ return getStreamFromPool(priority, d.index());
+ }
Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
const override {
return getStreamFromPool(isHighPriority, d.index());
diff --git a/test/test_public_bindings.py b/test/test_public_bindings.py
index e6a1914aa4..16ed16d11a 100644
--- a/test/test_public_bindings.py
+++ b/test/test_public_bindings.py
@@ -230,6 +230,7 @@ class TestPublicBindings(TestCase):
"StaticModule",
"Stream",
"StreamObjType",
+ "Event",
"StringType",
"SUM",
"SymFloat",
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index 1b53c1b40f..ef6cbc5835 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -112,7 +112,44 @@ class Stream:
device_index: _int
device_type: _int
- device: device # The device of the stream
+ device: _device # The device of the stream
+
+ @overload
+ def __new__(self, device: Optional[DeviceLikeType] = None, *, priority: _int = 0) -> Stream: ...
+ @overload
+ def __new__(self, stream_id: _int, device_index: _int, device_type: _int, *, priority: _int = 0) -> Stream: ...
+ def query(self) -> _bool: ...
+ def synchronize(self) -> None: ...
+ def wait_event(self, event: Event) -> None: ...
+ def wait_stream(self, other: Stream) -> None: ...
+ def record_event(self, event: Optional[Event] = None) -> Event: ...
+ def __hash__(self) -> _int: ...
+ def __repr__(self) -> str: ...
+ def __eq__(self, other: object) -> _bool: ...
+
+
+# Defined in torch/csrc/Event.cpp
+class Event:
+
+ device: _device # The device of the Event
+ event_id: _int # The raw event created by device backend
+
+ def __new__(self,
+ device: Optional[DeviceLikeType] = None,
+ *,
+ enable_timing: _bool = False,
+ blocking: _bool = False,
+ interprocess: _bool = False) -> Event: ...
+ @classmethod
+ def from_ipc_handle(self, device: _device, ipc_handle: bytes) -> Event: ...
+ def record(self, stream: Optional[Stream] = None) -> None: ...
+ def wait(self, stream: Optional[Stream] = None) -> None: ...
+ def query(self) -> _bool: ...
+ def elapsed_time(self, other: Event) -> _float: ...
+ def synchronize(self) -> None: ...
+ def ipc_handle(self) -> bytes: ...
+ def __repr__(self) -> str: ...
+
# Defined in torch/csrc/Size.cpp
class Size(Tuple[_int, ...]):
diff --git a/torch/csrc/Event.cpp b/torch/csrc/Event.cpp
new file mode 100644
index 0000000000..b8cf8b2580
--- /dev/null
+++ b/torch/csrc/Event.cpp
@@ -0,0 +1,328 @@
+#include <pybind11/pybind11.h>
+#include <torch/csrc/Device.h>
+#include <torch/csrc/Event.h>
+#include <torch/csrc/Stream.h>
+#include <torch/csrc/THP.h>
+#include <torch/csrc/utils/pybind.h>
+#include <torch/csrc/utils/pycfunction_helpers.h>
+#include <torch/csrc/utils/python_arg_parser.h>
+
+#include <c10/core/Event.h>
+#include <c10/core/Stream.h>
+
+#include <c10/core/DeviceType.h>
+#include <c10/core/impl/DeviceGuardImplInterface.h>
+#include <structmember.h>
+#include <string>
+
+PyObject* THPEventClass = nullptr;
+
+static PyObject* THPEvent_pynew(
+ PyTypeObject* type,
+ PyObject* args,
+ PyObject* kwargs) {
+ HANDLE_TH_ERRORS
+
+ unsigned char enable_timing = 0;
+ unsigned char blocking = 0;
+ unsigned char interprocess = 0;
+
+ static torch::PythonArgParser parser({
+ "Event(Device device=None, *, bool enable_timing=True, bool blocking=False, bool interprocess=False)",
+ });
+
+ torch::ParsedArgs<4> parsed_args;
+ auto r = parser.parse(args, kwargs, parsed_args);
+
+ auto device = r.deviceOptional(0);
+
+ if (!device.has_value()) {
+ device = at::Device(at::getAccelerator(false).value_or(at::kCPU));
+ }
+ enable_timing = r.toBoolWithDefault(1, true);
+ blocking = r.toBoolWithDefault(2, false);
+ interprocess = r.toBoolWithDefault(3, false);
+
+ THPObjectPtr ptr(type->tp_alloc(type, 0));
+ if (!ptr) {
+ TORCH_CHECK(ptr, "Failed to allocate memory for Event");
+ }
+
+ THPEvent* self = (THPEvent*)ptr.get();
+
+ // TODO: blocking and interprocess are not supported yet. To support them, the
+ // flag system of c10::Event needs to be refactored. C10::Event should also
+ // provide a generic constructor to support blocking and interprocess events.
+ (void)blocking;
+ (void)interprocess;
+
+ new (&self->event) c10::Event(
+ device->type(),
+ (enable_timing ? c10::EventFlag::PYTORCH_DEFAULT
+ : c10::EventFlag::BACKEND_DEFAULT));
+
+ return (PyObject*)ptr.release();
+ END_HANDLE_TH_ERRORS
+}
+
+PyObject* THPEvent_new(c10::DeviceType device_type, c10::EventFlag flag) {
+ auto type = (PyTypeObject*)&THPEventType;
+ auto self = THPObjectPtr{type->tp_alloc(type, 0)};
+ TORCH_CHECK(self, "Failed to allocate memory for Event");
+ auto self_ = reinterpret_cast<THPEvent*>(self.get());
+ new (&self_->event) c10::Event(device_type, flag);
+ return self.release();
+}
+
+static void THPEvent_dealloc(THPEvent* self) {
+ {
+ pybind11::gil_scoped_release no_gil{};
+ self->event.~Event();
+ }
+ Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* THPEvent_get_device(THPEvent* self, void* unused) {
+ HANDLE_TH_ERRORS
+ at::optional<at::Device> device = self->event.device();
+ if (!device) {
+ Py_RETURN_NONE;
+ }
+ return THPDevice_New(device.value());
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPEvent_record(
+ PyObject* _self,
+ PyObject* args,
+ PyObject* kwargs) {
+ HANDLE_TH_ERRORS
+ auto self = (THPEvent*)_self;
+ PyObject* _stream = Py_None;
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+ constexpr const char* accepted_args[] = {"stream", nullptr};
+ if (!PyArg_ParseTupleAndKeywords(
+ args,
+ kwargs,
+ "|O",
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+ const_cast<char**>(accepted_args),
+ &_stream)) {
+    TORCH_WARN("Failed to parse THPEvent_record args");
+ return nullptr;
+ }
+ if (_stream != Py_None) {
+ auto stream = (THPStream*)_stream;
+ self->event.record(c10::Stream::unpack3(
+ stream->stream_id,
+ stream->device_index,
+ static_cast<c10::DeviceType>(stream->device_type)));
+ } else {
+ c10::impl::VirtualGuardImpl impl{
+ static_cast<c10::DeviceType>(self->event.device_type())};
+ self->event.record(impl.getStream(impl.getDevice()));
+ }
+ Py_RETURN_NONE;
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPEvent_from_ipc_handle(
+ PyObject* _type,
+ PyObject* args,
+ PyObject* kwargs) {
+ HANDLE_TH_ERRORS
+ auto type = (PyTypeObject*)_type;
+
+ static torch::PythonArgParser parser({
+ "from_ipc_handle(Device device, std::string ipc_handle)",
+ });
+ torch::ParsedArgs<2> parsed_args;
+ auto r = parser.parse(args, kwargs, parsed_args);
+
+ at::Device device = r.device(0);
+ std::string handle_string = r.string(1);
+ TORCH_CHECK_NOT_IMPLEMENTED(
+ false,
+ "torch.Event ipc is not supported yet, please open an issue if you need this!");
+ THPObjectPtr ptr(type->tp_alloc(type, 0));
+ if (!ptr) {
+ return nullptr;
+ }
+ THPEvent* self = (THPEvent*)ptr.get();
+
+ // TODO: for constructing event from ipc handle, the c10::Event needs to have
+ // more general constructor to achieve that.
+ new (&self->event) c10::Event(device.type(), c10::EventFlag::PYTORCH_DEFAULT);
+
+ return (PyObject*)ptr.release();
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPEvent_ipc_handle(PyObject* _self, PyObject* noargs) {
+ HANDLE_TH_ERRORS
+ auto self = (THPEvent*)_self;
+ (void)self;
+ TORCH_CHECK_NOT_IMPLEMENTED(
+ false,
+ "torch.Event ipc is not supported yet, please open an issue if you need this!");
+ std::string handle = "0";
+ return PyBytes_FromStringAndSize((const char*)&handle, sizeof(handle));
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPEvent_wait(
+ PyObject* _self,
+ PyObject* args,
+ PyObject* kwargs) {
+ HANDLE_TH_ERRORS {
+ auto self = (THPEvent*)_self;
+ PyObject* _stream = Py_None;
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+ constexpr const char* accepted_args[] = {"stream", nullptr};
+ if (!PyArg_ParseTupleAndKeywords(
+ args,
+ kwargs,
+ "|O",
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+ const_cast<char**>(accepted_args),
+ &_stream)) {
+      TORCH_WARN("Failed to parse THPEvent_wait args");
+ return nullptr;
+ }
+ if (_stream != Py_None) {
+ auto stream = (THPStream*)_stream;
+ self->event.block(c10::Stream::unpack3(
+ stream->stream_id,
+ stream->device_index,
+ static_cast<c10::DeviceType>(stream->device_type)));
+ } else {
+ c10::impl::VirtualGuardImpl impl{
+ static_cast<c10::DeviceType>(self->event.device_type())};
+ self->event.block(impl.getStream(impl.getDevice()));
+ }
+ }
+ Py_RETURN_NONE;
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPEvent_query(PyObject* _self, PyObject* noargs) {
+ HANDLE_TH_ERRORS
+ auto self = (THPEvent*)_self;
+ return PyBool_FromLong(self->event.query());
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPEvent_elapsed_time(PyObject* _self, PyObject* _other) {
+ HANDLE_TH_ERRORS
+ auto self = (THPEvent*)_self;
+ auto other = (THPEvent*)_other;
+ return PyFloat_FromDouble(self->event.elapsedTime(other->event));
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPEvent_synchronize(PyObject* _self, PyObject* noargs) {
+ HANDLE_TH_ERRORS {
+ pybind11::gil_scoped_release no_gil{};
+ auto self = (THPEvent*)_self;
+ self->event.synchronize();
+ }
+ Py_RETURN_NONE;
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPEvent_event_id(PyObject* _self, PyObject* noargs) {
+ HANDLE_TH_ERRORS
+ auto self = (THPEvent*)_self;
+ return PyLong_FromVoidPtr(self->event.eventId());
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPEvent_repr(THPEvent* self) {
+ HANDLE_TH_ERRORS
+ return THPUtils_packString(
+ "torch.Event device_type=" +
+ c10::DeviceTypeName(
+ static_cast<c10::DeviceType>(self->event.device_type()), true) +
+ ", device_index=" + std::to_string(self->event.device_index()) +
+ ", event_flag=" +
+ std::to_string(static_cast<int64_t>(self->event.flag())) + ", event_id=" +
+ std::to_string(reinterpret_cast<int64_t>(self->event.eventId())));
+ END_HANDLE_TH_ERRORS
+}
+
+// NOLINTNEXTLINE(*c-arrays*, *global-variables)
+static struct PyGetSetDef THPEvent_properties[] = {
+ {"device", (getter)THPEvent_get_device, nullptr, nullptr, nullptr},
+    {"event_id", (getter)THPEvent_event_id, nullptr, nullptr, nullptr},
+ {nullptr}};
+
+// NOLINTNEXTLINE(*c-arrays*, *global-variables)
+static PyMethodDef THPEvent_methods[] = {
+ {(char*)"from_ipc_handle",
+ castPyCFunctionWithKeywords(THPEvent_from_ipc_handle),
+ METH_CLASS | METH_VARARGS | METH_KEYWORDS,
+ nullptr},
+ {(char*)"record",
+ castPyCFunctionWithKeywords(THPEvent_record),
+ METH_VARARGS | METH_KEYWORDS,
+ nullptr},
+ {(char*)"wait",
+ castPyCFunctionWithKeywords(THPEvent_wait),
+ METH_VARARGS | METH_KEYWORDS,
+ nullptr},
+ {(char*)"query", THPEvent_query, METH_NOARGS, nullptr},
+ {(char*)"elapsed_time", THPEvent_elapsed_time, METH_O, nullptr},
+ {(char*)"synchronize", THPEvent_synchronize, METH_NOARGS, nullptr},
+ {(char*)"ipc_handle", THPEvent_ipc_handle, METH_NOARGS, nullptr},
+ {nullptr}};
+
+PyTypeObject THPEventType = {
+ PyVarObject_HEAD_INIT(nullptr, 0) "torch.Event", /* tp_name */
+ sizeof(THPEvent), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)THPEvent_dealloc, /* tp_dealloc */
+ 0, /* tp_vectorcall_offset */
+ nullptr, /* tp_getattr */
+ nullptr, /* tp_setattr */
+ nullptr, /* tp_reserved */
+ (reprfunc)THPEvent_repr, /* tp_repr */
+ nullptr, /* tp_as_number */
+ nullptr, /* tp_as_sequence */
+ nullptr, /* tp_as_mapping */
+ nullptr, /* tp_hash */
+ nullptr, /* tp_call */
+ nullptr, /* tp_str */
+ nullptr, /* tp_getattro */
+ nullptr, /* tp_setattro */
+ nullptr, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+ nullptr, /* tp_doc */
+ nullptr, /* tp_traverse */
+ nullptr, /* tp_clear */
+ nullptr, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ nullptr, /* tp_iter */
+ nullptr, /* tp_iternext */
+ THPEvent_methods, /* tp_methods */
+ nullptr, /* tp_members */
+ THPEvent_properties, /* tp_getset */
+ nullptr, /* tp_base */
+ nullptr, /* tp_dict */
+ nullptr, /* tp_descr_get */
+ nullptr, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ nullptr, /* tp_init */
+ nullptr, /* tp_alloc */
+ THPEvent_pynew, /* tp_new */
+};
+
+void THPEvent_init(PyObject* module) {
+ THPEventClass = (PyObject*)&THPEventType;
+ if (PyType_Ready(&THPEventType) < 0) {
+ throw python_error();
+ }
+ Py_INCREF(&THPEventType);
+ if (PyModule_AddObject(module, "Event", (PyObject*)&THPEventType) < 0) {
+ throw python_error();
+ }
+}
diff --git a/torch/csrc/Event.h b/torch/csrc/Event.h
new file mode 100644
index 0000000000..745610d5dd
--- /dev/null
+++ b/torch/csrc/Event.h
@@ -0,0 +1,21 @@
+#ifndef THP_EVENT_INC
+#define THP_EVENT_INC
+
+#include <c10/core/Event.h>
+#include <torch/csrc/python_headers.h>
+
+struct TORCH_API THPEvent {
+ PyObject_HEAD c10::Event event;
+};
+extern PyObject* THPEventClass;
+TORCH_API extern PyTypeObject THPEventType;
+
+TORCH_API void THPEvent_init(PyObject* module);
+TORCH_API PyObject* THPEvent_new(
+ c10::DeviceType device_type,
+ c10::EventFlag flag);
+inline bool THPEvent_Check(PyObject* obj) {
+ return THPEventClass && PyObject_IsInstance(obj, THPEventClass);
+}
+
+#endif // THP_EVENT_INC
diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp
index f025bb8e13..a254c5bb3c 100644
--- a/torch/csrc/Module.cpp
+++ b/torch/csrc/Module.cpp
@@ -40,6 +40,7 @@
#include <torch/csrc/Device.h>
#include <torch/csrc/Dtype.h>
#include <torch/csrc/DynamicTypes.h>
+#include <torch/csrc/Event.h>
#include <torch/csrc/Generator.h>
#include <torch/csrc/Layout.h>
#include <torch/csrc/MemoryFormat.h>
@@ -1606,6 +1607,7 @@ PyObject* initModule() {
THPQScheme_init(module);
THPDevice_init(module);
THPStream_init(module);
+ THPEvent_init(module);
ASSERT_TRUE(THPVariable_initModule(module));
ASSERT_TRUE(THPFunction_initModule(module));
ASSERT_TRUE(THPEngine_initModule(module));
diff --git a/torch/csrc/Stream.cpp b/torch/csrc/Stream.cpp
index bd8abb0ecd..06dac515c1 100644
--- a/torch/csrc/Stream.cpp
+++ b/torch/csrc/Stream.cpp
@@ -1,10 +1,19 @@
#include <pybind11/pybind11.h>
#include <torch/csrc/Device.h>
+#include <torch/csrc/Event.h>
+#include <torch/csrc/Stream.h>
#include <torch/csrc/THP.h>
#include <torch/csrc/utils/pybind.h>
+#include <torch/csrc/utils/pycfunction_helpers.h>
#include <torch/csrc/utils/python_arg_parser.h>
+#include <c10/core/DeviceGuard.h>
+#include <c10/core/Stream.h>
+#include <c10/core/impl/DeviceGuardImplInterface.h>
+#include <c10/util/Exception.h>
+#include <c10/util/hash.h>
#include <structmember.h>
+#include <cstdint>
PyTypeObject* THPStreamClass = nullptr;
@@ -13,22 +22,53 @@ static PyObject* THPStream_pynew(
PyObject* args,
PyObject* kwargs) {
HANDLE_TH_ERRORS
- int64_t stream_id = 0;
- int64_t device_index = 0;
+
+ int64_t stream_id = -1;
int64_t device_type = 0;
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
- constexpr const char* kwlist[] = {
- "stream_id", "device_index", "device_type", nullptr};
- if (!PyArg_ParseTupleAndKeywords(
- args,
- kwargs,
- "|LLL",
- // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
- const_cast<char**>(kwlist),
- &stream_id,
- &device_index,
- &device_type)) {
- return nullptr;
+ int64_t device_index = 0;
+ int64_t priority = 0;
+
+ static torch::PythonArgParser parser({
+      "Stream(Device device=None, *, int64_t priority=0)",
+ "Stream(int64_t stream_id, int64_t device_index, int64_t device_type, *, int64_t priority=0)",
+ });
+
+ torch::ParsedArgs<4> parsed_args;
+ auto r = parser.parse(args, kwargs, parsed_args);
+
+ std::unique_ptr<c10::DeviceGuard> device_guard_ptr;
+
+ if (r.idx == 0) {
+ auto default_accelerator = at::getAccelerator(false);
+ auto device = r.deviceOptional(0);
+ if (device.has_value()) {
+ device_type = static_cast<int64_t>(device->type());
+ device_index = static_cast<int64_t>(device->index());
+ // Initialize device guard if device is not None.
+ device_guard_ptr = std::make_unique<c10::DeviceGuard>(device.value());
+ } else {
+ // If device is None, we will use the current accelerator and index.
+ // If the current accelerator is not set, we will use the CPU as device
+ // type.
+ device_type = static_cast<int64_t>(
+ default_accelerator.value_or(c10::DeviceType::CPU));
+ c10::impl::VirtualGuardImpl impl{
+ static_cast<c10::DeviceType>(device_type)};
+ const auto current_device = impl.getDevice();
+ device_index = current_device.index();
+ }
+ priority = r.toInt64WithDefault(1, 0);
+ } else if (r.idx == 1) {
+ stream_id = r.toInt64WithDefault(0, -1);
+ device_index = r.toInt64WithDefault(1, 0);
+ device_type =
+ r.toInt64WithDefault(2, static_cast<int64_t>(c10::DeviceType::CPU));
+ priority = r.toInt64WithDefault(3, 0);
+ } else {
+ TORCH_CHECK(
+ false,
+        "Failed to parse Stream arguments; please check the usage: ",
+ parser.get_signatures());
}
THPObjectPtr ptr(type->tp_alloc(type, 0));
@@ -37,9 +77,29 @@ static PyObject* THPStream_pynew(
}
THPStream* self = (THPStream*)ptr.get();
- self->stream_id = stream_id;
- self->device_index = device_index;
- self->device_type = device_type;
+
+  // If torch.Stream is not created from an existing Stream, then create a new one.
+  // This requires other device backends to override the getNewStream method. How
+  // the new stream is created is backend-specific; the backend should correctly
+  // manage the lifetime of its streams.
+ c10::optional<c10::Stream> stream_opt;
+ if (r.idx == 0) {
+ c10::impl::VirtualGuardImpl impl{static_cast<c10::DeviceType>(device_type)};
+ stream_opt = impl.getNewStream(
+ c10::Device(static_cast<c10::DeviceType>(device_type), device_index),
+ static_cast<int>(priority));
+ } else {
+ stream_opt = c10::Stream::unpack3(
+ stream_id,
+ static_cast<c10::DeviceIndex>(device_index),
+ static_cast<c10::DeviceType>(device_type));
+ }
+
+ TORCH_CHECK(stream_opt.has_value(), "Failed to create stream");
+ self->stream_id = static_cast<int64_t>(stream_opt->id());
+ self->device_index = static_cast<int64_t>(stream_opt->device_index());
+ self->device_type = static_cast<int64_t>(stream_opt->device_type());
+
return (PyObject*)ptr.release();
END_HANDLE_TH_ERRORS
}
@@ -73,15 +133,167 @@ static PyObject* THPStream_get_device(THPStream* self, void* unused) {
END_HANDLE_TH_ERRORS
}
+static PyObject* THPStream_query(PyObject* _self, PyObject* noargs) {
+ HANDLE_TH_ERRORS
+ auto self = (THPStream*)_self;
+
+ return PyBool_FromLong(c10::Stream::unpack3(
+ self->stream_id,
+ self->device_index,
+ static_cast<c10::DeviceType>(self->device_type))
+ .query());
+
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPStream_synchronize(PyObject* _self, PyObject* noargs) {
+ HANDLE_TH_ERRORS {
+ pybind11::gil_scoped_release no_gil;
+ auto self = (THPStream*)_self;
+
+ c10::Stream::unpack3(
+ self->stream_id,
+ self->device_index,
+ static_cast<c10::DeviceType>(self->device_type))
+ .synchronize();
+ }
+ Py_RETURN_NONE;
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPStream_wait_event(PyObject* _self, PyObject* _event) {
+ HANDLE_TH_ERRORS {
+ auto self = (THPStream*)_self;
+ auto event = (THPEvent*)_event;
+ c10::Stream::unpack3(
+ self->stream_id,
+ self->device_index,
+ static_cast<c10::DeviceType>(self->device_type))
+ .wait(event->event);
+ }
+ Py_RETURN_NONE;
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPStream_wait_stream(PyObject* _self, PyObject* _other) {
+ HANDLE_TH_ERRORS {
+ auto self = (THPStream*)_self;
+ auto other_stream = (THPStream*)_other;
+ c10::Event new_event(
+ static_cast<c10::DeviceType>(other_stream->device_type),
+ c10::EventFlag::PYTORCH_DEFAULT);
+ new_event.record(c10::Stream::unpack3(
+ other_stream->stream_id,
+ other_stream->device_index,
+ static_cast<c10::DeviceType>(other_stream->device_type)));
+ c10::Stream::unpack3(
+ self->stream_id,
+ self->device_index,
+ static_cast<c10::DeviceType>(self->device_type))
+ .wait(new_event);
+ }
+ Py_RETURN_NONE;
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPStream_record_event(
+ PyObject* _self,
+ PyObject* args,
+ PyObject* kwargs) {
+ HANDLE_TH_ERRORS
+ auto self = (THPStream*)_self;
+ PyObject* _new_event;
+ PyObject* _event = Py_None;
+
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
+ constexpr const char* accepted_args[] = {"event", nullptr};
+ if (!PyArg_ParseTupleAndKeywords(
+ args,
+ kwargs,
+ "|O",
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+ const_cast<char**>(accepted_args),
+ &_event)) {
+    TORCH_CHECK(false, "Failed to parse record_event args");
+ }
+ if (_event != Py_None) {
+ // Increase the refcount of the event to avoid it being destroyed.
+ Py_INCREF(_event);
+ _new_event = _event;
+ } else {
+ _new_event = THPEvent_new(
+ static_cast<c10::DeviceType>(self->device_type),
+ c10::EventFlag::PYTORCH_DEFAULT);
+ }
+ auto new_event = (THPEvent*)_new_event;
+ TORCH_CHECK(new_event, "event must not be null");
+ new_event->event.record(c10::Stream::unpack3(
+ self->stream_id,
+ self->device_index,
+ static_cast<c10::DeviceType>(self->device_type)));
+ return (PyObject*)new_event;
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPStream_repr(THPStream* self) {
+ HANDLE_TH_ERRORS
+ return THPUtils_packString(
+ "torch.Stream device_type=" +
+ c10::DeviceTypeName(
+ static_cast<c10::DeviceType>(self->device_type), true) +
+ ", device_index=" + std::to_string(self->device_index) +
+ ", stream_id=" + std::to_string(self->stream_id));
+ END_HANDLE_TH_ERRORS
+}
+
+static Py_hash_t THPStream_hash(THPStream* self) {
+ return static_cast<long>(at::hash_combine(
+ self->device_type,
+ (at::hash_combine(self->stream_id, self->device_index))));
+}
+
static PyObject* THPStream_eq(THPStream* self, THPStream* other) {
HANDLE_TH_ERRORS
return PyBool_FromLong(
- self->stream_id == other->stream_id &&
- self->device_index == other->device_index &&
- self->device_type == other->device_type);
+ (self->stream_id == other->stream_id) &&
+ (self->device_index == other->device_index) &&
+ (self->device_type == other->device_type));
+ END_HANDLE_TH_ERRORS
+}
+
+static PyObject* THPStream_ne(THPStream* self, THPStream* other) {
+ HANDLE_TH_ERRORS
+ return PyBool_FromLong(
+ (self->stream_id != other->stream_id) ||
+ (self->device_index != other->device_index) ||
+ (self->device_type != other->device_type));
END_HANDLE_TH_ERRORS
}
+static PyObject* THPStream_richcompare(
+ PyObject* self,
+ PyObject* other,
+ int op) {
+ PyObject* result = NULL;
+ if (other == Py_None) {
+ result = Py_False;
+ } else {
+ switch (op) {
+ case Py_EQ:
+ result = THPStream_eq((THPStream*)self, (THPStream*)other);
+ break;
+ case Py_NE:
+ result = THPStream_ne((THPStream*)self, (THPStream*)other);
+ break;
+ default:
+ result = Py_False;
+ break;
+ }
+ }
+ Py_XINCREF(result);
+ return result;
+}
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables)
static struct PyMemberDef THPStream_members[] = {
{"stream_id",
@@ -108,6 +320,14 @@ static struct PyGetSetDef THPStream_properties[] = {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables)
static PyMethodDef THPStream_methods[] = {
+ {"query", THPStream_query, METH_NOARGS, nullptr},
+ {"synchronize", THPStream_synchronize, METH_NOARGS, nullptr},
+ {"wait_event", THPStream_wait_event, METH_O, nullptr},
+ {"wait_stream", THPStream_wait_stream, METH_O, nullptr},
+ {"record_event",
+ castPyCFunctionWithKeywords(THPStream_record_event),
+ METH_VARARGS | METH_KEYWORDS,
+ nullptr},
{"__eq__", (PyCFunction)THPStream_eq, METH_O, nullptr},
{nullptr}};
@@ -120,11 +340,11 @@ PyTypeObject THPStreamType = {
nullptr, /* tp_getattr */
nullptr, /* tp_setattr */
nullptr, /* tp_reserved */
- nullptr, /* tp_repr */
+ (reprfunc)THPStream_repr, /* tp_repr */
nullptr, /* tp_as_number */
nullptr, /* tp_as_sequence */
nullptr, /* tp_as_mapping */
- nullptr, /* tp_hash */
+ (hashfunc)THPStream_hash, /* tp_hash */
nullptr, /* tp_call */
nullptr, /* tp_str */
nullptr, /* tp_getattro */
@@ -135,7 +355,7 @@ PyTypeObject THPStreamType = {
nullptr, /* tp_doc */
nullptr, /* tp_traverse */
nullptr, /* tp_clear */
- nullptr, /* tp_richcompare */
+ THPStream_richcompare, /* tp_richcompare */
0, /* tp_weaklistoffset */
nullptr, /* tp_iter */
nullptr, /* tp_iternext */
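A small hypothetical illustration of the Python protocol wired up above (repr, hash, and eq/ne via tp_richcompare); the packed triple below is made up and only shows that comparison and hashing are value-based.

```
import torch

# device_type 0 is CPU in the c10::DeviceType enum; the ids are arbitrary here.
a = torch.Stream(stream_id=0, device_index=0, device_type=0)
b = torch.Stream(stream_id=0, device_index=0, device_type=0)

assert a == b and not (a != b)   # compares (stream_id, device_index, device_type)
assert hash(a) == hash(b)        # equal streams hash equally, so they can key dicts/sets
assert (a == None) is False      # comparing against None is defined to be False
print(a)                         # torch.Stream device_type=cpu, device_index=0, stream_id=0
```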
|
2.41.0
|
81653de63df4b1b31cc95531320caf83b1b60b3
|
Tue, 23 Apr 2024 13:17:38 -0700
|
[PATCH 0597/1000] torch.mtia module for MTIA device backend (#123612)
|
The MTIA device has its own module in PyTorch now. torch.mtia has the following APIs, similar to other backends. Lazy init is also supported. ``` __all__ = [ "init", "is_available", "synchronize", "device_count", "current_device", "current_stream", "default_stream", "set_stream", "stream", "device", ] ``` ------------ For device management, we expand AcceleratorHooksInterface to support generic device management, and it can be used in both C++ and Python. ``` def _accelerator_hooks_device_count() -> _int: ... def _accelerator_hooks_set_current_device(device_index: _int) -> None: ... def _accelerator_hooks_get_current_device() -> _int : ... def _accelerator_hooks_exchange_device(device_index: _int) -> _int : ... def _accelerator_hooks_maybe_exchange_device(device_index: _int) -> _int : ... ``` --------- A get_device_module API is added to retrieve device modules for different device types. ``` def get_device_module(device: Optional[Union[torch.device, str]] = None) ``` --------- Differential Revision: [D56443356](https://our.internmc.facebook.com/intern/diff/D56443356) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123612 Approved by: https://github.com/albanD ghstack dependencies: #123611
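A hedged sketch of how the new module is meant to be used; it assumes an out-of-tree MTIA runtime is actually loaded (otherwise is_available() simply returns False) and that the call signatures mirror torch.cuda.

```
import torch

if torch.mtia.is_available():
    torch.mtia.init()                 # explicit init; lazy init also works
    print(torch.mtia.device_count(), torch.mtia.current_device())

    with torch.mtia.device(0):        # device context manager
        s = torch.mtia.current_stream()
        with torch.mtia.stream(s):    # StreamContext: make `s` the current stream
            pass                      # enqueue work here
        torch.mtia.synchronize()      # wait for outstanding work on the device
```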
|
diff --git a/aten/src/ATen/Context.h b/aten/src/ATen/Context.h
index 32b22855f9..b50f0479e2 100644
--- a/aten/src/ATen/Context.h
+++ b/aten/src/ATen/Context.h
@@ -69,6 +69,8 @@ class TORCH_API Context {
return at::detail::getMPSHooks();
} else if (device_type == at::kPrivateUse1) {
return at::detail::getPrivateUse1Hooks();
+ } else if (device_type == at::kMTIA) {
+ return at::detail::getMTIAHooks();
} else {
AT_ERROR(
c10::DeviceTypeName(device_type), " device type not an accelerator.");
@@ -156,6 +158,9 @@ class TORCH_API Context {
void lazyInitXPU() {
c10::call_once(thx_init, [&] { detail::getXPUHooks().initXPU(); });
}
+ void lazyInitMTIA() {
+ c10::call_once(th_mtia_init, [&] { detail::getMTIAHooks().initMTIA(); });
+ }
void lazyInitPrivateUse1() {
c10::call_once(thp_init, [&] {
if (isPrivateUse1HooksRegistered()) {
@@ -349,6 +354,7 @@ class TORCH_API Context {
c10::once_flag thc_init;
c10::once_flag thh_init;
c10::once_flag thx_init;
+ c10::once_flag th_mtia_init;
c10::once_flag thp_init;
bool enabled_cudnn = true;
bool deterministic_cudnn = false;
diff --git a/aten/src/ATen/DeviceAccelerator.cpp b/aten/src/ATen/DeviceAccelerator.cpp
index 05327cc219..ec3cd2a2f5 100644
--- a/aten/src/ATen/DeviceAccelerator.cpp
+++ b/aten/src/ATen/DeviceAccelerator.cpp
@@ -10,6 +10,9 @@ C10_API std::optional<DeviceType> getAccelerator(bool checked) {
#define CHECK_NO_PU1 \
TORCH_CHECK(!is_privateuse1_backend_registered(), "Cannot have both CUDA and PrivateUse1");
+#define CHECK_NO_MTIA \
+ TORCH_CHECK(!at::hasMTIA(), "Cannot have MTIA with other devices");
+
if (is_privateuse1_backend_registered()) {
// We explicitly allow PrivateUse1 and another device at the same time
// as we use this for testing.
@@ -17,7 +20,12 @@ C10_API std::optional<DeviceType> getAccelerator(bool checked) {
return kPrivateUse1;
} else if (at::hasCUDA()) {
CHECK_NO_PU1
+ CHECK_NO_MTIA
return kCUDA;
+ } else if (at::hasMTIA()) {
+ CHECK_NO_CUDA
+ CHECK_NO_PU1
+ return kMTIA;
} else {
TORCH_CHECK(!checked, "Cannot access accelerator device when none is available.")
return std::nullopt;
diff --git a/aten/src/ATen/detail/AcceleratorHooksInterface.h b/aten/src/ATen/detail/AcceleratorHooksInterface.h
index c099c9f59a..96e15e1f69 100644
--- a/aten/src/ATen/detail/AcceleratorHooksInterface.h
+++ b/aten/src/ATen/detail/AcceleratorHooksInterface.h
@@ -1,7 +1,7 @@
#pragma once
#include <c10/core/Device.h>
-
+#include <c10/core/Stream.h>
namespace at {
// AcceleratorHooksInterface is a shared interface provided by all
@@ -16,6 +16,29 @@ struct TORCH_API AcceleratorHooksInterface {
// Whether the device at device_index is fully initialized or not.
virtual bool hasPrimaryContext(DeviceIndex device_index) const = 0;
+
+ virtual DeviceIndex deviceCount() const {
+ return 0;
+ }
+
+ virtual void setCurrentDevice(DeviceIndex device) const {
+ TORCH_CHECK(false, "Backend doesn't support setCurrentDevice()");
+ }
+
+ virtual DeviceIndex getCurrentDevice() const {
+ TORCH_CHECK(false, "Backend doesn't support getCurrentDevice()");
+ return -1;
+ }
+
+ virtual DeviceIndex exchangeDevice(DeviceIndex device) const {
+ TORCH_CHECK(false, "Backend doesn't support exchangeDevice()");
+ return -1;
+ }
+
+ virtual DeviceIndex maybeExchangeDevice(DeviceIndex device) const {
+ TORCH_CHECK(false, "Backend doesn't support maybeExchangeDevice()");
+ return -1;
+ }
};
} // namespace at
diff --git a/aten/src/ATen/detail/MTIAHooksInterface.cpp b/aten/src/ATen/detail/MTIAHooksInterface.cpp
index 6b69fdb03f..0963881713 100644
--- a/aten/src/ATen/detail/MTIAHooksInterface.cpp
+++ b/aten/src/ATen/detail/MTIAHooksInterface.cpp
@@ -8,19 +8,22 @@
namespace at {
namespace detail {
-
-const MTIAHooksInterface &getMTIAHooks() {
- static MTIAHooksInterface* MTIA_hooks = nullptr;
+const MTIAHooksInterface& getMTIAHooks() {
+ static std::unique_ptr<MTIAHooksInterface> mtia_hooks = nullptr;
static c10::once_flag once;
c10::call_once(once, [] {
- MTIA_hooks =
- MTIAHooksRegistry()->Create("MTIAHooks", MTIAHooksArgs{}).release();
- if (!MTIA_hooks) {
- MTIA_hooks = new MTIAHooksInterface();
+ mtia_hooks = MTIAHooksRegistry()->Create("MTIAHooks", MTIAHooksArgs{});
+ if (!mtia_hooks) {
+ mtia_hooks = std::make_unique<MTIAHooksInterface>();
}
});
- return *MTIA_hooks;
+ return *mtia_hooks;
+}
+
+bool isMTIAHooksBuilt() {
+ return MTIAHooksRegistry()->Has("MTIAHooks");
}
+
} // namespace detail
C10_DEFINE_REGISTRY(MTIAHooksRegistry, MTIAHooksInterface, MTIAHooksArgs)
diff --git a/aten/src/ATen/detail/MTIAHooksInterface.h b/aten/src/ATen/detail/MTIAHooksInterface.h
index c843ca52c2..1da1bda4e6 100644
--- a/aten/src/ATen/detail/MTIAHooksInterface.h
+++ b/aten/src/ATen/detail/MTIAHooksInterface.h
@@ -1,7 +1,9 @@
#pragma once
+#include <c10/core/Device.h>
#include <c10/util/Exception.h>
+#include <c10/core/Stream.h>
#include <c10/util/Registry.h>
#include <ATen/detail/AcceleratorHooksInterface.h>
@@ -20,33 +22,72 @@ constexpr const char* MTIA_HELP =
"to use some MTIA's functionality without MTIA extension included.";
struct TORCH_API MTIAHooksInterface : AcceleratorHooksInterface {
+// This fails loudly if MTIAHooks functions are called but the MTIA backend
+// is not present.
+#define FAIL_MTIAHOOKS_FUNC(func) \
+ TORCH_CHECK(false, "Cannot execute ", func, "() without MTIA backend.");
+
virtual ~MTIAHooksInterface() override = default;
virtual void initMTIA() const {
- TORCH_CHECK(
- false,
- "Cannot initialize MTIA without MTIA Extension for PyTorch.",
- MTIA_HELP);
+    // Avoid logging here, since MTIA needs to initialize devices before it knows
+    // how many are available. Make this a no-op if the mtia extension is not
+    // dynamically loaded.
+ return;
}
virtual bool hasMTIA() const {
return false;
}
+ virtual DeviceIndex deviceCount() const override {
+ return 0;
+ }
+
+ virtual void deviceSynchronize(c10::DeviceIndex device_index) const {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ }
+
virtual std::string showConfig() const {
- TORCH_CHECK(
- false,
- "Cannot query detailed MTIA version without MTIA Extension for PyTorch.",
- MTIA_HELP);
+ FAIL_MTIAHOOKS_FUNC(__func__);
}
virtual bool hasPrimaryContext(DeviceIndex device_index) const override {
- TORCH_CHECK(
- false,
- "Cannot check MTIA primary context without MTIA Extension for PyTorch.",
- MTIA_HELP);
+ return false;
+ }
+
+ virtual void setCurrentDevice(DeviceIndex device) const override {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ }
+
+ virtual DeviceIndex getCurrentDevice() const override {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ return -1;
}
+ virtual DeviceIndex exchangeDevice(DeviceIndex device) const override {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ return -1;
+ }
+
+ virtual DeviceIndex maybeExchangeDevice(DeviceIndex device) const override {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ return -1;
+ }
+
+ virtual c10::Stream getCurrentStream(DeviceIndex device) const {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA);
+ }
+
+ virtual c10::Stream getDefaultStream(DeviceIndex device) const {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ return c10::Stream::unpack3(-1, 0, c10::DeviceType::MTIA);
+ }
+
+ virtual void setCurrentStream(const c10::Stream& stream) const {
+ FAIL_MTIAHOOKS_FUNC(__func__);
+ }
};
struct TORCH_API MTIAHooksArgs {};
@@ -57,5 +98,6 @@ C10_DECLARE_REGISTRY(MTIAHooksRegistry, MTIAHooksInterface, MTIAHooksArgs);
namespace detail {
TORCH_API const MTIAHooksInterface& getMTIAHooks();
+TORCH_API bool isMTIAHooksBuilt();
} // namespace detail
} // namespace at
diff --git a/build_variables.bzl b/build_variables.bzl
index cebda39f4b..5939da825c 100644
--- a/build_variables.bzl
+++ b/build_variables.bzl
@@ -822,6 +822,7 @@ libtorch_python_core_sources = [
"torch/csrc/dynamo/init.cpp",
"torch/csrc/functorch/init.cpp",
"torch/csrc/mps/Module.cpp",
+ "torch/csrc/mtia/Module.cpp",
"torch/csrc/inductor/aoti_runner/pybind.cpp",
"torch/csrc/jit/backends/backend_init.cpp",
"torch/csrc/jit/python/init.cpp",
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 9e7cc6a9a6..a7afe60bc2 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -69,6 +69,7 @@ Features described in this documentation are classified by release status:
torch.cuda.memory <torch_cuda_memory>
mps
xpu
+ mtia
meta
torch.backends <backends>
torch.export <export>
diff --git a/docs/source/mtia.rst b/docs/source/mtia.rst
new file mode 100644
index 0000000000..f2f5b5195d
--- /dev/null
+++ b/docs/source/mtia.rst
@@ -0,0 +1,34 @@
+torch.mtia
+===================================
+
+The MTIA backend is implemented out of the tree; only the interfaces are defined here.
+
+.. automodule:: torch.mtia
+.. currentmodule:: torch.mtia
+
+.. autosummary::
+ :toctree: generated
+ :nosignatures:
+
+ StreamContext
+ current_device
+ current_stream
+ default_stream
+ device_count
+ init
+ is_available
+ is_initialized
+ set_stream
+ stream
+ synchronize
+ device
+ DeferredMtiaCallError
+
+Streams and events
+------------------
+.. autosummary::
+ :toctree: generated
+ :nosignatures:
+
+ Event
+ Stream
diff --git a/docs/source/torch.rst b/docs/source/torch.rst
index b65a7a5239..32bcadc154 100644
--- a/docs/source/torch.rst
+++ b/docs/source/torch.rst
@@ -684,6 +684,7 @@ Utilities
set_float32_matmul_precision
get_float32_matmul_precision
set_warn_always
+ get_device_module
is_warn_always_enabled
vmap
_assert
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index ef6cbc5835..9bbc721dad 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -1715,6 +1715,24 @@ _TensorBase = TensorBase
# Defined in torch/csrc/multiprocessing/init.cpp
def _multiprocessing_init() -> None: ...
+# Defined in torch/csrc/Module.cpp
+def _accelerator_hooks_device_count() -> _int: ...
+def _accelerator_hooks_set_current_device(device_index: _int) -> None: ...
+def _accelerator_hooks_get_current_device() -> _int: ...
+def _accelerator_hooks_exchange_device(device_index: _int) -> _int: ...
+def _accelerator_hooks_maybe_exchange_device(device_index: _int) -> _int: ...
+def _get_accelerator(check: _bool = False) -> _device: ...
+
+# Defined in torch/csrc/mtia/Module.cpp
+def _mtia_init() -> None: ...
+def _mtia_isBuilt() -> _bool: ...
+def _mtia_isInBadFork() -> _bool: ...
+def _mtia_deviceSynchronize() -> None: ...
+def _mtia_getCurrentStream(device: _int) -> Stream: ...
+def _mtia_setCurrentStream(stream: Stream) -> None: ...
+def _mtia_getDefaultStream(device: _int) -> Stream: ...
+
+
# Defined in torch/csrc/mps/Module.cpp
def _mps_deviceSynchronize() -> None: ...
def _mps_get_default_generator() -> Generator: ...
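For illustration, a short sketch of the private torch._C bindings declared above; these are internal helpers (the public entry points are torch.mtia and torch.get_device_module), and the return values noted in the comments are inferred from the C++ implementation later in this diff.

```
import torch

dev = torch._C._get_accelerator()                # torch.device of the current accelerator, CPU if none
n = torch._C._accelerator_hooks_device_count()   # -1 when no accelerator is registered
if n > 0:
    cur = torch._C._accelerator_hooks_get_current_device()
    prev = torch._C._accelerator_hooks_exchange_device(cur)  # assumed to return the previous index
    torch._C._accelerator_hooks_set_current_device(prev)
```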
diff --git a/torch/_C/_autograd.pyi b/torch/_C/_autograd.pyi
index 34eb451be0..118d913f68 100644
--- a/torch/_C/_autograd.pyi
+++ b/torch/_C/_autograd.pyi
@@ -24,6 +24,7 @@ class DeviceType(Enum):
FPGA = ...
MAIA = ...
XLA = ...
+ MTIA = ...
MPS = ...
HPU = ...
Meta = ...
diff --git a/torch/__init__.py b/torch/__init__.py
index 9a7249f220..846038e351 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -58,6 +58,7 @@ __all__ = [
'SymBool', 'sym_not', 'unravel_index',
'sym_int', 'sym_float', 'sym_max', 'sym_min', 'sym_ite', 'compile', 'vmap',
'export', 'autocast', 'cond', 'GradScaler',
+ 'get_device_module',
]
################################################################################
@@ -1579,6 +1580,7 @@ from torch import cuda as cuda
from torch import cpu as cpu
from torch import mps as mps
from torch import xpu as xpu
+from torch import mtia as mtia
from torch import autograd as autograd
from torch.autograd import (
no_grad as no_grad,
@@ -2016,6 +2018,27 @@ else:
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
+def get_device_module(device: Optional[Union[torch.device, str]] = None):
+ """
+    Returns the module associated with a given device (e.g., torch.device('cuda'), "mtia:0", "xpu", ...).
+ If no device is given, return the module for the current accelerator or CPU if none is present.
+ """
+ if isinstance(device, torch.device):
+ device_module_name = device.type
+ elif isinstance(device, str):
+ device_module_name = torch.device(device).type
+ elif device is None:
+ # Using default accelerator type. If no accelerator is available, it automatically returns CPU device.
+ device_module_name = torch._C._get_accelerator().type
+ else:
+ raise RuntimeError(f"Invalid value of device '{device}', expect torch.device, str, or None")
+ device_module = getattr(torch, device_module_name, None)
+ if device_module is None:
+ raise RuntimeError(
+ f"Device '{device_module_name}' does not have a corresponding module registered as 'torch.{device_module_name}'."
+ )
+ return device_module
+
def _constrain_as_value(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None):
"""
diff --git a/torch/_utils.py b/torch/_utils.py
index 7f9a1af43f..43c6284d24 100644
--- a/torch/_utils.py
+++ b/torch/_utils.py
@@ -713,6 +713,8 @@ def _get_available_device_type():
return "cuda"
if hasattr(torch, "xpu") and torch.xpu.is_available(): # type: ignore[attr-defined]
return "xpu"
+ if hasattr(torch, "mtia") and torch.mtia.is_available():
+ return "mtia"
custom_backend_name = torch._C._get_privateuse1_backend_name()
custom_device_mod = getattr(torch, custom_backend_name, None)
if custom_device_mod and custom_device_mod.is_available():
@@ -727,6 +729,8 @@ def _get_device_attr(get_member):
return get_member(torch.cuda)
if device_type and device_type.lower() == "xpu":
return get_member(torch.xpu) # type: ignore[attr-defined]
+ if device_type and device_type.lower() == "mtia":
+ return get_member(torch.mtia)
if device_type == torch._C._get_privateuse1_backend_name():
return get_member(getattr(torch, device_type))
# add more available device types here
diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp
index a254c5bb3c..1949f278eb 100644
--- a/torch/csrc/Module.cpp
+++ b/torch/csrc/Module.cpp
@@ -1,3 +1,4 @@
+#include <ATen/DeviceAccelerator.h>
#include <c10/util/Optional.h>
#include <fmt/core.h>
#include <sys/types.h>
@@ -16,10 +17,12 @@
#include <ATen/Parallel.h>
#include <ATen/Utils.h>
#include <ATen/core/Vitals.h>
+#include <ATen/detail/AcceleratorHooksInterface.h>
#include <ATen/dlpack.h>
#include <ATen/native/ConvUtils.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/Normalization.h>
+#include <c10/core/Device.h>
#include <c10/core/DispatchKeySet.h>
#include <c10/util/AbortHandler.h>
#include <c10/util/Backtrace.h>
@@ -72,6 +75,7 @@
#include <torch/csrc/lazy/python/init.h>
#include <torch/csrc/monitor/python_init.h>
#include <torch/csrc/mps/Module.h>
+#include <torch/csrc/mtia/Module.h>
#include <torch/csrc/multiprocessing/init.h>
#include <torch/csrc/onnx/init.h>
#include <torch/csrc/profiler/python/init.h>
@@ -1643,6 +1647,7 @@ PyObject* initModule() {
#ifdef USE_XPU
torch::xpu::initModule(module);
#endif
+ torch::mtia::initModule(module);
torch::cpu::initModule(module);
torch::initVerboseBindings(module);
ASSERT_TRUE(THPStorage_init(module));
@@ -1977,6 +1982,70 @@ Call this whenever a new thread is created in order to propagate values from
return at::impl::ThreadLocalPythonObjects::get_state().contains(key);
});
+ py_module.def("_accelerator_hooks_device_count", []() {
+ auto device_type = at::getAccelerator();
+ if (device_type.has_value()) {
+ return at::globalContext()
+ .getAcceleratorHooksInterface(device_type.value())
+ .deviceCount();
+ }
+ return c10::DeviceIndex(-1);
+ });
+
+ py_module.def(
+ "_accelerator_hooks_set_current_device",
+ [](c10::DeviceIndex device_index) {
+ auto device_type = at::getAccelerator();
+ if (device_type.has_value()) {
+ at::globalContext()
+ .getAcceleratorHooksInterface(device_type.value())
+ .setCurrentDevice(device_index);
+ }
+ });
+
+ py_module.def("_accelerator_hooks_get_current_device", []() {
+ auto device_type = at::getAccelerator();
+ if (device_type.has_value()) {
+ return at::globalContext()
+ .getAcceleratorHooksInterface(device_type.value())
+ .getCurrentDevice();
+ }
+ return c10::DeviceIndex(-1);
+ });
+
+ py_module.def(
+ "_accelerator_hooks_exchange_device", [](c10::DeviceIndex device_index) {
+ auto device_type = at::getAccelerator();
+ if (device_type.has_value()) {
+ return at::globalContext()
+ .getAcceleratorHooksInterface(device_type.value())
+ .exchangeDevice(device_index);
+ }
+ return c10::DeviceIndex(-1);
+ });
+
+ py_module.def(
+ "_accelerator_hooks_maybe_exchange_device",
+ [](c10::DeviceIndex device_index) {
+ auto device_type = at::getAccelerator();
+ if (device_type.has_value()) {
+ return at::globalContext()
+ .getAcceleratorHooksInterface(device_type.value())
+ .maybeExchangeDevice(device_index);
+ }
+ return c10::DeviceIndex(-1);
+ });
+
+ py_module.def(
+ "_get_accelerator",
+ [](c10::optional<bool> check = c10::nullopt) {
+ return c10::Device(
+ at::getAccelerator(check.value_or(false))
+ .value_or(c10::DeviceType::CPU),
+ -1);
+ },
+ py::arg("check") = nullptr);
+
#ifdef USE_CUDA
PyObject* has_cuda = Py_True;
#else
diff --git a/torch/csrc/mtia/Module.cpp b/torch/csrc/mtia/Module.cpp
new file mode 100644
index 0000000000..84cc11f718
--- /dev/null
+++ b/torch/csrc/mtia/Module.cpp
@@ -0,0 +1,81 @@
+#include <ATen/ATen.h>
+#include <c10/util/CallOnce.h>
+#include <torch/csrc/Generator.h>
+#include <torch/csrc/Stream.h>
+#include <torch/csrc/python_headers.h>
+#include <torch/csrc/utils/device_lazy_init.h>
+#include <torch/csrc/utils/pybind.h>
+
+#include <c10/core/DeviceType.h>
+#include <c10/core/Stream.h>
+#ifndef WIN32
+#include <pthread.h>
+#endif
+
+namespace torch {
+namespace mtia {
+
+static bool in_bad_fork = false; // True for children forked after mtia init
+
+#ifndef WIN32
+// Called in the forked child if mtia has already been initialized
+static void forked_child() {
+ in_bad_fork = true;
+ torch::utils::set_requires_device_init(at::kMTIA, true);
+}
+#endif
+
+// Should be called before the first mtia call.
+// Note: This is distinct from initExtension because a stub mtia implementation
+// has some working functions (e.g. device_count) but cannot fully initialize.
+static void poison_fork() {
+#ifndef WIN32
+ static c10::once_flag flag;
+ c10::call_once(flag, [] { pthread_atfork(nullptr, nullptr, forked_child); });
+#endif
+}
+
+void initModule(PyObject* module) {
+ auto m = py::handle(module).cast<py::module>();
+
+ m.def("_mtia_init", []() {
+ TORCH_INTERNAL_ASSERT(!in_bad_fork); // Handled at python level
+ poison_fork();
+ at::globalContext().lazyInitMTIA();
+ });
+
+ m.def("_mtia_isBuilt", []() {
+ // Check if the MTIAHooks class has been registered with the registry.
+ return at::detail::isMTIAHooksBuilt();
+ });
+
+ m.def("_mtia_isInBadFork", []() { return in_bad_fork; });
+
+ m.def("_mtia_getCurrentStream", [](c10::DeviceIndex device_index) {
+ torch::utils::device_lazy_init(at::kMTIA);
+ return at::detail::getMTIAHooks().getCurrentStream(device_index);
+ });
+
+ m.def("_mtia_deviceSynchronize", [](c10::DeviceIndex device_index) {
+ torch::utils::device_lazy_init(at::kMTIA);
+ at::detail::getMTIAHooks().deviceSynchronize(
+ at::detail::getMTIAHooks().getCurrentDevice());
+ });
+
+ m.def("_mtia_getDefaultStream", [](c10::DeviceIndex device_index) {
+ torch::utils::device_lazy_init(at::kMTIA);
+ return at::detail::getMTIAHooks().getDefaultStream(device_index);
+ });
+
+ m.def("_mtia_setCurrentStream", [](const c10::Stream& stream) {
+ torch::utils::device_lazy_init(at::kMTIA);
+ auto device = at::detail::getMTIAHooks().getCurrentDevice();
+ if (device != stream.device_index()) {
+ at::detail::getMTIAHooks().setCurrentDevice(stream.device_index());
+ }
+ at::detail::getMTIAHooks().setCurrentStream(stream);
+ });
+}
+
+} // namespace mtia
+} // namespace torch
diff --git a/torch/csrc/mtia/Module.h b/torch/csrc/mtia/Module.h
new file mode 100644
index 0000000000..96a98ed448
--- /dev/null
+++ b/torch/csrc/mtia/Module.h
@@ -0,0 +1,12 @@
+#pragma once
+
+#include <torch/csrc/python_headers.h>
+
+namespace torch {
+namespace mtia {
+
+// PyMethodDef* python_functions();
+void initModule(PyObject* module);
+
+} // namespace mtia
+} // namespace torch
diff --git a/torch/csrc/utils/pybind.h b/torch/csrc/utils/pybind.h
index 36cb83659a..1a4e7bb26f 100644
--- a/torch/csrc/utils/pybind.h
+++ b/torch/csrc/utils/pybind.h
@@ -194,6 +194,12 @@ struct type_caster<c10::Stream> {
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
PYBIND11_TYPE_CASTER(c10::Stream, _("torch.Stream"));
+ // PYBIND11_TYPE_CASTER defines a member field called value. Since c10::Stream
+ // cannot be default-initialized, we provide this constructor to explicitly
+ // initialize that field. The value doesn't matter as it will be overwritten
+ // after a successful call to load.
+ type_caster() : value(c10::Stream::DEFAULT, c10::Device(c10::kCPU, 0)) {}
+
bool load(handle src, bool) {
PyObject* obj = src.ptr();
if (THPStream_Check(obj)) {
diff --git a/torch/mtia/__init__.py b/torch/mtia/__init__.py
new file mode 100644
index 0000000000..4007f0e584
--- /dev/null
+++ b/torch/mtia/__init__.py
@@ -0,0 +1,262 @@
+r"""
+This package enables an interface for accessing MTIA backend in python
+"""
+
+import threading
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import torch
+
+from torch.types import Device
+
+from .. import device as _device, Tensor
+from .._utils import _dummy_type, _LazySeedTracker, classproperty
+from ._utils import _get_device_index
+
+_device_t = Union[_device, str, int, None]
+
+# torch.mtia.Event/Stream is alias of torch.Event/Stream
+Event = torch.Event
+Stream = torch.Stream
+
+_initialized = False
+_queued_calls: List[
+ Tuple[Callable[[], None], List[str]]
+] = [] # don't invoke these until initialization occurs
+_tls = threading.local()
+_initialization_lock = threading.Lock()
+_lazy_seed_tracker = _LazySeedTracker()
+
+
+def init():
+ _lazy_init()
+
+
+def is_initialized():
+ r"""Return whether PyTorch's MTIA state has been initialized."""
+ return _initialized and not _is_in_bad_fork()
+
+
+def _is_in_bad_fork() -> bool:
+ return torch._C._mtia_isInBadFork()
+
+
+def _lazy_init() -> None:
+ global _initialized, _queued_calls
+ if is_initialized() or hasattr(_tls, "is_initializing"):
+ return
+ with _initialization_lock:
+ # We be double-checked locking, boys! This is OK because
+ # the above test was GIL protected anyway. The inner test
+ # is for when a thread blocked on some other thread which was
+ # doing the initialization; when they get the lock, they will
+ # find there is nothing left to do.
+ if is_initialized():
+ return
+ # It is important to prevent other threads from entering _lazy_init
+ # immediately, while we are still guaranteed to have the GIL, because some
+ # of the C calls we make below will release the GIL
+ if _is_in_bad_fork():
+ raise RuntimeError(
+ "Cannot re-initialize MTIA in forked subprocess. To use MTIA with "
+ "multiprocessing, you must use the 'spawn' start method"
+ )
+ if not _is_compiled():
+ raise AssertionError("Torch not compiled with MTIA enabled")
+
+ torch._C._mtia_init()
+ # Some of the queued calls may reentrantly call _lazy_init();
+ # we need to just return without initializing in that case.
+ # However, we must not let any *other* threads in!
+ _tls.is_initializing = True
+
+ for calls in _lazy_seed_tracker.get_calls():
+ if calls:
+ _queued_calls.append(calls)
+
+ try:
+ for queued_call, orig_traceback in _queued_calls:
+ try:
+ queued_call()
+ except Exception as e:
+ msg = (
+ f"MTIA call failed lazily at initialization with error: {str(e)}\n\n"
+ f"MTIA call was originally invoked at:\n\n{''.join(orig_traceback)}"
+ )
+ raise DeferredMtiaCallError(msg) from e
+ finally:
+ delattr(_tls, "is_initializing")
+ _initialized = True
+
+
+class DeferredMtiaCallError(Exception):
+ pass
+
+
+def _is_compiled() -> bool:
+ r"""Return true if compiled with MTIA support."""
+ return torch._C._mtia_isBuilt()
+
+
+def is_available() -> bool:
+ r"""Return true if MTIA device is available"""
+ if not _is_compiled():
+ return False
+    # MTIA has to init devices first to know if there are any devices available.
+ return device_count() > 0
+
+
+def synchronize() -> None:
+ r"""Waits for all jobs in all streams on a MTIA device to complete."""
+ return torch._C._mtia_deviceSynchronize()
+
+
+def device_count() -> int:
+ r"""Return the number of MTIA devices available."""
+ return torch._C._accelerator_hooks_device_count()
+
+
+def current_device() -> int:
+ r"""Return the index of a currently selected device."""
+ return torch._C._accelerator_hooks_get_current_device()
+
+
+def current_stream(device: Optional[_device_t] = None) -> Stream:
+ r"""Return the currently selected :class:`Stream` for a given device.
+
+ Args:
+ device (torch.device or int, optional): selected device. Returns
+ the currently selected :class:`Stream` for the current device, given
+ by :func:`~torch.mtia.current_device`, if :attr:`device` is ``None``
+ (default).
+ """
+ return torch._C._mtia_getCurrentStream(_get_device_index(device, optional=True))
+
+
+def default_stream(device: Optional[_device_t] = None) -> Stream:
+ r"""Return the default :class:`Stream` for a given device.
+
+ Args:
+ device (torch.device or int, optional): selected device. Returns
+ the default :class:`Stream` for the current device, given by
+ :func:`~torch.mtia.current_device`, if :attr:`device` is ``None``
+ (default).
+ """
+ return torch._C._mtia_getDefaultStream(_get_device_index(device, optional=True))
+
+
+def set_stream(stream: Stream):
+    r"""Set the current stream. This is a wrapper API to set the stream.
+ Usage of this function is discouraged in favor of the ``stream``
+ context manager.
+
+ Args:
+ stream (Stream): selected stream. This function is a no-op
+ if this argument is ``None``.
+ """
+ if stream is None:
+ return
+ torch._C._mtia_setCurrentStream(stream)
+
+
+class device:
+ r"""Context-manager that changes the selected device.
+
+ Args:
+ device (torch.device or int): device index to select. It's a no-op if
+ this argument is a negative integer or ``None``.
+ """
+
+ def __init__(self, device: Any):
+ self.idx = _get_device_index(device, optional=True)
+ self.prev_idx = -1
+
+ def __enter__(self):
+ self.prev_idx = torch._C._accelerator_hooks_maybe_exchange_device(self.idx)
+
+ def __exit__(self, type: Any, value: Any, traceback: Any):
+ self.idx = torch._C._accelerator_hooks_maybe_exchange_device(self.prev_idx)
+ return False
+
+
+class StreamContext:
+ r"""Context-manager that selects a given stream.
+
+ All MTIA kernels queued within its context will be enqueued on a selected
+ stream.
+
+ Args:
+ Stream (Stream): selected stream. This manager is a no-op if it's
+ ``None``.
+ .. note:: Streams are per-device.
+ """
+
+ cur_stream: Optional["torch.mtia.Stream"]
+
+ def __init__(self, stream: Optional["torch.mtia.Stream"]):
+ self.stream = stream
+ self.idx = _get_device_index(None, True)
+ if not torch.jit.is_scripting():
+ if self.idx is None:
+ self.idx = -1
+
+ self.src_prev_stream = (
+ None if not torch.jit.is_scripting() else torch.mtia.default_stream(None)
+ )
+ self.dst_prev_stream = (
+ None if not torch.jit.is_scripting() else torch.mtia.default_stream(None)
+ )
+
+ def __enter__(self):
+ # Local cur_stream variable for type refinement
+ cur_stream = self.stream
+ # Return if stream is None or MTIA device not available
+ if cur_stream is None or self.idx == -1:
+ return
+ self.src_prev_stream = torch.mtia.current_stream(None)
+
+ # If the stream is not on the current device, then
+ # set the current stream on the device
+ if self.src_prev_stream.device != cur_stream.device:
+ with device(cur_stream.device):
+ self.dst_prev_stream = torch.mtia.current_stream(cur_stream.device)
+ torch.mtia.set_stream(cur_stream)
+
+ def __exit__(self, type: Any, value: Any, traceback: Any):
+ # Local cur_stream variable for type refinement
+ cur_stream = self.stream
+ # If stream is None or no MTIA device available, return
+ if cur_stream is None or self.idx == -1:
+ return
+
+ # Reset the stream on the original device
+ # and destination device
+ if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr]
+ torch.mtia.set_stream(self.dst_prev_stream) # type: ignore[arg-type]
+ torch.mtia.set_stream(self.src_prev_stream) # type: ignore[arg-type]
+
+
+def stream(stream: Optional["torch.mtia.Stream"]) -> StreamContext:
+ r"""Wrap around the Context-manager StreamContext that selects a given stream.
+
+ Arguments:
+ stream (Stream): selected stream. This manager is a no-op if it's
+ ``None``.
+ ..Note:: In eager mode stream is of type Stream class while in JIT it doesn't support torch.mtia.stream
+ """
+ return StreamContext(stream)
+
+
+__all__ = [
+ "init",
+ "is_available",
+ "is_initialized",
+ "synchronize",
+ "device_count",
+ "current_device",
+ "current_stream",
+ "default_stream",
+ "set_stream",
+ "stream",
+ "device",
+]
diff --git a/torch/mtia/_utils.py b/torch/mtia/_utils.py
new file mode 100644
index 0000000000..090e26f321
--- /dev/null
+++ b/torch/mtia/_utils.py
@@ -0,0 +1,38 @@
+from typing import Any
+
+import torch
+
+# The _get_device_index has been moved to torch.utils._get_device_index
+from torch._utils import _get_device_index as _torch_get_device_index
+
+
+def _get_device_index(
+ device: Any, optional: bool = False, allow_cpu: bool = False
+) -> int:
+ r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``.
+
+ If :attr:`device` is a torch.device object, returns the device index if it
+ is a MTIA device. Note that for a MTIA device without a specified index,
+ i.e., ``torch.device('mtia')``, this will return the current default MTIA
+ device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
+ CPU devices will be accepted and ``-1`` will be returned in this case.
+
+ If :attr:`device` is a Python integer, it is returned as is.
+
+ If :attr:`device` is ``None``, this will return the current default MTIA
+ device if :attr:`optional` is ``True``.
+ """
+ if isinstance(device, int):
+ return device
+ if isinstance(device, str):
+ device = torch.device(device)
+ if isinstance(device, torch.device):
+ if allow_cpu:
+ if device.type not in ["mtia", "cpu"]:
+ raise ValueError(f"Expected a mtia or cpu device, but got: {device}")
+ elif device.type != "mtia":
+ raise ValueError(f"Expected a mtia device, but got: {device}")
+ if not torch.jit.is_scripting():
+ if isinstance(device, torch.mtia.device):
+ return device.idx
+ return _torch_get_device_index(device, optional, allow_cpu)
diff --git a/torch/overrides.py b/torch/overrides.py
index 728c75c090..6c521bc700 100644
--- a/torch/overrides.py
+++ b/torch/overrides.py
@@ -283,6 +283,7 @@ def get_ignored_functions() -> Set[Callable]:
torch.use_deterministic_algorithms,
torch.is_deterministic_algorithms_warn_only_enabled,
torch.set_deterministic_debug_mode,
+ torch.get_device_module,
torch.get_deterministic_debug_mode,
torch.set_float32_matmul_precision,
torch.get_float32_matmul_precision,
|
2.41.0
|
55dc34f865036c4c625fcdafe54db846b2be2c2
|
Tue, 23 Apr 2024 13:17:40 -0700
|
[PATCH 0598/1000] Add test_cpp_extensions tests for stream_and_event and mtia_backend (#123614)
|
Test the generic torch.Stream/Event with a fake device guard and hooks. Differential Revision: [D56443358](https://our.internmc.facebook.com/intern/diff/D56443358) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123614 Approved by: https://github.com/albanD ghstack dependencies: #123611, #123612
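For illustration only (not part of the original patch), a minimal sketch of how the generic Stream/Event API is exercised once the fake `mtia_extension` backend from this change has been built and loaded; the source path is assumed to be relative to the repo root:
```
import torch
import torch.utils.cpp_extension

# Assumption: the fake MTIA backend from test/cpp_extensions/mtia_extension.cpp
# is built and registered first (see setUpClass in the new test).
torch.utils.cpp_extension.load(
    name="mtia_extension",
    sources=["test/cpp_extensions/mtia_extension.cpp"],
    is_python_module=False,
)

s = torch.Stream()        # generic stream, dispatched to the fake MTIA guard impl
e = s.record_event()      # generic event recorded on that stream
e2 = s.record_event()
e.synchronize()
e2.synchronize()
print(e.elapsed_time(e2))
```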
|
diff --git a/test/cpp_extensions/mtia_extension.cpp b/test/cpp_extensions/mtia_extension.cpp
new file mode 100644
index 0000000000..3b02d3968e
--- /dev/null
+++ b/test/cpp_extensions/mtia_extension.cpp
@@ -0,0 +1,219 @@
+#include <ATen/detail/MTIAHooksInterface.h>
+#include <c10/core/Device.h>
+#include <c10/core/Stream.h>
+#include <c10/core/impl/DeviceGuardImplInterface.h>
+#include <c10/util/Logging.h>
+#include <torch/csrc/utils/device_lazy_init.h>
+#include <thread>
+namespace torch::mtia {
+
+constexpr c10::DeviceType kMTIADeviceType = c10::DeviceType::MTIA;
+constexpr c10::DeviceIndex kMTIADeviceCount = 2;
+static thread_local c10::DeviceIndex current_device = 0;
+static thread_local std::array<c10::Stream, kMTIADeviceCount> current_streams =
+ {c10::Stream::unpack3(0, 0, c10::DeviceType::MTIA),
+ c10::Stream::unpack3(0, 1, c10::DeviceType::MTIA)};
+static int64_t stream_id_gen = 1;
+static int64_t event_id_gen = 1;
+static std::array<c10::Stream, kMTIADeviceCount> default_streams = {
+ c10::Stream::unpack3(0, 0, c10::DeviceType::MTIA),
+ c10::Stream::unpack3(0, 1, c10::DeviceType::MTIA)};
+struct MTIAGuardImpl final : public c10::impl::DeviceGuardImplInterface {
+ MTIAGuardImpl() = default;
+ explicit MTIAGuardImpl(c10::DeviceType t) {
+ TORCH_INTERNAL_ASSERT(t == kMTIADeviceType);
+ }
+ c10::DeviceType type() const override {
+ return kMTIADeviceType;
+ }
+ c10::Device exchangeDevice(c10::Device d) const override {
+ c10::Device old_device = getDevice();
+ if (old_device.index() != d.index()) {
+ setDevice(d);
+ }
+ return old_device;
+ }
+ c10::Device getDevice() const override {
+ return c10::Device(kMTIADeviceType, current_device);
+ }
+
+ void setDevice(c10::Device d) const override {
+ c10::Device current_device = getDevice();
+ if (current_device.index() != d.index()) {
+ current_device = d;
+ }
+ }
+ void uncheckedSetDevice(c10::Device d) const noexcept override {
+ (void)d;
+ }
+ c10::Stream getStream(c10::Device d) const noexcept override {
+ return current_streams[d.index()];
+ }
+ c10::Stream getNewStream(c10::Device d, int priority = 0) const override {
+ (void)priority;
+ return c10::Stream::unpack3(stream_id_gen++, d.index(), d.type());
+ }
+ c10::Stream getDefaultStream(c10::Device d) const override {
+ return default_streams[d.index()];
+ }
+ c10::Stream getStreamFromGlobalPool(
+ c10::Device d,
+ bool isHighPriority = false) const override {
+ return c10::Stream::unpack3(stream_id_gen++, d.index(), d.type());
+ }
+ // NB: These do NOT set the current device
+ c10::Stream exchangeStream(c10::Stream s) const noexcept override {
+ c10::Stream old_stream = getStream(s.device());
+ return old_stream;
+ }
+ c10::DeviceIndex deviceCount() const noexcept override {
+ return kMTIADeviceCount;
+ }
+
+ void destroyEvent(void* event, const c10::DeviceIndex device_index)
+ const noexcept override {
+ (void)device_index;
+ }
+
+ void record(
+ void** event,
+ const c10::Stream& stream,
+ const c10::DeviceIndex device_index,
+ const c10::EventFlag flag) const override {
+ TORCH_CHECK(
+ device_index == -1 || device_index == stream.device_index(),
+ "Event device index ",
+ device_index,
+ " does not match recording stream's device index ",
+ stream.device_index(),
+ ".");
+
+ const auto orig_device = getDevice();
+
+ setDevice(stream.device());
+
+ if (*event == nullptr) {
+ *event = reinterpret_cast<void*>(event_id_gen++);
+ }
+ setDevice(orig_device);
+ }
+
+ void block(void* event, const c10::Stream& stream) const override {
+ (void)event;
+ (void)stream;
+ }
+
+ // May be called from any device
+ bool queryEvent(void* event) const override {
+ (void)event;
+ return true;
+ }
+
+ // Stream-related functions
+ bool queryStream(const c10::Stream& stream) const override {
+ (void)stream;
+ return true;
+ }
+
+ void synchronizeStream(const c10::Stream& stream) const override {
+ (void)stream;
+ }
+
+ void recordDataPtrOnStream(
+ const c10::DataPtr& data_ptr,
+ const c10::Stream& stream) const override {
+ (void)data_ptr;
+ (void)stream;
+ }
+
+ double elapsedTime(void* event1, void* event2) const override {
+ uint64_t elapsed_time = 1e6;
+ return (double)(elapsed_time / 1e6);
+ }
+
+ void synchronizeEvent(void* event) const override {
+ (void)event;
+ }
+};
+
+struct MTIAHooks : public at::MTIAHooksInterface {
+ explicit MTIAHooks(at::MTIAHooksArgs) {}
+ void initMTIA() const override {}
+
+ bool hasMTIA() const override {
+ return true;
+ }
+
+ c10::DeviceIndex deviceCount() const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+ return c10::DeviceIndex(2);
+ }
+
+ void deviceSynchronize(c10::DeviceIndex device_index) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+ (void)device_index;
+ }
+
+ std::string showConfig() const override {
+ return "None config";
+ }
+
+ c10::DeviceIndex exchangeDevice(c10::DeviceIndex device) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+ auto orig_device = current_device;
+ if (current_device != device) {
+ current_device = device;
+ }
+ return orig_device;
+ }
+
+ c10::DeviceIndex maybeExchangeDevice(c10::DeviceIndex device) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ auto orig_device = current_device;
+ if (current_device != device) {
+ current_device = device;
+ }
+ return orig_device;
+ }
+
+ c10::Stream getDefaultStream(c10::DeviceIndex device) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ return default_streams[device];
+ }
+
+ c10::Stream getCurrentStream(c10::DeviceIndex device) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ return current_streams[device];
+ }
+
+ void setCurrentStream(const c10::Stream& stream) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ current_streams[stream.device_index()] = stream;
+ }
+
+ c10::DeviceIndex getCurrentDevice() const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ return current_device;
+ }
+
+ void setCurrentDevice(c10::DeviceIndex device) const override {
+ torch::utils::device_lazy_init(at::kMTIA);
+
+ if (current_device != device) {
+ current_device = device;
+ }
+ }
+};
+
+using at::MTIAHooksRegistry;
+using at::RegistererMTIAHooksRegistry;
+
+REGISTER_MTIA_HOOKS(MTIAHooks);
+C10_REGISTER_GUARD_IMPL(MTIA, MTIAGuardImpl);
+
+} // namespace torch::mtia
diff --git a/test/run_test.py b/test/run_test.py
index 406fe0ecb2..6a14e44469 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -191,6 +191,8 @@ XPU_TEST = [
RUN_PARALLEL_BLOCKLIST = [
"test_cpp_extensions_jit",
"test_cpp_extensions_open_device_registration",
+ "test_cpp_extensions_stream_and_event",
+ "test_cpp_extensions_mtia_backend",
"test_jit_disabled",
"test_mobile_optimizer",
"test_multiprocessing",
diff --git a/test/test_cpp_extensions_mtia_backend.py b/test/test_cpp_extensions_mtia_backend.py
new file mode 100644
index 0000000000..e2ebbf702d
--- /dev/null
+++ b/test/test_cpp_extensions_mtia_backend.py
@@ -0,0 +1,154 @@
+# Owner(s): ["module: mtia"]
+
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+
+import torch
+import torch.testing._internal.common_utils as common
+import torch.utils.cpp_extension
+from torch.testing._internal.common_utils import (
+ IS_ARM64,
+ IS_LINUX,
+ skipIfTorchDynamo,
+ TEST_CUDA,
+ TEST_PRIVATEUSE1,
+)
+from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
+
+
+TEST_CUDA = TEST_CUDA and CUDA_HOME is not None
+TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None
+
+
+def remove_build_path():
+ if sys.platform == "win32":
+ # Not wiping extensions build folder because Windows
+ return
+ default_build_root = torch.utils.cpp_extension.get_default_build_root()
+ if os.path.exists(default_build_root):
+ shutil.rmtree(default_build_root, ignore_errors=True)
+
+
+@unittest.skipIf(
+ IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1,
+ "Only on linux platform and mutual exclusive to other backends",
+)
+@torch.testing._internal.common_utils.markDynamoStrictTest
+class TestCppExtensionMTIABackend(common.TestCase):
+ """Tests MTIA backend with C++ extensions."""
+
+ module = None
+
+ def setUp(self):
+ super().setUp()
+ # cpp extensions use relative paths. Those paths are relative to
+ # this file, so we'll change the working directory temporarily
+ self.old_working_dir = os.getcwd()
+ os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+ def tearDown(self):
+ super().tearDown()
+ # return the working directory (see setUp)
+ os.chdir(self.old_working_dir)
+
+ @classmethod
+ def tearDownClass(cls):
+ remove_build_path()
+
+ @classmethod
+ def setUpClass(cls):
+ remove_build_path()
+ build_dir = tempfile.mkdtemp()
+ # Load the fake device guard impl.
+ cls.module = torch.utils.cpp_extension.load(
+ name="mtia_extension",
+ sources=["cpp_extensions/mtia_extension.cpp"],
+ build_directory=build_dir,
+ extra_include_paths=[
+ "cpp_extensions",
+ "path / with spaces in it",
+ "path with quote'",
+ ],
+ is_python_module=False,
+ verbose=True,
+ )
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_get_device_module(self):
+ device = torch.device("mtia:0")
+ default_stream = torch.get_device_module(device).current_stream()
+ self.assertEqual(
+ default_stream.device_type, int(torch._C._autograd.DeviceType.MTIA)
+ )
+ print(torch._C.Stream.__mro__)
+ print(torch.cuda.Stream.__mro__)
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_stream_basic(self):
+ default_stream = torch.mtia.current_stream()
+ user_stream = torch.mtia.Stream()
+ self.assertEqual(torch.mtia.current_stream(), default_stream)
+ self.assertNotEqual(default_stream, user_stream)
+ # Check mtia_extension.cpp, default stream id starts from 0.
+ self.assertEqual(default_stream.stream_id, 0)
+ self.assertNotEqual(user_stream.stream_id, 0)
+ with torch.mtia.stream(user_stream):
+ self.assertEqual(torch.mtia.current_stream(), user_stream)
+ self.assertTrue(user_stream.query())
+ default_stream.synchronize()
+ self.assertTrue(default_stream.query())
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_stream_context(self):
+ mtia_stream_0 = torch.mtia.Stream(device="mtia:0")
+ mtia_stream_1 = torch.mtia.Stream(device="mtia:0")
+ print(mtia_stream_0)
+ print(mtia_stream_1)
+ with torch.mtia.stream(mtia_stream_0):
+ current_stream = torch.mtia.current_stream()
+ msg = f"current_stream {current_stream} should be {mtia_stream_0}"
+ self.assertTrue(current_stream == mtia_stream_0, msg=msg)
+
+ with torch.mtia.stream(mtia_stream_1):
+ current_stream = torch.mtia.current_stream()
+ msg = f"current_stream {current_stream} should be {mtia_stream_1}"
+ self.assertTrue(current_stream == mtia_stream_1, msg=msg)
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_stream_context_different_device(self):
+ device_0 = torch.device("mtia:0")
+ device_1 = torch.device("mtia:1")
+ mtia_stream_0 = torch.mtia.Stream(device=device_0)
+ mtia_stream_1 = torch.mtia.Stream(device=device_1)
+ print(mtia_stream_0)
+ print(mtia_stream_1)
+ orig_current_device = torch.mtia.current_device()
+ with torch.mtia.stream(mtia_stream_0):
+ current_stream = torch.mtia.current_stream()
+ self.assertTrue(torch.mtia.current_device() == device_0.index)
+ msg = f"current_stream {current_stream} should be {mtia_stream_0}"
+ self.assertTrue(current_stream == mtia_stream_0, msg=msg)
+ self.assertTrue(torch.mtia.current_device() == orig_current_device)
+ with torch.mtia.stream(mtia_stream_1):
+ current_stream = torch.mtia.current_stream()
+ self.assertTrue(torch.mtia.current_device() == device_1.index)
+ msg = f"current_stream {current_stream} should be {mtia_stream_1}"
+ self.assertTrue(current_stream == mtia_stream_1, msg=msg)
+ self.assertTrue(torch.mtia.current_device() == orig_current_device)
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_device_context(self):
+ device_0 = torch.device("mtia:0")
+ device_1 = torch.device("mtia:1")
+ with torch.mtia.device(device_0):
+ self.assertTrue(torch.mtia.current_device() == device_0.index)
+
+ with torch.mtia.device(device_1):
+ self.assertTrue(torch.mtia.current_device() == device_1.index)
+
+
+if __name__ == "__main__":
+ common.run_tests()
diff --git a/test/test_cpp_extensions_stream_and_event.py b/test/test_cpp_extensions_stream_and_event.py
new file mode 100644
index 0000000000..0be81dd492
--- /dev/null
+++ b/test/test_cpp_extensions_stream_and_event.py
@@ -0,0 +1,108 @@
+# Owner(s): ["module: mtia"]
+
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+
+import torch
+import torch.testing._internal.common_utils as common
+import torch.utils.cpp_extension
+from torch.testing._internal.common_utils import (
+ IS_ARM64,
+ IS_LINUX,
+ skipIfTorchDynamo,
+ TEST_CUDA,
+ TEST_PRIVATEUSE1,
+)
+from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
+
+
+TEST_CUDA = TEST_CUDA and CUDA_HOME is not None
+TEST_ROCM = TEST_CUDA and torch.version.hip is not None and ROCM_HOME is not None
+
+
+def remove_build_path():
+ if sys.platform == "win32":
+ # Not wiping extensions build folder because Windows
+ return
+ default_build_root = torch.utils.cpp_extension.get_default_build_root()
+ if os.path.exists(default_build_root):
+ shutil.rmtree(default_build_root, ignore_errors=True)
+
+
+# Since we use a fake MTIA device backend to test the generic Stream/Event, device backends are mutually exclusive to each other.
+# The test will be skipped if any of the following conditions are met:
+@unittest.skipIf(
+ IS_ARM64 or not IS_LINUX or TEST_CUDA or TEST_PRIVATEUSE1,
+ "Only on linux platform and mutual exclusive to other backends",
+)
+@torch.testing._internal.common_utils.markDynamoStrictTest
+class TestCppExtensionStreamAndEvent(common.TestCase):
+ """Tests Stream and Event with C++ extensions."""
+
+ module = None
+
+ def setUp(self):
+ super().setUp()
+ # cpp extensions use relative paths. Those paths are relative to
+ # this file, so we'll change the working directory temporarily
+ self.old_working_dir = os.getcwd()
+ os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+ def tearDown(self):
+ super().tearDown()
+ # return the working directory (see setUp)
+ os.chdir(self.old_working_dir)
+
+ @classmethod
+ def tearDownClass(cls):
+ remove_build_path()
+
+ @classmethod
+ def setUpClass(cls):
+ remove_build_path()
+ build_dir = tempfile.mkdtemp()
+ # Load the fake device guard impl.
+ src = f"{os.path.abspath(os.path.dirname(__file__))}/cpp_extensions/mtia_extension.cpp"
+ cls.module = torch.utils.cpp_extension.load(
+ name="mtia_extension",
+ sources=[src],
+ build_directory=build_dir,
+ extra_include_paths=[
+ "cpp_extensions",
+ "path / with spaces in it",
+ "path with quote'",
+ ],
+ is_python_module=False,
+ verbose=True,
+ )
+
+ @skipIfTorchDynamo("Not a TorchDynamo suitable test")
+ def test_stream_event(self):
+ s = torch.Stream()
+ self.assertTrue(s.device_type, int(torch._C._autograd.DeviceType.MTIA))
+ e = torch.Event()
+ self.assertTrue(e.device.type, "mtia")
+ # Should be nullptr by default
+ self.assertTrue(e.event_id == 0)
+ s.record_event(event=e)
+ print(f"recorded event 1: {e}")
+ self.assertTrue(e.event_id != 0)
+ e2 = s.record_event()
+ print(f"recorded event 2: {e2}")
+ self.assertTrue(e2.event_id != 0)
+ self.assertTrue(e2.event_id != e.event_id)
+ e.synchronize()
+ e2.synchronize()
+ time_elapsed = e.elapsed_time(e2)
+ print(f"time elapsed between e1 and e2: {time_elapsed}")
+ old_event_id = e.event_id
+ e.record(stream=s)
+ print(f"recorded event 1: {e}")
+ self.assertTrue(e.event_id == old_event_id)
+
+
+if __name__ == "__main__":
+ common.run_tests()
diff --git a/tools/testing/modulefinder_determinator.py b/tools/testing/modulefinder_determinator.py
index ce55fdb424..ba58d75c57 100644
--- a/tools/testing/modulefinder_determinator.py
+++ b/tools/testing/modulefinder_determinator.py
@@ -21,6 +21,8 @@ TARGET_DET_LIST = [
"test_cpp_extensions_aot_no_ninja",
"test_cpp_extensions_jit",
"test_cpp_extensions_open_device_registration",
+ "test_cpp_extensions_stream_and_event",
+ "test_cpp_extensions_mtia_backend",
"test_cuda",
"test_cuda_primary_ctx",
"test_dataloader",
|
2.41.0
|
885638f9509900ca204d12d8c733f318c16a818
|
Wed, 24 Apr 2024 20:55:56 +0000
|
[PATCH 0599/1000] [quant][pt2e] Propagate get_attr meta through known ops only (#124415)
|
Summary: Avoid the situation where the graph traversal finds a matmul node with a `get_attr` as its `args[0]` and incorrectly propagates the `get_attr`'s meta to everything downstream. Test Plan: CI Differential Revision: D56219120 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124415 Approved by: https://github.com/jerryzh168
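A minimal sketch (helper name hypothetical) of the rule the diff below enforces in `_port_metadata_for_input_quant_nodes`: the backward walk from a quantize node only passes through a small allowlist of view-like ops, so a matmul fed by a `get_attr` no longer lets the tag leak downstream:
```
import torch

# Hypothetical helper mirroring the updated while-loop: only these
# metadata-preserving ops are traversed when looking for a get_attr input.
_KNOWN_VIEW_OPS = {
    torch.ops.aten.view.default,
    torch.ops.aten.permute.default,
    torch.ops.aten.flatten.using_ints,
    torch.ops.aten.squeeze.dim,
    torch.ops.aten.transpose.int,
}

def _find_get_attr_input(q_node: torch.fx.Node):
    node = q_node.args[0]
    while (
        isinstance(node, torch.fx.Node)
        and node.op == "call_function"
        and node.target in _KNOWN_VIEW_OPS
    ):
        node = node.args[0]
    # A matmul (or any other unknown op) stops the walk, so its get_attr
    # argument is never reached and no tag is ported through it.
    if isinstance(node, torch.fx.Node) and node.op == "get_attr":
        return node
    return None
```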
|
diff --git a/test/quantization/pt2e/test_metadata_porting.py b/test/quantization/pt2e/test_metadata_porting.py
index 40bb1f2555..7f1bc21831 100644
--- a/test/quantization/pt2e/test_metadata_porting.py
+++ b/test/quantization/pt2e/test_metadata_porting.py
@@ -7,7 +7,7 @@ from typing import List
import torch
import torch._export
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
-from torch.ao.quantization.quantizer import Quantizer
+from torch.ao.quantization.quantizer import QuantizationAnnotation, Quantizer
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
get_symmetric_quantization_config,
)
@@ -456,3 +456,67 @@ class TestMetaDataPorting(QuantizationTestCase):
self._test_quant_tag_preservation_through_decomp(
m, example_inputs, from_node_to_tags
)
+
+ def test_no_metadata_porting_through_unknown_ops(self):
+ """
+ Model under test
+ matmul -> add -> relu
+ matmul has get_attr as first input, but the quantization_tag should not be
+ propagated to add even if it's part of a chain that ends at get_attr
+ """
+
+ class MatmulWithConstInput(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.register_parameter("w", torch.nn.Parameter(torch.rand(8, 16)))
+
+ def forward(self, x, y):
+ x = torch.matmul(self.w, x)
+ z = x + y
+ return torch.nn.functional.relu(z)
+
+ class BackendAQuantizer(Quantizer):
+ def annotate(self, gm: torch.fx.GraphModule) -> torch.fx.GraphModule:
+ backend_string = "BackendA"
+ qconfig = get_symmetric_quantization_config()
+ for n in gm.graph.nodes:
+ if n.op != "call_function":
+ continue
+
+ n.meta["quantization_annotation"] = QuantizationAnnotation(
+ input_qspec_map={n.args[0]: qconfig.input_activation},
+ output_qspec=qconfig.output_activation,
+ )
+
+ tag = str(n.target)
+ n.meta["quantization_tag"] = tag
+ for arg in n.args:
+ if arg.op == "get_attr":
+ arg.meta["quantization_tag"] = tag
+
+ def validate(self, model: torch.fx.GraphModule) -> None:
+ pass
+
+ example_inputs = (torch.randn(16, 24), torch.randn(8, 24))
+ get_attr_tags = {"aten.matmul.default"}
+ quantize_per_tensor_tensor_tags = {
+ "aten.matmul.default",
+ "aten.add.Tensor",
+ "aten.relu.default",
+ }
+ dequantize_per_tensor_tensor_tags = {
+ "aten.matmul.default",
+ "aten.add.Tensor",
+ "aten.relu.default",
+ }
+ node_tags = {
+ "get_attr": get_attr_tags,
+ torch.ops.quantized_decomposed.quantize_per_tensor.default: quantize_per_tensor_tensor_tags,
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default: dequantize_per_tensor_tensor_tags,
+ }
+ m = self._test_metadata_porting(
+ MatmulWithConstInput(),
+ example_inputs,
+ BackendAQuantizer(),
+ node_tags,
+ )
diff --git a/torch/ao/quantization/pt2e/port_metadata_pass.py b/torch/ao/quantization/pt2e/port_metadata_pass.py
index 3f02943146..c47e820735 100644
--- a/torch/ao/quantization/pt2e/port_metadata_pass.py
+++ b/torch/ao/quantization/pt2e/port_metadata_pass.py
@@ -101,10 +101,25 @@ def _port_metadata_for_input_quant_nodes(
# if the q_node can be traced back to get_attr node
q_to_get_attr_nodes = [q_node]
q_node_input = q_node.args[0]
- while isinstance(q_node_input, torch.fx.Node) and q_node_input.op not in [
- "placeholder",
- "get_attr",
- ]:
+ while (
+ isinstance(q_node_input, torch.fx.Node)
+ and q_node_input.op == "call_function"
+ and q_node_input.target
+ in [
+ torch.ops.aten.flatten.using_ints,
+ torch.ops.aten.permute.default,
+ torch.ops.aten.permute_copy.default,
+ torch.ops.aten.slice_copy.Tensor,
+ torch.ops.aten.squeeze.dim,
+ torch.ops.aten.squeeze_copy.dim,
+ torch.ops.aten.transpose.Dimname,
+ torch.ops.aten.transpose.int,
+ torch.ops.aten.transpose_,
+ torch.ops.aten.view_copy.default,
+ torch.ops.aten.view.default,
+ torch.ops.aten._mkldnn_transpose,
+ ]
+ ):
q_to_get_attr_nodes.append(q_node_input)
q_node_input = q_node_input.args[0]
if isinstance(q_node_input, torch.fx.Node) and q_node_input.op == "get_attr":
|
2.41.0
|
0ab062103be7da46a592c01cac676b5f1d6e297
|
Wed, 24 Apr 2024 21:16:28 +0000
|
[PATCH 0600/1000] [MemoryViz] Improve description of blocks with missing frames (#124784)
|
Summary: It is common for blocks to be missing frames, and many users ask why. Let's improve this output message to cover the common reasons: 1) the block was allocated before _record_memory_history was enabled; 2) the context or stacks passed to _record_memory_history does not include this block; 3) backward events are allocated with a C++ stack and will not show if stacks='python'. Test Plan: CI and ran it locally:  Differential Revision: D56490921 Pulled By: aaronenyeshi Pull Request resolved: https://github.com/pytorch/pytorch/pull/124784 Approved by: https://github.com/zdevito
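For context, a hedged example of enabling the recorder so that blocks carry frames for all three cases above (the keyword values shown are the common ones; confirm against your PyTorch version):
```
import torch

# Start recording before the workload runs (case 1), keep both alloc and
# state context (case 2), and capture C++ as well as Python stacks so
# backward allocations are attributed (case 3).
torch.cuda.memory._record_memory_history(context="all", stacks="all")

# ... run the workload ...

torch.cuda.memory._dump_snapshot("snapshot.pickle")  # open with MemoryViz
```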
|
diff --git a/torch/utils/viz/MemoryViz.js b/torch/utils/viz/MemoryViz.js
index 51a64c275f..e725ae1606 100644
--- a/torch/utils/viz/MemoryViz.js
+++ b/torch/utils/viz/MemoryViz.js
@@ -762,7 +762,12 @@ function frameFilter({name, filename}) {
function format_frames(frames) {
if (frames.length === 0) {
- return `<block was allocated before _record_history was enabled>`;
+ return (
+ `This block has no frames. Potential causes:\n` +
+ `1) This block was allocated before _record_memory_history was enabled.\n` +
+ `2) The context or stacks passed to _record_memory_history does not include this block. Consider changing context to 'state', 'alloc', or 'all', or changing stacks to 'all'.\n` +
+ `3) This event occurred during backward, which has no python frames, and memory history did not include C++ frames. Use stacks='all' to record both C++ and python frames.`
+ );
}
const frame_strings = frames
.filter(frameFilter)
|
2.41.0
|
809b34288181e7bbc16618e30beeb07fb326530
|
Wed, 24 Apr 2024 21:34:33 +0000
|
[PATCH 0602/1000] [DTensor][Easy] Update OpSchema __repr__ to show args_schema in format print (#124812)
|
When printing op_schema with `print(f"{op_schema=}")`:

Before -- can't view into the OpStrategy/TupleStrategy in format print:
```
# A pointwise strategy
op_schema=OpSchema(op=aten.relu.default, args_schema=(<torch.distributed._tensor.op_schema.OpStrategy object at 0x7f4e763e0520>,), kwargs_schema={})
# A pointwise strategy
pointwise_strategy -- op_schema=OpSchema(op=aten.threshold_backward.default, args_schema=(<torch.distributed._tensor.op_schema.OpStrategy object at 0x7f4e763e1540>, <torch.distributed._tensor.op_schema.OpStrategy object at 0x7f4e763e1510>, 0), kwargs_schema={})
# A tuple strategy
op_schema=OpSchema(op=aten._foreach_lerp_.Scalar, args_schema=(<torch.distributed._tensor.op_schema.TupleStrategy object at 0x7f4e763e31f0>, <torch.distributed._tensor.op_schema.TupleStrategy object at 0x7f4e763e3460>, 0.09999999999999998), kwargs_schema={})
```
After -- printing out the OpStrategy/TupleStrategy string:
```
# A pointwise strategy
op_schema=OpSchema(op=aten.relu.default, args_schema=(OpStrategy:[None -> R] @ mesh: (4,)), kwargs_schema={})
# A pointwise strategy
op_schema=OpSchema(op=aten.threshold_backward.default, args_schema=(OpStrategy:[None -> R] @ mesh: (4,), OpStrategy:[None -> R] @ mesh: (4,), 0), kwargs_schema={})
# A tuple strategy
op_schema=OpSchema(op=aten._foreach_lerp_.Scalar, args_schema=(TupleStrategy(OpStrategy:[None -> S(0)] @ mesh: (4,)), TupleStrategy(OpStrategy:[None -> S(0)] @ mesh: (4,)), 0.09999999999999998), kwargs_schema={})
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124812 Approved by: https://github.com/wanchaol
|
diff --git a/torch/distributed/_tensor/op_schema.py b/torch/distributed/_tensor/op_schema.py
index 89f113c6fc..804a3ceaef 100644
--- a/torch/distributed/_tensor/op_schema.py
+++ b/torch/distributed/_tensor/op_schema.py
@@ -246,9 +246,10 @@ class OpSchema:
return tuple(item for item in self.args_schema if isinstance(item, DTensorSpec))
def __repr__(self) -> str:
+ args_schema = ", ".join([str(arg_schema) for arg_schema in self.args_schema])
return (
f"OpSchema(op={self.op},"
- f" args_schema={self.args_schema},"
+ f" args_schema=({args_schema}),"
f" kwargs_schema={self.kwargs_schema})"
)
|
2.41.0
|
89f442f0b103fa6f38103784a2dfedbd147f863
|
Wed, 24 Apr 2024 21:46:46 +0000
|
[PATCH 0603/1000] add -fclang-abi-compat=17 to HIP_HIPCC_FLAGS (#124862)
|
C++20 mangling rules were recently added to hip-clang. This flag maintains compatibility since PyTorch is still built as C++17; otherwise the linker fails. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124862 Approved by: https://github.com/malfet, https://github.com/pruthvistony
|
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index d8aea3999e..32848bbd5b 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -1314,6 +1314,9 @@ if(USE_ROCM)
list(APPEND HIP_HIPCC_FLAGS -fdebug-info-for-profiling)
endif(CMAKE_BUILD_TYPE MATCHES Debug)
+ # needed for compat with newer versions of hip-clang that introduced C++20 mangling rules
+ list(APPEND HIP_HIPCC_FLAGS -fclang-abi-compat=17)
+
set(HIP_CLANG_FLAGS ${HIP_CXX_FLAGS})
# Ask hcc to generate device code during compilation so we can use
# host linker to link.
|
2.41.0
|
2fe9071c23b92be28af7bb7b2b58df411426d0c
|
Wed, 24 Apr 2024 21:55:24 +0000
|
[PATCH 0604/1000] [ROCm][CI] fix 5.7 nightly wheel build (#124797)
|
Fixes broken ROCm 5.7 build caused by #122106. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124797 Approved by: https://github.com/atalman
|
diff --git a/aten/src/ATen/cuda/CUDABlas.cpp b/aten/src/ATen/cuda/CUDABlas.cpp
index c211092c49..3efcd23df5 100644
--- a/aten/src/ATen/cuda/CUDABlas.cpp
+++ b/aten/src/ATen/cuda/CUDABlas.cpp
@@ -375,7 +375,7 @@ class CuBlasLtMatmulPreference : public CuBlasLtDescriptor<
template <typename Dtype>
inline void bgemm_internal_cublaslt(CUDABLAS_BGEMM_ARGTYPES(Dtype)) {
-#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 50700)
+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && ROCM_VERSION >= 60000)
cudaDataType_t abcType = CUDA_R_32F;
cublasComputeType_t computeType = CUBLAS_COMPUTE_32F;
cudaDataType_t scaleType = CUDA_R_32F;
|
2.41.0
|
5b28ffc3a287bc568a65a49913779a1d189877d
|
Wed, 24 Apr 2024 10:12:17 -0700
|
[PATCH 0605/1000] [quant][pt2e] Move batch norm op between eval/train for cuda (#123957)
|
Summary: Before in `move_exported_model_to_train/eval`, we only switched the CPU versions of the batch norm op. This commit adds support for the cuda versions of the op too. Note that this fix is temporary; we won't have to differentiate between these two cases once we have batch norm consolidation. Test Plan: python test/test_quantization.py -k test_move_exported_model_bn Reviewers: jerryzh168 Subscribers: jerryzh168, leslie-fang-intel, supriyar Differential Revision: [D56070054](https://our.internmc.facebook.com/intern/diff/D56070054) Pull Request resolved: https://github.com/pytorch/pytorch/pull/123957 Approved by: https://github.com/jerryzh168
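A minimal usage sketch mirroring the updated test (CUDA availability assumed; import paths reflect the APIs used by the test at this point in time):
```
import torch
from torch._export import capture_pre_autograd_graph

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.bn = torch.nn.BatchNorm2d(3)

    def forward(self, x):
        return self.bn(x)

m = M().train().cuda()
example_inputs = (torch.randn(1, 3, 3, 3).cuda(),)
m = capture_pre_autograd_graph(m, example_inputs)

# With this change the cudnn/miopen batch norm variants are swapped as
# well, not only the CPU _native_batch_norm_legit ops.
torch.ao.quantization.move_exported_model_to_eval(m)
torch.ao.quantization.move_exported_model_to_train(m)
```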
|
diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py
index 3c759fc65c..b96e1ff12a 100644
--- a/test/quantization/pt2e/test_quantize_pt2e.py
+++ b/test/quantization/pt2e/test_quantize_pt2e.py
@@ -53,6 +53,8 @@ from torch.testing._internal.common_utils import (
instantiate_parametrized_tests,
parametrize,
TemporaryFileName,
+ TEST_CUDA,
+ TEST_WITH_ROCM,
)
@@ -1826,6 +1828,23 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def test_move_exported_model_dropout_inplace(self):
self._test_move_exported_model_dropout(inplace=True)
+ def _get_bn_train_eval_ops(self):
+ if TEST_WITH_ROCM:
+ return (
+ torch.ops.aten.miopen_batch_norm.default,
+ torch.ops.aten.miopen_batch_norm.default,
+ )
+ elif TEST_CUDA:
+ return (
+ torch.ops.aten.cudnn_batch_norm.default,
+ torch.ops.aten.cudnn_batch_norm.default,
+ )
+ else:
+ return (
+ torch.ops.aten._native_batch_norm_legit.default,
+ torch.ops.aten._native_batch_norm_legit_no_training.default,
+ )
+
def test_move_exported_model_bn(self):
"""
Test switching batch_norm behavior between train and eval modes using
@@ -1840,12 +1859,17 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
def forward(self, x):
return self.bn(x)
- example_inputs = (torch.randn(1, 3, 3, 3),)
- m = M().train()
+ if TEST_CUDA:
+ m = M().train().cuda()
+ example_inputs = (torch.randn(1, 3, 3, 3).cuda(),)
+ else:
+ m = M().train()
+ example_inputs = (torch.randn(1, 3, 3, 3),)
+ bn_train_op, bn_eval_op = self._get_bn_train_eval_ops()
m = capture_pre_autograd_graph(m, example_inputs)
# Assert that batch norm op exists and is in train mode
- bn_node = self._get_node(m, torch.ops.aten._native_batch_norm_legit.default)
+ bn_node = self._get_node(m, bn_train_op)
self.assertTrue(bn_node is not None)
self.assertTrue(bn_node.args[5])
@@ -1853,16 +1877,14 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
torch.ao.quantization.move_exported_model_to_eval(m)
# Assert that batch norm op is now in eval mode
- bn_node = self._get_node(
- m, torch.ops.aten._native_batch_norm_legit_no_training.default
- )
+ bn_node = self._get_node(m, bn_eval_op)
self.assertTrue(bn_node is not None)
# Move to train
torch.ao.quantization.move_exported_model_to_train(m)
# Assert that batch norm op is now in train mode again
- bn_node = self._get_node(m, torch.ops.aten._native_batch_norm_legit.default)
+ bn_node = self._get_node(m, bn_train_op)
self.assertTrue(bn_node is not None)
self.assertTrue(bn_node.args[5])
@@ -1908,22 +1930,24 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
x = self.dropout(x)
return x
- example_inputs = (torch.randn(1, 3, 3, 3),)
- m = M().train()
+ if TEST_CUDA:
+ m = M().train().cuda()
+ example_inputs = (torch.randn(1, 3, 3, 3).cuda(),)
+ else:
+ m = M().train()
+ example_inputs = (torch.randn(1, 3, 3, 3),)
+ bn_train_op, bn_eval_op = self._get_bn_train_eval_ops()
m = capture_pre_autograd_graph(m, example_inputs)
def _assert_ops_are_correct(m: torch.fx.GraphModule, train: bool):
targets = [n.target for n in m.graph.nodes]
- bn_train_target = torch.ops.aten._native_batch_norm_legit.default
- bn_eval_target = torch.ops.aten._native_batch_norm_legit_no_training.default
- if train:
- self.assertTrue(bn_train_target in targets)
- self.assertTrue(bn_eval_target not in targets)
- else:
- self.assertTrue(bn_eval_target in targets)
- self.assertTrue(bn_train_target not in targets)
+ bn_op = bn_train_op if train else bn_eval_op
+ bn_node = self._get_node(m, bn_op)
+ self.assertTrue(bn_node is not None)
+ if TEST_CUDA:
+ self.assertEqual(bn_node.args[5], train)
dropout_node = self._get_node(m, torch.ops.aten.dropout.default)
- self.assertTrue(dropout_node.args[2] == train)
+ self.assertEqual(dropout_node.args[2], train)
# Before wrapping: this is not OK
with self.assertRaises(NotImplementedError):
diff --git a/torch/ao/quantization/fx/utils.py b/torch/ao/quantization/fx/utils.py
index 21a1034739..be26332b24 100644
--- a/torch/ao/quantization/fx/utils.py
+++ b/torch/ao/quantization/fx/utils.py
@@ -23,6 +23,7 @@ from torch.ao.quantization.qconfig import (
)
from torch.ao.quantization.stubs import DeQuantStub
from torch.ao.quantization.utils import (
+ _assert_and_get_unique_device,
activation_is_statically_quantized,
)
from torch.ao.quantization.observer import _is_activation_post_process
@@ -222,26 +223,13 @@ def graph_module_from_producer_nodes(
graph_module = GraphModule(root, graph)
return graph_module
+# TODO: delete
def assert_and_get_unique_device(module: torch.nn.Module) -> Any:
"""
Returns the unique device for a module, or None if no device is found.
Throws an error if multiple devices are detected.
"""
- devices = {p.device for p in module.parameters()} | \
- {p.device for p in module.buffers()}
- """
- As a temp workaround for AIMP HHC publish we added CPU check.remove it later. T163614564
- """
- if {torch.device("cpu"), torch.device("meta")} == devices:
- warnings.warn("Both 'meta' and 'cpu' are present in the list of devices. Module can have one device. We Select 'cpu'.")
- devices = {torch.device("cpu")}
- ""
- assert len(devices) <= 1, (
- "prepare only works with cpu or single-device CUDA modules, "
- f"but got devices {devices}"
- )
- device = next(iter(devices)) if len(devices) > 0 else None
- return device
+ return _assert_and_get_unique_device(module)
def create_getattr_from_value(module: torch.nn.Module, graph: Graph, prefix: str, value: Any) -> Node:
"""
diff --git a/torch/ao/quantization/pt2e/export_utils.py b/torch/ao/quantization/pt2e/export_utils.py
index dae8baad8d..2e7b9e380d 100644
--- a/torch/ao/quantization/pt2e/export_utils.py
+++ b/torch/ao/quantization/pt2e/export_utils.py
@@ -3,6 +3,8 @@ import types
import torch
import torch.nn.functional as F
+from torch.ao.quantization.utils import _assert_and_get_unique_device
+
__all__ = [
"model_is_exported",
@@ -136,20 +138,26 @@ def _replace_batchnorm(m: torch.fx.GraphModule, train_to_eval: bool):
torch.randn(1), # bn_running_mean
torch.randn(1), # bn_running_var
)
+
+ device = _assert_and_get_unique_device(m)
+ is_cuda = device is not None and device.type == "cuda"
+ bn_train_aten = _get_aten_graph_module_for_pattern(
+ _WrapperModule(bn_train),
+ example_inputs,
+ is_cuda,
+ )
+ bn_eval_aten = _get_aten_graph_module_for_pattern(
+ _WrapperModule(bn_eval),
+ example_inputs,
+ is_cuda,
+ )
+
if train_to_eval:
- match_pattern = _get_aten_graph_module_for_pattern(
- _WrapperModule(bn_train), example_inputs
- )
- replacement_pattern = _get_aten_graph_module_for_pattern(
- _WrapperModule(bn_eval), example_inputs
- )
+ match_pattern = bn_train_aten
+ replacement_pattern = bn_eval_aten
else:
- match_pattern = _get_aten_graph_module_for_pattern(
- _WrapperModule(bn_eval), example_inputs
- )
- replacement_pattern = _get_aten_graph_module_for_pattern(
- _WrapperModule(bn_train), example_inputs
- )
+ match_pattern = bn_eval_aten
+ replacement_pattern = bn_train_aten
from torch.fx.subgraph_rewriter import replace_pattern_with_filters
diff --git a/torch/ao/quantization/utils.py b/torch/ao/quantization/utils.py
index 70b45b92fb..d0de50bbeb 100644
--- a/torch/ao/quantization/utils.py
+++ b/torch/ao/quantization/utils.py
@@ -688,6 +688,27 @@ def get_fqn_to_example_inputs(
torch.nn.Module.__call__ = orig_module_call # type: ignore[method-assign]
return fqn_to_example_inputs
+def _assert_and_get_unique_device(module: torch.nn.Module) -> Any:
+ """
+ Returns the unique device for a module, or None if no device is found.
+ Throws an error if multiple devices are detected.
+ """
+ devices = {p.device for p in module.parameters()} | \
+ {p.device for p in module.buffers()}
+ """
+ As a temp workaround for AIMP HHC publish we added CPU check.remove it later. T163614564
+ """
+ if {torch.device("cpu"), torch.device("meta")} == devices:
+ warnings.warn("Both 'meta' and 'cpu' are present in the list of devices. Module can have one device. We Select 'cpu'.")
+ devices = {torch.device("cpu")}
+ ""
+ assert len(devices) <= 1, (
+ "prepare only works with cpu or single-device CUDA modules, "
+ f"but got devices {devices}"
+ )
+ device = next(iter(devices)) if len(devices) > 0 else None
+ return device
+
__all__ = [
"NodePattern",
"Pattern",
|
2.41.0
|
2ed2992d94e6bb09d95fb1409883fc61cf19e13
|
Wed, 24 Apr 2024 23:12:19 +0000
|
[PATCH 0607/1000] [export] Capture tensor.to() under export. (#123732)
|
Summary: We used to skip tensor.to() during tracing when the device is the same. This brings some performance improvement in eager mode but makes graph capture lose the semantics of the original model. In this diff, we add an additional condition to skip the fast path when there is no actual data inside the tensor, which is the case when we're using FakeTensor / FunctionalTensor to trace the model. This has no perf impact on existing eager models while making sure we can capture the _to_copy() node in the graph. Test Plan: buck test mode/opt caffe2/test:test_export -- -r device_to Differential Revision: D55969674 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123732 Approved by: https://github.com/angelayi, https://github.com/tugsbayasgalan
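A short sketch of the resulting behavior, mirroring the added `test_device_to_static`:
```
import torch
from torch.export import export

class Module(torch.nn.Module):
    def forward(self, x):
        return x.to("cpu")

ep = export(Module(), (torch.tensor(1, device="cpu"),))
# The same-device .to() is no longer elided when tracing with
# FakeTensor/FunctionalTensor, so _to_copy now appears in the graph.
assert any(
    n.op == "call_function" and n.target is torch.ops.aten._to_copy.default
    for n in ep.graph.nodes
)
```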
|
diff --git a/test/export/test_export.py b/test/export/test_export.py
index f73d93a926..9d946d0fa0 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -1902,6 +1902,50 @@ class TestExport(TestCase):
):
_ = export(mod, inp, strict=True)
+ def test_device_to_static(self):
+ class Module(torch.nn.Module):
+ def forward(self, x):
+ return x.to("cpu")
+
+ ep = export(Module(), (torch.tensor(1, device="cpu"),))
+ ops = []
+ for node in ep.graph.nodes:
+ if node.op == "call_function":
+ ops.append(node.target)
+ self.assertGreater(len(ops), 0)
+ for op in ops:
+ self.assertIn(op, (torch.ops.aten._to_copy.default,))
+
+ def test_device_to_dynamic(self):
+ class Module(torch.nn.Module):
+ def forward(self, x):
+ return x.to("cpu")
+
+ ep = export(
+ Module(),
+ (torch.tensor([1, 2], device="cpu"),),
+ dynamic_shapes={"x": {0: Dim("i")}},
+ )
+ ops = []
+ for node in ep.graph.nodes:
+ if node.op == "call_function":
+ ops.append(node.target)
+ self.assertGreater(len(ops), 0)
+ for op in ops:
+ self.assertIn(op, (torch.ops.aten._to_copy.default,))
+
+ def test_device_to_mutation(self):
+ class Module(torch.nn.Module):
+ def forward(self, x):
+ y = x.to("cpu")
+ y.add_(1)
+ return y, x
+
+ with self.assertRaisesRegex(
+ RuntimeError, "cannot mutate tensors with frozen storage"
+ ):
+ export(Module(), (torch.tensor(1, device="cpu"),))
+
def test_module(self):
class MyLinear(torch.nn.Module):
def __init__(self):
diff --git a/torch/_subclasses/functional_tensor.py b/torch/_subclasses/functional_tensor.py
index 8b74d069c6..fb2a81b8ae 100644
--- a/torch/_subclasses/functional_tensor.py
+++ b/torch/_subclasses/functional_tensor.py
@@ -218,6 +218,13 @@ class FunctionalTensor(torch.Tensor):
else:
return [elem.tolist() for elem in self.elem]
+ def to(self, *args, **kwargs):
+ if _detect_functional_mode().export:
+ # If copy is specified as pos arg, it's always the second one.
+ if len([arg for arg in args if isinstance(arg, bool)]) <= 1:
+ return super().to(*args, **{**kwargs, "copy": True})
+ return super().to(*args, **kwargs)
+
class FunctionalTensorMode(TorchDispatchMode):
def __init__(self, pre_dispatch=False, export=False, _allow_token_discovery=False):
@@ -423,9 +430,13 @@ class FunctionalTensorMode(TorchDispatchMode):
*args_unwrapped,
**kwargs_unwrapped,
)
- # We don't allow any mutation on result of dropout
- if self.export and func == torch.ops.aten.dropout.default:
- torch._freeze_functional_tensor(outs_unwrapped) # type: ignore[attr-defined]
+ # We don't allow any mutation on result of dropout or _to_copy
+ if self.export:
+ if func in (
+ torch.ops.aten.dropout.default,
+ torch.ops.aten._to_copy.default,
+ ):
+ torch._freeze_functional_tensor(outs_unwrapped) # type: ignore[attr-defined]
outs_wrapped = pytree.tree_map_only(
torch.Tensor, wrap, outs_unwrapped
)
|
2.41.0
|
b94845b148fd47090a9f26f09d5e714bb91265d
|
Wed, 24 Apr 2024 23:14:41 +0000
|
[PATCH 0608/1000] Force upsample to be float32 (#121324)
|
Fixes #121072 Pull Request resolved: https://github.com/pytorch/pytorch/pull/121324 Approved by: https://github.com/albanD
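The change registers the upsample/interpolate ops in autocast's FP32 list; a hedged illustration of the effect (shapes and modes chosen arbitrarily):
```
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8, device="cuda", dtype=torch.float16)
with torch.autocast("cuda", dtype=torch.float16):
    out = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)
# upsample_bilinear2d now carries an FP32 autocast policy, so the op is
# executed in (and returns) float32 even inside the float16 region.
print(out.dtype)  # expected: torch.float32
```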
|
diff --git a/aten/src/ATen/autocast_mode.cpp b/aten/src/ATen/autocast_mode.cpp
index 923c2e42c9..0b99b11430 100644
--- a/aten/src/ATen/autocast_mode.cpp
+++ b/aten/src/ATen/autocast_mode.cpp
@@ -239,7 +239,19 @@ Explicit registration for out-of-place ops
_(pdist) \
_(cdist) \
_(renorm) \
- _(logsumexp)
+ _(logsumexp) \
+ _(upsample_nearest1d) \
+ _(_upsample_nearest_exact1d) \
+ _(upsample_nearest2d) \
+ _(_upsample_nearest_exact2d) \
+ _(upsample_nearest3d) \
+ _(_upsample_nearest_exact3d) \
+ _(upsample_linear1d) \
+ _(upsample_bilinear2d) \
+ _(_upsample_bilinear2d_aa) \
+ _(upsample_trilinear3d) \
+ _(upsample_bicubic2d) \
+ _(_upsample_bicubic2d_aa)
#define AT_FORALL_FP32_SET_OPT_DTYPE(_) \
_(prod) \
diff --git a/benchmarks/dynamo/ci_expected_accuracy/aot_eager_torchbench_training.csv b/benchmarks/dynamo/ci_expected_accuracy/aot_eager_torchbench_training.csv
index 96b90827e7..4c56993ef6 100644
--- a/benchmarks/dynamo/ci_expected_accuracy/aot_eager_torchbench_training.csv
+++ b/benchmarks/dynamo/ci_expected_accuracy/aot_eager_torchbench_training.csv
@@ -194,7 +194,7 @@ pytorch_stargan,pass,6
-pytorch_unet,pass,7
+pytorch_unet,pass_due_to_skip,7
diff --git a/benchmarks/dynamo/ci_expected_accuracy/dynamic_aot_eager_torchbench_training.csv b/benchmarks/dynamo/ci_expected_accuracy/dynamic_aot_eager_torchbench_training.csv
index e916324734..28da82627a 100644
--- a/benchmarks/dynamo/ci_expected_accuracy/dynamic_aot_eager_torchbench_training.csv
+++ b/benchmarks/dynamo/ci_expected_accuracy/dynamic_aot_eager_torchbench_training.csv
@@ -194,7 +194,7 @@ pytorch_stargan,pass,6
-pytorch_unet,pass,7
+pytorch_unet,pass_due_to_skip,7
diff --git a/benchmarks/dynamo/ci_expected_accuracy/dynamic_inductor_torchbench_training.csv b/benchmarks/dynamo/ci_expected_accuracy/dynamic_inductor_torchbench_training.csv
index bf7b7e8df1..5e5653c940 100644
--- a/benchmarks/dynamo/ci_expected_accuracy/dynamic_inductor_torchbench_training.csv
+++ b/benchmarks/dynamo/ci_expected_accuracy/dynamic_inductor_torchbench_training.csv
@@ -194,7 +194,7 @@ pytorch_stargan,pass,6
-pytorch_unet,pass,7
+pytorch_unet,pass_due_to_skip,7
diff --git a/benchmarks/dynamo/ci_expected_accuracy/dynamo_eager_torchbench_training.csv b/benchmarks/dynamo/ci_expected_accuracy/dynamo_eager_torchbench_training.csv
index 96b90827e7..4c56993ef6 100644
--- a/benchmarks/dynamo/ci_expected_accuracy/dynamo_eager_torchbench_training.csv
+++ b/benchmarks/dynamo/ci_expected_accuracy/dynamo_eager_torchbench_training.csv
@@ -194,7 +194,7 @@ pytorch_stargan,pass,6
-pytorch_unet,pass,7
+pytorch_unet,pass_due_to_skip,7
diff --git a/benchmarks/dynamo/ci_expected_accuracy/inductor_torchbench_training.csv b/benchmarks/dynamo/ci_expected_accuracy/inductor_torchbench_training.csv
index 96b90827e7..4c56993ef6 100644
--- a/benchmarks/dynamo/ci_expected_accuracy/inductor_torchbench_training.csv
+++ b/benchmarks/dynamo/ci_expected_accuracy/inductor_torchbench_training.csv
@@ -194,7 +194,7 @@ pytorch_stargan,pass,6
-pytorch_unet,pass,7
+pytorch_unet,pass_due_to_skip,7
diff --git a/benchmarks/dynamo/torchbench.yaml b/benchmarks/dynamo/torchbench.yaml
index 3ecfbc9784..bf848e81b3 100644
--- a/benchmarks/dynamo/torchbench.yaml
+++ b/benchmarks/dynamo/torchbench.yaml
@@ -252,6 +252,7 @@ accuracy:
eager_not_deterministic:
# Models that deterministic algorithms can not be turned on for eager mode.
- Background_Matting
+ - pytorch_unet
max_batch_size:
hf_GPT2: 2
|
2.41.0
|
6c983a9735f6b61073a3dc15dcf6f122637a349
|
Wed, 24 Apr 2024 10:32:03 -0700
|
[PATCH 0609/1000] [DeviceMesh] Added `DeviceMesh.from_group()` (#124787)
|
This PR adds a `DeviceMesh.from_group()` static method to convert an existing process group to a device mesh. Motivation: We need `DeviceMesh.from_group()` to allow FSDP2 to interoperate with distributed libraries that do not use `DeviceMesh` for all parallelisms. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124787 Approved by: https://github.com/wanchaol ghstack dependencies: #124651, #124741, #124767, #124768, #124780
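
As a quick illustration, a hedged sketch (not code from this PR): assuming a torchrun-style multi-process launch with an NCCL backend, the new static method turns an existing process group into a 1D mesh.

```python
import torch.distributed as dist
from torch.distributed.device_mesh import DeviceMesh

dist.init_process_group("nccl")  # assumes torchrun has set rank/world-size env vars
pg = dist.distributed_c10d._get_default_group()  # any existing ProcessGroup works

mesh = DeviceMesh.from_group(pg, "cuda")  # 1D mesh over the group's global ranks
print(mesh.mesh)  # e.g. tensor([0, 1, 2, 3]) for a 4-rank group
```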
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_init.py b/test/distributed/_composable/fsdp/test_fully_shard_init.py
index 02cd3c3e35..2e2e97e3a3 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_init.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_init.py
@@ -1,5 +1,6 @@
# Owner(s): ["oncall: distributed"]
+import copy
import itertools
import unittest
from typing import List
@@ -665,5 +666,64 @@ class TestFullyShardMetaDeviceInit(FSDPTestMultiThread):
self.assertEqual(param.grad, ref_param.grad)
+class TestFullyShardProcessGroupInit(FSDPTestMultiThread):
+ @property
+ def world_size(self) -> int:
+ return 4
+
+ @unittest.skipIf(not TEST_CUDA, "no cuda")
+ def test_process_group_init(self):
+ assert self.world_size == 4, f"{self.world_size}"
+ # For convenience, use device mesh's infra to construct the DP PG
+ # (in practice, the trainer would do it manually via `new_group()`)
+ dp_size = 2
+ global_mesh = init_device_mesh(
+ "cuda", (dp_size, self.world_size // dp_size), mesh_dim_names=("dp", "tp")
+ )
+ ref_dp_mesh, tp_mesh = global_mesh["dp"], global_mesh["tp"]
+ dp_pg = ref_dp_mesh.get_group(0)
+
+ # Check the `from_group()` API for correctness
+ dp_mesh = DeviceMesh.from_group(dp_pg, "cuda")
+ self.assertEqual(dp_mesh.mesh, ref_dp_mesh.mesh)
+ self.assertEqual(dp_mesh, ref_dp_mesh)
+ # self.assertFalse(hasattr(dp_mesh, "_coordinate_on_dim"))
+ self.assertEqual(dp_mesh._coordinate_on_dim, ref_dp_mesh._coordinate_on_dim)
+ self.assertEqual(dp_mesh._dim_group_infos, ref_dp_mesh._dim_group_infos)
+
+ # Check 1D FSDP forward/backward parity over the DP mesh
+ # NOTE: We cannot use 2D DTensor-based training here because the DP
+ # mesh from `from_group` does not respect the parent mesh.
+ torch.manual_seed(42)
+ mlp_dim = 8
+ ref_model = MLP(mlp_dim)
+ for param in ref_model.parameters():
+ dist.broadcast(param.detach(), src=0)
+ model = copy.deepcopy(ref_model)
+
+ # Parallelize the test model with the ref DP mesh
+ for module in (ref_model.in_proj, ref_model.out_proj, ref_model):
+ fully_shard(module, mesh=ref_dp_mesh)
+ # Parallelize the test model with the new DP mesh from the PG
+ for module in (model.in_proj, model.out_proj, model):
+ fully_shard(module, mesh=dp_mesh)
+
+ # Ensure that TP ranks have the same input
+ inp = torch.randn((4, mlp_dim), device="cuda")
+ if self.rank in (0, 1):
+ dist.broadcast(inp, src=0, group=tp_mesh.get_group(0))
+ elif self.rank in (2, 3):
+ dist.broadcast(inp, src=2, group=tp_mesh.get_group(0))
+
+ ref_loss = ref_model(inp).sum()
+ ref_loss.backward()
+ loss = model(inp).sum()
+ loss.backward()
+ self.assertEqual(loss, ref_loss)
+ for param, ref_param in zip(model.parameters(), ref_model.parameters()):
+ self.assertEqual(param, ref_param)
+ self.assertEqual(param.grad, ref_param.grad)
+
+
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/test_device_mesh.py b/test/distributed/test_device_mesh.py
index 1a74347add..534185f67a 100644
--- a/test/distributed/test_device_mesh.py
+++ b/test/distributed/test_device_mesh.py
@@ -14,6 +14,7 @@ from torch.distributed._tensor.placement_types import _Partial, Shard
from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh
from torch.distributed.distributed_c10d import (
+ _get_default_group,
_world,
get_global_rank,
get_world_size,
@@ -166,6 +167,19 @@ class DeviceMeshTest(DTensorTestBase):
)
self.assertEqual(global_tensor.shape, (self.world_size * 2, 8))
+ @with_comms
+ def test_from_group(self):
+ # Simple test: check `from_group` for a global PG vs. directly
+ # initializing via `init_device_mesh`
+ global_pg = _get_default_group()
+ ref_global_mesh = init_device_mesh("cuda", (self.world_size,))
+ global_mesh = DeviceMesh.from_group(global_pg, "cuda")
+ self.assertEqual(ref_global_mesh, global_mesh)
+ self.assertEqual(ref_global_mesh._dim_group_infos, global_mesh._dim_group_infos)
+ self.assertEqual(
+ ref_global_mesh._coordinate_on_dim, global_mesh._coordinate_on_dim
+ )
+
class DeviceMeshTestNDim(DTensorTestBase):
@property
diff --git a/torch/distributed/device_mesh.py b/torch/distributed/device_mesh.py
index a1bb6de987..79e5c508a6 100644
--- a/torch/distributed/device_mesh.py
+++ b/torch/distributed/device_mesh.py
@@ -37,6 +37,7 @@ else:
_find_pg_by_ranks_and_tag,
_get_default_group,
_get_group_tag,
+ get_process_group_ranks,
get_rank,
get_world_size,
init_process_group,
@@ -438,6 +439,23 @@ else:
)
return dim_groups
+ @staticmethod
+ def from_group(group: ProcessGroup, device_type: str) -> "DeviceMesh":
+ """
+ Constructs a :class:`DeviceMesh` with ``device_type`` from an

+ existing :class:`ProcessGroup`.
+
+ The constructed device mesh is assumed to be 1D.
+ """
+ # Manually define `_dim_group_infos` instead of relying on the
+ # normal logic since we already have the PG
+ group_ranks = get_process_group_ranks(group)
+ mesh = DeviceMesh(device_type, group_ranks, _init_backend=False)
+ mesh._dim_group_infos = [
+ (_get_group_tag(group), group_ranks, group.group_name)
+ ]
+ return mesh
+
def size(self, mesh_dim: Optional[int] = None) -> int:
return self.mesh.numel() if mesh_dim is None else self.mesh.size(mesh_dim)
|
2.41.0
|
8225072e864afcbc3cf0e4a078f117c9ab582b6
|
Wed, 24 Apr 2024 10:31:47 -0700
|
[PATCH 0610/1000] Match insignificant strides for sdpa inputs (#124859)
|
Fix for https://github.com/pytorch/pytorch/issues/124289. There was a tensor with a single, expanded element: Inductor generated its strides as all 0, while sdpa expects a dense last dimension (`t.stride(-1) == 1`). Although the two layouts are equivalent for a single element, we still hit an error in the kernel. We could fix this in sdpa, but matching the insignificant strides in Inductor also works, and I am less familiar with the downstream sdpa kernel details. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124859 Approved by: https://github.com/drisspg ghstack dependencies: #124751
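
To make the mismatch concrete, a minimal sketch (shapes and strides here are illustrative, not taken from the failing model):

```python
import torch

elem = torch.zeros(1)
# Valid description of a broadcasted single element, as produced by Inductor:
inductor_like = elem.as_strided((1, 32, 1, 1), (0, 0, 0, 0))
# Equivalent layout with a "dense" last dimension, as sdpa's kernels expect:
eager_like = elem.as_strided((1, 32, 1, 1), (0, 0, 0, 1))

print(inductor_like.stride(-1) == 1)           # False -> trips the kernel check
print(torch.equal(inductor_like, eager_like))  # True  -> same values either way
# try_match_insignificant_strides only touches strides of size-0/1 dims, so it can
# switch to the second layout without changing which memory is read.
```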
|
diff --git a/test/inductor/test_fused_attention.py b/test/inductor/test_fused_attention.py
index 6a9f3c442b..63e14b58c0 100644
--- a/test/inductor/test_fused_attention.py
+++ b/test/inductor/test_fused_attention.py
@@ -6,6 +6,7 @@ import math
import torch
import torch._inductor.config
import torch.utils.checkpoint
+from torch._dynamo.debug_utils import aot_graph_input_parser
from torch._dynamo.utils import counters
from torch._inductor.test_case import run_tests, TestCase
from torch._inductor.utils import run_and_get_code
@@ -160,6 +161,83 @@ class TestSDPAPatternRewriterTemplate(TestCase):
check_train=False,
)
+ def _test_insignificant_strides(self):
+ f32 = torch.float32
+
+ # repro taken from https://github.com/pytorch/pytorch/issues/124289
+ # constant_pad_nd is a single element tensor that gets expanded
+
+ def forward(
+ permute_3: "f32[1, 32, 1, 128]",
+ permute_4: "f32[1, 32, 1, 128]",
+ permute_5: "f32[1, 32, 1, 128]",
+ permute_6: "f32[1, 1, 64]",
+ mul_2: "f32[1, 1, 1, 1]",
+ ):
+ cat = torch.ops.aten.cat.default([permute_6, permute_6], 2)
+ permute_6 = None
+ cos = torch.ops.aten.cos.default(cat)
+ sin = torch.ops.aten.sin.default(cat)
+ unsqueeze_10 = torch.ops.aten.unsqueeze.default(cos, 1)
+ cos = None
+ unsqueeze_11 = torch.ops.aten.unsqueeze.default(sin, 1)
+ sin = None
+ mul_5 = torch.ops.aten.mul.Tensor(permute_3, unsqueeze_10)
+ slice_10 = torch.ops.aten.slice.Tensor(permute_3, 3, 0, 64)
+ slice_11 = torch.ops.aten.slice.Tensor(
+ permute_3, 3, 64, 9223372036854775807
+ )
+ permute_3 = None
+ neg = torch.ops.aten.neg.default(slice_11)
+ slice_11 = None
+ cat_1 = torch.ops.aten.cat.default([neg, slice_10], 3)
+ neg = slice_10 = None
+ mul_6 = torch.ops.aten.mul.Tensor(cat_1, unsqueeze_11)
+ cat_1 = None
+ add_1 = torch.ops.aten.add.Tensor(mul_5, mul_6)
+ mul_5 = mul_6 = None
+ mul_7 = torch.ops.aten.mul.Tensor(permute_4, unsqueeze_10)
+ unsqueeze_10 = None
+ slice_12 = torch.ops.aten.slice.Tensor(permute_4, 3, 0, 64)
+ slice_13 = torch.ops.aten.slice.Tensor(
+ permute_4, 3, 64, 9223372036854775807
+ )
+ permute_4 = None
+ neg_1 = torch.ops.aten.neg.default(slice_13)
+ slice_13 = None
+ cat_2 = torch.ops.aten.cat.default([neg_1, slice_12], 3)
+ neg_1 = slice_12 = None
+ mul_8 = torch.ops.aten.mul.Tensor(cat_2, unsqueeze_11)
+ cat_2 = unsqueeze_11 = None
+ add_2 = torch.ops.aten.add.Tensor(mul_7, mul_8)
+ mul_7 = mul_8 = None
+ slice_14 = torch.ops.aten.slice.Tensor(mul_2, 0, 0, 9223372036854775807)
+ mul_2 = None
+ slice_15 = torch.ops.aten.slice.Tensor(slice_14, 1, 0, 9223372036854775807)
+ slice_14 = None
+ slice_16 = torch.ops.aten.slice.Tensor(slice_15, 2, 0, 9223372036854775807)
+ slice_15 = None
+ constant_pad_nd = torch.ops.aten.constant_pad_nd.default(
+ slice_16, [0, 7], 0.0
+ )
+ slice_16 = None
+ slice_17 = torch.ops.aten.slice.Tensor(constant_pad_nd, -1, 0, 1)
+ constant_pad_nd = None
+ expand_5 = torch.ops.aten.expand.default(slice_17, [1, 32, 1, 1])
+ _scaled_dot_product_efficient_attention = (
+ torch.ops.aten._scaled_dot_product_efficient_attention.default(
+ add_1, add_2, permute_5, expand_5, True
+ )
+ )
+ return _scaled_dot_product_efficient_attention
+
+ kwargs = aot_graph_input_parser(forward, device="cuda")
+ # runs successfully
+ out_eager = forward(**kwargs)
+ out_c = torch.compile(forward)(**kwargs)
+ # dont compare philox_seed/offset
+ torch.testing.assert_close(out_eager[0:2], out_c[0:2])
+
def _test_pattern_fails_with_reuse(self):
"""
This test checks that the replacement is not done
@@ -839,6 +917,9 @@ if HAS_CUDA and PLATFORM_SUPPORTS_FUSED_ATTENTION:
test_sdpa_rewriter_1_freezing = (
TestSDPAPatternRewriterTemplate._test_sdpa_rewriter_1_freezing
)
+ test_insignificant_strides = (
+ TestSDPAPatternRewriterTemplate._test_insignificant_strides
+ )
test_pattern_fails_with_reuse_cuda = (
TestSDPAPatternRewriterTemplate._test_pattern_fails_with_reuse
)
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index a160055ee1..34d5b02575 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -1005,7 +1005,9 @@ class GraphLowering(torch.fx.Interpreter):
# AOT Autograd tries to detect stride divergence of inductor from output metadata.
# Here, we try to avoid spurious divergence by matching insignificant strides such as
result_correct_strides.append(
- self.match_insignificant_strides(r, fx_node.meta["val"].stride())
+ self.try_match_insignificant_strides(
+ r, fx_node.meta["val"].stride()
+ )
)
self.graph_outputs = result_correct_strides
@@ -1054,11 +1056,18 @@ class GraphLowering(torch.fx.Interpreter):
finally:
self.current_node = old
- def match_insignificant_strides(
+ def try_match_insignificant_strides(
self,
tensor,
meta_strides_inp: Tuple[Union[int, torch.SymInt], ...],
) -> ir.TensorBox:
+ """
+ Tries to match the strides of the tensor to those in the meta_strides. Strides of insignificant
+ dimensions - size 0 or 1 - will be updated.
+
+ If there are real stride differences (NHWC vs NCHW) then the input will be returned.
+ """
+
# should have already been realized
assert torch._inductor.ir.is_storage_and_layout(tensor)
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index 4166cdf576..e9ebeb6dae 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -1856,8 +1856,9 @@ def sdpa_constraint(fx_node, *args, **kwargs):
return arg
meta_val = fx_arg.meta["val"]
+ meta_stride = meta_val.stride()
- stride_order = ir.get_stride_order(meta_val.stride())
+ stride_order = ir.get_stride_order(meta_stride)
if stride_order and stride_order[-1] != 0:
# contiguous stride order
stride_order = list(reversed(range(len(arg.get_size()))))
@@ -1885,7 +1886,9 @@ def sdpa_constraint(fx_node, *args, **kwargs):
try:
arg.get_stride()
if is_aligned_realized_tensor(arg):
- return arg
+ return V.graph.try_match_insignificant_strides(
+ ir.ExternKernel.realize_input(arg), meta_stride
+ )
except AttributeError:
pass
@@ -1895,7 +1898,9 @@ def sdpa_constraint(fx_node, *args, **kwargs):
if isinstance(arg.data, ir.BaseView):
if not is_aligned(arg):
if is_aligned(arg.unwrap_view()):
- return arg
+ return V.graph.try_match_insignificant_strides(
+ ir.ExternKernel.realize_input(arg), meta_stride
+ )
return ir.ExternKernel.require_stride_order(arg, stride_order)
|
2.41.0
|
3313a506aaa1a453ca3a236d00deb603f079987
|
Wed, 24 Apr 2024 12:26:09 -0700
|
[PATCH 0611/1000] Don't precompile if search_autotune_cache is set but max-autotune is not (#124870)
|
Differential Revision: [D56534950](https://our.internmc.facebook.com/intern/diff/D56534950) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124870 Approved by: https://github.com/xw285cornell
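
For context, a hedged sketch of the configuration this change targets, using the inductor config flags exercised by the new test below:

```python
import torch
import torch._inductor.config as inductor_config

# With only the autotune-cache search enabled and neither max-autotune flag set,
# select_algorithm now skips precompiling autotuning candidates.
inductor_config.search_autotune_cache = True
assert not (inductor_config.max_autotune or inductor_config.max_autotune_gemm)

@torch.compile
def mm_chain(a, b, c):
    return (a @ b) @ c

out = mm_chain(*[torch.rand(256, 256, device="cuda") for _ in range(3)])
```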
|
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py
index f74fa4ed1d..c8622de6fa 100644
--- a/test/inductor/test_max_autotune.py
+++ b/test/inductor/test_max_autotune.py
@@ -447,6 +447,22 @@ class TestMaxAutotune(TestCase):
fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn)
self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 0)
+ @skipIfRocm
+ @fresh_inductor_cache()
+ @config.patch(search_autotune_cache=True)
+ def test_search_autotune_cache(self):
+ def fn(a, b, c):
+ a = (a @ b) @ c
+ a, b, c = (t.to(torch.float16) for t in [a, b, c])
+ return (a @ b) @ c
+
+ fn_c = torch.compile()(fn)
+ inputs = [torch.rand([256, 256], device="cuda") for _ in range(3)]
+ from torch._dynamo.utils import counters
+
+ self.assertEqual(fn(*inputs), fn_c(*inputs), atol=1e-2, rtol=1e-2)
+ self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 0)
+
@config.patch(autotune_local_cache=False, autotune_remote_cache=False)
def test_precompilations(self):
def fn(a, b, c):
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index 3a09238da3..056bcce9a8 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -990,6 +990,11 @@ class AlgorithmSelectorCache(PersistentCache):
if timings:
return no_op
+ if config.search_autotune_cache and not (
+ config.max_autotune or config.max_autotune_gemm
+ ):
+ return no_op
+
precompile_key = (
f"{name}: {inputs_key} : {torch.get_float32_matmul_precision()}"
)
|
2.41.0
|
5e567c5736d89f4dc9162aefab00dc741e02193
|
Thu, 25 Apr 2024 01:15:52 +0000
|
[PATCH 0612/1000] [Torch][Timer] Adding debug info logging interface for expired timers (#123883)
|
Summary: Adds a function to log additional debug information before killing the expired watchdog timers. Additional information such as stack traces can be added in the debug function, using the worker process IDs from the expired timers. Test Plan: buck test mode/opt caffe2/test/distributed/elastic/timer:file_based_timer_test Differential Revision: D56044153 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123883 Approved by: https://github.com/kurman
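
A hedged sketch of the new hook, using the signature added below (the run id, PID, and scope names are illustrative):

```python
from torch.distributed.elastic.timer.debug_info_logging import (
    log_debug_info_for_expired_timers,
)

# FileTimerServer calls this right before reaping workers; by default it logs the
# run id together with a mapping of worker pid -> expired timer scopes.
expired = {1234: ["trainer/forward", "trainer/backward"]}
log_debug_info_for_expired_timers("job-42", expired)
```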
|
diff --git a/docs/source/elastic/timer.rst b/docs/source/elastic/timer.rst
index f64597c4ce..3f124a00b3 100644
--- a/docs/source/elastic/timer.rst
+++ b/docs/source/elastic/timer.rst
@@ -50,3 +50,11 @@ the server and client.
.. autoclass:: TimerClient
:members:
+
+
+Debug info logging
+-------------------
+
+.. automodule:: torch.distributed.elastic.timer.debug_info_logging
+
+.. autofunction:: torch.distributed.elastic.timer.debug_info_logging.log_debug_info_for_expired_timers
diff --git a/test/distributed/elastic/timer/file_based_local_timer_test.py b/test/distributed/elastic/timer/file_based_local_timer_test.py
index 6c7a92c35a..4616ae061b 100644
--- a/test/distributed/elastic/timer/file_based_local_timer_test.py
+++ b/test/distributed/elastic/timer/file_based_local_timer_test.py
@@ -38,7 +38,9 @@ if not (IS_WINDOWS or IS_MACOS):
super().setUp()
self.max_interval = 0.01
self.file_path = "/tmp/test_file_path_" + str(uuid.uuid4())
- self.server = timer.FileTimerServer(self.file_path, self.max_interval)
+ self.server = timer.FileTimerServer(
+ self.file_path, "test", self.max_interval
+ )
self.server.start()
def tearDown(self):
@@ -204,7 +206,9 @@ if not (IS_WINDOWS or IS_MACOS):
super().setUp()
self.file_path = "/tmp/test_file_path_" + str(uuid.uuid4())
self.max_interval = 0.01
- self.server = timer.FileTimerServer(self.file_path, self.max_interval)
+ self.server = timer.FileTimerServer(
+ self.file_path, "test", self.max_interval
+ )
def tearDown(self):
super().tearDown()
@@ -260,7 +264,8 @@ if not (IS_WINDOWS or IS_MACOS):
)
@mock.patch("os.kill")
- def test_expired_timers(self, mock_os_kill):
+ @mock.patch("torch.distributed.elastic.timer.log_debug_info_for_expired_timers")
+ def test_expired_timers(self, mock_debug_info, mock_os_kill):
"""
tests that a single expired timer on a process should terminate
the process and clean up all pending timers that was owned by the process
@@ -275,6 +280,7 @@ if not (IS_WINDOWS or IS_MACOS):
self.server.run_once() # Allows the server to process all requests
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_called_once_with(test_pid, signal.SIGKILL)
+ mock_debug_info.assert_called()
@mock.patch("os.kill")
def test_send_request_release(self, mock_os_kill):
diff --git a/torch/distributed/elastic/agent/server/local_elastic_agent.py b/torch/distributed/elastic/agent/server/local_elastic_agent.py
index 60469c09dd..e308d53568 100644
--- a/torch/distributed/elastic/agent/server/local_elastic_agent.py
+++ b/torch/distributed/elastic/agent/server/local_elastic_agent.py
@@ -165,8 +165,14 @@ class LocalElasticAgent(SimpleElasticAgent):
if watchdog_file_path is None:
watchdog_file_path = "/tmp/watchdog_timer_" + str(uuid.uuid4())
logger.info("Starting a FileTimerServer with %s ...", watchdog_file_path)
+ if not envs:
+ logger.warning("Empty envs variables, using empty run_id for FileTimerServer")
+ run_id = ''
+ else:
+ run_id = envs[0]["TORCHELASTIC_RUN_ID"]
self._worker_watchdog = timer.FileTimerServer(
file_path=watchdog_file_path,
+ run_id=run_id,
max_interval=0.1,
daemon=True,
log_event=self._log_watchdog_event)
diff --git a/torch/distributed/elastic/timer/debug_info_logging.py b/torch/distributed/elastic/timer/debug_info_logging.py
new file mode 100644
index 0000000000..8c8645d108
--- /dev/null
+++ b/torch/distributed/elastic/timer/debug_info_logging.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Dict, List
+
+from torch.distributed.elastic.utils.logging import get_logger
+
+logger = get_logger(__name__)
+
+
+def log_debug_info_for_expired_timers(
+ run_id: str,
+ expired_timers: Dict[int, List[str]],
+):
+ logger.info("Timers expired for run:[%s] [%s].", run_id, expired_timers)
diff --git a/torch/distributed/elastic/timer/file_based_local_timer.py b/torch/distributed/elastic/timer/file_based_local_timer.py
index 2842c72f37..f2ded8ba84 100644
--- a/torch/distributed/elastic/timer/file_based_local_timer.py
+++ b/torch/distributed/elastic/timer/file_based_local_timer.py
@@ -15,6 +15,7 @@ import time
from typing import Callable, Dict, List, Optional, Set, Tuple
from torch.distributed.elastic.timer.api import TimerClient, TimerRequest
+from torch.distributed.elastic.timer.debug_info_logging import log_debug_info_for_expired_timers
from torch.distributed.elastic.utils.logging import get_logger
__all__ = ["FileTimerClient", "FileTimerRequest", "FileTimerServer"]
@@ -156,11 +157,13 @@ class FileTimerServer:
def __init__(
self,
file_path: str,
+ run_id: str,
max_interval: float = 10,
daemon: bool = True,
log_event: Optional[Callable[[str, Optional[FileTimerRequest]], None]] = None
) -> None:
self._file_path = file_path
+ self._run_id = run_id
self._max_interval = max_interval
self._daemon = daemon
self._timers: Dict[Tuple[int, str], FileTimerRequest] = {}
@@ -247,7 +250,14 @@ class FileTimerServer:
self.register_timers(timer_requests)
now = time.time()
reaped_worker_pids = set()
- for worker_pid, expired_timers in self.get_expired_timers(now).items():
+
+ all_expired_timers = self.get_expired_timers(now)
+ log_debug_info_for_expired_timers(
+ self._run_id,
+ {pid: self._get_scopes(expired_timers) for pid, expired_timers in all_expired_timers.items()},
+ )
+
+ for worker_pid, expired_timers in all_expired_timers.items():
logger.info("Reaping worker_pid=[%s]. Expired timers: %s", worker_pid, self._get_scopes(expired_timers))
reaped_worker_pids.add(worker_pid)
# In case we have multiple expired timers, we find the first timer
|
2.41.0
|
b1c13e3a3018347cee466f18dbf49cf17d58666
|
Tue, 23 Apr 2024 17:26:45 -0700
|
[PATCH 0613/1000] [custom_op] fix schema inference for kwarg-only args (#124637)
|
Test Plan: - new tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/124637 Approved by: https://github.com/williamwen42, https://github.com/albanD
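
A hedged sketch of the behavior the new tests cover (`infer_schema` is an internal helper; the import path matches the file modified below and may change across releases):

```python
from torch import Tensor
from torch._library.infer_schema import infer_schema

def kwonly(x: Tensor, *, y: int, z: float) -> Tensor:
    return x

# Schema inference now inserts "*" before the first keyword-only argument:
print(infer_schema(kwonly))  # (Tensor x, *, SymInt y, float z) -> Tensor
```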
|
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 9c367624d5..58c2ba7855 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -598,6 +598,18 @@ class TestCustomOp(CustomOpTestCaseBase):
self.assertExpectedInline(infer_schema(a), """(Tensor x) -> Tensor""")
+ def kwonly1(x: Tensor, *, y: int, z: float) -> Tensor:
+ return torch.empty([])
+
+ self.assertExpectedInline(
+ infer_schema(kwonly1), """(Tensor x, *, SymInt y, float z) -> Tensor"""
+ )
+
+ def kwonly2(*, y: Tensor) -> Tensor:
+ return torch.empty([])
+
+ self.assertExpectedInline(infer_schema(kwonly2), """(*, Tensor y) -> Tensor""")
+
def b(
x: Tensor,
y: int,
diff --git a/torch/_library/infer_schema.py b/torch/_library/infer_schema.py
index e85803db37..fd03f91824 100644
--- a/torch/_library/infer_schema.py
+++ b/torch/_library/infer_schema.py
@@ -23,10 +23,17 @@ def infer_schema(prototype_function: typing.Callable, mutates_args=()) -> str:
params = []
seen_args = set()
+ saw_kwarg_only_arg = False
for idx, (name, param) in enumerate(sig.parameters.items()):
if not supported_param(param):
error_fn("We do not support positional-only args, varargs, or varkwargs.")
+ if param.kind == inspect.Parameter.KEYWORD_ONLY:
+ # The first time we see a kwarg-only arg, add "*" to the schema.
+ if not saw_kwarg_only_arg:
+ params.append("*")
+ saw_kwarg_only_arg = True
+
if param.annotation is inspect.Parameter.empty:
error_fn(f"Parameter {name} must have a type annotation.")
|
2.41.0
|
50dd65a87fb015bb2d35e490b4c4d2c5301d662
|
Thu, 25 Apr 2024 01:56:32 +0000
|
[PATCH 0616/1000] [onnx.export] Track new nodes added during _run_symbolic_function (#123027)
|
This PR is part of an effort to speed up torch.onnx.export (#121422).

- This copies the shape and type from the node to the nodes that are produced by the export. However, for 1-to-N exports, which are very common, this doesn't make much sense and can leave the graph with broken shape or type information. As far as I can tell, a shape inference pass is used to propagate the correct shape and type for all intermediate (and final) nodes.
- If there is a situation where this is necessary (shape inference turned off and only 1-to-1 ops are exported ??), perhaps this can be conditionally skipped. It does incur a quadratic cost. Another option is to set a global default for the metadata and use that for all nodes that get created. Again, this metadata may not make sense for all ops and seems dangerous to do.
- Resolves (8) in #121422. (partial fix of #121422)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123027 Approved by: https://github.com/BowenBao
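
A toy illustration of the design change in plain Python (not the actual exporter code): instead of diffing a before/after snapshot of all graph nodes for every exported node, the symbolic function records the nodes it creates, so metadata propagation only touches those.

```python
def run_symbolic(graph, new_nodes):
    created = {"op": "onnx::Add", "meta": None}
    graph.append(created)
    new_nodes.append(created)        # the producer records exactly what it created

def export_one(graph, source_meta):
    new_nodes = []
    run_symbolic(graph, new_nodes)
    for node in new_nodes:           # no O(len(graph)) scan per exported node
        node["meta"] = source_meta

graph = [{"op": "aten::add", "meta": "model.py:10"}]
export_one(graph, source_meta="model.py:10")
print(graph[-1]["meta"])             # 'model.py:10'
```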
|
diff --git a/torch/csrc/jit/passes/onnx.cpp b/torch/csrc/jit/passes/onnx.cpp
index 630a20a4d2..c5698fe34d 100644
--- a/torch/csrc/jit/passes/onnx.cpp
+++ b/torch/csrc/jit/passes/onnx.cpp
@@ -430,30 +430,27 @@ void NodeToONNX(
}
Graph* g = new_block->owningGraph();
- std::unordered_set<Node*> nodes_before;
- for (auto node : g->nodes()) {
- nodes_before.emplace(node);
- }
WithInsertPoint insert_point_guard(new_block);
WithCurrentScope scope_guard(*g, n->scope());
// IMPORTANT: NEVER pass raw pointer of smart pointer managed objects to
// Python. Check #87343 for details.
+ py::list new_nodes = py::list();
py::object raw_output = onnx.attr("_run_symbolic_function")(
g->shared_from_this(),
new_block,
n,
py_inputs,
env,
+ new_nodes,
operator_export_type);
// Find new nodes that have been created by _run_symbolic_function and
// propagate metadata
- for (auto node : g->nodes()) {
- if (nodes_before.find(node) == nodes_before.end()) {
- node->copyMetadata(n);
- }
+ for (py::handle py_node : new_nodes) {
+ Node* node = py_node.cast<Node*>();
+ node->copyMetadata(n);
}
// TODO: Assert it's an ATen identifier???
@@ -569,12 +566,14 @@ void NodeToONNX(
// Call symbolic function
// IMPORTANT: NEVER pass raw pointer of smart pointer managed objects to
// Python. Check #87343 for details.
+ py::list new_nodes = py::list();
py::object raw_output = onnx.attr("_run_symbolic_function")(
new_block->owningGraph()->shared_from_this(),
new_block,
n,
py_symbolic_args,
env,
+ new_nodes,
operator_export_type);
processSymbolicOutput(op->kind().toUnqualString(), n, raw_output);
diff --git a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
index 7db74a8460..186623bf4e 100644
--- a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
+++ b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
@@ -2054,11 +2054,16 @@ void ONNXShapeTypeInference(
clone_node->output(i)->debugName();
}
// Make inferred_shape_data use name from temporal ONNX graph
- // instead of original PyTorch graph
- for (const auto& gs_data : original_shape_data) {
- const auto onnx_output_name = torch_to_onnx_input.find(gs_data.first);
- if (onnx_output_name != torch_to_onnx_input.end()) {
- inferred_shape_data[onnx_output_name->second] = gs_data.second;
+ // instead of original PyTorch graph. Only copy what we need,
+ // which are the inputs of n.
+ for (auto input : n->inputs()) {
+ const auto maybe_shape = original_shape_data.find(input->debugName());
+ if (maybe_shape != original_shape_data.end()) {
+ const auto onnx_output_name =
+ torch_to_onnx_input.find(input->debugName());
+ if (onnx_output_name != torch_to_onnx_input.end()) {
+ inferred_shape_data[onnx_output_name->second] = maybe_shape->second;
+ }
}
}
// Use scalar_type_analysis without low precision cast
diff --git a/torch/onnx/_internal/jit_utils.py b/torch/onnx/_internal/jit_utils.py
index d192f35bd7..068b87cca9 100644
--- a/torch/onnx/_internal/jit_utils.py
+++ b/torch/onnx/_internal/jit_utils.py
@@ -7,7 +7,7 @@ from __future__ import annotations
import dataclasses
import re
import typing
-from typing import Any, Dict, Iterable, Optional, Sequence, Tuple, Union
+from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from torch import _C
@@ -34,6 +34,8 @@ class GraphContext:
original_node: Current node that is being converted from.
params_dict: Mapping from graph initializer name to IValue.
env: Mapping from Torch domain graph Value to ONNX domain graph Value.
+ new_nodes: List that tracks all new nodes that are added (used to make
+ sure metadata is propagated to all new nodes).
"""
graph: _C.Graph
@@ -42,6 +44,7 @@ class GraphContext:
original_node: _C.Node
params_dict: Dict[str, "_C.IValue"]
env: Dict[_C.Value, _C.Value]
+ new_nodes: List[_C.Node] = dataclasses.field(default_factory=list)
# Relay methods from _C.Graph for compatibility with symbolic functions that expect
# a _C.Graph
@@ -253,6 +256,7 @@ def _add_op(
n_outputs=outputs,
shape_inference=GLOBALS.onnx_shape_inference,
)
+ graph_context.new_nodes.append(node)
if outputs == 1:
return node.output()
diff --git a/torch/onnx/utils.py b/torch/onnx/utils.py
index 143534ff63..3bea348770 100644
--- a/torch/onnx/utils.py
+++ b/torch/onnx/utils.py
@@ -1771,6 +1771,7 @@ def _run_symbolic_method(g, op_name, symbolic_fn, args):
original_node=None, # type: ignore[arg-type]
params_dict=_params_dict,
env={},
+ new_nodes=[],
)
return symbolic_fn(graph_context, *args)
except TypeError as e:
@@ -1885,6 +1886,7 @@ def _run_symbolic_function(
node: _C.Node,
inputs: Any,
env: Dict[_C.Value, _C.Value],
+ new_nodes: List[_C.Node],
operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
) -> Optional[Union[_C.Value, Sequence[Optional[_C.Value]]]]:
"""Runs a symbolic function.
@@ -1915,6 +1917,7 @@ def _run_symbolic_function(
original_node=node,
params_dict=_params_dict,
env=env,
+ new_nodes=new_nodes,
)
# Direct ATen export requested
|
2.41.0
|
41888765bba129914448a9609ad5e182778cbdc
|
Wed, 24 Apr 2024 15:08:09 -0700
|
[PATCH 0617/1000] Verify types in custom op schemas (#124520)
|
Before this PR, we didn't check that types in a schema were valid. This is because TorchScript treats unknown types as type variables. This PR checks types in a schema for the TORCH_LIBRARY APIs. To do this, we add an `allow_typevars` flag to parseSchema so that TorchScript can use allow_typevars=True. We also add some error messages for common mistakes (e.g. using int64_t or double in schema). Test Plan: - new tests Differential Revision: [D56432690](https://our.internmc.facebook.com/intern/diff/D56432690) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124520 Approved by: https://github.com/albanD
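
A minimal sketch mirroring the added test (the library and op names are placeholders):

```python
import torch

with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
    try:
        lib.define("my_op(int64_t a) -> Tensor")
    except RuntimeError as e:
        print(e)  # the error now suggests `SymInt` or `int` instead of `int64_t`
```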
|
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 1db71cec6b..fccfd5100c 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -1752,6 +1752,17 @@ dynamic shape operator: _torch_testing.numpy_nonzero.default
res = torch._library.utils.is_functional_schema(schema)
self.assertEqual(res, expected)
+ def test_incorrect_schema_types(self):
+ with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
+ with self.assertRaisesRegex(RuntimeError, "unknown type specifier"):
+ lib.define("foo12(Tensor a) -> asdfasdf")
+ with self.assertRaisesRegex(RuntimeError, "unknown type specifier"):
+ lib.define("foo12(asdf a) -> Tensor")
+ with self.assertRaisesRegex(RuntimeError, "Use `SymInt` or `int`"):
+ lib.define("foo12(int64_t a) -> Tensor")
+ with self.assertRaisesRegex(RuntimeError, "Use `float`"):
+ lib.define("foo12(double a) -> Tensor")
+
def test_is_tensorlist_like_type(self):
tensorlists = [
# Tensor[]
diff --git a/torch/csrc/jit/frontend/function_schema_parser.cpp b/torch/csrc/jit/frontend/function_schema_parser.cpp
index 4b681055bd..1ffb8decba 100644
--- a/torch/csrc/jit/frontend/function_schema_parser.cpp
+++ b/torch/csrc/jit/frontend/function_schema_parser.cpp
@@ -23,14 +23,14 @@ namespace torch::jit {
namespace {
struct SchemaParser {
- explicit SchemaParser(const std::string& str)
+ explicit SchemaParser(const std::string& str, bool allow_typevars)
: L(std::make_shared<Source>(
c10::string_view(str),
c10::nullopt,
0,
nullptr,
Source::DONT_COPY)),
- type_parser(L, /*parse_complete_tensor_types*/ false) {}
+ type_parser(L, /*parse_complete_tensor_types*/ false, allow_typevars) {}
std::variant<OperatorName, FunctionSchema> parseDeclaration() {
OperatorName name = parseName();
@@ -361,16 +361,24 @@ struct SchemaParser {
}
Lexer L;
SchemaTypeParser type_parser;
+ bool allow_typevars_;
};
} // namespace
std::variant<OperatorName, FunctionSchema> parseSchemaOrName(
- const std::string& schemaOrName) {
- return SchemaParser(schemaOrName).parseExactlyOneDeclaration();
+ const std::string& schemaOrName,
+ bool allow_typevars) {
+ // We're ignoring aten and prim for BC reasons
+ if (schemaOrName.rfind("aten::", 0) == 0 ||
+ schemaOrName.rfind("prim::", 0) == 0) {
+ allow_typevars = true;
+ }
+ return SchemaParser(schemaOrName, allow_typevars)
+ .parseExactlyOneDeclaration();
}
-FunctionSchema parseSchema(const std::string& schema) {
- auto parsed = parseSchemaOrName(schema);
+FunctionSchema parseSchema(const std::string& schema, bool allow_typevars) {
+ auto parsed = parseSchemaOrName(schema, allow_typevars);
TORCH_CHECK(
std::holds_alternative<FunctionSchema>(parsed),
"Tried to parse a function schema but only the operator name was given");
diff --git a/torch/csrc/jit/frontend/function_schema_parser.h b/torch/csrc/jit/frontend/function_schema_parser.h
index a01ca7ad0b..75782e1bc7 100644
--- a/torch/csrc/jit/frontend/function_schema_parser.h
+++ b/torch/csrc/jit/frontend/function_schema_parser.h
@@ -8,9 +8,17 @@
namespace torch {
namespace jit {
+// allow_typevars: If true, we assume that lowercase types that we don't
+// understand are type variables. This is only needed for TorchScript (and not
+// not needed for custom ops).
+// If false, we disallow typevars, except in certain cases for BC reason (i.e.
+// your op is in the aten or prim namespace).
TORCH_API std::variant<c10::OperatorName, c10::FunctionSchema> parseSchemaOrName(
- const std::string& schemaOrName);
-TORCH_API c10::FunctionSchema parseSchema(const std::string& schema);
+ const std::string& schemaOrName,
+ bool allow_typevars = true);
+TORCH_API c10::FunctionSchema parseSchema(
+ const std::string& schema,
+ bool allow_typevars = true);
TORCH_API c10::OperatorName parseName(const std::string& name);
} // namespace jit
diff --git a/torch/csrc/jit/frontend/schema_type_parser.cpp b/torch/csrc/jit/frontend/schema_type_parser.cpp
index 7c4b8ba0ca..68e6f7a02b 100644
--- a/torch/csrc/jit/frontend/schema_type_parser.cpp
+++ b/torch/csrc/jit/frontend/schema_type_parser.cpp
@@ -82,12 +82,27 @@ TypePtr SchemaTypeParser::parseBaseType() {
auto it = type_map.find(text);
if (it == type_map.end()) {
- if (!text.empty() && islower(text[0])) {
+ if (allow_typevars_ && !text.empty() && islower(text[0])) {
// lower case identifiers that are not otherwise valid types
// are treated as type variables
return c10::TypeFactory::createNamed<VarType>(text);
}
- throw ErrorReport(tok.range) << "unknown type specifier";
+ if (text == "double") {
+ throw ErrorReport(tok.range)
+ << "Use `float` instead of `double` in an operator's schema string. "
+ "`float` in schema corresponds to the double type in C++";
+ }
+ if (text == "int64_t") {
+ throw ErrorReport(tok.range)
+ << "Use `SymInt` or `int` instead of `int64_t` in an operator's schema string. "
+ "`SymInt` corresponds to c10::SymInt in C++ while `int` in schema corresponds "
+ "to the int64_t type in C++.";
+ }
+ throw ErrorReport(tok.range)
+ << "unknown type specifier. Common valid schema types include "
+ "Tensor, SymInt, int, float, bool, Scalar; "
+ "for a full list, please see "
+ "https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/README.md#func ";
}
return it->second;
}
diff --git a/torch/csrc/jit/frontend/schema_type_parser.h b/torch/csrc/jit/frontend/schema_type_parser.h
index c43e4363da..e43a24beb5 100644
--- a/torch/csrc/jit/frontend/schema_type_parser.h
+++ b/torch/csrc/jit/frontend/schema_type_parser.h
@@ -20,8 +20,13 @@ struct TORCH_API SchemaTypeParser {
c10::optional<at::ScalarType> parseTensorDType(const std::string& dtype);
TypePtr parseRefinedTensor();
- SchemaTypeParser(Lexer& L, bool parse_complete_tensor_types)
- : complete_tensor_types(parse_complete_tensor_types), L(L) {}
+ SchemaTypeParser(
+ Lexer& L,
+ bool parse_complete_tensor_types,
+ bool allow_typevars)
+ : complete_tensor_types(parse_complete_tensor_types),
+ L(L),
+ allow_typevars_(allow_typevars) {}
private:
c10::optional<bool> tryToParseRequiresGrad();
@@ -35,6 +40,7 @@ struct TORCH_API SchemaTypeParser {
bool complete_tensor_types;
Lexer& L;
size_t next_id = 0;
+ bool allow_typevars_;
};
} // namespace jit
} // namespace torch
diff --git a/torch/csrc/jit/ir/irparser.cpp b/torch/csrc/jit/ir/irparser.cpp
index c37988e322..30cb5ad9eb 100644
--- a/torch/csrc/jit/ir/irparser.cpp
+++ b/torch/csrc/jit/ir/irparser.cpp
@@ -35,7 +35,10 @@ class IRParser {
: L(std::make_shared<Source>(str)),
g(graph),
vmap(vmap),
- type_parser(L, /*parse_complete_tensor_types*/ true),
+ type_parser(
+ L,
+ /*parse_complete_tensor_types*/ true,
+ /*allow_type_vars*/ true),
parse_tensor_constants_(parse_tensor_constants) {}
std::string parseVar();
diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp
index 5eb4851089..2023ec27ba 100644
--- a/torch/csrc/jit/python/init.cpp
+++ b/torch/csrc/jit/python/init.cpp
@@ -1765,7 +1765,11 @@ void initJITBindings(PyObject* module) {
},
py::arg("input"),
py::arg("parse_tensor_constants") = false);
- m.def("parse_schema", parseSchema);
+ m.def(
+ "parse_schema",
+ &parseSchema,
+ py::arg("schema"),
+ py::arg("allow_typevars") = true);
m.def("unify_type_list", [](const std::vector<TypePtr>& types) {
std::ostringstream s;
auto type = unifyTypeList(types, s);
diff --git a/torch/csrc/jit/runtime/static/passes.cpp b/torch/csrc/jit/runtime/static/passes.cpp
index fcd2b79e39..68fd8a270c 100644
--- a/torch/csrc/jit/runtime/static/passes.cpp
+++ b/torch/csrc/jit/runtime/static/passes.cpp
@@ -1347,7 +1347,8 @@ bool isNoOpSlice(Node* node) {
void EliminateNoOpSlice(std::shared_ptr<Graph>& graph) {
DepthFirstGraphNodeIterator it(graph);
auto schema = torch::schema(
- "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]");
+ "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]",
+ /*allow_typevars*/ true);
Node* node = nullptr;
std::vector<Node*> to_delete;
while ((node = it.next()) != nullptr) {
diff --git a/torch/library.h b/torch/library.h
index c38179a6ee..035cc17597 100644
--- a/torch/library.h
+++ b/torch/library.h
@@ -406,8 +406,8 @@ inline CppFunction dispatch(c10::DeviceType type, Func&& raw_f) {
/// ```
///
/// \ingroup torch-schema-overloads
-inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k) {
- c10::FunctionSchema s = torch::jit::parseSchema(str);
+inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k, bool allow_typevars=false) {
+ c10::FunctionSchema s = torch::jit::parseSchema(str, /*allow_typevars*/allow_typevars);
s.setAliasAnalysis(k);
return s;
}
@@ -415,8 +415,8 @@ inline c10::FunctionSchema schema(const char* str, c10::AliasAnalysisKind k) {
/// Function schemas can be directly constructed from string literals.
///
/// \ingroup torch-schema-overloads
-inline c10::FunctionSchema schema(const char* s) {
- return schema(s, c10::AliasAnalysisKind::FROM_SCHEMA);
+inline c10::FunctionSchema schema(const char* s, bool allow_typevars=false) {
+ return schema(s, c10::AliasAnalysisKind::FROM_SCHEMA, allow_typevars);
}
/// \private
|
2.41.0
|
692b954c6519fafb3e7949cb32c31f33abd394a
|
Wed, 24 Apr 2024 13:50:16 -0700
|
[PATCH 0618/1000] FakeTensorProp works with unbacked bindings (#124310)
|
This is a partial revert of https://github.com/pytorch/pytorch/pull/124059.

Like in #124297, profiling has revealed that testing equality on *every* output is kind of expensive. So we only test equality when we know there is an unbacked binding. This is the same playbook as the previous PR, just on FakeTensorProp instead of PropagateUnbackedSymInts. Note that we also need to populate `unbacked_bindings` in proxy_tensor.py, since we're generating an entirely new graph in that case.

We now have enough propagation that we're able to trigger a bug related to divisibility replacement. In https://github.com/pytorch/pytorch/pull/113165 we allowed replacing `u0` with `u1 * c` for some constant c, when we have determined that u0 is divisible by c. However, where does the binding for u1 come from? What we will have in practice is that there is some node that is supposed to have bound u1, but which actually is getting a `u1 * c` in its output. So, to get u1, we must divide out c. Fortunately, under the divisibility condition, this is always possible (but remember, we must test divisibility at runtime!).

Because we have tightened up asserts, it is now an error to allocate unbacked SymInts and then fail to track them under unbacked_bindings. In torch/_dynamo/eval_frame.py and torch/_functorch/_aot_autograd/collect_metadata_analysis.py there are examples of benign cases where we repropagated fake tensors but then immediately threw away the results. In these cases, it's not appropriate to rebind, since we're still using the old FX graph that has all of the old symbols. So we just manually clear it. It is possible that other cases will need to be updated, so this PR is "risky" from the perspective of hitting fbcode.

Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124310 Approved by: https://github.com/lezcano
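
A small sketch of the divisibility bookkeeping described above, using the `DivideByKey` path helper added in this diff (the concrete numbers are illustrative):

```python
from torch.fx.experimental.symbolic_shapes import DivideByKey

# If u0 was replaced by 4*u1, the binding for u1 is recovered from the runtime
# value bound for u0 by dividing out the constant factor. Divisibility is assumed
# here and, per the TODO in the diff, should eventually be checked at runtime.
key = DivideByKey(4)
print(key.get(20))  # 5
print(str(key))     # .__floordiv__(4)
```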
|
diff --git a/docs/source/fx.experimental.rst b/docs/source/fx.experimental.rst
index 76cb96337b..fb46ba5f93 100644
--- a/docs/source/fx.experimental.rst
+++ b/docs/source/fx.experimental.rst
@@ -28,6 +28,7 @@ torch.fx.experimental.symbolic_shapes
ShapeEnvSettings
ConvertIntKey
CallMethodKey
+ DivideByKey
hint_int
is_concrete_int
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index fccfd5100c..bbd6d563d7 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -1466,12 +1466,13 @@ class TestCustomOp(CustomOpTestCaseBase):
@torch.library.impl_abstract(f"{TestCustomOp.test_ns}::foo", lib=self.lib())
def foo_meta(x):
ctx = torch.library.get_ctx()
- ctx.new_dynamic_size(min=1)
+ r = ctx.new_dynamic_size(min=1)
with self.assertRaisesRegex(ValueError, "greater than or equal to 0"):
ctx.new_dynamic_size(min=-1)
with self.assertRaisesRegex(ValueError, "SymInt"):
ctx.new_dynamic_size(max=x.numel())
- return torch.clone(x)
+ # NB: You must return dynamic sizes!
+ return x.new_empty(r)
x = torch.randn(2, 3, device="cpu")
op = self.get_op(f"{self.test_ns}::foo")
diff --git a/torch/_dynamo/eval_frame.py b/torch/_dynamo/eval_frame.py
index cf0fe7fcf2..4dd7d97c68 100644
--- a/torch/_dynamo/eval_frame.py
+++ b/torch/_dynamo/eval_frame.py
@@ -1199,6 +1199,14 @@ def export(
graph, fake_params_buffers, fake_graph_inputs
)
+ # We reran fake tensor propagation, but we didn't do
+ # anything with the resulting unbacked SymInts. Drop them
+ # from the pending list.
+ # NB: this is wrong if graph_captured_result has
+ # data-dependent output size!
+ if shape_env := ambient_fake_mode.shape_env:
+ shape_env.pending_fresh_unbacked_symbols.clear()
+
return graph_captured_result
return result_capturing_wrapper
diff --git a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
index 29c3ef0831..4dba14f751 100644
--- a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
+++ b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
@@ -15,6 +15,7 @@ from typing import Callable, DefaultDict, Dict, List
import torch
import torch.utils._pytree as pytree
from torch import Tensor
+from torch._guards import detect_fake_mode
from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode
from torch._subclasses.meta_utils import safe_is_leaf
from torch.fx.experimental.symbolic_shapes import is_concrete_int
@@ -699,6 +700,16 @@ from a multi-output view call"
grad_enabled_mutation=grad_enabled_mutation,
tokens=mode._tokens,
)
+ # AOTAutograd collect metadata will do fake tensor propagation, but it
+ # throws out all the resulting fake tensors and doesn't save anything
+ # about sizes (TODO: Actually, the subclass metadata does save size
+ # info, this is likely to be incorrect if unbacked SymInts are
+ # allowed). The net effect is we generate a bunch of fresh unbacked
+ # symbols that we immediately throw out and don't use. NB: we don't
+ # want to rename into these symbols, because we aren't going to have
+ # binding sites for them.
+ if (fake_mode := detect_fake_mode()) and fake_mode.shape_env:
+ fake_mode.shape_env.pending_fresh_unbacked_symbols.clear()
return metadata
return inner
diff --git a/torch/fx/experimental/proxy_tensor.py b/torch/fx/experimental/proxy_tensor.py
index ce91acfa6d..084217a586 100644
--- a/torch/fx/experimental/proxy_tensor.py
+++ b/torch/fx/experimental/proxy_tensor.py
@@ -174,6 +174,7 @@ def extract_val(val):
# ADInplaceOrView, but you shouldn't rely on it.)
def set_meta(proxy, val):
proxy.node.meta['val'] = extract_val(val)
+
# Best effort tensor_meta setting; prefer using val!
if is_fake(val):
proxy.node.meta['tensor_meta'] = _extract_tensor_metadata(val)
@@ -520,6 +521,21 @@ def proxy_call(proxy_mode, func, pre_dispatch, args, kwargs):
else:
constant = None
+ from .symbolic_shapes import compute_unbacked_bindings
+ # Can't use detect_fake_mode here,
+ #
+ # python test/distributed/_tensor/test_dtensor_compile.py -k
+ # test_tp_compile_fullgraph_is_seq_parallel_False
+ #
+ # will fail. Very strange, it probably isn't right for them to be using
+ # two fake modes there...
+ fake_mode = torch._C._get_dispatch_mode(
+ torch._C._TorchDispatchModeKey.FAKE
+ )
+ if fake_mode and fake_mode.shape_env:
+ if symbol_to_path := compute_unbacked_bindings(fake_mode.shape_env, out):
+ proxy_out.node.meta["unbacked_bindings"] = symbol_to_path
+
track_tensor_tree(out, proxy_out, constant=constant, tracer=tracer)
return out
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 95fd7c7de4..99f7a4a7fc 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -449,6 +449,18 @@ class CallMethodKey:
return getattr(o, self.name)()
+@dataclass(frozen=True)
+class DivideByKey:
+ divisor: int
+
+ def __str__(self) -> str:
+ return f".__floordiv__({self.divisor})"
+
+ def get(self, o: int) -> int:
+ """Divide object by divisor"""
+ return o // self.divisor
+
+
def compute_unbacked_bindings(shape_env, example_value):
"""
After having run fake tensor propagation and producing example_value
@@ -459,9 +471,11 @@ def compute_unbacked_bindings(shape_env, example_value):
function, you must call this on the tuple of tensor output, you
cannot wait!)
"""
- pending = set(shape_env.pending_fresh_unbacked_symbols)
+ fs = shape_env.pending_fresh_unbacked_symbols
+ pending = set(fs)
if pending:
- shape_env.pending_fresh_unbacked_symbols.clear()
+ log.info("compute_unbacked_bindings %s", fs)
+ fs.clear()
def free_unbacked_symbols_with_path(
a, path
@@ -500,6 +514,22 @@ def compute_unbacked_bindings(shape_env, example_value):
):
r[s] = path
pending.remove(s)
+ # When an unbacked SymInt is perfectly divisible by an integer
+ # constant, we replace it with the integer constant to improve
+ # reasoning capabilities. However, in synthetic examples, it is
+ # then possible that the factor never is explicitly allocated.
+ # Fortunately, we can compute it by division.
+ elif (
+ isinstance(a, torch.SymInt)
+ and isinstance(s := a.node._expr, sympy.Mul)
+ and len(s.args) == 2
+ and isinstance(lhs := s.args[0], sympy.Integer)
+ and isinstance(rhs := s.args[1], sympy.Symbol)
+ and rhs in pending
+ ):
+ # TODO: DivideByKey needs to test divisibility at runtime!
+ r[s] = path + (DivideByKey(int(lhs)),)
+ pending.remove(rhs)
# The annoyance here arises from the fact that SymBool is
# allocated by allocating a SymInt and then testing if it's equal
# to one. So you have a complicated binding site logic for this.
@@ -517,7 +547,14 @@ def compute_unbacked_bindings(shape_env, example_value):
return r
symbol_to_path = free_unbacked_symbols_with_path(example_value, ())
- assert not pending, f"pending {pending} not in {example_value}"
+ assert not pending, (
+ f"pending {pending} not in {example_value} " +
+ (
+ repr((example_value.stride(), example_value.storage_offset()))
+ if isinstance(example_value, torch.Tensor)
+ else ""
+ )
+ )
return symbol_to_path
def definitely_true(a):
diff --git a/torch/fx/passes/fake_tensor_prop.py b/torch/fx/passes/fake_tensor_prop.py
index 2961b5e0eb..340a958ea0 100644
--- a/torch/fx/passes/fake_tensor_prop.py
+++ b/torch/fx/passes/fake_tensor_prop.py
@@ -2,10 +2,10 @@ from typing import Optional
import torch.fx
from torch.fx import Node
+from torch.fx.node import map_aggregate
from torch.fx._compatibility import compatibility
from torch._subclasses.fake_tensor import FakeTensorMode, FakeTensor
from torch.fx.experimental.proxy_tensor import snapshot_fake, py_sym_types
-from torch.utils._pytree import tree_map
__all__ = ['FakeTensorProp']
@@ -23,43 +23,35 @@ class FakeTensorProp(torch.fx.Interpreter):
module (GraphModule): The module to be executed
mode (Optional[FakeTensorMode]): The dispatch mode used to execute computation indicated by each FX Node.
"""
- def __init__(self, module: torch.fx.GraphModule, mode: Optional[FakeTensorMode] = None, *, check_consistency: bool = True):
+ def __init__(self, module: torch.fx.GraphModule, mode: Optional[FakeTensorMode] = None):
super().__init__(module)
if mode is None:
mode = FakeTensorMode()
self._mode = mode
- self.check_consistency = check_consistency
def run_node(self, n: Node):
+ from torch.fx.experimental.symbolic_shapes import rebind_unbacked, compute_unbacked_bindings
result = super().run_node(n)
+ rebind_unbacked(self._mode.shape_env, n, result)
- nil = object()
-
- def check_consistent_and_snapshot(new, old=nil):
- from torch.fx.experimental.symbolic_shapes import check_consistent
-
- if old is not nil and old is not None and self.check_consistency:
- check_consistent(new, old)
-
- if isinstance(new, FakeTensor):
- return snapshot_fake(new)
- elif isinstance(new, torch.Tensor):
+ def extract_val(obj):
+ if isinstance(obj, FakeTensor):
+ return snapshot_fake(obj)
+ elif isinstance(obj, torch.Tensor):
# TODO: How is it possible that we get a non fake tensor? We
# should be running under the mode...
- return snapshot_fake(self._mode.from_tensor(new, static_shapes=True))
- elif isinstance(new, py_sym_types):
- return new
+ return snapshot_fake(self._mode.from_tensor(obj, static_shapes=True))
+ elif isinstance(obj, py_sym_types):
+ return obj
else:
return None
- meta_arg = []
- if 'val' in n.meta and n.meta['val'] is not None:
- meta_arg = [n.meta['val']]
-
- meta = tree_map(check_consistent_and_snapshot, result, *meta_arg)
+ meta = map_aggregate(result, extract_val)
if meta is not None:
n.meta['val'] = meta
+ if (shape_env := self._mode.shape_env) and (symbol_to_path := compute_unbacked_bindings(shape_env, result)):
+ n.meta["unbacked_bindings"] = symbol_to_path
return result
diff --git a/torch/onnx/_internal/onnxruntime.py b/torch/onnx/_internal/onnxruntime.py
index 723235c33d..4be4d3fbff 100644
--- a/torch/onnx/_internal/onnxruntime.py
+++ b/torch/onnx/_internal/onnxruntime.py
@@ -887,9 +887,9 @@ class OrtBackend:
)
else:
try:
- prim_outputs = FakeTensorProp(
- graph_module, check_consistency=False
- ).propagate(*args, **kwargs)
+ prim_outputs = FakeTensorProp(graph_module).propagate(
+ *args, **kwargs
+ )
except Exception:
logger.warning("FakeTensorProb failed for %s", graph_module)
# When FakeTensorProp fails, it is not possible to preallocate output buffers
|
2.41.0
|
e58227d27b7b053d2f11c5b2c501f3b87c81d85
|
Wed, 24 Apr 2024 13:50:17 -0700
|
[PATCH 0619/1000] Rebind and refresh unbacked bindings in FakeTensorUpdater (#124314)
|
Like the previous two PRs, this does the rebinding and binding computation, just in FakeTensorUpdater. FakeTensorUpdater modifies the FX graph in place, so its usage pattern is slightly different, but still pretty short. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124314 Approved by: https://github.com/IvanKobzarev, https://github.com/lezcano ghstack dependencies: #124310
|
diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py
index 12ff856f28..9c083668d3 100644
--- a/test/inductor/test_torchinductor_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_dynamic_shapes.py
@@ -244,6 +244,18 @@ class TestInductorDynamic(TestCase):
f(torch.tensor([True], device=device))
+ @torch._dynamo.config.patch(
+ capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
+ )
+ def test_noops_tensor_repropagate(self, device):
+ @torch.compile(fullgraph=True)
+ def f(x):
+ b = torch.ops.prims.convert_element_type.default(x, torch.int64)
+ r = b.nonzero()
+ return r * 2
+
+ f(torch.tensor([0, 4, 2, 0, 1], dtype=torch.int64, device=device))
+
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_item_zeros_nobreak(self, device):
@torch.compile(fullgraph=True)
diff --git a/torch/_inductor/fx_utils.py b/torch/_inductor/fx_utils.py
index 877a0b0a09..5ccff50c1d 100644
--- a/torch/_inductor/fx_utils.py
+++ b/torch/_inductor/fx_utils.py
@@ -6,7 +6,12 @@ import sympy
import torch
import torch.fx
-from torch.fx.experimental.symbolic_shapes import statically_known_true, sym_eq
+from torch.fx.experimental.symbolic_shapes import (
+ compute_unbacked_bindings,
+ rebind_unbacked,
+ statically_known_true,
+ sym_eq,
+)
from torch.utils import _pytree as pytree
from torch.utils._pytree import tree_map
from .virtualized import V
@@ -161,7 +166,15 @@ class FakeTensorUpdater:
new_fake_tensor, node.meta["val"]
):
continue
+
+ rebind_unbacked(V.fake_mode.shape_env, node, new_fake_tensor)
+
node.meta["val"] = new_fake_tensor
+ if (shape_env := V.fake_mode.shape_env) and (
+ symbol_to_path := compute_unbacked_bindings(shape_env, new_fake_tensor)
+ ):
+ # Refresh the bindings to the new symbols
+ node.meta["unbacked_bindings"] = symbol_to_path
existing_storages[get_node_storage(node)] += 1
|
2.41.0
|
6b0156e0bba432f42cc9dc45e2580d3f232ee75
|
Wed, 24 Apr 2024 13:50:17 -0700
|
[PATCH 0620/1000] Ban replacements with unbacked SymInt on both sides (#124316)
|
Fixes https://github.com/pytorch/pytorch/issues/123854

Important comment:

```
# Never replace unbacked symbols with other unbacked symbols.
# This is error prone because you can cause references to
# unbacked symbols to time travel backwards. E.g.,
#
# u1 = x.item()
# ... use of u1 ...
# u2 = y.item()
# u3 = z.item()
# torch._check(u1 == u2 + u3)
#
# If you replace u1 with u2 + u3, then the use of u1 now
# references u2 and u3 prior to them actually being bound at
# runtime. It's pretty inconvenient to setup control
# dependencies for substitutions, so ban it entirely.
```

This is kind of risky for the internal MRS workstream, because we added these substitutions upon their request in the first place. Fortunately, we still allow substitutions to backed SymInts and constants, and I believe that is what is actually load bearing. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124316 Approved by: https://github.com/ColinPeppler, https://github.com/lezcano ghstack dependencies: #124310, #124314
|
diff --git a/test/test_dynamic_shapes.py b/test/test_dynamic_shapes.py
index 752638104d..6e6525e7e9 100644
--- a/test/test_dynamic_shapes.py
+++ b/test/test_dynamic_shapes.py
@@ -562,14 +562,14 @@ def forward(self, x_1):
_constrain_range_for_size(i0)
_constrain_range_for_size(i1)
self.assertTrue(expect_true(i0 == i1 * 4))
- self.assertExpectedInline(str(i0), """4*u1""")
+ self.assertExpectedInline(str(i0), """u0""")
i2 = shape_env.create_unbacked_symint()
i3 = shape_env.create_unbacked_symint()
_constrain_range_for_size(i2)
_constrain_range_for_size(i3)
self.assertTrue(expect_true(i2 * 4 == i3))
- self.assertExpectedInline(str(i3), """4*u2""")
+ self.assertExpectedInline(str(i3), """u3""")
def test_avoid_unbacked_substitution(self):
shape_env = ShapeEnv()
diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py
index b6a4626925..fea87eebaa 100644
--- a/test/test_proxy_tensor.py
+++ b/test/test_proxy_tensor.py
@@ -1449,6 +1449,12 @@ def forward(self, x_1, y_1):
add = torch.ops.aten.add.Tensor(y_1, 2); y_1 = None
return add""") # noqa: B950
+ # This is due to https://github.com/pytorch/pytorch/pull/124316 which bans
+ # i0 = i1 refinement. To work around it, you should assert i1 = s0 by
+ # hand. This particular example the refinement is OK because i0 is always
+ # available when i1 and vice versa, but it is difficult to tell if it
+ # is safe in general.
+ @unittest.expectedFailure
def test_unbacked_unify_guard_transitivity(self):
def f(x1, x2, y):
z1 = torch.zeros(x1.item())
@@ -1460,15 +1466,7 @@ def forward(self, x_1, y_1):
else:
return y + 2
- r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor(10), torch.tensor(10), torch.randn(10)).code).strip()
- self.assertExpectedInline(r, """\
-def forward(self, x1_1, x2_1, y_1):
- _local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x1_1); x1_1 = None
- zeros = torch.ops.aten.zeros.default([_local_scalar_dense], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
- _local_scalar_dense_1 = torch.ops.aten._local_scalar_dense.default(x2_1); x2_1 = None
- zeros_1 = torch.ops.aten.zeros.default([_local_scalar_dense_1], device = device(type='cpu'), pin_memory = False); _local_scalar_dense_1 = None
- add = torch.ops.aten.add.Tensor(y_1, 2); y_1 = None
- return add""") # noqa: B950
+ make_fx(f, tracing_mode="symbolic")(torch.tensor(10), torch.tensor(10), torch.randn(10))
def test_split_unbacked_sizes(self):
def f(lengths, values):
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 99f7a4a7fc..37991985ed 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -286,8 +286,7 @@ def rebind_unbacked(shape_env, n: torch.fx.Node, result):
raw_u1 = sympy.sympify(u1)
else:
raw_u1 = u1.node.expr
- # TODO: replace with rename unbacked to
- shape_env.defer_runtime_assert(sympy.Eq(raw_u0, raw_u1), "")
+ shape_env._rename_unbacked_to(raw_u0, raw_u1)
def canonicalize_bool_expr(expr: SympyBoolean) -> SympyBoolean:
r""" Canonicalize a boolean expression by transforming it into a lt / le
@@ -2394,6 +2393,11 @@ class ShapeEnv:
finally:
self.is_recording = False
+ # Unlike set_replacement, this records a shapeenv event
+ @record_shapeenv_event()
+ def _rename_unbacked_to(self, orig_s: sympy.Expr, new_s: sympy.Expr):
+ self._set_replacement(orig_s, new_s, "rename_unbacked_to")
+
@record_shapeenv_event()
def freeze(self):
"""Freeze this ShapeEnv to stop accumulating guards
@@ -4234,31 +4238,32 @@ class ShapeEnv:
floor_div_atoms = lhs.atoms(FloorDiv).union(rhs.atoms(FloorDiv))
if len(floor_div_atoms) > 0 and any(a.divisor != 1 for a in floor_div_atoms):
raise NotImplementedError
- # short-circuit when no solving is needed
-
- if isinstance(lhs, sympy.Symbol) and free_unbacked_symbols(lhs):
+ # Never replace unbacked symbols with other unbacked symbols.
+ # This is error prone because you can cause references to
+ # unbacked symbols to time travel backwards. E.g.,
+ #
+ # u1 = x.item()
+ # ... use of u1 ...
+ # u2 = y.item()
+ # u3 = z.item()
+ # torch._check(u1 == u2 + u3)
+ #
+ # If you replace u1 with u2 + u3, then the use of u1 now
+ # references u2 and u3 prior to them actually being bound at
+ # runtime. It's pretty inconvenient to setup control
+ # dependencies for substitutions, so ban it entirely.
+ if isinstance(lhs, sympy.Symbol) and free_unbacked_symbols(lhs) and not free_unbacked_symbols(rhs):
+ # short-circuit when no solving is needed
self._set_replacement(lhs, self._find(rhs), "trivial_lhs")
- elif isinstance(rhs, sympy.Symbol) and free_unbacked_symbols(rhs):
+ elif isinstance(rhs, sympy.Symbol) and free_unbacked_symbols(rhs) and not free_unbacked_symbols(lhs):
self._set_replacement(rhs, self._find(lhs), "trivial_rhs")
else:
r = try_solve(expr, free[0], floordiv_inequality=False)
if r is not None and all(t.is_integer for t in sympy.preorder_traversal(r[1])):
new_var = self._find(r[1])
- ok = False
- if self.is_unbacked_symint(free[0]):
- # If you have i0 + i1 + i2 = s0, don't substitute i2 =
- # s0 - i0 - i1. Arguably this should be OK but the
- # runtime assert machinery is very delicate right now
- # so this causes things to fail e.g.,
- # test_split_unbacked_sizes
- ok = len(free_unbacked_symbols(new_var)) <= 1
- msg = "solve_unbacked"
- else:
- # Never substitute backed with unbacked
- ok = len(free_unbacked_symbols(new_var)) == 0
- msg = "solve_backed"
+ ok = len(free_unbacked_symbols(new_var)) == 0
if ok:
- self._set_replacement(cast(sympy.Symbol, free[0]), new_var, msg)
+ self._set_replacement(cast(sympy.Symbol, free[0]), new_var, "solve")
except NotImplementedError:
pass
if expr.has(Mod):
|
2.41.0
|
3ab24f1923359cdfe795a32cdd3d753f940efbe
|
Wed, 24 Apr 2024 13:50:18 -0700
|
[PATCH 0621/1000] Reimplement unbacked symbol bindings in Inductor (#124394)
|
This PR has a lot of "draw the rest of the fucking owl" energy. Here's how to break it down.

1. **torch/_inductor/graph.py** - We start by tightening unbacked symbol invariants. Specifically, as we lower FX nodes, we check whether or not every unbacked_binding recorded on the FX node meta actually ends up getting bound (according to get_unbacked_symbol_defs) in all the buffers generated by the lowering. Hopefully this invariant is self-evident. This leads to a lot of failures.

2. **torch/_inductor/ir.py** - Problem 1: There is softness in how Inductor computes defs of unbacked symbols in an IR node. Previously, we tried to infer it by looking at the output sizes/strides/etc. and seeing if new unbacked symbols popped up that we hadn't seen in the inputs. I don't know exactly what was buggy about the old code, but sometimes we would fail to notice an unbacked symbol had been bound, or rebind an unbacked symbol multiple times. Fortunately, thanks to the earlier PRs in our stack, we now have a nice list of unbacked symbol bindings from FX, so we now just store it directly on ExternKernel and use it directly to report defs. This has to be done twice: once for FallbackKernel (e.g., nonzero) and once for DynamicScalar (e.g., item) (see also **torch/_inductor/lowering.py**, **torch/_inductor/codegen/wrapper.py** and **torch/_inductor/codegen/cpp_wrapper_cpu.py** for the lowering and codegen changes for item).
   * **process_kernel** - Sidequest! It turns out that Inductor lowering can reallocate unbacked symbols. This happens specifically when we repropagate fake tensors through the operator in `process_kernel`. This repropagation process is necessary because Inductor may have changed the strides of input tensors, and it must now recompute the strides so that it can continue to appropriately plan the rest of the lowering process. This is fine: we just make sure we do the rebind unbacked + compute_unbacked_bindings dance we've been doing previously in the PR stack. But instead of putting unbacked_bindings on a new FX node, they go straight into our unbacked_bindings on the Inductor IR node.
   * **codegen_unbacked_symbol_defs** - Sidequest! FallbackKernel lowering is done in two steps. First, you emit the FallbackKernel buffer. Then, you emit MultiOutput buffers which actually give access to the individual outputs of FallbackKernel, which may have been multi-output. There is a design decision here: does the FallbackKernel bind the unbacked symbols, or the MultiOutput buffer? Historically, we put the binding on the MultiOutput buffer, because it's more convenient: the FallbackKernel buffer is fake; in fact, it doesn't even get a name in C++ codegen. But it's kind of inconsistent with the keypath model that we've been tracking unbacked bindings with: if you have a multi-output node, you'd expect a keypath like `[0].size()[0]` representing the first output's first dimension size. That suggests that it's the FallbackKernel that should define the things. So that was my first implementation. Unfortunately, the C++ codegen is too cursed and I could not understand how to make it work in that case. So now we just unsoundly assume you cannot have multi-output data-dependent output, and do the codegen in MultiOutput. There are some comments explaining exactly what we are improperly assuming.

3. **_rename_unbacked_to** in **torch/fx/experimental/symbolic_shapes.py** - Previously, when we renamed unbacked symbols, we clobbered any facts we previously knew about them. So for example, if we had a replacement `u0 -> s0` but then we renamed u0 to u1, we would now set up the replacement `u0 -> u1`, clobbering the old replacement. This apparently didn't matter in earlier PRs in the stack, but with Inductor now on the ball, there were some tests that indicated this was a problem. The solution is easy: if u0 had a preexisting replacement, reapply it to u1. However...
   * **torch/_functorch/_aot_autograd/collect_metadata_analysis.py** - When we run forward analysis, this triggers fake tensor repropagation and fresh allocations. Previously, we just cleared out the pending symbols when we finished the analysis. But with the change above, this would also migrate replacements to the new symbols... which are now dead. So now we explicitly suppress generation of these symbols with `ignore_fresh_unbacked_symbols` so that no rebinding happens at all.
   * **torch/_dynamo/eval_frame.py** - Same deal; I just searched for all sites we called clear() on pending.

4. The last step is fixing the long tail of extra problems that show up, now that unbacked_bindings are load-bearing in Inductor.
   * **torch/_dynamo/eval_frame.py** - Some of the exports are making copies of nodes without repropagating fake tensors, so in this case, it is important to also copy the `unbacked_bindings` (apparently this didn't matter before without the Inductor changes).
   * **torch/_export/pass_base.py** - I discovered that this is doing fake tensor repropagation via a test suite failure. Do the same playbook as AOTAutograd: PropagateUnbackedSymInts too! Actually, they have also implemented their own tracer, so do the same playbook as proxy_tensor: record unbacked_bindings on the newly traced nodes. UGH code duplication.
   * **torch/_subclasses/fake_tensor.py**, **torch/_subclasses/fake_impls.py** (with call site updates at **torch/_functorch/_aot_autograd/traced_function_transforms.py** and **torch/fx/passes/fake_tensor_prop.py**) - What's this new epoch thing? I noticed that sometimes I would be retracing, call nonzero() on a fake tensor, and not allocate a new unbacked symbol. This is actually bad, because if I don't get a new unbacked symbol, I don't know there's a binding site, and `unbacked_bindings` is now missing a binding. The reason for this is memoization: if I reuse the exact same fake tensor on my retrace, it will already have an unbacked symint memoized on it and we will short-circuit allocation. Well, that's no good. So I associate the memos with a fake tensor epoch, and every time you start a new fake tensor propagation from scratch, you bump the epoch so that I clear all the memos (a generic sketch of this idea appears after this message).
   * **torch/_inductor/scheduler.py** - I noticed in unit tests that V.current_node is not always set when we call process_kernel. So I save it into the IR node and restore it when we are running `get_estimated_runtime`.
   * **torch/fx/experimental/symbolic_shapes.py** - A few things:
     * **rebind_unbacked** (re **_tensor_version**). Ordinarily, when you have an unbacked SymInt, you persistently have it all the way to the end of the program. `_tensor_version` violates this: it generates an unbacked SymInt (for reasons I don't quite understand?) and then gets rid of it later. This triggered an assert violation. I think this op is kind of misusing unbacked SymInt, but I didn't know how to refactor it, so it gets a special case.
     * **rebind_unbacked** (re **Simplify SymBool binding**). Ugh, SymBool, what a pain in the butt. I have an assert that you can only rebind an unbacked symbol to another unbacked symbol. This assert fails when a boolean is involved, because the result of running the keypath on the result is not `u1`, it's `sympy.Piecewise(... sympy.Eq(u1, 1) ...)`. This is actually just `u1`, but Sympy doesn't know it because it doesn't know that `u1`'s value range is `[0, 1]`. So we manually implement the simplification needed to get the assert to pass (see the sketch after this message).
     * **compute_unbacked_bindings** (re **This is pretty fragile**). There is a really funny disaster involving memoization and Inductor's process_kernel. Ordinarily when I retrace, if there was a memo hit in the old trace, there will be a memo hit in the new trace. However, Inductor's process_kernel breaks this, because it recreates fake tensor inputs to the operator call from scratch (since they might have different strides), and obviously these tensor inputs don't have the memo from the old one. I tried a bit to manually transplant the memo to the new fake tensor but it seemed hopeless, so I just let the fresh symbol ride, allocating a new unbacked symbol. However, in one of our tests, we rely on knowing that the first nonzero call is equal to the second (memoized) nonzero call. The equality test looked pretty easy to discharge, so I just went ahead and added a deferred runtime assert to this effect and it worked.

Signed-off-by: Edward Z. Yang <ezyang@meta.com>

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124394
Approved by: https://github.com/jansel
ghstack dependencies: #124310, #124314, #124316
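Two of the ideas above can be illustrated with small, self-contained sketches. First, a generic version of the epoch-based memo invalidation (hypothetical `Mode`/`TensorLike` classes, not the actual FakeTensorMode/FakeTensor implementation): a memo is only trusted if it was recorded in the current epoch, so bumping the epoch at the start of a retrace forces the first nonzero()-style call to allocate a fresh unbacked symbol again.

```python
import itertools

class Mode:
    """Stand-in for a fake tensor mode that hands out unbacked symbols."""
    def __init__(self):
        self.epoch = 0
        self._counter = itertools.count()

    def fresh_symbol(self):
        return f"u{next(self._counter)}"

class TensorLike:
    """Stand-in for a tensor that memoizes its data-dependent nonzero size."""
    def __init__(self):
        self._nnz_memo = None
        self._nnz_memo_epoch = None

    def nonzero_size(self, mode):
        # Memo hit only within the same trace (epoch): reuse the same symbol.
        if self._nnz_memo is not None and self._nnz_memo_epoch == mode.epoch:
            return self._nnz_memo
        # First binding site in this trace: allocate a fresh unbacked symbol.
        self._nnz_memo = mode.fresh_symbol()
        self._nnz_memo_epoch = mode.epoch
        return self._nnz_memo

mode, t = Mode(), TensorLike()
assert t.nonzero_size(mode) == t.nonzero_size(mode) == "u0"
mode.epoch += 1                      # retrace: invalidate all memos
assert t.nonzero_size(mode) == "u1"  # the binding site is visible again
```

Second, the SymBool simplification claim above (that `Piecewise((1, Eq(u1, 1)), (0, True))` is just `u1` when `u1` ranges over `[0, 1]`) can be checked directly with sympy by substituting both values of the range:

```python
import sympy

u1 = sympy.Symbol("u1", integer=True)
packed = sympy.Piecewise((1, sympy.Eq(u1, 1)), (0, True))  # to_int(to_bool(u1))
# Cancelling down to u1 is sound only because u1's value range is [0, 1]:
assert packed.subs(u1, 0) == 0 and packed.subs(u1, 1) == 1
```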
|
diff --git a/aten/src/ATen/ExpandUtils.h b/aten/src/ATen/ExpandUtils.h
index 82db1f8b65..03cfca36e7 100644
--- a/aten/src/ATen/ExpandUtils.h
+++ b/aten/src/ATen/ExpandUtils.h
@@ -462,7 +462,8 @@ inline Tensor _sum_to(
reduce_dims.push_back(i);
}
for (int64_t i = leading_dims; i < static_cast<int64_t>(sizes.size()); ++i) {
- if (shape[i - leading_dims] == 1 && sizes[i] != 1) {
+ if (shape[i - leading_dims] == 1 &&
+ TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(sizes[i], 1))) {
reduce_dims.push_back(i);
}
}
diff --git a/docs/source/fx.experimental.rst b/docs/source/fx.experimental.rst
index fb46ba5f93..ae8dc7ef4e 100644
--- a/docs/source/fx.experimental.rst
+++ b/docs/source/fx.experimental.rst
@@ -28,6 +28,7 @@ torch.fx.experimental.symbolic_shapes
ShapeEnvSettings
ConvertIntKey
CallMethodKey
+ PropagateUnbackedSymInts
DivideByKey
hint_int
diff --git a/torch/_dynamo/eval_frame.py b/torch/_dynamo/eval_frame.py
index 4dd7d97c68..6dd245ad80 100644
--- a/torch/_dynamo/eval_frame.py
+++ b/torch/_dynamo/eval_frame.py
@@ -768,6 +768,10 @@ class FlattenInputOutputSignature(torch.fx.interpreter.Transformer):
if "example_value" in self.current_node.meta:
# NB: intentionally do not use set_example_value
arg.node.meta["example_value"] = self.current_node.meta["example_value"]
+ if "unbacked_bindings" in self.current_node.meta:
+ arg.node.meta["unbacked_bindings"] = self.current_node.meta[
+ "unbacked_bindings"
+ ]
return arg
def output(self, target, args, kwargs):
@@ -795,6 +799,10 @@ class FlattenInputOutputSignature(torch.fx.interpreter.Transformer):
result_proxy.node.meta["example_value"] = self.current_node.meta[
"example_value"
]
+ if "unbacked_bindings" in self.current_node.meta:
+ result_proxy.node.meta["unbacked_bindings"] = self.current_node.meta[
+ "unbacked_bindings"
+ ]
if self.current_node.op != "output":
result_proxy.node._rename(
getattr(self.current_node, "name", result_proxy.node.name)
@@ -1180,7 +1188,18 @@ def export(
else fake_mode
)
- with ambient_fake_mode, enable_python_dispatcher():
+ # We reran fake tensor propagation, but we didn't do
+ # anything with the resulting unbacked SymInts. Drop them
+ # from the pending list.
+ # NB: this is wrong if graph_captured_result has
+ # data-dependent output size!
+ ignore_fresh_unbacked = null_context()
+ if shape_env := ambient_fake_mode.shape_env:
+ ignore_fresh_unbacked = shape_env.ignore_fresh_unbacked_symbols()
+
+ with (
+ ambient_fake_mode
+ ), enable_python_dispatcher(), ignore_fresh_unbacked:
params_and_buffers = {
**named_parameters,
**named_buffers,
@@ -1199,14 +1218,6 @@ def export(
graph, fake_params_buffers, fake_graph_inputs
)
- # We reran fake tensor propagation, but we didn't do
- # anything with the resulting unbacked SymInts. Drop them
- # from the pending list.
- # NB: this is wrong if graph_captured_result has
- # data-dependent output size!
- if shape_env := ambient_fake_mode.shape_env:
- shape_env.pending_fresh_unbacked_symbols.clear()
-
return graph_captured_result
return result_capturing_wrapper
diff --git a/torch/_export/pass_base.py b/torch/_export/pass_base.py
index 4f31e71dc1..1cf7e75ad5 100644
--- a/torch/_export/pass_base.py
+++ b/torch/_export/pass_base.py
@@ -18,6 +18,7 @@ from torch.fx.graph import CodeGen
from torch.fx.passes.infra.pass_base import PassBase, PassResult
from torch.fx.passes.shape_prop import _extract_tensor_metadata, TensorMetadata
from torch.utils import _pytree as pytree
+from torch.fx.experimental.symbolic_shapes import PropagateUnbackedSymInts, compute_unbacked_bindings
__all__ = ["_ExportPassBaseDeprecatedDoNotUse"]
@@ -238,7 +239,7 @@ class _ExportPassBaseDeprecatedDoNotUse(PassBase):
return super().run_node(n)
def __init__(self) -> None:
- self.interpreter = torch.fx.Interpreter(
+ self.interpreter = PropagateUnbackedSymInts(
torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
)
self.tracer = self.ExportTracer(self, CodeGen())
@@ -268,6 +269,9 @@ class _ExportPassBaseDeprecatedDoNotUse(PassBase):
res_proxy = self.tracer.create_proxy(kind, target, args_proxy, kwargs_proxy, name=name)
res_proxy.node.meta.update(meta.data)
+ if self.fake_tensor_mode and (shape_env := self.fake_tensor_mode.shape_env):
+ if symbol_to_path := compute_unbacked_bindings(shape_env, res_data):
+ res_proxy.node.meta["unbacked_bindings"] = symbol_to_path
self.tracer.set_metadata(res_proxy.node, res_data)
return ProxyValue(res_data, res_proxy)
diff --git a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
index 4dba14f751..ac86e1822f 100644
--- a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
+++ b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
@@ -8,6 +8,7 @@ a functionalized version of the graph under compilation.
"""
import collections
+import contextlib
import logging
from functools import wraps
from typing import Callable, DefaultDict, Dict, List
@@ -143,10 +144,22 @@ def run_functionalized_fw_and_collect_metadata(
torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
)
+ # AOTAutograd collect metadata will do fake tensor propagation, but it
+ # throws out all the resulting fake tensors and doesn't save anything
+ # about sizes (TODO: Actually, the subclass metadata does save size
+ # info, this is likely to be incorrect if unbacked SymInts are
+ # allowed). The net effect is we generate a bunch of fresh unbacked
+ # symbols that we immediately throw out and don't use. NB: we don't
+ # want to rename into these symbols, because we aren't going to have
+ # binding sites for them.
+ ignore_fresh_unbacked = contextlib.nullcontext()
+ if (fake_mode := detect_fake_mode()) and fake_mode.shape_env:
+ ignore_fresh_unbacked = fake_mode.shape_env.ignore_fresh_unbacked_symbols()
+
# It doesn't matter if we run this under predispatch or not because it is
# only for figuring out metadata
mode = FunctionalTensorMode(_allow_token_discovery=True)
- with disable_above, mode:
+ with disable_above, mode, ignore_fresh_unbacked:
# precondition: The passed in function already handles unflattening inputs + flattening outputs
flat_f_args = pytree.tree_map(_to_fun, flat_args)
flat_f_outs = f(*flat_f_args)
@@ -700,16 +713,6 @@ from a multi-output view call"
grad_enabled_mutation=grad_enabled_mutation,
tokens=mode._tokens,
)
- # AOTAutograd collect metadata will do fake tensor propagation, but it
- # throws out all the resulting fake tensors and doesn't save anything
- # about sizes (TODO: Actually, the subclass metadata does save size
- # info, this is likely to be incorrect if unbacked SymInts are
- # allowed). The net effect is we generate a bunch of fresh unbacked
- # symbols that we immediately throw out and don't use. NB: we don't
- # want to rename into these symbols, because we aren't going to have
- # binding sites for them.
- if (fake_mode := detect_fake_mode()) and fake_mode.shape_env:
- fake_mode.shape_env.pending_fresh_unbacked_symbols.clear()
return metadata
return inner
diff --git a/torch/_functorch/_aot_autograd/traced_function_transforms.py b/torch/_functorch/_aot_autograd/traced_function_transforms.py
index 4053f39d4f..cce76e0108 100644
--- a/torch/_functorch/_aot_autograd/traced_function_transforms.py
+++ b/torch/_functorch/_aot_autograd/traced_function_transforms.py
@@ -25,7 +25,7 @@ from torch._guards import detect_fake_mode
from torch._prims_common import CUDARngStateHelper
from torch.fx.experimental.symbolic_shapes import (
definitely_false,
- rebind_unbacked,
+ PropagateUnbackedSymInts,
sym_eq,
)
from torch.nn.utils import stateless
@@ -677,13 +677,6 @@ def aot_dispatch_subclass(
)
-class PropagateUnbackedSymInts(torch.fx.Interpreter):
- def run_node(self, n: torch.fx.Node):
- result = super().run_node(n)
- rebind_unbacked(detect_fake_mode().shape_env, n, result)
- return result
-
-
def create_functional_call(mod, params_spec, params_len, store_orig_mod=False):
# Redundant with dynamo, but worth having in case this gets invoked elsewhere.
# https://github.com/pytorch/pytorch/issues/103569
@@ -698,6 +691,7 @@ def create_functional_call(mod, params_spec, params_len, store_orig_mod=False):
"ignore", "Anomaly Detection has been enabled."
)
with torch.autograd.detect_anomaly(check_nan=False):
+ detect_fake_mode().epoch += 1
out = PropagateUnbackedSymInts(mod).run(
*args[params_len:], **kwargs
)
diff --git a/torch/_higher_order_ops/cond.py b/torch/_higher_order_ops/cond.py
index 654364e5d9..40aee90aff 100644
--- a/torch/_higher_order_ops/cond.py
+++ b/torch/_higher_order_ops/cond.py
@@ -1,3 +1,5 @@
+import contextlib
+
import torch
import torch._subclasses.functional_tensor
@@ -11,6 +13,7 @@ from torch._C._functorch import (
maybe_get_bdim,
)
from torch._functorch.utils import exposed_in
+from torch._guards import detect_fake_mode
from torch._higher_order_ops.utils import (
_has_potential_branch_input_alias,
@@ -211,12 +214,25 @@ def trace_cond(proxy_mode, func_overload, pred, true_fn, false_fn, operands):
# true or false branch is indistinguishable. So, as this is just for tracing
# purposes, choose the true branch.
+ # TODO: the unbacked symbol allocations MUST NOT leak out, if you want to
+ # support this we need to arrange for the reenter_make_fx unbacked SymInts
+ # to be used, AND we need to arrange for some sort of unification between
+ # the two branches (but not really unification; e.g., if one branch
+ # returns [u0] and the other returns [5] this is OK but you MUST NOT
+ # conclude the result is 5. Also if one branch returns [3] and another
+ # branch returns [5] you can make it work by immediately allocating a new
+ # unbacked SymInt here).
+ ignore_fresh_unbacked = contextlib.nullcontext()
+ if (fake_mode := detect_fake_mode()) and fake_mode.shape_env:
+ ignore_fresh_unbacked = fake_mode.shape_env.ignore_fresh_unbacked_symbols()
+
# TODO: Uhh.... it shouldn't matter, but changing this to true_fn results in
# a FakeTensorMode error :
# `Current active mode <class 'torch._subclasses.fake_tensor.FakeTensorMode'> not registered`
# TODO Sometimes the operands are not completely FakeTensor, something seems went wrong in
# dynamo? Because of that it runs real computation sometimes and re-triggering downstream dispatch keys.
- out = false_fn(*operands)
+ with ignore_fresh_unbacked:
+ out = false_fn(*operands)
return track_tensor_tree(out, out_proxy, constant=None, tracer=proxy_mode.tracer)
@@ -246,7 +262,15 @@ def inner(mode, pred, true_fn, false_fn, operands):
@cond_op.py_impl(FakeTensorMode)
def cond_fake_tensor_mode(mode, pred, true_fn, false_fn, operands):
- with mode:
+ # Ignore here, because if you've gotten here but you're not manually
+ # tracing the inner graphs, that means that you intend to reuse the graph
+ # directly. Which means the old unbacked symbol bindings are appropriate.
+ # This strategy will not work if unbacked symbols can escape.
+ ignore_fresh_unbacked = contextlib.nullcontext()
+ if mode.shape_env:
+ ignore_fresh_unbacked = mode.shape_env.ignore_fresh_unbacked_symbols()
+
+ with mode, ignore_fresh_unbacked:
true_outs = true_fn(*operands)
flat_true_outs = pytree.tree_leaves(true_outs)
flat_false_outs = pytree.tree_leaves(false_fn(*operands))
diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py
index a8da96a5ea..95e4ef3ac7 100644
--- a/torch/_inductor/codegen/cpp_wrapper_cpu.py
+++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py
@@ -10,6 +10,7 @@ from sympy import Expr
import torch
import torch._ops
+from torch.fx.experimental.symbolic_shapes import ConvertIntKey, DivideByKey
from .. import config, ir
from ..codecache import CudaKernelParamCache
@@ -1206,6 +1207,7 @@ class CppWrapperCpu(WrapperCodeGen):
output_name_base = fallback_kernel.get_name()
for idx, output in enumerate(fallback_kernel.outputs):
if isinstance(output, ir.MultiOutput):
+ # TODO: handle integer output (e.g., as in attention)
name = f"{output.get_name()}"
output_handle_name = f"{name}_handle"
if output.indices:
@@ -1393,18 +1395,28 @@ class CppWrapperCpu(WrapperCodeGen):
if config.abi_compatible:
dtype = node.inputs[0].get_dtype()
dtype_str = str(dtype).split(".")[-1]
- self.writeline(f"{DTYPE_TO_CPP[dtype]} {node.sym};")
- self.writeline(f"aoti_torch_item_{dtype_str}({data}, &{node.sym});")
- # record in unbacked_symbol_decls so we won't generate a declaration of the symbol again
- self.unbacked_symbol_decls.add(str(node.sym))
+ self.writeline(f"{DTYPE_TO_CPP[dtype]} {node.sym}_raw;")
+ self.writeline(f"aoti_torch_item_{dtype_str}({data}, &{node.sym}_raw);")
else:
- if node.is_bool:
- self.writeline(f"bool {node.sym} = {data}.item() ? 1 : 0;")
- else:
- convert_type = DTYPE_TO_ATEN[node.inputs[0].get_dtype()].replace(
- "at::k", "to"
- )
- self.writeline(f"auto {node.sym} = {data}.item().{convert_type}();")
+ convert_type = DTYPE_TO_ATEN[node.inputs[0].get_dtype()].replace(
+ "at::k", "to"
+ )
+ self.writeline(f"auto {node.sym}_raw = {data}.item().{convert_type}();")
+
+ if len(node.keypath) == 0:
+ self.writeline(f"auto {node.sym} = {node.sym}_raw;")
+        elif len(node.keypath) == 1 and isinstance(node.keypath[0], ConvertIntKey):
+            self.writeline(f"int64_t {node.sym} = {node.sym}_raw ? 1 : 0;")
+        elif len(node.keypath) == 1 and isinstance(node.keypath[0], DivideByKey):
+ # TODO: assert divisibility here
+ self.writeline(
+ f"int64_t {node.sym} = {node.sym}_raw / {node.keypath[0].divisor};"
+ )
+ else:
+ raise AssertionError(f"unrecognized keypath {node.keypath}")
+
+ # record in unbacked_symbol_decls so we won't generate a declaration of the symbol again
+ self.unbacked_symbol_decls.add(str(node.sym))
def can_stack_allocate_buffer(self, buffer):
return (
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index 3271682ef1..adbc93f0dc 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -28,7 +28,7 @@ import torch._ops
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor.codegen.multi_kernel import MultiKernelState
-from torch.fx.experimental.symbolic_shapes import SymTypes
+from torch.fx.experimental.symbolic_shapes import ConvertIntKey, DivideByKey, SymTypes
from torch.fx.node import _get_qualified_name
from torch.utils._sympy.singleton_int import SingletonInt
@@ -955,10 +955,21 @@ class WrapperCodeGen(CodeGen):
def codegen_dynamic_scalar(self, node):
(data,) = (t.codegen_reference() for t in node.inputs)
- if node.is_bool:
+ if len(node.keypath) == 0:
+ self.writeline(f"{node.sym} = {data}.item()")
+ elif len(node.keypath) == 1 and isinstance(node.keypath[0], ConvertIntKey):
self.writeline(f"{node.sym} = 1 if {data}.item() else 0")
+ elif len(node.keypath) == 1 and isinstance(node.keypath[0], DivideByKey):
+ self.writeline(f"{node.sym}_undivided = {data}.item()")
+ self.writeline(
+ f"assert {node.sym}_undivided % {node.keypath[0].divisor} == 0, "
+ f"f'{{{node.sym}_undivided}} not divisible by {node.keypath[0].divisor}'"
+ )
+ self.writeline(
+ f"{node.sym} = {node.sym}_undivided // {node.keypath[0].divisor}"
+ )
else:
- self.writeline(f"{node.sym} = {data}.item()")
+ raise AssertionError(f"unrecognized keypath {node.keypath}")
# No one should ever use this buffer, but for uniformity
# define the variable and assign it None
self.writeline(f"{node.get_name()} = None")
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index 34d5b02575..c80b006566 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -1115,6 +1115,8 @@ class GraphLowering(torch.fx.Interpreter):
def debug(msg):
log.debug("lowering %s %s", LazyString(n.format_node), msg)
+ buffer_watermark = len(self.buffers)
+
origins = {n}
if n.op == "call_function":
args, kwargs = self.fetch_args_kwargs_from_env(n)
@@ -1319,6 +1321,43 @@ class GraphLowering(torch.fx.Interpreter):
self.register_users_of(result)
+ new_unbacked_defs = set()
+ for i in range(buffer_watermark, len(self.buffers)):
+ new_unbacked_defs |= self.buffers[i].get_unbacked_symbol_defs()
+
+ def format_buffers():
+ r = []
+ for b in self.buffers[buffer_watermark:]:
+ r.append(
+ f"unbacked_symbol_defs={b.get_unbacked_symbol_defs()} in:\n{b}\n"
+ )
+ return "***\n".join(r)
+
+ if n.op != "placeholder":
+ unbacked_bindings = n.meta.get("unbacked_bindings", {})
+ # When we do lowering, it is possible we reallocate unbacked SymInts.
+ # So we need to line up the unbacked SymInts when performing the test
+ # here
+ #
+ # In principle, we could permit lowering to introduce MORE unbacked
+ # SymInts: as long as all the old unbacked ones are accounted for,
+ # it's fine for inductor to introduce extra calls to item()/unbacked()
+ # whatever. This actually happens in practice when an unbacked SymInt
+ # gets memoized away; naively, when Inductor reprocesses a kernel, it
+ # doesn't know that the memo still applies, and ends up allocating a
+ # new symbol. However, this is generally a bad thing: we may still
+ # end up needing to test equalities on the symbols, and a fresh
+ # symbol is likely to hit lots of GuardOnDataDependent errors that
+ # we already know facts for.
+ assert new_unbacked_defs >= {
+ V.fake_mode.shape_env.unbacked_renamings.get(s, s)
+ for s in unbacked_bindings.keys()
+ }, (
+ f"{unbacked_bindings} != {new_unbacked_defs} (fx != inductor)\n"
+ f"fx node is: {n.format_node()}\n"
+ f"new buffers are:\n\n{format_buffers()}"
+ )
+
return result
def validate_can_generate_cpp_wrapper(self):
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 0131a52239..129e569537 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -48,7 +48,14 @@ from torch._prims_common import (
StrideType,
)
from torch._subclasses.fake_tensor import get_schema_info
-from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols, SymTypes
+from torch.fx.experimental.symbolic_shapes import (
+ CallMethodKey,
+ compute_unbacked_bindings,
+ DivideByKey,
+ free_unbacked_symbols,
+ rebind_unbacked,
+ SymTypes,
+)
from torch.utils._sympy.functions import CleanDiv, FloorDiv, ModularIndexing
from . import config, dependencies
@@ -3101,46 +3108,7 @@ class Buffer(IRNode):
return self.get_read_writes().reads
def get_unbacked_symbol_defs(self) -> Set[sympy.Symbol]:
- """
- Returns the unbacked symbols which are defined by this IR node,
- because this is a data-dependent IR node, or item()
- """
- # So this is a little unusual. In principle, you could imagine
- # defining a MultiOutputLayout buffer so that it DOES define
- # unbacked symints. However, we can't easily tell what symints
- # such a buffer defines, because MultiOutputLayout doesn't actually
- # define any useful information about what it returns.
- #
- # An easier and better approach is to delay the symint allocation
- # to the MultiOutput IR nodes, which are when we actually extract
- # out the buffers and know what their sizes are.
- #
- # There are two subleties here:
- #
- # 1. Suppose you have a kernel that produces out1: (i0,), out2: (i0,)
- # Both of these actually count as defs! The scheduler will just
- # arbitrarily pick one of these as the canonical definer and
- # ensure it stays live. It's not a big deal if we pick the
- # wrong one because tuple accesses are cheap, and all this means
- # is we accidentally keep a MultiOutput node live when it wasn't
- # strictly necessary.
- #
- # 2. Suppose you have a MultiOutput buffer whose size is (i0,), but
- # the MultiOutputLayout buffer it is projecting from isn't actually
- # dynamic; it has i0 as one of the arguments. We cannot tell this
- # directly from MultiOutput, we have to look at the input buffer's
- # uses to work this out. No big deal.
- if isinstance(self.layout, (NoneLayout, MultiOutputLayout)):
- return set()
-
- # This kernel defines all unbacked symbols... that it didn't get in as
- # arguments!
- defs = (
- free_unbacked_symbols(self.get_size())
- | free_unbacked_symbols(self.get_stride())
- | free_unbacked_symbols(self.get_offset())
- )
- return defs - self.get_unbacked_symbol_uses()
+ return set()
def get_unbacked_symbol_uses(self) -> Set[sympy.Symbol]:
"""
@@ -3159,32 +3127,6 @@ class Buffer(IRNode):
"""
return set()
- def codegen_unbacked_symbol_defs(self, wrapper):
- # NB: If it is possible for other ir node types to return unbacked
- # symints, you need to make sure their codegen calls this method.
- # Don't forget to update get_unbacked_symbol_defs too.
- symbols_to_define = self.get_unbacked_symbol_defs()
- for i, s in enumerate(self.get_size()):
- if s in symbols_to_define:
- wrapper.writeline(
- f"{wrapper.codegen_unbacked_symbol_decl(s)} = {self.get_name()}.size({i}){wrapper.ending}"
- )
- symbols_to_define.remove(s)
- for i, s in enumerate(self.get_stride()):
- if s in symbols_to_define:
- wrapper.writeline(
- f"{wrapper.codegen_unbacked_symbol_decl(s)} = {self.get_name()}.stride({i}){wrapper.ending}"
- )
- symbols_to_define.remove(s)
- if (s := self.get_offset()) in symbols_to_define:
- wrapper.writeline(
- f"{wrapper.codegen_unbacked_symbol_decl(s)} = {self.get_name()}.storage_offset(){wrapper.ending}"
- )
- symbols_to_define.remove(s)
- assert (
- not symbols_to_define
- ), f"unbacked symint {symbols_to_define} not written out, check comment above"
-
def realize(self):
pass
@@ -3988,6 +3930,9 @@ class ExternKernel(InputsKernel):
] = None
arg_properties: Optional[List[Dict[str, Any]]] = None
kwarg_properties: Optional[Dict[str, Dict[str, Any]]] = None
+ unbacked_bindings: Dict[sympy.Symbol, pytree.KeyPath] = dataclasses.field(
+ default_factory=dict
+ )
def __init__(
self,
@@ -4015,6 +3960,11 @@ class ExternKernel(InputsKernel):
self.ordered_kwargs_for_cpp_kernel = ordered_kwargs_for_cpp_kernel
self.op_overload = op_overload
self.collect_arg_kwarg_properties()
+ self.unbacked_bindings = {}
+ self.fx_node = V.graph.current_node
+
+ def get_unbacked_symbol_defs(self) -> Set[sympy.Symbol]:
+ return set()
def collect_arg_kwarg_properties(self):
# if self.op_overload is torch._ops.OpOverload, we can use its schema to collect additional
@@ -4079,7 +4029,15 @@ class ExternKernel(InputsKernel):
return pw
@classmethod
- def process_kernel(cls, kernel, *args, **kwargs):
+ def process_kernel(
+ cls, kernel, *args, **kwargs
+ ) -> Tuple[
+ Any,
+ List[Any],
+ List[Any],
+ Callable[[Any, Any], Any],
+ Optional[Dict[sympy.Symbol, pytree.KeyPath]],
+ ]:
binded_args = {"args": args, "kwargs": kwargs}
args_flat, args_spec = pytree.tree_flatten(binded_args)
@@ -4116,9 +4074,9 @@ class ExternKernel(InputsKernel):
if is_storage_and_layout(x):
as_storage_and_layout(x, freeze=True)
- # We don't have generic shape formulas, so just burn in the
- # shapes and run an example input.
- # TODO(jansel): replace this with dynamic shape formulas
+ # Rerun fake tensor propagation, because Inductor may have changed the
+ # strides of inputs and we need to determine accurately what the
+ # output stride will be.
example_args = []
# We need to retain the constant values of fake tensors that we originally
@@ -4133,6 +4091,13 @@ class ExternKernel(InputsKernel):
new_args, new_kwargs = unflatten_args(example_args, non_tensor_args)
example_output = kernel(*new_args, **new_kwargs)
+ unbacked_bindings: Optional[Dict[sympy.Symbol, pytree.KeyPath]] = None
+ if shape_env := V.fake_mode.shape_env:
+ rebind_unbacked(shape_env, V.current_node, example_output)
+ unbacked_bindings = compute_unbacked_bindings(
+ shape_env, example_output, V.current_node.meta.get("val")
+ )
+
example_out_li = (
[example_output]
if not isinstance(example_output, (list, tuple))
@@ -4145,12 +4110,13 @@ class ExternKernel(InputsKernel):
msg = f"{msg} Found from : \n {stack_trace}"
V.graph.disable_cudagraphs_reason = msg
- # TODO: Unconditionally do this, not just when example_output has
- # unbacked symbols
- if maybe_free_unbacked_symbols(example_output):
- example_output = V.graph.current_node.meta["val"]
-
- return example_output, tensor_args, non_tensor_args, unflatten_args
+ return (
+ example_output,
+ tensor_args,
+ non_tensor_args,
+ unflatten_args,
+ unbacked_bindings,
+ )
@classmethod
def convert_to_reinterpret_view(cls, x):
@@ -5056,25 +5022,11 @@ class DynamicScalar(ExternKernel):
def should_allocate(self):
return False
- # TODO: handle bools carefully
- def __init__(self, sym, data):
+ def __init__(self, sym, keypath, data):
data.realize()
super().__init__(None, NoneLayout(torch.device("cpu")), self.unwrap_storage([data])) # type: ignore[arg-type]
- if isinstance(sym, sympy.Symbol):
- self.sym = sym
- self.is_bool = False
- else:
- # Special case for boolean. For Reasons(TM), we don't represent
- # boolean variables directly in sympy; instead, we generate an
- # indicator integer variable which we then convert to a boolean by
- # testing i0 == 1. We have to identify the underlying indicator
- # variable, and then bind i0 to the appropriate integer value
- # based on the runtime boolean.
- assert isinstance(sym, sympy.Eq), sym
- assert isinstance(sym.args[0], sympy.Symbol), sym
- assert sym.args[1] == 1, sym
- self.sym = sym.args[0]
- self.is_bool = True
+ self.sym = sym
+ self.keypath = keypath
def get_unbacked_symbol_defs(self) -> Set[sympy.Symbol]:
return {self.sym}
@@ -5174,6 +5126,8 @@ class FallbackKernel(ExternKernelAlloc):
nontensor_args,
unflatten_args,
kwargs=None,
+ *,
+ unbacked_bindings=None,
):
super().__init__(
layout,
@@ -5186,6 +5140,7 @@ class FallbackKernel(ExternKernelAlloc):
# output through the abi-compatible interface.
self.outputs: Sequence[Any] = []
self.use_runtime_dispatch = False
+ self.unbacked_bindings = unbacked_bindings
assert isinstance(
kernel,
@@ -5270,6 +5225,64 @@ class FallbackKernel(ExternKernelAlloc):
for info, arg in torch._library.utils.zip_schema(schema, args, kwargs):
handle_aliasing_and_mutation(info, arg)
+ def codegen_unbacked_symbol_defs(self, wrapper):
+ if not hasattr(self, "unbacked_bindings"):
+ return
+
+ unbacked_bindings = self.unbacked_bindings
+
+ if not unbacked_bindings:
+ return
+
+ for s, keypath in unbacked_bindings.items():
+
+ def go(expr, keypath):
+ if keypath == ():
+ return expr
+
+ if (
+ len(keypath) >= 2
+ and isinstance(keypath[0], CallMethodKey)
+ and isinstance(keypath[1], pytree.SequenceKey)
+ ):
+ return go(
+ f"{expr}.{keypath[0].name}({keypath[1].idx})", keypath[2:]
+ )
+ elif isinstance(keypath[0], CallMethodKey):
+ return go(f"{expr}.{keypath[0].name}()", keypath[1:])
+ elif isinstance(keypath[0], pytree.SequenceKey):
+ return go(f"{expr}[{keypath[0].idx}]", keypath[1:])
+ elif isinstance(keypath[0], DivideByKey):
+ # TODO: need to assert divisibility
+ # TODO: this is invalid C++ codegen
+ return go(f"{expr}.__floordiv__({keypath[0].divisor})", keypath[1:])
+ else:
+ raise AssertionError(f"unrecognized keypath {keypath}")
+
+ def go_outer():
+ if V.graph.cpp_wrapper and config.abi_compatible:
+ # Special handling for the top level buffer access,
+ # because self.get_name() is actually never bound; the
+ # individual output arguments are bound by
+ # generate_c_shim_fallback_kernel
+ if len(self.outputs) == 1:
+ return go(self.outputs[0].get_name(), keypath)
+ else:
+ assert isinstance(keypath[0], pytree.SequenceKey)
+ return go(self.outputs[keypath[0].idx].get_name(), keypath[1:])
+ else:
+ return go(self.get_name(), keypath)
+
+ wrapper.writeline(
+ f"{wrapper.codegen_unbacked_symbol_decl(s)} = {go_outer()}{wrapper.ending}"
+ )
+
+ def get_unbacked_symbol_defs(self) -> Set[sympy.Symbol]:
+ if unbacked_bindings := getattr(self, "unbacked_bindings", None):
+ return unbacked_bindings.keys()
+ else:
+ return set()
+
def set_cpp_kernel(self, kernel):
from .codegen.wrapper import get_cpp_op_schema
@@ -5559,6 +5572,8 @@ class FallbackKernel(ExternKernelAlloc):
if isinstance(self.layout, Layout):
self.codegen_size_asserts(wrapper)
+ self.codegen_unbacked_symbol_defs(wrapper)
+
@staticmethod
def tensor_to_layout(output: torch.Tensor):
return FixedLayout(
@@ -5580,6 +5595,7 @@ class FallbackKernel(ExternKernelAlloc):
tensor_args,
non_tensor_args,
unflatten_args,
+ unbacked_bindings,
) = cls.process_kernel(kernel, *args, **kwargs)
if example_output is None:
@@ -5589,6 +5605,7 @@ class FallbackKernel(ExternKernelAlloc):
tensor_args,
non_tensor_args,
unflatten_args,
+ unbacked_bindings=unbacked_bindings,
)
else:
@@ -5601,6 +5618,7 @@ class FallbackKernel(ExternKernelAlloc):
tensor_args,
non_tensor_args,
unflatten_args,
+ unbacked_bindings=unbacked_bindings,
)
def generate_output(output, indices):
@@ -5659,6 +5677,8 @@ class ComplexView(FallbackKernel):
tensor_args,
nontensor_args,
unflatten_args,
+ *,
+ unbacked_bindings=None,
):
super().__init__(
layout,
@@ -5666,6 +5686,7 @@ class ComplexView(FallbackKernel):
tensor_args,
nontensor_args,
unflatten_args,
+ unbacked_bindings=unbacked_bindings,
)
@@ -5701,7 +5722,6 @@ class MultiOutput(ExternKernel):
self.get_name(),
self.codegen_list_tuple_access(self.inputs[0].get_name(), self.indices),
)
- self.codegen_unbacked_symbol_defs(wrapper)
def __init__(self, layout, input, indices: List[Tuple[Any, ...]]):
super().__init__(None, layout, [input], ())
@@ -7576,6 +7596,8 @@ class EffectfulKernel(FallbackKernel):
nontensor_args,
unflatten_args,
kwargs=None,
+ *,
+ unbacked_bindings=None,
):
super().__init__(
NoneLayout(layout.device),
@@ -7584,6 +7606,7 @@ class EffectfulKernel(FallbackKernel):
nontensor_args,
unflatten_args,
kwargs=None,
+ unbacked_bindings=unbacked_bindings,
)
from torch._higher_order_ops.effects import get_effect_key
@@ -8452,7 +8475,9 @@ class _CollectiveKernel(FallbackKernel):
tensor_args,
non_tensor_args,
unflatten_args,
+ unbacked_bindings,
) = cls.process_kernel(kernel, inputs, *args, **kwargs)
+ assert not unbacked_bindings, f"{kernel} {unbacked_bindings}"
for tensor_arg in tensor_args:
tensor_arg.realize()
@@ -8507,7 +8532,9 @@ class _CollectiveKernel(FallbackKernel):
tensor_args,
non_tensor_args,
unflatten_args,
+ unbacked_bindings,
) = cls.process_kernel(kernel, inputs, *args, **kwargs)
+ assert not unbacked_bindings, f"{kernel}, {unbacked_bindings}"
for tensor_arg in tensor_args:
tensor_arg.realize()
@@ -8575,7 +8602,9 @@ class _WaitKernel(_CollectiveKernel):
tensor_args,
non_tensor_args,
unflatten_args,
+ unbacked_bindings,
) = cls.process_kernel(kernel, inp)
+ assert not unbacked_bindings, f"{kernel} {unbacked_bindings}"
packed = cls(
NoneLayout(inp.get_device()),
kernel,
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index e9ebeb6dae..eea5333451 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -2360,10 +2360,33 @@ def _local_scalar_dense(data):
# solely responsible for generating this .item(). The buffer is
# not used for anything (notice we discard it); at codegen time,
# the "buffer" just gets assigned None.
- sym = V.graph.current_node.meta["val"].node.expr
- buffer = ir.DynamicScalar(sym, data)
+ unbacked_bindings = V.graph.current_node.meta["unbacked_bindings"]
+ assert len(unbacked_bindings) == 1, unbacked_bindings
+ # NB: Have to be very careful here. V.graph.current_node.meta["val"]
+ # seemingly also contains a symbol which you want to do binding for,
+ # but it actually isn't. In particular, if we have later performed
+ # a deferred runtime assert saying that u0 == s0, you will actually
+ # see s0 from expr! This is bad because we need to actually generate
+ # the assert that says u0 == s0, so we need to know where to get u0
+ # from (this call). In particular, we must use unbacked_bindings, which
+ # is guaranteed to have the original, unreplaced symbol in question.
+ #
+ # NB2: Another thing we have to be very careful about are symbol bindings
+ # that require nontrivial refinement, e.g., when you have a binding site
+ # x: Sym(u0 * 4) = y.item(). Here, the code generation must do a division
+ # in order to appropriately bind u0. This is communicated via the keypath
+ # in unbacked_bindings, and we need to hold onto it in order to generate
+ # code appropriately for this case.
+ binding_sym, keypath = next(iter(unbacked_bindings.items()))
+ buffer = ir.DynamicScalar(binding_sym, keypath, data)
buffer.name = V.graph.register_buffer(buffer)
- return sym
+ # NB: the replaced expr is OK to use directly downstream, we want
+ # simplifications in this case!
+ val = V.graph.current_node.meta["val"]
+ if isinstance(val, (torch.SymInt, torch.SymFloat, torch.SymBool)):
+ return val.node.expr
+ else:
+ return sympy.sympify(val)
@register_lowering(aten._assert_scalar)
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index 375d65d99f..7f3d435168 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -617,9 +617,15 @@ class BaseSchedulerNode:
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.utils.flop_counter import FlopCounterMode
- with FakeTensorMode(), FlopCounterMode(
+ assert self.node.fx_node is not None
+ with FakeTensorMode() as fake_mode, FlopCounterMode(
display=False
- ) as flop_counter_mode:
+ ) as flop_counter_mode, V.set_current_node(
+ self.node.fx_node
+ ), V.set_fake_mode(
+ fake_mode
+ ):
+ assert V.current_node is not None
from .ir import ir_node_to_tensor
fake_inputs = [
diff --git a/torch/_subclasses/fake_impls.py b/torch/_subclasses/fake_impls.py
index 244e90adcc..b7977049e7 100644
--- a/torch/_subclasses/fake_impls.py
+++ b/torch/_subclasses/fake_impls.py
@@ -377,6 +377,7 @@ def nonzero(fake_mode, func, arg):
# but this seems more precise.
nnz = arg._nonzero_memo = 0
arg._nonzero_memo_vc = arg._version
+ arg._nonzero_memo_epoch = fake_mode.epoch
else:
nnz = fake_mode.shape_env.create_unbacked_symint()
@@ -391,6 +392,7 @@ def nonzero(fake_mode, func, arg):
# arg._version N/A in inference mode
arg._nonzero_memo = nnz
arg._nonzero_memo_vc = arg._version
+ arg._nonzero_memo_epoch = fake_mode.epoch
return arg.new_empty((nnz, arg.dim()), dtype=torch.int64)
diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py
index dcd559f836..8174f0658f 100644
--- a/torch/_subclasses/fake_tensor.py
+++ b/torch/_subclasses/fake_tensor.py
@@ -381,6 +381,11 @@ class FakeTensor(torch.Tensor):
# TODO: Generalize this as needed, e.g., into a trie of memos
_nonzero_memo: Optional[torch.SymInt]
_nonzero_memo_vc: Optional[int]
+ # When we retrace, we need to invalidate all the memos so that we can
+ # accurately identify the first time unbacked SymInts are allocated.
+ # This is only relevant for inputs; for intermediates, they will get fresh
+ # fake tensors so you won't have a memo anyway
+ _nonzero_memo_epoch: Optional[int]
# Indicates to our torch_dispatch dispatching infra that
# this is an "infra" mode with lower dispatching precedence.
@@ -392,7 +397,10 @@ class FakeTensor(torch.Tensor):
return None
# Version counter based tracking isn't 100% sound but it's close
# enough
- if self._nonzero_memo_vc != self._version:
+ if (
+ self._nonzero_memo_vc != self._version
+ or self._nonzero_memo_epoch != self.fake_mode.epoch
+ ):
self._nonzero_memo = None
return None
return self._nonzero_memo
@@ -799,6 +807,9 @@ class FakeTensorMode(TorchDispatchMode):
cache_hits: int = 0
cache_misses: int = 0
cache_bypasses = defaultdict(int)
+ # Every time you retrace using the same fake tensor mode, you should
+ # advance the epoch so we don't reuse unbacked memos
+ epoch: int = 0
def __init__(
self,
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 37991985ed..ae2ddddb0e 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -276,16 +276,43 @@ def rebind_unbacked(shape_env, n: torch.fx.Node, result):
has the old binding information) and the new result (which we can extract the
new unbacked SymInts out from).
"""
+ from torch._dynamo.tensor_version_op import _tensor_version
+
+ # Inputs never need rebinding
+ if n.op == "placeholder":
+ return
+
if bindings := n.meta.get("unbacked_bindings"):
for raw_u0, path in bindings.items():
u1 = pytree.key_get(result, path)
- # We should never have bindings for raw bools; instead they should
- # have been converted to ints via ConvertIntKey
- assert type(u1) is not bool
- if isinstance(u1, (int, float)):
- raw_u1 = sympy.sympify(u1)
- else:
- raw_u1 = u1.node.expr
+ # tensor_version ops get specialized after AOTAutograd, it's OK,
+ # we don't actually want to do asserts on them. This is all a bit
+ # questionable though
+ if isinstance(u1, int) and n.target is _tensor_version:
+ log.info("rebind_unbacked: discard _tensor_version %s %s -> %s", raw_u0, path, u1)
+ continue
+ raw_u1 = u1.node.expr
+ # Simplify SymBool binding
+ if (
+ isinstance(raw_u1, sympy.Piecewise) and
+ len(raw_u1.args) == 2 and
+ raw_u1.args[0][0] == 1 and
+ isinstance(eq := raw_u1.args[0][1], sympy.Eq) and
+ isinstance(new_raw_u1 := eq.lhs, sympy.Symbol) and
+ shape_env.var_to_range[new_raw_u1].issubset(ValueRanges(0, 1)) and
+ eq.rhs == 1 and
+ raw_u1.args[1] == (0, True)
+ ):
+ # This is what the pattern match above is testing
+ repacked = _sympy_cast_symbool_to_symint_guardless(sympy.Eq(new_raw_u1, 1))
+ assert repacked == raw_u1, f"{repacked} != {raw_u1}"
+ # Cancel the to_int(to_bool(x)). This is sound because x in
+ # [0, 1]
+ raw_u1 = new_raw_u1
+ assert isinstance(raw_u1, sympy.Symbol)
+ # The old and new could be the same if you improperly hit the memo
+ # while retracing. Make sure you updated FakeTensorMode.epoch
+ assert raw_u0 != raw_u1, f"{raw_u0} possible memo disaster"
shape_env._rename_unbacked_to(raw_u0, raw_u1)
def canonicalize_bool_expr(expr: SympyBoolean) -> SympyBoolean:
@@ -460,7 +487,7 @@ class DivideByKey:
return o // self.divisor
-def compute_unbacked_bindings(shape_env, example_value):
+def compute_unbacked_bindings(shape_env, example_value, old_example_value=None):
"""
After having run fake tensor propagation and producing example_value
result, traverse example_value looking for freshly bound unbacked
@@ -470,6 +497,8 @@ def compute_unbacked_bindings(shape_env, example_value):
function, you must call this on the tuple of tensor output, you
cannot wait!)
"""
+ if shape_env._ignore_fresh_unbacked_symbols_tls():
+ return
fs = shape_env.pending_fresh_unbacked_symbols
pending = set(fs)
if pending:
@@ -554,6 +583,24 @@ def compute_unbacked_bindings(shape_env, example_value):
else ""
)
)
+ # TODO: This is pretty fragile
+ # Normally, the equality test is supposed to be a no-op here, because
+ # you've already called rebind_unbacked first which takes all the old
+ # binding sites and discovers how they are newly bound. But this does
+ # not always work. For example, if the original FX node wasn't a
+ # binding site because you had a memo hit, but post translation you
+ # aren't a memo hit anymore, there's now a new binding site... but we
+ # know (because it's the same FX node) that the value is actually the
+ # same, they're just not obviously equal anymore. So we just insert
+ # a runtime assert in this case.
+ #
+ # This is very fragile, because u0 == u1 assertion does not generate
+ # a replacement. Here, I think it might be acceptable to do a
+ # replacement, so long as we replace the newer thing with the older
+ # thing. Fix this if it becomes an issue.
+ if old_example_value is not None:
+ for keypath in symbol_to_path.values():
+ torch._check(pytree.key_get(old_example_value, keypath) == pytree.key_get(example_value, keypath))
return symbol_to_path
def definitely_true(a):
@@ -1287,8 +1334,12 @@ def _eval_is_non_overlapping_and_dense(sizes, strides):
return True
+def _sympy_cast_symbool_to_symint_guardless(x: sympy.Expr) -> sympy.Expr:
+ return sympy.Piecewise((1, x), (0, True))
+
+
def cast_symbool_to_symint_guardless(symbool: torch.SymBool) -> torch.SymInt:
- int_sym = sympy.Piecewise((1, symbool.node.expr), (0, True))
+ int_sym = _sympy_cast_symbool_to_symint_guardless(symbool.node.expr)
return symbool.node.shape_env.create_symintnode(int_sym, hint=int(symbool.node.require_hint()) if has_hint(symbool) else None)
SYMPY_INTERP = {
@@ -2172,6 +2223,7 @@ class ShapeEnv:
# Maps from sympy ints to expressions representing them
# Populated from equality guards (i.e. a.shape[0] == b.shape[0])
self.replacements: Dict[sympy.Symbol, sympy.Expr] = {}
+ self.unbacked_renamings: Dict[sympy.Symbol, sympy.Symbol] = {}
# Set holds a % b expressions that evaluate to 0.
self.divisible: Set[sympy.Expr] = set()
# Set that holds "size-like" symbols. When we perform
@@ -2396,7 +2448,37 @@ class ShapeEnv:
# Unlike set_replacement, this records a shapeenv event
@record_shapeenv_event()
def _rename_unbacked_to(self, orig_s: sympy.Expr, new_s: sympy.Expr):
+ if self._ignore_fresh_unbacked_symbols_tls():
+ return
+ dest = self.replacements.get(orig_s)
+ assert not free_unbacked_symbols(dest), f"{orig_s} -> {dest}"
self._set_replacement(orig_s, new_s, "rename_unbacked_to")
+ self.unbacked_renamings[orig_s] = new_s
+ if dest is not None:
+ self._set_replacement(new_s, dest, "rename_unbacked_to_dest")
+
+ def _ignore_fresh_unbacked_symbols_tls(self):
+ return getattr(TLS, "ignore_fresh_unbacked_symbols", False)
+
+ @record_shapeenv_event()
+ def _ignore_fresh_unbacked_symbols_enter(self):
+ TLS.ignore_fresh_unbacked_symbols = True
+
+ @record_shapeenv_event()
+ def _ignore_fresh_unbacked_symbols_exit(self):
+ TLS.ignore_fresh_unbacked_symbols = False
+
+ @contextmanager
+ def ignore_fresh_unbacked_symbols(self):
+ """
+ Indicates that the newly allocated unbacked SymInts are being
+ discarded
+ """
+ self._ignore_fresh_unbacked_symbols_enter()
+ try:
+ yield
+ finally:
+ self._ignore_fresh_unbacked_symbols_exit()
@record_shapeenv_event()
def freeze(self):
@@ -2825,7 +2907,8 @@ class ShapeEnv:
"""
symbol: sympy.Symbol = sympy.Symbol(f"f{next(self.unbacked_symfloat_counter)}")
self.counter["create_unbacked_symbol"] += 1
- self.pending_fresh_unbacked_symbols.append(symbol)
+ if not self._ignore_fresh_unbacked_symbols_tls():
+ self.pending_fresh_unbacked_symbols.append(symbol)
self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
vr = self.var_to_range[symbol] = ValueRanges.unknown()
@@ -2841,7 +2924,8 @@ class ShapeEnv:
"""Create a symbolic integer without a hint value
"""
symbol: sympy.Symbol = sympy.Symbol(f"u{next(self.unbacked_symint_counter)}", integer=True)
- self.pending_fresh_unbacked_symbols.append(symbol)
+ if not self._ignore_fresh_unbacked_symbols_tls():
+ self.pending_fresh_unbacked_symbols.append(symbol)
self.counter["create_unbacked_symbol"] += 1
self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
vr = self.var_to_range[symbol] = self._default_unspecified_value_range()
@@ -2864,7 +2948,8 @@ class ShapeEnv:
"""Create a symbolic boolean without a hint value
"""
symbol: sympy.Symbol = sympy.Symbol(f"u{next(self.unbacked_symint_counter)}", integer=True)
- self.pending_fresh_unbacked_symbols.append(symbol)
+ if not self._ignore_fresh_unbacked_symbols_tls():
+ self.pending_fresh_unbacked_symbols.append(symbol)
self.counter["create_unbacked_symbol"] += 1
self.var_to_stack[symbol] = CapturedTraceback.extract(skip=1)
vr = self.var_to_range[symbol] = ValueRanges(0, 1)
@@ -4739,3 +4824,14 @@ def _is_int(expr):
# WARNING: This is legacy, DO NOT USE
def _is_dim_dynamic(t, d):
return hasattr(t, "_dynamo_dynamic_indices") and d in t._dynamo_dynamic_indices
+
+class PropagateUnbackedSymInts(torch.fx.Interpreter):
+ def run_node(self, n: torch.fx.Node):
+ """
+ Run an FX node, propagating unbacked Symbol bindings to the new fake tensor
+ """
+ from torch._guards import detect_fake_mode
+
+ result = super().run_node(n)
+ rebind_unbacked(detect_fake_mode().shape_env, n, result)
+ return result
diff --git a/torch/fx/passes/fake_tensor_prop.py b/torch/fx/passes/fake_tensor_prop.py
index 340a958ea0..58ee61f100 100644
--- a/torch/fx/passes/fake_tensor_prop.py
+++ b/torch/fx/passes/fake_tensor_prop.py
@@ -28,6 +28,7 @@ class FakeTensorProp(torch.fx.Interpreter):
if mode is None:
mode = FakeTensorMode()
self._mode = mode
+ mode.epoch += 1
def run_node(self, n: Node):
from torch.fx.experimental.symbolic_shapes import rebind_unbacked, compute_unbacked_bindings
|
2.41.0
|
b0eea222978e6b377e2c67f89902d5eb1aa7da3
|
Wed, 24 Apr 2024 13:29:17 -0700
|
[PATCH 0622/1000] [dtensor] move pad/unpad_tensor to separate utils (#124871)
|
as titled, 1. pad/unpad is a general util not specific to the Shard placement, 2. for the propose of the next PR, move these two out of Shard placement itself, and give additional pad_dim argument Pull Request resolved: https://github.com/pytorch/pytorch/pull/124871 Approved by: https://github.com/awgu, https://github.com/wz337
|
diff --git a/test/distributed/_tensor/test_dtensor.py b/test/distributed/_tensor/test_dtensor.py
index 653dfcbb58..224ca8c673 100644
--- a/test/distributed/_tensor/test_dtensor.py
+++ b/test/distributed/_tensor/test_dtensor.py
@@ -809,8 +809,10 @@ class TestDTensorPlacementTypes(DTensorTestBase):
]
assert_array_equal(expected_pad_sizes, pad_sizes)
+ from torch.distributed._tensor._collective_utils import unpad_tensor
+
unpadded_list = [
- shard_placement._unpad_tensor(tensor, pad_sizes[i])
+ unpad_tensor(tensor, shard_placement.dim, pad_sizes[i])
if pad_sizes[i] > 0
else tensor
for i, tensor in enumerate(splitted_tensor_list)
diff --git a/torch/distributed/_tensor/_collective_utils.py b/torch/distributed/_tensor/_collective_utils.py
index 9cf8376bd2..603ac09f4a 100644
--- a/torch/distributed/_tensor/_collective_utils.py
+++ b/torch/distributed/_tensor/_collective_utils.py
@@ -164,6 +164,24 @@ def mesh_all_to_all(
return work
+def pad_tensor(tensor: torch.Tensor, pad_dim: int, pad_size: int) -> torch.Tensor:
+ if pad_size == 0:
+ return tensor
+ pad = [0, 0] * (tensor.ndim - pad_dim)
+ pad[-1] = pad_size
+ return torch.nn.functional.pad(tensor, pad)
+
+
+def unpad_tensor(tensor: torch.Tensor, pad_dim: int, pad_size: int) -> torch.Tensor:
+ if pad_size == 0:
+ return tensor
+ return tensor.narrow(
+ pad_dim,
+ start=0,
+ length=tensor.size(pad_dim) - pad_size,
+ )
+
+
def spec_to_bytes(spec: "placement_types.DTensorSpec") -> int:
assert spec.tensor_meta is not None, "spec should have tensor meta defined!"
return spec.tensor_meta.dtype.itemsize * math.prod(spec.shape)
diff --git a/torch/distributed/_tensor/placement_types.py b/torch/distributed/_tensor/placement_types.py
index 8d88d064e8..d06c317c16 100644
--- a/torch/distributed/_tensor/placement_types.py
+++ b/torch/distributed/_tensor/placement_types.py
@@ -7,7 +7,12 @@ import torch
import torch.distributed._functional_collectives as funcol
import torch.distributed.distributed_c10d as c10d
-from torch.distributed._tensor._collective_utils import mesh_broadcast, mesh_scatter
+from torch.distributed._tensor._collective_utils import (
+ mesh_broadcast,
+ mesh_scatter,
+ pad_tensor,
+ unpad_tensor,
+)
from torch.distributed.device_mesh import DeviceMesh
@@ -83,37 +88,13 @@ class Shard(Placement):
for shard, pad_size in zip(tensor_list, pad_sizes):
# Fill the empty tensor with zeroes with padding.
if with_padding and pad_size > 0:
- shard = self._pad_tensor(shard, pad_size)
+ shard = pad_tensor(shard, self.dim, pad_size)
shard = shard.contiguous() if contiguous else shard
shard_list.append(shard)
return shard_list, pad_sizes
else:
return tensor_list, pad_sizes
- def _pad_tensor(
- self,
- tensor: torch.Tensor,
- pad_size: int,
- ) -> torch.Tensor:
- if pad_size == 0:
- return tensor
- pad = [0, 0] * (tensor.ndim - self.dim)
- pad[-1] = pad_size
- return torch.nn.functional.pad(tensor, pad)
-
- def _unpad_tensor(
- self,
- tensor: torch.Tensor,
- pad_size: int,
- ) -> torch.Tensor:
- if pad_size == 0:
- return tensor
- return tensor.narrow(
- self.dim,
- start=0,
- length=tensor.size(self.dim) - pad_size,
- )
-
@staticmethod
def _local_shard_size_on_dim(
size_on_dim: int,
@@ -166,7 +147,7 @@ class Shard(Placement):
# Only unpad if the local_tensor was padded on the dimension.
pad_size = pad_sizes[my_coordinate[mesh_dim]]
if pad_size > 0:
- output = self._unpad_tensor(output, pad_size)
+ output = unpad_tensor(output, self.dim, pad_size)
return output
def _reduce_shard_tensor(
@@ -201,7 +182,7 @@ class Shard(Placement):
)
if is_padded:
- output = self._unpad_tensor(output, pad_sizes[my_coordinate[mesh_dim]]) # type: ignore[possibly-undefined]
+ output = unpad_tensor(output, self.dim, pad_sizes[my_coordinate[mesh_dim]]) # type: ignore[possibly-undefined]
return output
def _to_replicate_tensor(
@@ -225,7 +206,7 @@ class Shard(Placement):
if is_padded:
full_chunk_size = (logical_dim_size + num_chunks - 1) // num_chunks
pad_size = full_chunk_size - local_shape[self.dim]
- local_tensor = self._pad_tensor(local_tensor, pad_size)
+ local_tensor = pad_tensor(local_tensor, self.dim, pad_size)
if not local_tensor.is_contiguous():
local_tensor = local_tensor.contiguous()
@@ -237,7 +218,7 @@ class Shard(Placement):
)
if is_padded:
unpad_size = full_chunk_size * num_chunks - logical_dim_size # type: ignore[possibly-undefined]
- result = self._unpad_tensor(result, unpad_size)
+ result = unpad_tensor(result, self.dim, unpad_size)
return result
def _replicate_to_shard(
|
2.41.0
|
021c9b8e48b8e787b75fd69a3076beffffb8208
|
Wed, 24 Apr 2024 11:58:01 -0700
|
[PATCH 0623/1000] [benchmark][cudagraph] Explicitly call aten.div with CUDA denominator for cudagraphs (#119729)
|
aten.div's output device will be its numerator's device, so it is acceptable to do CUDA / CPU divisions. Post-grad passes operate only on graphs and can't handle runtime graph inputs, so we change user code to move inputs to CUDA for cudagraphs. This affects any graph that has CPU tensors as graph inputs. Pull Request resolved: https://github.com/pytorch/pytorch/pull/119729 Approved by: https://github.com/eellison
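A small sketch of the device semantics described above; the tensors and values are illustrative, and only the cross-device division rule comes from the commit message (CUDA required to run it).

```
# aten.div takes its output device from the numerator, so dividing a CUDA
# tensor by a 0-dim CPU tensor is legal -- but that CPU tensor would show up
# as a graph input, which makes cudagraphs skip the graph.
import torch

numerator = torch.randn(4, device="cuda")
cpu_denominator = torch.tensor(4.0)                  # 0-dim CPU tensor
cuda_denominator = torch.tensor(4.0, device="cuda")  # what the benchmark now uses

assert (numerator / cpu_denominator).device == numerator.device
assert torch.allclose(numerator / cpu_denominator, numerator / cuda_denominator)
```

The change to `reduce_to_scalar_loss` in the diff simply constructs the denominator on `out.device`, so the captured graph never sees a CPU input.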
|
diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index 5f9dd9b84d..7c9c84c894 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -1,5 +1,6 @@
# Owner(s): ["module: inductor"]
import functools
+import io
import re
import sys
import unittest
@@ -1342,6 +1343,24 @@ TORCH_LIBRARY(test_autograd_cpp_node_data_dependent, m) {
out = compiled_fn(activations)
self.assertTrue(len(activations) == 0)
+ @unittest.skipIf(not HAS_CUDA, "requires cuda")
+ def test_cudagraphs_cpu_division(self):
+ from torch._dynamo.testing import reduce_to_scalar_loss
+
+ model = torch.nn.Linear(10, 10, dtype=torch.float16).cuda()
+ inputs = torch.randn(10, 10, dtype=torch.float16).cuda()
+ out = model(inputs)
+ loss = reduce_to_scalar_loss(out)
+ torch._inductor.config.triton.cudagraphs = True
+
+ stderr_msgs = io.StringIO()
+ with mock.patch("sys.stderr", stderr_msgs), compiled_autograd.enable(
+ compiler_fn
+ ):
+ loss.backward()
+
+ self.assertFalse("skipping cudagraphs" in stderr_msgs.getvalue())
+
def load_test_module(name):
testdir = Path(__file__).absolute().parent.parent
diff --git a/torch/_dynamo/testing.py b/torch/_dynamo/testing.py
index c115e1cc09..2dd384f4d8 100644
--- a/torch/_dynamo/testing.py
+++ b/torch/_dynamo/testing.py
@@ -103,7 +103,7 @@ def reduce_to_scalar_loss(out):
"""Reduce the output of a model to get scalar loss"""
if isinstance(out, torch.Tensor):
# Mean does not work on integer tensors
- return out.sum() / out.numel()
+ return out.sum() / torch.tensor(out.numel(), device=out.device)
elif isinstance(out, (list, tuple)):
return sum(reduce_to_scalar_loss(x) for x in out) / len(out)
elif type(out).__name__ in (
|
2.41.0
|
ed38c9b227f2099c77f4b34fbbe72afa176ac25
|
Wed, 24 Apr 2024 11:58:01 -0700
|
[PATCH 0624/1000] [cudagraphs] add more info to skip messages (#124700)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124700 Approved by: https://github.com/eellison ghstack dependencies: #119729
|
diff --git a/test/inductor/test_cudagraph_trees.py b/test/inductor/test_cudagraph_trees.py
index f80c610829..5e8ee760f1 100644
--- a/test/inductor/test_cudagraph_trees.py
+++ b/test/inductor/test_cudagraph_trees.py
@@ -253,7 +253,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
opt = torch.compile(model.forward, mode="reduce-overhead")(x, y, z)
FileCheck().check(
- "skipping cudagraphs due to mutation on input. Found from"
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check("torch.logical_xor").run(captured_output[0])
@requires_multigpu()
@@ -266,9 +266,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
with capture_stderr() as captured_output:
foo(torch.ones([10], device="cuda"), torch.ones([20]))
- FileCheck().check("skipping cudagraphs due to cpu device.").check(
- "y + 2"
- ).run(captured_output[0])
+ FileCheck().check(
+ "skipping cudagraphs due to cpu device (arg1_1). Found from"
+ ).check("y + 2").run(captured_output[0])
with capture_stderr() as captured_output:
foo(
@@ -309,9 +309,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
with capture_stderr() as captured_output:
foo(inp())
- FileCheck().check("skipping cudagraphs due to mutation on input.").check(
- ".add_(2)"
- ).run(captured_output[0])
+ FileCheck().check(
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from"
+ ).check(".add_(2)").run(captured_output[0])
# mutation on inp doesnt hit cudagraphs
self.assertEqual(len(self.get_manager().roots), 0)
@@ -396,7 +396,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
mut_out = mut(tmp)
self.assertEqual(mut_out, non_mut(foo(inp)))
FileCheck().check_count(
- "skipping cudagraphs due to mutation on input.", 0, exactly=True
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from",
+ 0,
+ exactly=True,
).run(captured_output[0])
torch.compiler.cudagraph_mark_step_begin()
@@ -407,9 +409,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
# now its an input from eager we should fallback to inductor without cudagraphs
with capture_stderr() as captured_output:
mut(mut_inp)
- FileCheck().check("skipping cudagraphs due to mutation on input.").check(
- "x.add_(2)"
- ).run(captured_output[0])
+ FileCheck().check(
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from"
+ ).check("x.add_(2)").run(captured_output[0])
self.assertEqual(mut_inp, non_mut(foo(inp)))
@parametrize("backend", ("inductor", "cudagraphs"))
@@ -434,7 +436,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
torch.compiler.cudagraph_mark_step_begin()
fee(inp(), foo(inp()))
FileCheck().check_count(
- "skipping cudagraphs due to mutation on input.", 1, exactly=True
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from",
+ 1,
+ exactly=True,
).run(captured_output[0])
@parametrize("backend", ("inductor", "cudagraphs"))
@@ -468,7 +472,9 @@ if HAS_CUDA and not TEST_WITH_ASAN:
mut(mut_inp) # should not warn since mut has warned
FileCheck().check_count(
- "skipping cudagraphs due to mutation on input.", 1, exactly=True
+ "skipping cudagraphs due to mutated inputs (1 instances). Found from",
+ 1,
+ exactly=True,
).run(captured_output[0])
def test_function_compiled_multiple_times(self):
@@ -1611,8 +1617,15 @@ if HAS_CUDA and not TEST_WITH_ASAN:
def foo(x):
return x.item()
- self.assertEqual(foo(torch.tensor(3.0, device="cuda")), 3.0)
- self.assertEqual(foo(torch.tensor(6.0, device="cuda")), 6.0)
+ with capture_stderr() as captured_output:
+ self.assertEqual(foo(torch.tensor(3.0, device="cuda")), 3.0)
+ self.assertEqual(foo(torch.tensor(6.0, device="cuda")), 6.0)
+
+ # NOTE: this test is named after incompatible ops, but is not skipping due to incompatible ops.
+ # This should get fixed.
+ FileCheck().check(
+ "skipping cudagraphs due to cpu device (_local_scalar_dense)"
+ ).run(captured_output[0])
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero(self):
@@ -1620,13 +1633,38 @@ if HAS_CUDA and not TEST_WITH_ASAN:
def foo(x):
return x.nonzero()
- self.assertEqual(
- foo(torch.tensor([1, 0, 2], device="cuda")), torch.tensor([[0], [2]])
- )
- self.assertEqual(
- foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
+ with capture_stderr() as captured_output:
+ self.assertEqual(
+ foo(torch.tensor([1, 0, 2], device="cuda")),
+ torch.tensor([[0], [2]]),
+ )
+ self.assertEqual(
+ foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
+ )
+
+ FileCheck().check("skipping cudagraphs due to ['incompatible ops']").run(
+ captured_output[0]
)
+ @torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
+ def test_incompatible_cudagraph_ops_nonzero_backend(self):
+ @torch.compile(backend="cudagraphs")
+ def foo(x):
+ return x.nonzero()
+
+ with capture_stderr() as captured_output:
+ self.assertEqual(
+ foo(torch.tensor([1, 0, 2], device="cuda")),
+ torch.tensor([[0], [2]]),
+ )
+ self.assertEqual(
+ foo(torch.tensor([1, 0, 0], device="cuda")), torch.tensor([[0]])
+ )
+
+ FileCheck().check(
+ "skipping cudagraphs due to incompatible op (nonzero)"
+ ).run(captured_output[0])
+
def test_storage_access_error(self):
x = torch.rand([4], device="cuda")
torch._C._set_storage_access_error_msg(x, "custom error msg")
diff --git a/torch/_dynamo/backends/cudagraphs.py b/torch/_dynamo/backends/cudagraphs.py
index ee89b79690..742e141c71 100644
--- a/torch/_dynamo/backends/cudagraphs.py
+++ b/torch/_dynamo/backends/cudagraphs.py
@@ -12,13 +12,14 @@ from torch._dynamo.backends.debugging import boxed_nop
from torch._inductor.cudagraph_utils import (
BoxedDeviceIndex,
check_multiple_devices_or_any_cpu_nodes,
+ format_default_skip_message,
get_mutation_stack_trace,
get_placeholders,
)
from torch._inductor.utils import (
BoxedBool,
count_tangents,
- has_incompatible_cudagraph_ops,
+ get_first_incompatible_cudagraph_node,
num_fw_fixed_arguments,
output_node,
)
@@ -99,8 +100,8 @@ def check_for_skip(aot_model: torch.fx.GraphModule, num_fixed) -> Optional[str]:
):
return skip
- if has_incompatible_cudagraph_ops(aot_model):
- return "skipping cudagraphs due to incompatible op"
+ if node := get_first_incompatible_cudagraph_node(aot_model):
+ return format_default_skip_message(f"incompatible op ({node.name})")
return None
diff --git a/torch/_inductor/cudagraph_utils.py b/torch/_inductor/cudagraph_utils.py
index e897096f4e..dd551fad03 100644
--- a/torch/_inductor/cudagraph_utils.py
+++ b/torch/_inductor/cudagraph_utils.py
@@ -1,5 +1,5 @@
import dataclasses
-from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
+from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
@@ -48,7 +48,7 @@ def format_default_skip_message(reason: str) -> str:
def get_mutation_stack_trace(
- placeholders: List[torch.fx.Node], mutation_indices: Iterable[int]
+ placeholders: List[torch.fx.Node], mutation_indices: List[int]
) -> str:
stack_trace: Optional[str] = ""
@@ -57,11 +57,13 @@ def get_mutation_stack_trace(
if stack_trace := get_mutating_use_stack_trace(placeholder):
break
+ msg = format_default_skip_message(
+ f"mutated inputs ({len(mutation_indices)} instances)"
+ )
if stack_trace:
- msg = f"skipping cudagraphs due to mutation on input. Found from : \n {stack_trace}"
- return msg
+ return f"{msg}. Found from : \n {stack_trace}"
- return format_default_skip_message("mutated inputs")
+ return msg
def check_for_mutation(
@@ -69,8 +71,6 @@ def check_for_mutation(
inputs: List[torch.Tensor],
is_cuda_graph_recorded_tensor: Callable[[torch.Tensor], bool],
) -> Optional[str]:
- default_msg = format_default_skip_message("mutated inputs")
-
# doesnt work for non-trees because the warmup run would apply mutation twice
if torch._inductor.config.triton.cudagraph_trees:
# checking if mutation is only on parameters/static inputs
@@ -82,15 +82,14 @@ def check_for_mutation(
or is_cuda_graph_recorded_tensor(inputs[idx])
)
]
- has_mutation = len(mutation_indices) != 0
- if not has_mutation:
- return None
-
- return get_mutation_stack_trace(func.placeholders, mutation_indices)
-
else:
- has_mutation = len(func.mutated_input_idxs) != 0
- return None if not has_mutation else default_msg
+ mutation_indices = func.mutated_input_idxs
+
+ return (
+ get_mutation_stack_trace(func.placeholders, mutation_indices)
+ if mutation_indices
+ else None
+ )
def get_use_stack_trace(node) -> Optional[str]:
@@ -104,12 +103,11 @@ def check_multiple_devices_or_any_cpu_nodes(
device_node_mapping: Dict[torch.device, torch.fx.Node]
) -> Optional[str]:
if cpu_node := device_node_mapping.get(torch.device("cpu")):
+ msg = f"cpu device ({cpu_node.name})"
if stack_trace := get_use_stack_trace(cpu_node):
- return format_default_skip_message(
- f"cpu device. Found from : \n {stack_trace}"
- )
+ return format_default_skip_message(f"{msg}. Found from : \n {stack_trace}")
- return format_default_skip_message("cpu device")
+ return format_default_skip_message(msg)
if (
len(device_node_mapping) == 1
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index 3cf4faa24b..8f218fb97c 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -590,7 +590,7 @@ def any_is_symbolic(*args: Any) -> bool:
return any(is_symbolic(a) for a in args)
-def has_incompatible_cudagraph_ops(gm):
+def get_first_incompatible_cudagraph_node(gm):
from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
forbidden_set = {
@@ -626,10 +626,14 @@ def has_incompatible_cudagraph_ops(gm):
)
for node in gm.graph.nodes:
if str(node.target) in forbidden_set:
- return True
+ return node
if (val := node.meta.get("val")) is not None and free_unbacked_symbols(val):
- return True
- return False
+ return node
+ return None
+
+
+def has_incompatible_cudagraph_ops(gm):
+ return get_first_incompatible_cudagraph_node(gm) is not None
def output_node(gm: torch.fx.GraphModule):
|
2.41.0
|
dad16b85108209bc021107f312f4b221422a012
|
Wed, 24 Apr 2024 11:58:02 -0700
|
[PATCH 0625/1000] [cudagraphs] add cudagraph_skips counter (#124804)
|
Used in tests and in the benchmark CSV. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124804 Approved by: https://github.com/eellison ghstack dependencies: #119729, #124700
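For reference, a tiny sketch of how the new counter is read; it mirrors the reads added in the tests and benchmark harness below, and the surrounding workload is a placeholder.

```
from torch._dynamo.utils import counters

# ... run a torch.compile'd workload that may hit a cudagraphs skip path ...

# counters is a defaultdict of Counter, so missing keys read as 0; this is
# safe even when nothing was skipped.
num_skips = counters["inductor"]["cudagraph_skips"]
print(f"cudagraphs skipped {num_skips} graph(s)")
```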
|
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index d610d7dd13..99fbd7b86d 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -1956,6 +1956,9 @@ def get_dynamo_stats():
"autograd_compiles": torch._dynamo.utils.counters["compiled_autograd"][
"compiles"
],
+ "cudagraph_skips": torch._dynamo.utils.counters["inductor"][
+ "cudagraph_skips"
+ ],
}
)
diff --git a/test/inductor/test_cudagraph_trees.py b/test/inductor/test_cudagraph_trees.py
index 5e8ee760f1..583643e123 100644
--- a/test/inductor/test_cudagraph_trees.py
+++ b/test/inductor/test_cudagraph_trees.py
@@ -11,6 +11,7 @@ import torch
import torch._dynamo.config as dynamo_config
import torch.nn as nn
+from torch._dynamo.utils import counters
from torch._inductor import config
from torch._inductor.compile_fx import compile_fx_inner
from torch._inductor.cudagraph_trees import cudagraphify_impl as tree_cudagraphify_impl
@@ -255,6 +256,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check("torch.logical_xor").run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@requires_multigpu()
@parametrize("backend", ("inductor", "cudagraphs"))
@@ -269,6 +271,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to cpu device (arg1_1). Found from"
).check("y + 2").run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
with capture_stderr() as captured_output:
foo(
@@ -278,6 +281,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check("skipping cudagraphs due to multiple devices").run(
captured_output[0]
)
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 2)
@torch._inductor.config.patch("triton.cudagraph_skip_dynamic_graphs", True)
def test_skip_symbolic(self):
@@ -291,6 +295,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to graph with symbolic shapes inputs"
).check("x + y").run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@@ -312,6 +317,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check(".add_(2)").run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
# mutation on inp doesnt hit cudagraphs
self.assertEqual(len(self.get_manager().roots), 0)
@@ -400,6 +406,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
0,
exactly=True,
).run(captured_output[0])
+ self.assertTrue("cudagraph_skips" not in counters["inductor"])
torch.compiler.cudagraph_mark_step_begin()
inp = torch.rand([4], device="cuda")
@@ -413,6 +420,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
"skipping cudagraphs due to mutated inputs (1 instances). Found from"
).check("x.add_(2)").run(captured_output[0])
self.assertEqual(mut_inp, non_mut(foo(inp)))
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@@ -440,6 +448,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
1,
exactly=True,
).run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@parametrize("backend", ("inductor", "cudagraphs"))
@torch._dynamo.config.patch("cudagraph_backend_keep_input_mutation", True)
@@ -476,6 +485,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
1,
exactly=True,
).run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
def test_function_compiled_multiple_times(self):
def foo(x):
@@ -1626,6 +1636,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to cpu device (_local_scalar_dense)"
).run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero(self):
@@ -1645,6 +1656,20 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check("skipping cudagraphs due to ['incompatible ops']").run(
captured_output[0]
)
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
+
+ @torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
+ def test_incompatible_cudagraph_ops_nonzero_graph_breaks(self):
+ @torch.compile(mode="reduce-overhead")
+ def foo(x):
+ y = x.nonzero() # skip
+ torch._dynamo.graph_break()
+ return y.nonzero() # skip 2 times (due to recompile)
+
+ foo(torch.tensor([1, 0, 2], device="cuda"))
+ foo(torch.tensor([1, 0, 0], device="cuda"))
+
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 3)
@torch._dynamo.config.patch("capture_dynamic_output_shape_ops", True)
def test_incompatible_cudagraph_ops_nonzero_backend(self):
@@ -1664,6 +1689,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
FileCheck().check(
"skipping cudagraphs due to incompatible op (nonzero)"
).run(captured_output[0])
+ self.assertEqual(counters["inductor"]["cudagraph_skips"], 1)
def test_storage_access_error(self):
x = torch.rand([4], device="cuda")
diff --git a/torch/_dynamo/backends/cudagraphs.py b/torch/_dynamo/backends/cudagraphs.py
index 742e141c71..4cef3a68fd 100644
--- a/torch/_dynamo/backends/cudagraphs.py
+++ b/torch/_dynamo/backends/cudagraphs.py
@@ -15,6 +15,7 @@ from torch._inductor.cudagraph_utils import (
format_default_skip_message,
get_mutation_stack_trace,
get_placeholders,
+ log_cudagraph_skip_and_bump_counter,
)
from torch._inductor.utils import (
BoxedBool,
@@ -27,8 +28,6 @@ from torch._inductor.utils import (
from torch.multiprocessing.reductions import StorageWeakRef
from .registry import register_backend
-perf_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
-
def find_input_mutations(g):
def meta_fk(meta):
@@ -132,7 +131,9 @@ def cudagraphs(dynamo_model, dynamo_inputs):
fixed = num_fw_fixed_arguments(len(dynamo_inputs), len(aot_inputs))
if skip_msg := check_for_skip(aot_model, fixed):
BoxedBool.disable(do_cudagraphs)
- perf_log.warning("skipping cudagraphs due to %s", skip_msg)
+ log_cudagraph_skip_and_bump_counter(
+ f"skipping cudagraphs due to {skip_msg}"
+ )
return interp
boxed_device_index.set(get_device_index(aot_model))
@@ -157,7 +158,9 @@ def cudagraphs(dynamo_model, dynamo_inputs):
fixed = count_tangents(aot_model)
if skip_msg := check_for_skip(aot_model, fixed):
- perf_log.warning("skipping cudagraphs due to %s", skip_msg)
+ log_cudagraph_skip_and_bump_counter(
+ "skipping cudagraphs due to %s", skip_msg
+ )
# See [Backward Generation Handling]
manager = torch._inductor.cudagraph_trees.get_manager(
diff --git a/torch/_inductor/compile_fx.py b/torch/_inductor/compile_fx.py
index c99d15a86f..c93fd8535b 100644
--- a/torch/_inductor/compile_fx.py
+++ b/torch/_inductor/compile_fx.py
@@ -29,7 +29,11 @@ from torch._dynamo.utils import (
from torch._functorch import config as functorch_config
from torch._functorch.aot_autograd import aot_export_module, make_boxed_func
from torch._inductor.codecache import code_hash, CompiledFxGraph, FxGraphCache
-from torch._inductor.cudagraph_utils import BoxedDeviceIndex, get_placeholders
+from torch._inductor.cudagraph_utils import (
+ BoxedDeviceIndex,
+ get_placeholders,
+ log_cudagraph_skip_and_bump_counter,
+)
from torch._inductor.debug import save_args_for_compile_fx_inner
from torch._inductor.utils import BoxedBool, count_tangents
@@ -483,9 +487,8 @@ def compile_fx_inner(
# check cudagraph disabling reasons from inductor lowering
if cudagraphs and compiled_graph.disabled_cudagraphs_reason:
if "cuda" in compiled_graph.device_types:
- perf_hint_log.warning(
- "skipping cudagraphs due to %s",
- compiled_graph.disabled_cudagraphs_reason,
+ log_cudagraph_skip_and_bump_counter(
+ f"skipping cudagraphs due to {compiled_graph.disabled_cudagraphs_reason}"
)
BoxedBool.disable(cudagraphs)
@@ -596,10 +599,12 @@ def compile_fx_inner(
# prefer better disable_cudagraphs_reason bc stack trace
# TODO: migrate all disable reasons to stack trace, refactor
if compiled_graph.disabled_cudagraphs_reason:
- perf_hint_log.warning(compiled_graph.disabled_cudagraphs_reason)
+ log_cudagraph_skip_and_bump_counter(
+ compiled_graph.disabled_cudagraphs_reason
+ )
else:
- perf_hint_log.warning(
- "skipping cudagraphs due to %s", cudagraph_fail_reasons
+ log_cudagraph_skip_and_bump_counter(
+ f"skipping cudagraphs due to {cudagraph_fail_reasons}"
)
# cudagraphs does its own aligning of inputs
diff --git a/torch/_inductor/cudagraph_trees.py b/torch/_inductor/cudagraph_trees.py
index 141354d43a..f1ca0950b9 100644
--- a/torch/_inductor/cudagraph_trees.py
+++ b/torch/_inductor/cudagraph_trees.py
@@ -79,6 +79,7 @@ from torch._inductor.compile_fx import (
from torch._inductor.cudagraph_utils import (
check_for_mutation,
FunctionID,
+ log_cudagraph_skip_and_bump_counter,
WrappedFunction,
)
from torch.multiprocessing.reductions import StorageWeakRef
@@ -111,9 +112,6 @@ log = torch._logging.getArtifactLogger(__name__, "cudagraphs")
from . import config
-perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
-
-
@dataclasses.dataclass(frozen=True)
class GraphID:
"Unique counter of a cuda graph recording"
@@ -1816,7 +1814,7 @@ class CUDAGraphTreeManager:
self, function_id: FunctionID, inputs: List[Tensor]
):
node_id = self._get_node_id()
- if has_mutation_str := check_for_mutation(
+ if maybe_mutation_str := check_for_mutation(
self.ids_to_funcs[function_id],
inputs,
self._get_cuda_graph_recorded_tensor_checker(),
@@ -1826,7 +1824,7 @@ class CUDAGraphTreeManager:
if function_id in self.warned_mutation:
return
self.warned_mutation.add(function_id)
- perf_hint_log.warning(has_mutation_str)
+ log_cudagraph_skip_and_bump_counter(maybe_mutation_str)
else:
self.non_cudagraph_managed_mutation_hint[node_id][function_id] = False
diff --git a/torch/_inductor/cudagraph_utils.py b/torch/_inductor/cudagraph_utils.py
index dd551fad03..c87022fcb7 100644
--- a/torch/_inductor/cudagraph_utils.py
+++ b/torch/_inductor/cudagraph_utils.py
@@ -2,6 +2,9 @@ import dataclasses
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
+from torch._dynamo.utils import counters
+
+perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints")
@dataclasses.dataclass(frozen=True)
@@ -125,6 +128,11 @@ def check_lowering_disable_cudagraph(
return check_multiple_devices_or_any_cpu_nodes(device_node_mapping)
+def log_cudagraph_skip_and_bump_counter(msg):
+ perf_hint_log.warning(msg)
+ counters["inductor"]["cudagraph_skips"] += 1
+
+
@dataclasses.dataclass
class BoxedDeviceIndex:
value: Optional[int]
|
2.41.0
|
4666389e1ab33ae729d2cab234d053ce0133908
|
Thu, 25 Apr 2024 04:38:44 +0000
|
[PATCH 0627/1000] [FX] Update opinfo tests (flattened diff) (#124657)
|
Summary: This diff updates opinfo tests to compute more statistics. The results are described in this post: https://fb.workplace.com/groups/ai.acceleration.team/permalink/825131926110067/

New features:
- Optionally dump kernels to a directory
- Optionally disable block pointers
- Impose a time limit (2 min) on individual tests
- Report a variety of specific error codes when a test fails:
  - MIXED
  - FALLBACK
  - EXPORT_ERROR
  - COMPILE_ERROR
  - MULTIPLE_KERNELS
  - MISSING_KERNELS
  - TIMEOUT
- Disable setting the RNG seed inside of opinfo, since Dynamo doesn't like this and it caused a lot of tests to fail which would otherwise be able to generate Triton.
- Check each test's `(op,dtype)` pair against {HuggingFace, TIMM, TorchBench} benchmark logs, to see whether tests are representative of real-world usage.

Test Plan: `buck2 test @//mode/{dev-nosan,mtia} fbcode//triton_mtia/python/test:` passed locally. This code is also exercised by the CI.

Added a bunch of new unit tests:
- Dumping kernels to a directory
- Disabling block pointers
- Mocking various error conditions in inductor:
  - No kernels
  - Multiple kernels
  - ATen fallback
  - Partial ATen fallback (mixed Triton + ATen)
  - `torch.export` raised exception
  - `torch.inductor._compile` raised exception
  - Timeout while running test
  - Test harness raised uncaught exception
- Check that return code == Success when exceptions were raised
- Checking whether various (op,dtype) combos are in benchmarks:
  - Check that `aten.add.Tensor` IS in the benchmarks
  - Check that a made-up op is NOT in them

Differential Revision: D56336160 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124657 Approved by: https://github.com/eellison
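As an illustration only, the per-test outcome codes listed above could be modeled as a small enum. The class name and the member value strings are hypothetical; only the code names themselves come from the summary, and the actual internal harness may be structured differently.

```
# Hypothetical sketch of the result codes named in the summary above.
import enum

class OpInfoTestOutcome(enum.Enum):
    SUCCESS = "success"                    # assumed "return code == Success" case
    MIXED = "mixed"                        # mixed Triton + ATen kernels
    FALLBACK = "fallback"                  # full ATen fallback
    EXPORT_ERROR = "export_error"          # torch.export raised
    COMPILE_ERROR = "compile_error"        # inductor compile raised
    MULTIPLE_KERNELS = "multiple_kernels"
    MISSING_KERNELS = "missing_kernels"
    TIMEOUT = "timeout"                    # exceeded the 2 min per-test limit
```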
|
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index 7f3d435168..30cc4e525f 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -26,7 +26,7 @@ from typing import (
import sympy
import torch
-from torch._dynamo.utils import dynamo_timed
+from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor.metrics import get_metric_table, is_metric_table_enabled
from torch.utils._triton import has_triton
@@ -2387,6 +2387,7 @@ class Scheduler:
# the current kernel from where 'allocate' retrieve those decisions.
# We have to make sure there is a non-NULL kernel handler to store
# those inplace update decisions.
+ counters["inductor"]["extern_calls"] += 1
with V.set_kernel_handler(Kernel(increase_kernel_count=False)):
scheduler_node.decide_inplace_update()
scheduler_node.allocate()
|
2.41.0
|