| commitId | datetime | subject | comment | diff | gitVersion |
|---|---|---|---|---|---|
592a609fdcff4dd5f6443630cfbf5d91425cea6
|
Fri, 26 Apr 2024 09:02:55 +0800
|
[PATCH 0748/1000] [Quant][ONEDNN] improve performance of qconv by reducing integration overhead (#123240)
|
## Description

Framework overhead is found to be big for the onednn qconv op (used for quantization with PT2E X86Inductor backend). This PR reduces the integration overhead by modifying the implementation of qconv.

## performance results

Running quantized Resnet50 on an Intel(R) Xeon(R) Platinum 8490H machine

Before
```
Average latency: 8.378 ms.
-------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                     Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
-------------------------  ------------  ------------  ------------  ------------  ------------  ------------
onednn::qconv2d_pointwise        86.54%       6.954ms        87.42%       7.025ms     132.547us            53
```

After
```
Average latency: 6.255 ms.
-------------------------  ------------  ------------  ------------  ------------  ------------  ------------
                     Name    Self CPU %      Self CPU   CPU total %     CPU total  CPU time avg    # of Calls
-------------------------  ------------  ------------  ------------  ------------  ------------  ------------
onednn::qconv2d_pointwise        85.05%       6.381ms        85.98%       6.451ms     121.717us            53
```

Test script:
```python
import torch
import torchvision
import time
import copy
import numpy as np
from torch._export import capture_pre_autograd_graph
from torch.ao.quantization.quantize_pt2e import (
    prepare_pt2e,
    convert_pt2e,
)
import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq
from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer

torch._inductor.config.cpp.enable_kernel_profile = True
torch._inductor.config.profiler_mark_wrapper_call = True
torch._inductor.config.freezing = True
torch._inductor.config.cpp_wrapper = True

def bench_model(model, inputs):
    times = []
    with torch.no_grad():
        for _ in range(5):  # warm-up
            output = model(inputs)
        for _ in range(20):
            start_time = time.time()
            output = model(inputs)
            end_time = time.time()
            times.append(end_time - start_time)
    print('Average latency: %0.3f ms.' % (np.median(times) * 1000.0))

    with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU]) as p:
        out_ipex = model(inputs)
    print(p.key_averages().table(sort_by="self_cpu_time_total", row_limit=-1))

def pt2e_ptq(m, example_inputs):
    m = m.eval()
    exported_model = capture_pre_autograd_graph(m, example_inputs)
    quantizer = X86InductorQuantizer()
    quantizer.set_global(xiq.get_default_x86_inductor_quantization_config())
    prepared_model = prepare_pt2e(exported_model, quantizer)
    _ = prepared_model(*example_inputs)
    converted_model = convert_pt2e(prepared_model)
    torch.ao.quantization.move_exported_model_to_eval(converted_model)
    with torch.no_grad():
        optimized_model = torch.compile(converted_model)
        _ = optimized_model(*example_inputs)
        _ = optimized_model(*example_inputs)
        bench_model(optimized_model, *example_inputs)
    return optimized_model

if __name__ == "__main__":
    data = torch.randn(16, 3, 224, 224)
    model_fp = torchvision.models.resnet50(weights=torchvision.models.ResNet50_Weights.DEFAULT)
    pt2e_ptq(copy.deepcopy(model_fp), (data,))
```

Differential Revision: [D56288440](https://our.internmc.facebook.com/intern/diff/D56288440)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123240
Approved by: https://github.com/leslie-fang-intel, https://github.com/jgong5, https://github.com/jerryzh168
|
diff --git a/aten/src/ATen/native/quantized/cpu/qconv.cpp b/aten/src/ATen/native/quantized/cpu/qconv.cpp
index 50155d85d4..0b75e35004 100644
--- a/aten/src/ATen/native/quantized/cpu/qconv.cpp
+++ b/aten/src/ATen/native/quantized/cpu/qconv.cpp
@@ -1504,9 +1504,6 @@ static at::Tensor _quantized_convolution_onednn(
kSpatialDim, "D convolution.");
// Parameters
- // Scales of ONEDNN and PyTorch are reciprocal
- const ideep::scale_t& src_scales = ideep::scale_t(1, 1.0 / act_scale);
-
#if IDEEP_PREREQ(3, 1, 0, 1)
// 1. If the weight scale generated by observer should with dtype float32
// https://github.com/pytorch/pytorch/blob/d2c24eca8a60c56b31ca967a44d5cc4522802aa6/torch/ao/quantization/observer.py#L323
@@ -1592,47 +1589,20 @@ static at::Tensor _quantized_convolution_onednn(
output_sizes = at::native::conv_output_size(input_size, kernel_size, padding.vec(), stride.vec(), dilation.vec());
ideep::dims dst_dims = ideep::dims({output_sizes.cbegin(), output_sizes.cend()});
// Output is not a quantized tensor but data type is uint8
- at::Tensor output;
- if (fp32_output || bfloat16_output) {
- output = at::empty(
+ at::Tensor output = has_accum_postop_sum ?
+ accum.value() :
+ at::empty(
dst_dims,
device(c10::kCPU)
- .dtype(fp32_output ? c10::kFloat : c10::kBFloat16)
- .memory_format(kSpatialDim == 2 ?
- c10::MemoryFormat::ChannelsLast :
- c10::MemoryFormat::ChannelsLast3d),
- c10::nullopt);
- } else {
- output = at::empty(
- dst_dims,
- device(c10::kCPU)
- .dtype(c10::kByte)
+ .dtype(fp32_output ? c10::kFloat : (bfloat16_output ? c10::kBFloat16 : c10::kByte))
.memory_format(kSpatialDim == 2 ?
c10::MemoryFormat::ChannelsLast :
c10::MemoryFormat::ChannelsLast3d)
);
- }
if (output.numel() == 0) {
return output;
}
- ideep::tensor dst;
- if (has_accum_postop_sum) {
- auto dst_desc = ideep::tensor::desc(dst_dims, fp32_output ? ideep::tensor::data_type::f32 : (
- bfloat16_output ? ideep::tensor::data_type::bf16 : src_data_type),
- kSpatialDim == 2 ? ideep::format_tag::nhwc : ideep::format_tag::ndhwc);
- TORCH_CHECK(accum.value().dtype() == output.dtype(), "The output tensor should have same dtype as the accum tensor.");
- // When fused with sum, the dst tensor will share the data ptr as the accum tensor.
- dst.init(dst_desc, accum.value().data_ptr());
- } else {
- if (fp32_output || bfloat16_output) {
- // Conv without add: int8-in, fp32-output
- dst = ideep::tensor({dst_dims, fp32_output ? ideep::tensor::data_type::f32 : ideep::tensor::data_type::bf16, {output.strides().cbegin(), output.strides().cend()}},
- output.data_ptr());
- } else {
- dst = ideep::tensor({dst_dims, ideep::tensor::data_type::u8, {output.strides().cbegin(), output.strides().cend()}},
- output.data_ptr());
- }
- }
+ ideep::tensor dst = at::native::itensor_view_from_dense(output);
static ideep::tensor::desc dummy_accum_desc;
ideep::attr_t op_attr = onednn_utils::create_attr_by_post_op(
binary_attr.has_value() ? binary_attr.value() : "none",
@@ -1644,6 +1614,84 @@ static at::Tensor _quantized_convolution_onednn(
unary_scalars,
unary_algorithm.has_value() ? unary_algorithm.value() : ""
);
+
+#if IDEEP_PREREQ(3, 1, 0, 0)
+ // Use oneDNN's APIs instead of prepare/compute from ideep to reduce integration overhead.
+ // The functions from ideep are heavy because they have complex data structures for unified API
+ // oneDNN version >= 3.1.0 is required.
+ using ideep::tensor;
+ auto weights_desc = packed_weight.get_desc();
+ auto dst_desc = dst.get_desc();
+ auto bias_desc = with_bias ?
+ tensor::desc(expected_bias.get_dims(), ideep::data_type::f32, ideep::format_tag::any) :
+ tensor::desc();
+ if (act_scale != 1.0f) {
+ op_attr.set_scales_mask(DNNL_ARG_SRC, 0);
+ }
+ if (act_zero_point != 0) {
+ op_attr.set_zero_points_mask(DNNL_ARG_SRC, 0);
+ }
+ int oc_per_group = packed_weight.get_dim(0) / groups;
+ int wei_scale_mask = ideep::utils::conv_weight_scale_mask(weight_scales.numel(), oc_per_group, groups, false);
+ op_attr.set_scales_mask(DNNL_ARG_WEIGHTS, wei_scale_mask);
+ if (inv_output_scale != 1.0f) {
+ op_attr.set_scales_mask(DNNL_ARG_DST, 0);
+ }
+ if (output_zero_point != 0) {
+ op_attr.set_zero_points_mask(DNNL_ARG_DST, 0);
+ }
+ op_attr.set_scratchpad_mode(dnnl::scratchpad_mode::user);
+ auto engine = ideep::engine::cpu_engine();
+ auto dilates_dnnl = ideep::utils::get_compatible_dilates(dilation.vec());
+ auto primitive_desc = with_bias ?
+ dnnl::convolution_forward::primitive_desc(
+ engine, dnnl::prop_kind::forward_inference, dnnl::algorithm::convolution_direct,
+ src_desc, weights_desc, bias_desc, dst_desc,
+ stride.vec(), dilates_dnnl, padding.vec(), padding.vec(), op_attr
+ ) :
+ dnnl::convolution_forward::primitive_desc(
+ engine, dnnl::prop_kind::forward_inference, dnnl::algorithm::convolution_direct,
+ src_desc, weights_desc, dst_desc,
+ stride.vec(), dilates_dnnl, padding.vec(), padding.vec(), op_attr
+ );
+ auto primitive = dnnl::convolution_forward(primitive_desc);
+
+ // Reorder weight if needed
+ auto expected_weight = packed_weight.reorder_if_differ_in(primitive_desc.weights_desc());
+
+ // Prepare args and execute primitive
+ tensor scratchpad(primitive_desc.scratchpad_desc());
+ ideep::exec_args args;
+ args.insert({DNNL_ARG_SRC, src});
+ args.insert({DNNL_ARG_WEIGHTS, expected_weight});
+ args.insert({DNNL_ARG_DST, dst});
+ args.insert({DNNL_ARG_SCRATCHPAD, scratchpad});
+ if (with_bias) {
+ args.insert({DNNL_ARG_BIAS, expected_bias});
+ }
+ tensor src_scales_t = tensor(ideep::scale_t(1, act_scale));
+ tensor wei_scales_t = tensor(weights_scales);
+ tensor dst_scales_t = tensor(ideep::scale_t(1, 1.0/inv_output_scale));
+ tensor src_zp_t = tensor(ideep::zero_point_t(1, act_zero_point));
+ tensor dst_zp_t = tensor(ideep::zero_point_t(1, output_zero_point));
+ if (act_scale != 1.0f) {
+ args.insert({DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, src_scales_t});
+ }
+ if (inv_output_scale != 1.0f) {
+ args.insert({DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST, dst_scales_t});
+ }
+ args.insert({DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS, wei_scales_t});
+ if (act_zero_point != 0) {
+ args.insert({DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_SRC, src_zp_t});
+ }
+ if (output_zero_point != 0) {
+ args.insert({DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_DST, dst_zp_t});
+ }
+ primitive.execute(ideep::stream::default_stream(), args);
+#else
+ // Scales of ONEDNN and PyTorch are reciprocal
+ const ideep::scale_t& src_scales = ideep::scale_t(1, 1.0 / act_scale);
+
// set accum scale/zero point to dst
if (has_accum_postop_sum) {
const ideep::scale_t accum_ideep_scale = ideep::scale_t(1, 1.0/accum_scale);
@@ -1669,6 +1717,7 @@ static at::Tensor _quantized_convolution_onednn(
// Computation
ideep::convolution_forward::compute<false, false>(params, src, expected_weight, expected_bias, dst);
+#endif
if (is_1d) {
output.squeeze_(quant_utils::kConv1dSqueezeDim + 2);
|
2.41.0
|
3a7ab2a217f0ee007f65bbdec56c21e6e310aba
|
Fri, 26 Apr 2024 14:19:39 -0700
|
[PATCH 0749/1000] [compiled autograd] introduce verbose logs, add autograd node info to graph (#124954)
|
- sets it as a fake stack trace as we don't have a generic comment feature
- when verbose is disabled, still adds a contextmanager and flag checks. the alternative is to use MACROS, but that wouldn't be usable with TORCH_LOGS

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124954
Approved by: https://github.com/jansel
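For context, a minimal usage sketch (not part of the original commit message) of how the new artifact is toggled, based on the tests added in this diff; the `eager` backend and the tiny model are illustrative only:

```python
import torch
import torch._dynamo.compiled_autograd as compiled_autograd

# Enable the new artifact; equivalently: TORCH_LOGS="compiled_autograd_verbose".
# Set it before compiled_autograd.enable(), which snapshots the flag.
torch._logging.set_logs(compiled_autograd_verbose=True)

def compiler_fn(gm):
    return torch.compile(gm, backend="eager")

model = torch.nn.Linear(4, 4)
x = torch.randn(2, 4)
with compiled_autograd.enable(compiler_fn):
    model(x).sum().backward()  # verbose logs now carry autograd node -> fx node info
```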
|
diff --git a/test/dynamo/test_logging.py b/test/dynamo/test_logging.py
index 8c37ef1a7f..cdd7050670 100644
--- a/test/dynamo/test_logging.py
+++ b/test/dynamo/test_logging.py
@@ -701,6 +701,7 @@ exclusions = {
"aot_graphs",
"post_grad_graphs",
"compiled_autograd",
+ "compiled_autograd_verbose",
"recompiles",
"recompiles_verbose",
"graph_breaks",
diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index 7c9c84c894..ddb4410196 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -15,6 +15,7 @@ from torch._dynamo import compiled_autograd
from torch._dynamo.utils import counters
from torch._inductor.test_case import run_tests, TestCase
from torch.testing._internal.inductor_utils import HAS_CPU, HAS_CUDA
+from torch.testing._internal.logging_utils import logs_to_string
# note: these tests are not run on windows due to inductor_utils.HAS_CPU
@@ -1361,6 +1362,89 @@ TORCH_LIBRARY(test_autograd_cpp_node_data_dependent, m) {
self.assertFalse("skipping cudagraphs" in stderr_msgs.getvalue())
+ def test_verbose_logs_graph(self):
+ torch._logging.set_logs(compiled_autograd_verbose=True)
+
+ def fn():
+ model = torch.nn.Sequential(
+ torch.nn.Linear(4, 4),
+ torch.nn.ReLU(),
+ torch.nn.Linear(4, 4),
+ torch.nn.ReLU(),
+ )
+ x = torch.randn([2, 4])
+ result = model(x).sum()
+ result.backward()
+ yield model[0].weight.grad
+ yield model[0].bias.grad
+ yield model[2].weight.grad
+ yield model[2].bias.grad
+
+ logs, ctx = logs_to_string(
+ torch._dynamo.compiled_autograd.__name__, "compiled_autograd_verbose"
+ )
+ with ctx():
+ self.check_output_and_recompiles(fn)
+
+ expected_logs = [
+ "SumBackward0 (NodeCall 1)",
+ "ReluBackward0 (NodeCall 2)",
+ "AddmmBackward0 (NodeCall 3)",
+ "TBackward0 (NodeCall 4)",
+ "torch::autograd::AccumulateGrad (NodeCall 5)",
+ "ReluBackward0 (NodeCall 6)",
+ "AddmmBackward0 (NodeCall 7)",
+ "TBackward0 (NodeCall 8)",
+ "torch::autograd::AccumulateGrad (NodeCall 9)",
+ "torch::autograd::AccumulateGrad (NodeCall 10)",
+ "torch::autograd::AccumulateGrad (NodeCall 11)",
+ ]
+
+ self.assertEqual(
+ sum(1 for e in expected_logs if e in logs.getvalue()), len(expected_logs)
+ )
+
+ def test_verbose_logs_snapshot(self):
+ def fn():
+ model = torch.nn.Sequential(
+ torch.nn.Linear(4, 4),
+ torch.nn.ReLU(),
+ torch.nn.Linear(4, 4),
+ torch.nn.ReLU(),
+ )
+ x = torch.randn([2, 4])
+ result = model(x).sum()
+ result.backward()
+ yield model[0].weight.grad
+ yield model[0].bias.grad
+ yield model[2].weight.grad
+ yield model[2].bias.grad
+
+ logs, ctx = logs_to_string(
+ torch._dynamo.compiled_autograd.__name__, "compiled_autograd_verbose"
+ )
+ with ctx():
+ with compiled_autograd.enable(compiler_fn):
+ # unused, verbose level already snapshot with contextmanager
+ torch._logging.set_logs(compiled_autograd_verbose=True)
+ fn()
+
+ unexpected_logs = [
+ "SumBackward0 (NodeCall 1)",
+ "ReluBackward0 (NodeCall 2)",
+ "AddmmBackward0 (NodeCall 3)",
+ "TBackward0 (NodeCall 4)",
+ "torch::autograd::AccumulateGrad (NodeCall 5)",
+ "ReluBackward0 (NodeCall 6)",
+ "AddmmBackward0 (NodeCall 7)",
+ "TBackward0 (NodeCall 8)",
+ "torch::autograd::AccumulateGrad (NodeCall 9)",
+ "torch::autograd::AccumulateGrad (NodeCall 10)",
+ "torch::autograd::AccumulateGrad (NodeCall 11)",
+ ]
+
+ self.assertEqual(sum(1 for e in unexpected_logs if e in logs.getvalue()), 0)
+
def load_test_module(name):
testdir = Path(__file__).absolute().parent.parent
diff --git a/torch/_C/_dynamo/compiled_autograd.pyi b/torch/_C/_dynamo/compiled_autograd.pyi
index ffc4713d0e..8ec4fbbdae 100644
--- a/torch/_C/_dynamo/compiled_autograd.pyi
+++ b/torch/_C/_dynamo/compiled_autograd.pyi
@@ -7,3 +7,4 @@ def set_autograd_compiler(
) -> Optional[Callable[[], AutogradCompilerInstance]]: ...
def clear_cache() -> None: ...
def is_cache_empty() -> bool: ...
+def set_verbose_logging(enable: bool) -> bool: ...
diff --git a/torch/_dynamo/compiled_autograd.py b/torch/_dynamo/compiled_autograd.py
index 18385171fa..3ac5441c48 100644
--- a/torch/_dynamo/compiled_autograd.py
+++ b/torch/_dynamo/compiled_autograd.py
@@ -22,8 +22,17 @@ from torch.fx.experimental.proxy_tensor import (
)
from torch.fx.experimental.symbolic_shapes import DimDynamic, ShapeEnv
from torch.fx.proxy import Proxy
+from torch.fx.traceback import preserve_node_meta, set_stack_trace
+from torch.utils._traceback import CapturedTraceback
compiled_autograd_log = getArtifactLogger(__name__, "compiled_autograd")
+verbose_log = getArtifactLogger(__name__, "compiled_autograd_verbose")
+
+
+def snapshot_verbose_logging_enabled():
+ return torch._logging._internal.log_state.is_artifact_enabled(
+ "compiled_autograd_verbose"
+ )
def maybe_clone(x):
@@ -89,6 +98,7 @@ class AutogradCompilerInstance:
self.stack.enter_context(self.proxy_mode.sym_mode)
self.stack.enter_context(self.proxy_mode)
self.stack.enter_context(disable_autocast_cache())
+ self.stack.enter_context(preserve_node_meta())
return inputs, sizes
def proxy_call_backward(
@@ -203,6 +213,9 @@ class AutogradCompilerInstance:
compiled_autograd_log.info(
"%s", lazy_format_graph_code("Compiled autograd graph", graph)
)
+ verbose_log.debug(
+ "%s", lazy_format_graph_code("Compiled autograd graph", graph)
+ )
trace_structured(
"compiled_autograd_graph",
payload_fn=lambda: graph.print_readable(print_output=False),
@@ -245,6 +258,14 @@ class AutogradCompilerInstance:
track_tensor_tree(bw_state, proxy, constant=None, tracer=self.fx_tracer)
return bw_state
+ def set_node_origin(self, node_name, node_index):
+ raw_stack_trace = CapturedTraceback.extract().format()[-1]
+ new_code = f"{node_name} (NodeCall {node_index})"
+ new_stack_trace = raw_stack_trace.replace(
+ "raw_stack_trace = CapturedTraceback.extract().format()[-1]", new_code
+ )
+ set_stack_trace(new_stack_trace)
+
compiled_autograd_enabled = False
@@ -269,6 +290,9 @@ def enable(compiler_fn):
prior = torch._C._dynamo.compiled_autograd.set_autograd_compiler(
functools.partial(AutogradCompilerInstance, compiler_fn)
)
+ torch._C._dynamo.compiled_autograd.set_verbose_logging(
+ snapshot_verbose_logging_enabled()
+ )
global compiled_autograd_enabled, compiled_autograd_enabled_count
compiled_autograd_enabled = True
compiled_autograd_enabled_count += 1
diff --git a/torch/_logging/_internal.py b/torch/_logging/_internal.py
index be5589a359..47f82f70dc 100644
--- a/torch/_logging/_internal.py
+++ b/torch/_logging/_internal.py
@@ -223,6 +223,7 @@ def set_logs(
modules: Optional[Dict[str, Union[int, bool]]] = None,
cudagraphs: bool = False,
sym_node: bool = False,
+ compiled_autograd_verbose: bool = False,
):
"""
Sets the log level for individual components and toggles individual log
@@ -476,6 +477,7 @@ def set_logs(
sym_node=sym_node,
export=export,
cudagraphs=cudagraphs,
+ compiled_autograd_verbose=compiled_autograd_verbose,
)
diff --git a/torch/_logging/_registrations.py b/torch/_logging/_registrations.py
index 5ff3372feb..509d6961b1 100644
--- a/torch/_logging/_registrations.py
+++ b/torch/_logging/_registrations.py
@@ -76,6 +76,11 @@ register_artifact(
"Prints various logs in compiled_autograd, including but not limited to the graphs. Useful for debugging compiled_autograd.",
visible=True,
)
+register_artifact(
+ "compiled_autograd_verbose",
+ "Will affect performance. Prints compiled_autograd logs with C++ info e.g. autograd node -> fx node mapping",
+ off_by_default=True,
+)
register_artifact(
"ddp_graphs",
"Only relevant for compiling DDP. DDP splits into multiple graphs to trigger comms early. This will print each individual graph here.",
diff --git a/torch/csrc/dynamo/python_compiled_autograd.cpp b/torch/csrc/dynamo/python_compiled_autograd.cpp
index 980baf94e6..05b7af1cce 100644
--- a/torch/csrc/dynamo/python_compiled_autograd.cpp
+++ b/torch/csrc/dynamo/python_compiled_autograd.cpp
@@ -237,11 +237,23 @@ static PyObject* is_cache_empty(PyObject* dummy, PyObject* args) {
END_HANDLE_TH_ERRORS;
}
+// snapshot of python verbose logging toggle
+static bool is_verbose_logging_enabled;
+static PyObject* set_verbose_logging(PyObject* dummy, PyObject* args) {
+ HANDLE_TH_ERRORS;
+ if (!PyArg_ParseTuple(args, "p", &is_verbose_logging_enabled)) {
+ Py_RETURN_FALSE;
+ }
+ Py_RETURN_TRUE;
+ END_HANDLE_TH_ERRORS;
+}
+
// NOLINTNEXTLINE(*array*)
static PyMethodDef _methods[] = {
{"set_autograd_compiler", set_autograd_compiler, METH_VARARGS, nullptr},
{"clear_cache", clear_cache, METH_NOARGS, nullptr},
{"is_cache_empty", is_cache_empty, METH_NOARGS, nullptr},
+ {"set_verbose_logging", set_verbose_logging, METH_VARARGS, nullptr},
{nullptr, nullptr, 0, nullptr}};
static struct PyModuleDef _module = {
@@ -366,12 +378,13 @@ CacheNode* _compiled_autograd_impl(
// cache miss, need to capture FX graph
ClosingTHPObjectPtr py_compiler(
check(PyObject_CallNoArgs((the_autograd_compiler))));
+
TraceState state = call_begin_capture(
py_compiler, *cache, compiler_call, output_edges.size());
InputBuffers input_buffers;
- for (NodeCall* call_ptr : calls) {
- NodeCall& call = *call_ptr;
+ for (size_t i = 0; i < calls.size(); i++) {
+ NodeCall& call = *calls[i];
// TODO(jansel): consider adding some of this stuff:
// guard(local_graph_task); NodeGuard ndguard(task.fn_); const auto
// opt_parent_stream = (*func).stream(c10::DeviceType::CUDA);
@@ -417,6 +430,16 @@ CacheNode* _compiled_autograd_impl(
inputs = THPVariable_UnpackList(pyinputs);
}
+ if (is_verbose_logging_enabled) {
+ std::string _node_name = call.node->name();
+ THPObjectPtr node_name(PyUnicode_FromString(_node_name.data()));
+ TORCH_INTERNAL_ASSERT(node_name != nullptr);
+ THPObjectPtr set_node_origin(
+ PyObject_GetAttrString(py_compiler.get(), "set_node_origin"));
+ check(PyObject_CallFunction(
+ set_node_origin, "OI", node_name.get(), i, nullptr));
+ }
+
SwapSavedVariables saved(compiler_call, state, py_compiler.get(), call);
variable_list outputs = call.node->apply_with_saved(inputs, saved);
diff --git a/torch/fx/graph.py b/torch/fx/graph.py
index 7ff8f94dbf..c7a66290e9 100644
--- a/torch/fx/graph.py
+++ b/torch/fx/graph.py
@@ -306,6 +306,9 @@ class _ParsedStackTrace:
name: str
code: str
+ def get_summary_str(self):
+ return f'File: {self.file}:{self.lineno} in {self.name}, code: {self.code}'
+
# get File:lineno code from stack_trace
def _parse_stack_trace(stack_trace: str):
if stack_trace is None:
@@ -521,13 +524,8 @@ class CodeGen:
prev_stacktrace = node.stack_trace
summary_str = ""
- parsed_stack_trace = _parse_stack_trace(node.stack_trace)
-
- if parsed_stack_trace is not None:
- lineno = parsed_stack_trace.lineno
- code = parsed_stack_trace.code
- name = parsed_stack_trace.name
- summary_str = f'File: {parsed_stack_trace.file}:{lineno} in {name}, code: {code}'
+ if parsed_stack_trace := _parse_stack_trace(node.stack_trace):
+ summary_str = parsed_stack_trace.get_summary_str()
body.append(f'\n# {summary_str}\n')
elif prev_stacktrace != "":
|
2.41.0
|
a6fef15ef22bca51762e73eb7e71ed514bd94b6
|
Fri, 26 Apr 2024 14:19:40 -0700
|
[PATCH 0750/1000] [compiled autograd] verbose logs for debugging cache misses (#124980)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124980
Approved by: https://github.com/jansel
ghstack dependencies: #124954
|
diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index ddb4410196..2a94e4cee5 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -1404,7 +1404,67 @@ TORCH_LIBRARY(test_autograd_cpp_node_data_dependent, m) {
sum(1 for e in expected_logs if e in logs.getvalue()), len(expected_logs)
)
- def test_verbose_logs_snapshot(self):
+ def test_verbose_logs_cpp(self):
+ script = """
+import torch
+
+def compiler_fn(gm):
+ return torch.compile(gm, backend="eager")
+
+def main():
+ torch._logging.set_logs(compiled_autograd_verbose=True)
+ model = torch.nn.Sequential(
+ torch.nn.Linear(4, 4),
+ torch.nn.ReLU(),
+ torch.nn.Linear(4, 4),
+ torch.nn.ReLU(),
+ )
+
+ for i in range(10, 100):
+ x = torch.randn([i, 4])
+ result = model(x).sum()
+ with torch._dynamo.compiled_autograd.enable(compiler_fn):
+ result.backward()
+
+main()
+"""
+ stdout, _ = self.run_process_no_exception(script)
+ stdout = stdout.decode("utf-8")
+
+ patterns = [
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for SumBackward0, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for ReluBackward0, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for AddmmBackward0, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for TBackward0, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for torch::autograd::AccumulateGrad, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for ReluBackward0, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for AddmmBackward0, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for TBackward0, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for torch::autograd::AccumulateGrad, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for torch::autograd::AccumulateGrad, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for torch::autograd::AccumulateGrad, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for torch::autograd::AccumulateGrad, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for ReluBackward0, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for AddmmBackward0, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for TBackward0, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for torch::autograd::AccumulateGrad, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for torch::autograd::AccumulateGrad, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] Creating cache entry for torch::autograd::AccumulateGrad, with key of size (\d+)\n",
+ r"\[python_compiled_autograd.cpp\] cache miss: marking sizes\[(\d+)\] as dynamic\n",
+ r"\[python_compiled_autograd.cpp\] cache miss: marking sizes\[(\d+)\] as dynamic\n",
+ r"\[python_compiled_autograd.cpp\] cache miss: marking sizes\[(\d+)\] as dynamic\n",
+ r"\[python_compiled_autograd.cpp\] cache miss: marking sizes\[(\d+)\] as dynamic\n",
+ r"\[python_compiled_autograd.cpp\] cache miss: marking sizes\[(\d+)\] as dynamic\n",
+ r"\[python_compiled_autograd.cpp\] cache miss: marking sizes\[(\d+)\] as dynamic\n",
+ r"\[python_compiled_autograd.cpp\] cache miss: marking sizes\[(\d+)\] as dynamic\n",
+ ]
+
+ pattern = r"".join(patterns)
+ matches = re.findall(pattern, stdout)
+ self.assertEqual(len(matches), 1)
+ self.assertEqual(len(matches[0]), len(patterns))
+
+ def test_snapshot_verbose_logs_flag(self):
def fn():
model = torch.nn.Sequential(
torch.nn.Linear(4, 4),
diff --git a/torch/csrc/dynamo/python_compiled_autograd.cpp b/torch/csrc/dynamo/python_compiled_autograd.cpp
index 05b7af1cce..dd5ea7cbd0 100644
--- a/torch/csrc/dynamo/python_compiled_autograd.cpp
+++ b/torch/csrc/dynamo/python_compiled_autograd.cpp
@@ -6,6 +6,7 @@
#include <torch/csrc/jit/python/pybind_utils.h>
#include <torch/csrc/python_headers.h>
#include <torch/csrc/utils/pythoncapi_compat.h>
+#include <iostream>
#include <sstream>
#include <vector>
@@ -49,6 +50,14 @@ Notes:
namespace torch::dynamo::autograd {
using c10::SymInt;
+// snapshot of python verbose logging toggle
+static bool is_verbose_logging_enabled;
+static constexpr std::string_view VLOG_PREFIX =
+ "[python_compiled_autograd.cpp] ";
+std::ostream& vcout() {
+ return std::cout << VLOG_PREFIX;
+}
+
static PyObject* wrap_int_list(const std::vector<int64_t>& inputs) {
PyObject* pyinput = PyTuple_New(static_cast<Py_ssize_t>(inputs.size()));
for (const auto i : c10::irange(inputs.size())) {
@@ -90,9 +99,11 @@ struct CacheNode {
return &_root;
}
- CacheNode* lookup(const CacheKey& key) {
+ CacheNode* lookup(const CacheKey& key, bool create = true) {
auto it = next.find(key);
if (it == next.end()) {
+ if (!create)
+ return nullptr;
// caller's key is in temporary memory, must copy it
CacheKeyBuffer buffer(key.key, key.key_size);
CacheKey key_with_storage(key.node_type, buffer.get(), key.key_size);
@@ -145,12 +156,20 @@ struct CacheNode {
TORCH_INTERNAL_ASSERT(expected_sizes.size() == call.all_size_inputs.size());
for (const auto i : c10::irange(len)) {
auto& expected = expected_sizes[i];
- if (expected.dyn_type == SizeInput::DYNAMIC ||
- expected.value != data[i].value) {
- cache_hit = cache_hit && expected.dyn_type == SizeInput::DYNAMIC;
- if (expected.value != data[i].value) {
- expected = SizeInput(SizeInput::DYNAMIC, data[i].value);
+ bool was_dynamic = expected.dyn_type == SizeInput::DYNAMIC;
+ bool changed_value = expected.value != data[i].value;
+ if (changed_value) {
+ if (!was_dynamic) {
+ cache_hit = false;
+ if (is_verbose_logging_enabled) {
+ vcout() << "cache miss: marking sizes[" << i << "] as dynamic"
+ << std::endl;
+ }
}
+ expected = SizeInput(SizeInput::DYNAMIC, data[i].value);
+ }
+
+ if (changed_value || was_dynamic) {
if (call.dyn_size_inputs.empty()) {
call.dyn_size_inputs.reserve(len);
}
@@ -166,7 +185,7 @@ struct CacheNode {
return cache_hit;
}
- PyObject* wrap_dynamic_inputs() {
+ PyObject* wrap_dynamic_inputs() const {
size_t dynamic_count = 0;
size_t idx = 0;
for (const auto& i : expected_sizes) {
@@ -184,7 +203,8 @@ struct CacheNode {
return pyinput;
}
- std::vector<c10::optional<SymInt>> unwrap_dynamic_inputs(PyObject* pyresult) {
+ std::vector<c10::optional<SymInt>> unwrap_dynamic_inputs(
+ PyObject* pyresult) const {
TORCH_INTERNAL_ASSERT(PyList_CheckExact(pyresult));
size_t idx = 0;
size_t result_len = PyList_GET_SIZE(pyresult);
@@ -237,8 +257,6 @@ static PyObject* is_cache_empty(PyObject* dummy, PyObject* args) {
END_HANDLE_TH_ERRORS;
}
-// snapshot of python verbose logging toggle
-static bool is_verbose_logging_enabled;
static PyObject* set_verbose_logging(PyObject* dummy, PyObject* args) {
HANDLE_TH_ERRORS;
if (!PyArg_ParseTuple(args, "p", &is_verbose_logging_enabled)) {
@@ -348,7 +366,13 @@ CacheNode* _compiled_autograd_impl(
fn->compiled_args(node_args);
node_args.collect(call.node->next_edges());
}
- cache = cache->lookup(node_args.key());
+ CacheKey key = node_args.key();
+ if (is_verbose_logging_enabled &&
+ cache->lookup(key, /*create=*/false) == nullptr) {
+ vcout() << "Creating cache entry for " << fn->name()
+ << ", with key of size " << key.key_size << std::endl;
+ }
+ cache = cache->lookup(key);
}
for (const auto& edge : fn->next_edges()) {
|
2.41.0
|
c4c75ba722d3fa573bb900af065e8205ce667f8
|
Sat, 27 Apr 2024 02:21:44 +0000
|
[PATCH 0751/1000] elastic/rendezvous: make barrier and rank assignment operations O(n) instead of O(n^2) (#124982)
|
Summary:

This makes barrier and rank assignment operations linear instead of quadratic with the number of workers. This drastically improves performance for rendezvous when running with over 1000 hosts.

This uses 2 approaches for different areas:

* local rank assignment: each worker does 1 set and 1 get; local ranks are assigned on the rank 0 host in an O(n) operation, which reduces total store operations to be linear with the number of workers.
* exit_barrier: use a counter and a final flag so each worker has to do at most 1 set, 1 get and 1 add.

At 4000 hosts we see torchelastic be able to run in as little as 10 seconds, down from 373 seconds.

Test Plan:

This is tested using many small tests running on a remote cluster. {D56549942}

```
torchx run --scheduler mast -- --image=torchelastic_benchmark --j=4000x1
```

Differential Revision: D56605193

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124982
Approved by: https://github.com/kiukchung, https://github.com/kurman
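For illustration, a minimal sketch (not part of the original commit message) of the counter-plus-flag barrier pattern described above, mirroring the `_barrier_nonblocking` helper added in this diff; the key names and the in-process `HashStore` usage follow the new unit tests:

```python
from multiprocessing.pool import ThreadPool

import torch.distributed as dist

def barrier(store, world_size: int, key_prefix: str) -> None:
    # Each worker does at most one add, one set and one get:
    # O(1) per worker, O(N) store operations globally.
    arrived = store.add(key_prefix + "/num_members", 1)
    if arrived == world_size:
        store.set(key_prefix + "/last_member", "<val_ignored>")
    store.get(key_prefix + "/last_member")  # blocks until the last member checks in

# Exercise it with an in-process store, as the new test_barrier_hash_store does.
N = 4
store = dist.HashStore()
with ThreadPool(N) as pool:
    pool.map(lambda _: barrier(store, N, "demo/barrier"), range(N))
```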
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
index fb7e2bdb84..0f89d2799f 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -579,6 +579,7 @@ coverage_ignore_functions = [
"barrier",
"get_all",
"synchronize",
+ "store_timeout",
# torch.distributed.fsdp.wrap
"always_wrap_policy",
"enable_wrap",
diff --git a/test/distributed/elastic/agent/server/test/api_test.py b/test/distributed/elastic/agent/server/test/api_test.py
index e57b7b9fcb..e1dd16bcf9 100644
--- a/test/distributed/elastic/agent/server/test/api_test.py
+++ b/test/distributed/elastic/agent/server/test/api_test.py
@@ -11,8 +11,11 @@
import signal
import unittest
import uuid
-from typing import Any, Dict
-from unittest.mock import call, MagicMock, patch
+from multiprocessing.pool import ThreadPool
+from typing import Any, Dict, List
+from unittest.mock import call, patch
+
+import torch.distributed as dist
import torch.distributed.elastic.rendezvous.registry as rdzv_registry
from torch.distributed.elastic.agent.server.api import (
@@ -20,6 +23,7 @@ from torch.distributed.elastic.agent.server.api import (
_RoleInstanceInfo,
RunResult,
SimpleElasticAgent,
+ Worker,
WorkerGroup,
WorkerSpec,
WorkerState,
@@ -470,22 +474,6 @@ class SimpleElasticAgentTest(unittest.TestCase):
self.assertEqual(1, mock_monitor_workers.call_count)
self.assertEqual(spec.max_restarts, agent._remaining_restarts)
- def test_get_ranks(self):
- role_infos = [
- _RoleInstanceInfo("parameter_server", 0, 4),
- _RoleInstanceInfo("trainer", 1, 1),
- _RoleInstanceInfo("trainer", 2, 2),
- _RoleInstanceInfo("trainer", 3, 3),
- _RoleInstanceInfo("parameter_server", 4, 5),
- ]
- spec = self._get_worker_spec(
- max_restarts=3, monitor_interval=0.1, role="not_used", local_world_size=8
- )
- agent = TestAgent(spec)
- total_sum, ranks = agent._get_ranks(role_infos, 0, 0, len(role_infos))
- self.assertEqual(15, total_sum)
- self.assertEqual([0, 1, 2, 3], list(ranks))
-
def test_assign_worker_ranks(self):
role_infos = [
_RoleInstanceInfo("parameter_server", 0, 4),
@@ -494,56 +482,64 @@ class SimpleElasticAgentTest(unittest.TestCase):
_RoleInstanceInfo("trainer", 3, 3),
_RoleInstanceInfo("parameter_server", 4, 5),
]
- num_agents = len(role_infos)
- with patch.object(TestAgent, "_share_and_gather", return_value=role_infos):
- self.verify_worker_ranks(
- role_infos[0], num_agents, [0, 1, 2, 3], [0, 1, 2, 3]
+ store = dist.HashStore()
+
+ def f(info) -> List[Worker]:
+ i, role_info = info
+ spec = self._get_worker_spec(
+ max_restarts=3,
+ monitor_interval=0.1,
+ role=role_info.role,
+ local_world_size=role_info.local_world_size,
)
- self.verify_worker_ranks(role_infos[1], num_agents, [4], [0])
- self.verify_worker_ranks(role_infos[2], num_agents, [5, 6], [1, 2])
- self.verify_worker_ranks(role_infos[3], num_agents, [7, 8, 9], [3, 4, 5])
-
- def verify_worker_ranks(
- self, agent_config, total_agents, expected_global_ranks, expected_role_ranks
- ):
- role, agent_rank, local_world_size = (
- agent_config.role,
- agent_config.rank,
- agent_config.local_world_size,
- )
- spec = self._get_worker_spec(
- max_restarts=3,
- monitor_interval=0.1,
- role=role,
- local_world_size=local_world_size,
- )
- agent = TestAgent(spec)
- workers = agent._assign_worker_ranks(None, agent_rank, total_agents, spec)
- self.assertEqual(
- expected_global_ranks, [worker.global_rank for worker in workers]
- )
- self.assertEqual(expected_role_ranks, [worker.role_rank for worker in workers])
-
- @patch("torch.distributed.elastic.utils.store.synchronize")
- def test_share_and_gather(self, sync_mock):
- # when the state is unknown we exit immediately; no retries
- spec = self._get_worker_spec(max_restarts=100, monitor_interval=0.1)
- agent = TestAgent(spec)
- expected_agent_infos = [
- _RoleInstanceInfo("trainer", 0, 10),
- _RoleInstanceInfo("trainer", 1, 10),
- _RoleInstanceInfo("validator", 2, 10),
- ]
-
- sync_mock.return_value = [obj.serialize() for obj in expected_agent_infos]
- result = agent._share_and_gather(MagicMock(), 1, 3, spec)
- sync_mock.assert_called_once()
- for expected_role_info, actual_role_info in zip(expected_agent_infos, result):
- self.assertEqual(expected_role_info.role, actual_role_info.role)
- self.assertEqual(expected_role_info.rank, actual_role_info.rank)
- self.assertEqual(
- expected_role_info.local_world_size, actual_role_info.local_world_size
+ agent = TestAgent(spec)
+ workers = agent._assign_worker_ranks(
+ store, role_info.rank, len(role_infos), spec
)
+ return [
+ (
+ w.local_rank,
+ w.role_rank,
+ w.global_rank,
+ w.world_size,
+ w.role_world_size,
+ )
+ for w in workers
+ ]
+
+ with ThreadPool(len(role_infos)) as pool:
+ out = pool.map(f, enumerate(role_infos))
+
+ self.assertListEqual(
+ out,
+ [
+ [
+ (0, 0, 0, 15, 9),
+ (1, 1, 1, 15, 9),
+ (2, 2, 2, 15, 9),
+ (3, 3, 3, 15, 9),
+ ],
+ [
+ (0, 0, 4, 15, 6),
+ ],
+ [
+ (0, 1, 5, 15, 6),
+ (1, 2, 6, 15, 6),
+ ],
+ [
+ (0, 3, 7, 15, 6),
+ (1, 4, 8, 15, 6),
+ (2, 5, 9, 15, 6),
+ ],
+ [
+ (0, 4, 10, 15, 9),
+ (1, 5, 11, 15, 9),
+ (2, 6, 12, 15, 9),
+ (3, 7, 13, 15, 9),
+ (4, 8, 14, 15, 9),
+ ],
+ ],
+ )
def test_get_event(self):
spec = self._get_worker_spec(max_restarts=1)
diff --git a/test/distributed/elastic/utils/util_test.py b/test/distributed/elastic/utils/util_test.py
index 60db327d5b..ab890b0375 100644
--- a/test/distributed/elastic/utils/util_test.py
+++ b/test/distributed/elastic/utils/util_test.py
@@ -7,77 +7,142 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
-from unittest import mock
+import datetime
+from multiprocessing.pool import ThreadPool
+from typing import List
+
+import torch.distributed as dist
import torch.distributed.elastic.utils.store as store_util
from torch.distributed.elastic.utils.logging import get_logger
from torch.testing._internal.common_utils import run_tests, TestCase
+class MockStore:
+ def __init__(self):
+ self.ops = []
+
+ def set_timeout(self, timeout: float) -> None:
+ self.ops.append(("set_timeout", timeout))
+
+ @property
+ def timeout(self) -> datetime.timedelta:
+ self.ops.append(("timeout",))
+
+ return datetime.timedelta(seconds=1234)
+
+ def set(self, key: str, value: str) -> None:
+ self.ops.append(("set", key, value))
+
+ def get(self, key: str) -> str:
+ self.ops.append(("get", key))
+ return "value"
+
+ def multi_get(self, keys: List[str]) -> List[str]:
+ self.ops.append(("multi_get", keys))
+ return ["value"] * len(keys)
+
+ def add(self, key: str, val: int) -> int:
+ self.ops.append(("add", key, val))
+ return 3
+
+
class StoreUtilTest(TestCase):
def test_get_all_rank_0(self):
- store = mock.MagicMock()
world_size = 3
+
+ store = MockStore()
+
store_util.get_all(store, 0, "test/store", world_size)
- # omit empty kwargs, get only key
- actual_set_call_args = [
- call_args[0][0] for call_args in store.set.call_args_list
- ]
- self.assertListEqual(["test/store0.FIN"], actual_set_call_args)
-
- actual_get_call_args = [call_args[0] for call_args in store.get.call_args_list]
- expected_get_call_args = [
- ("test/store0",),
- ("test/store1",),
- ("test/store2",),
- ("test/store0.FIN",),
- ("test/store1.FIN",),
- ("test/store2.FIN",),
- ]
- self.assertListEqual(expected_get_call_args, actual_get_call_args)
+
+ self.assertListEqual(
+ store.ops,
+ [
+ ("multi_get", ["test/store0", "test/store1", "test/store2"]),
+ ("add", "test/store/finished/num_members", 1),
+ ("set", "test/store/finished/last_member", "<val_ignored>"),
+ ("get", "test/store/finished/last_member"),
+ ],
+ )
def test_get_all_rank_n(self):
- store = mock.MagicMock()
+ store = MockStore()
world_size = 3
store_util.get_all(store, 1, "test/store", world_size)
- # omit empty kwargs, get only key
- actual_set_call_args = [
- call_args[0][0] for call_args in store.set.call_args_list
- ]
- self.assertListEqual(["test/store1.FIN"], actual_set_call_args)
-
- actual_get_call_args = [call_args[0] for call_args in store.get.call_args_list]
- expected_get_call_args = [
- ("test/store0",),
- ("test/store1",),
- ("test/store2",),
- ]
- self.assertListEqual(expected_get_call_args, actual_get_call_args)
+
+ self.assertListEqual(
+ store.ops,
+ [
+ ("multi_get", ["test/store0", "test/store1", "test/store2"]),
+ ("add", "test/store/finished/num_members", 1),
+ ("set", "test/store/finished/last_member", "<val_ignored>"),
+ ],
+ )
def test_synchronize(self):
- store_mock = mock.MagicMock()
+ store = MockStore()
+
data = b"data0"
- store_util.synchronize(store_mock, data, 0, 3, key_prefix="torchelastic/test")
- actual_set_call_args = store_mock.set.call_args_list
- # omit empty kwargs
- actual_set_call_args = [call_args[0] for call_args in actual_set_call_args]
- expected_set_call_args = [
- ("torchelastic/test0", b"data0"),
- ("torchelastic/test0.FIN", b"FIN"),
- ]
- self.assertListEqual(expected_set_call_args, actual_set_call_args)
-
- expected_get_call_args = [
- ("torchelastic/test0",),
- ("torchelastic/test1",),
- ("torchelastic/test2",),
- ("torchelastic/test0.FIN",),
- ("torchelastic/test1.FIN",),
- ("torchelastic/test2.FIN",),
- ]
- actual_get_call_args = store_mock.get.call_args_list
- actual_get_call_args = [call_args[0] for call_args in actual_get_call_args]
- self.assertListEqual(expected_get_call_args, actual_get_call_args)
+ store_util.synchronize(store, data, 0, 3, key_prefix="test/store")
+
+ self.assertListEqual(
+ store.ops,
+ [
+ ("timeout",),
+ ("set_timeout", datetime.timedelta(seconds=300)),
+ ("set", "test/store0", data),
+ ("multi_get", ["test/store0", "test/store1", "test/store2"]),
+ ("add", "test/store/finished/num_members", 1),
+ ("set", "test/store/finished/last_member", "<val_ignored>"),
+ ("get", "test/store/finished/last_member"),
+ ("set_timeout", datetime.timedelta(seconds=1234)),
+ ],
+ )
+
+ def test_synchronize_hash_store(self) -> None:
+ N = 4
+
+ store = dist.HashStore()
+
+ def f(i: int):
+ return store_util.synchronize(
+ store, f"data{i}", i, N, key_prefix="test/store"
+ )
+
+ with ThreadPool(N) as pool:
+ out = pool.map(f, range(N))
+
+ self.assertListEqual(out, [[f"data{i}".encode() for i in range(N)]] * N)
+
+ def test_barrier(self):
+ store = MockStore()
+
+ store_util.barrier(store, 3, key_prefix="test/store")
+
+ self.assertListEqual(
+ store.ops,
+ [
+ ("timeout",),
+ ("set_timeout", datetime.timedelta(seconds=300)),
+ ("add", "test/store/num_members", 1),
+ ("set", "test/store/last_member", "<val_ignored>"),
+ ("get", "test/store/last_member"),
+ ("set_timeout", datetime.timedelta(seconds=1234)),
+ ],
+ )
+
+ def test_barrier_hash_store(self) -> None:
+ N = 4
+
+ store = dist.HashStore()
+
+ def f(i: int):
+ store_util.barrier(store, N, key_prefix="test/store")
+
+ with ThreadPool(N) as pool:
+ out = pool.map(f, range(N))
+
+ self.assertEqual(out, [None] * N)
class UtilTest(TestCase):
diff --git a/torch/distributed/elastic/agent/server/api.py b/torch/distributed/elastic/agent/server/api.py
index dd20703ced..c9f76e5917 100644
--- a/torch/distributed/elastic/agent/server/api.py
+++ b/torch/distributed/elastic/agent/server/api.py
@@ -7,7 +7,6 @@
# LICENSE file in the root directory of this source tree.
import abc
-import functools
import json
import os
import signal
@@ -30,6 +29,7 @@ from torch.distributed.elastic.multiprocessing import (
ProcessFailure,
SignalException,
)
+from collections import defaultdict
from torch.distributed.elastic.utils.logging import get_logger
__all__ = [
@@ -592,26 +592,6 @@ class SimpleElasticAgent(ElasticAgent):
}
)
- def _get_ranks(
- self,
- role_infos: List[_RoleInstanceInfo],
- role_idx: int,
- start_idx: int = 0,
- end_idx: int = -1,
- ) -> Tuple[int, List[int]]:
- if end_idx == -1:
- end_idx = len(role_infos)
- prefix_sum = 0
- total_sum = 0
- for idx in range(start_idx, end_idx):
- if role_idx > idx:
- prefix_sum += role_infos[idx].local_world_size
- total_sum += role_infos[idx].local_world_size
- return (
- total_sum,
- list(range(prefix_sum, prefix_sum + role_infos[role_idx].local_world_size)),
- )
-
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
@@ -624,63 +604,86 @@ class SimpleElasticAgent(ElasticAgent):
1. Each agent writes its configuration(group_rank, group_world_size
, num_workers) to the common store.
- 2. Each agent retrieves configuration for all agents
- and performs two level sort using role and rank.
- 3. Determine the global rank: the global rank of the workers for the current
- agent is the offset of the infos array up to group_rank of the agent.
- The offset is computed as a sum of local_world_size of all agents that
- have rank less than the group_rank. The workers would have the ranks:
- [offset, offset+local_world_size)
+ 2. The rank 0 agent reads all the role_info from the store and
+ determines each agents worker ranks.
+ 3. Determine the global rank: the global rank of the workers is computed
+ by cumulative sum of the local_world_size for all workers in front of it.
+ For efficiency reasons each worker is assigned a base global rank
+ such that it's workers are in the range [base_global_rank,
+ base_global_rank + local_world_size).
4. Determine the role rank: The role rank is determined using the algorithms
- in the point 3 with the exception that the offset is done from the first
- agent that has the same role as current one and has the minimum group rank.
+ in the point 3 with the exception that the ranks are calculated with
+ respect to the role name.
+ 5. The rank 0 agent writes the assigned ranks to the store.
+ 6. Each agent reads the assigned ranks from the store.
+
+ Time complexity: each worker O(1), rank0 O(n), overall O(n)
"""
- role_infos = self._share_and_gather(store, group_rank, group_world_size, spec)
- my_role_info = role_infos[group_rank]
- worker_world_size, worker_global_ranks = self._get_ranks(role_infos, group_rank)
- role_infos = sorted(
- role_infos, key=functools.cmp_to_key(_RoleInstanceInfo.compare)
- )
- role_start_idx, role_end_idx = _RoleInstanceInfo.find_role_boundaries(
- role_infos, my_role_info.role
- )
- role_pos = next(
- idx
- for idx, role_info in enumerate(role_infos)
- if _RoleInstanceInfo.compare(role_info, my_role_info) == 0
+
+ ROLE_INFO_PREFIX = "torchelastic/role_info/"
+ ASSIGNED_RANKS_PREFIX = "torchelastic/assigned_ranks/"
+
+ agent_role_info = _RoleInstanceInfo(
+ spec.role, group_rank, spec.local_world_size
)
- role_world_size, role_ranks = self._get_ranks(
- role_infos, role_pos, role_start_idx, role_end_idx + 1
+ store.set(f"{ROLE_INFO_PREFIX}{group_rank}", agent_role_info.serialize())
+
+ # tcp store is collocated with rank 0 so we can use it to do extra compute to reduce overall # of operations.
+ if group_rank == 0:
+ role_infos_bytes = store.multi_get(
+ [f"torchelastic/role_info/{i}" for i in range(group_world_size)]
+ )
+ role_infos = [
+ _RoleInstanceInfo.deserialize(info_bytes)
+ for info_bytes in role_infos_bytes
+ ]
+
+ role_sizes = defaultdict(lambda: 0)
+ global_size = 0
+ for role_info in role_infos:
+ role_sizes[role_info.role] += role_info.local_world_size
+ global_size += role_info.local_world_size
+
+ base_global_rank = 0
+ role_ranks = defaultdict(lambda: 0)
+
+ keys = []
+ values = []
+ for i, role_info in enumerate(role_infos):
+ keys.append(f"{ASSIGNED_RANKS_PREFIX}{i}")
+ values.append(
+ json.dumps(
+ [
+ base_global_rank,
+ global_size,
+ role_ranks[role_info.role],
+ role_sizes[role_info.role],
+ ]
+ )
+ )
+
+ base_global_rank += role_info.local_world_size
+ role_ranks[role_info.role] += role_info.local_world_size
+
+ store.multi_set(keys, values)
+
+ # get will block until the data is available in the store.
+ base_global_rank, global_world_size, base_role_rank, role_world_size = json.loads(
+ store.get(f"{ASSIGNED_RANKS_PREFIX}{group_rank}")
)
+
workers = []
- for ind in range(spec.local_world_size):
+ for local_rank in range(spec.local_world_size):
worker = Worker(
- local_rank=ind,
- global_rank=worker_global_ranks[ind],
- role_rank=role_ranks[ind],
- world_size=worker_world_size,
+ local_rank=local_rank,
+ global_rank=base_global_rank + local_rank,
+ role_rank=base_role_rank + local_rank,
+ world_size=global_world_size,
role_world_size=role_world_size,
)
workers.append(worker)
return workers
- def _share_and_gather(
- self, store, group_rank: int, group_world_size: int, spec: WorkerSpec
- ) -> List:
- agent_role_info = _RoleInstanceInfo(
- spec.role, group_rank, spec.local_world_size
- )
- key_prefix = "torchelastic/role_info"
- agent_config_enc = agent_role_info.serialize()
- role_infos_bytes = store_util.synchronize(
- store, agent_config_enc, group_rank, group_world_size, key_prefix
- )
- role_infos = [
- _RoleInstanceInfo.deserialize(role_info_bytes)
- for role_info_bytes in role_infos_bytes
- ]
- return role_infos
-
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
@@ -935,9 +938,8 @@ class SimpleElasticAgent(ElasticAgent):
start = time.time()
try:
store_util.barrier(
- self._store,
- self._worker_group.group_rank,
- self._worker_group.group_world_size,
+ store=self._store,
+ world_size=self._worker_group.group_world_size,
key_prefix=_TERMINAL_STATE_SYNC_ID,
barrier_timeout=self._exit_barrier_timeout,
)
diff --git a/torch/distributed/elastic/utils/distributed.py b/torch/distributed/elastic/utils/distributed.py
index 808b965d50..bf4a537bbf 100644
--- a/torch/distributed/elastic/utils/distributed.py
+++ b/torch/distributed/elastic/utils/distributed.py
@@ -13,6 +13,7 @@ from typing import Optional
import torch.distributed as dist
from torch.distributed.elastic.utils.logging import get_logger
+from torch.distributed.elastic.utils.store import barrier
logger = get_logger(__name__)
@@ -20,8 +21,7 @@ logger = get_logger(__name__)
_ADDRESS_IN_USE = "Address already in use"
_SOCKET_TIMEOUT = "Socket Timeout"
-_MEMBER_CHECKIN = "_tcp_store/num_members"
-_LAST_MEMBER_CHECKIN = "_tcp_store/last_member"
+_TCP_STORE_INIT = "_tcp_store/num_members"
def create_c10d_store(
@@ -54,8 +54,9 @@ def create_c10d_store(
"Creating c10d store on %s:%s\n"
" world_size : %s\n"
" is_server : %s\n"
- " timeout(sec): %s\n",
- server_addr, port, world_size, is_server, timeout
+ " timeout(sec): %s\n"
+ " use_libuv : %s\n",
+ server_addr, port, world_size, is_server, timeout, use_libuv,
)
try:
@@ -75,7 +76,7 @@ def create_c10d_store(
store = store_builder(use_libuv=use_libuv)
# skips full rank check when we don't have to wait for all workers
if wait_for_workers:
- _check_full_rank(store, world_size)
+ _check_full_rank(store, world_size, timeout=timeout)
logger.info("Successfully created c10d store")
return store
except RuntimeError as e:
@@ -98,13 +99,9 @@ def create_c10d_store(
raise
-def _check_full_rank(store, world_size):
- idx = store.add(_MEMBER_CHECKIN, 1)
- if idx == world_size:
- store.set(_LAST_MEMBER_CHECKIN, "<val_ignored>")
-
+def _check_full_rank(store, world_size, timeout):
try:
- store.get(_LAST_MEMBER_CHECKIN)
+ barrier(store, world_size, key_prefix=_TCP_STORE_INIT, barrier_timeout=timeout)
except RuntimeError as e:
if str(e) == _SOCKET_TIMEOUT:
raise TimeoutError(
diff --git a/torch/distributed/elastic/utils/store.py b/torch/distributed/elastic/utils/store.py
index 9c7abab929..719c83b826 100644
--- a/torch/distributed/elastic/utils/store.py
+++ b/torch/distributed/elastic/utils/store.py
@@ -8,9 +8,29 @@
from datetime import timedelta
from typing import List
+from contextlib import contextmanager
+_NUM_MEMBERS = "/num_members"
+_LAST_MEMBER_CHECKIN = "/last_member"
-def get_all(store, rank: int, prefix: str, size: int):
+@contextmanager
+def store_timeout(store, timeout: float):
+ """
+ This sets the timeout and then restores the old timeout when the context
+ manager exits.
+
+ Args:
+ store: the store to set the timeout on
+ timeout: the timeout to set
+ """
+
+ old_timeout = store.timeout
+ store.set_timeout(timedelta(seconds=timeout))
+ yield
+ store.set_timeout(old_timeout)
+
+
+def get_all(store, rank: int, prefix: str, world_size: int):
r"""
Given a store and a prefix, the method goes through the array of keys
of the following format: ``{prefix}{idx}``, where idx is in a range
@@ -29,17 +49,20 @@ def get_all(store, rank: int, prefix: str, size: int):
value3 = values[2] # retrieves the data for key torchelastic/data2
"""
- data_arr = []
- for idx in range(size):
- data = store.get(f"{prefix}{idx}")
- data_arr.append(data)
- store.set(f"{prefix}{rank}.FIN", b"FIN")
+ data_arr = store.multi_get(
+ [f"{prefix}{idx}" for idx in range(world_size)]
+ )
+
+ barrier_key = _barrier_nonblocking(
+ store=store,
+ world_size=world_size,
+ key_prefix=f"{prefix}/finished",
+ )
if rank == 0:
# Rank0 runs the TCPStore daemon, as a result it needs to exit last.
# Otherwise, the barrier may timeout if rank0 process finished the work
# before other processes finished `get_all` method
- for node_rank in range(size):
- store.get(f"{prefix}{node_rank}.FIN")
+ store.get(barrier_key)
return data_arr
@@ -50,7 +73,7 @@ def synchronize(
rank: int,
world_size: int,
key_prefix: str,
- barrier_timeout: float = 300,
+ timeout: float = 300,
) -> List[bytes]:
"""
Synchronizes ``world_size`` agents between each other using the underlying c10d store.
@@ -58,21 +81,47 @@ def synchronize(
Note: The data on the path is not deleted, as a result there can be stale data if
you use the same key_prefix twice.
+
+ Time complexity: O(N) per worker, O(N^2) globally.
"""
- store.set_timeout(timedelta(seconds=barrier_timeout))
- store.set(f"{key_prefix}{rank}", data)
- agent_data = get_all(store, rank, key_prefix, world_size)
- return agent_data
+ with store_timeout(store, timeout):
+ store.set(f"{key_prefix}{rank}", data)
+ agent_data = get_all(store, rank, key_prefix, world_size)
+ return agent_data
+
+
+def _barrier_nonblocking(store, world_size: int, key_prefix: str) -> str:
+ """
+ Does all the non-blocking operations for a barrier and returns the final key
+ that can be waited on.
+ """
+ num_members_key = key_prefix + _NUM_MEMBERS
+ last_member_key = key_prefix + _LAST_MEMBER_CHECKIN
+
+
+ idx = store.add(num_members_key, 1)
+ if idx == world_size:
+ store.set(last_member_key, "<val_ignored>")
+
+ return last_member_key
def barrier(
- store, rank: int, world_size: int, key_prefix: str, barrier_timeout: float = 300
+ store, world_size: int, key_prefix: str, barrier_timeout: float = 300
) -> None:
"""
- A global lock between agents.
+ A global lock between agents. This will pause all workers until at least
+ ``world_size`` workers respond.
+
+ This uses a fast incrementing index to assign waiting ranks and a success
+ flag set by the last worker.
+
+ Time complexity: O(1) per worker, O(N) globally.
Note: Since the data is not removed from the store, the barrier can be used
once per unique ``key_prefix``.
"""
- data = f"{rank}".encode()
- synchronize(store, data, rank, world_size, key_prefix, barrier_timeout)
+
+ with store_timeout(store, barrier_timeout):
+ last_member_key = _barrier_nonblocking(store=store, world_size=world_size, key_prefix=key_prefix)
+ store.get(last_member_key)
|
2.41.0
|
944a5355501cb45fac966d3cf4277cbc718eff1
|
Sat, 27 Apr 2024 02:58:05 +0000
|
[PATCH 0753/1000] [MPS] Fix nextafter for negative values (#125029)
|
By changing the logic on older MacOS to:
```cpp
bits += ((input > 0) ^ (input > other)) ? 1 : -1;
```
And use native `nextafter` on MacOS Sonoma (i.e. if Metal 3.1 is available)

TODO:
- Add tests for infs and denorms

Fixes https://github.com/pytorch/pytorch/issues/124985
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125029
Approved by: https://github.com/Skylion007
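As a quick illustration (not part of the original commit message), the fixed behavior can be checked against the CPU reference along the lines of the test added below; this assumes an MPS-capable machine, and the tensor values are taken from that test:

```python
import torch

x = torch.tensor([1.0, -1.0, 0.0, 0.0, 2.0, -2.0], device="mps")
y = torch.tensor([2.0, -2.0, -1.0, 1.0, -3.0, 3.0], device="mps")

na_mps = torch.nextafter(x, y)
na_cpu = torch.nextafter(x.cpu(), y.cpu())

# With the fix, the step direction matches the CPU implementation even for
# negative inputs, e.g. nextafter(-1, -2) now moves toward -2.
# (Comparison is done on CPU; see the linked issue about `greater` on MPS.)
assert torch.equal(na_mps.cpu() > x.cpu(), na_cpu > x.cpu())
```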
|
diff --git a/aten/src/ATen/native/mps/operations/BinaryKernel.mm b/aten/src/ATen/native/mps/operations/BinaryKernel.mm
index 7efaaa2c7e..48e43145d0 100644
--- a/aten/src/ATen/native/mps/operations/BinaryKernel.mm
+++ b/aten/src/ATen/native/mps/operations/BinaryKernel.mm
@@ -6,6 +6,8 @@
#include <ATen/native/TensorIterator.h>
#include <ATen/native/mps/OperationUtils.h>
#include <ATen/native/mps/operations/BinaryKernel.h>
+// For MTLLanguageVersion_3_1
+#include <ATen/native/mps/MPSGraphSonomaOps.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
@@ -190,24 +192,25 @@ kernel void nextafter_kernel(constant void * input_ [[buffer(0)]],
device void * out_ [[buffer(2)]],
constant uint3 * offsets [[buffer(3)]],
uint tid [[thread_position_in_grid]]) {
- device T* out = (device T*)((device uint8_t*)out_ + offsets[tid].x);
- constant T* input = (constant T*)((constant uint8_t*)input_ + offsets[tid].y);
- constant T* other = (constant T*)((constant uint8_t*)other_ + offsets[tid].z);
-
- if (*input == *other)
- {
- *out = *other;
- }
- else if (isnan(*input) || isnan(*other))
- {
+ auto out = (device T*)((device uint8_t*)out_ + offsets[tid].x);
+ auto input = *(constant T*)((constant uint8_t*)input_ + offsets[tid].y);
+ auto other = *(constant T*)((constant uint8_t*)other_ + offsets[tid].z);
+#if __METAL_VERSION__ >= 310
+ *out = nextafter(input, other);
+#else
+ if (input == other) {
+ *out = input;
+ } else if (isnan(input) || isnan(other)) {
*out = NAN;
- }
- else
- {
- U bits = as_type<U>(*input);
- bits = bits + ((*other > *input) ? 1 : -1);
+ } else if (input == 0) {
+ constexpr auto one = as_type<T>(static_cast<U>(1));
+ *out = other > 0 ? one : -one;
+ } else {
+ U bits = as_type<U>(input);
+ (input > 0) ^ (input > other) ? bits++ : bits--;
*out = as_type<T>(bits);
}
+#endif
}
#define REGISTER_NEXTAFTER_OP(DTYPE, UTYPE) \
@@ -261,7 +264,8 @@ static id<MTLLibrary> compileBinaryOpsLibrary(id<MTLDevice> device) {
NSError* error = nil;
MTLCompileOptions* options = [[MTLCompileOptions new] autorelease];
- [options setLanguageVersion:MTLLanguageVersion2_3];
+ [options setLanguageVersion:is_macos_13_or_newer(MacOSVersion::MACOS_VER_14_0_PLUS) ? MTLLanguageVersion3_1
+ : MTLLanguageVersion2_3];
binaryLibrary = [device newLibraryWithSource:[NSString stringWithCString:METAL_BINARY encoding:NSASCIIStringEncoding]
options:options
error:&error];
diff --git a/test/test_mps.py b/test/test_mps.py
index 7f87c1ccd4..27ff47e702 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -11173,6 +11173,18 @@ class TestAdvancedIndexing(TestCaseMPS):
out = x[idx] # index
self.assertEqual(out, torch.zeros(2, device=device), atol=0, rtol=0)
+ def test_nextafter(self, device="mps"):
+ for dtype in [torch.float16, torch.float32]:
+ x = torch.tensor([1, -1, 0, 0, 2, -2], device=device, dtype=dtype)
+ y = torch.tensor([2, -2, -1, 1, -3, 3], device=device, dtype=dtype)
+ na = torch.nextafter(x, y)
+ na_cpu = torch.nextafter(x.cpu(), y.cpu())
+ na_ge_x_mps = na.cpu() > x.cpu()
+ # greater is broken on MPS, see https://github.com/pytorch/pytorch/issues/125051
+ na_ge_x_cpu = na_cpu > x.cpu()
+ self.assertEqual(na_ge_x_mps, na_ge_x_cpu)
+
+
class TestRNNMPS(TestCaseMPS):
def _lstm_helper(self, num_layers, dtype, device, bidirectional=False, bias=True, batch_first=False,
seq_len=3, batch_size=5, hidden_size=7, input_size=11, backward=False):
|
2.41.0
|
866bfff452a1f29cdeeb6e9deb3cdf727509255
|
Sat, 27 Apr 2024 04:15:47 +0000
|
[PATCH 0754/1000] [cuDNN] cuDNN SDPA (Flash Attention) Backward (#122510)
|
#113713: currently passing trivial smoke tests, but I just totally pattern-matched bits and pieces of the autograd defs. Will also collect benchmark data. CC @drisspg Co-authored-by: Eli Uriegas <1700823+seemethere@users.noreply.github.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/122510 Approved by: https://github.com/drisspg
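A rough usage sketch (not from this PR's test suite) of how the new backward path would be exercised end-to-end; it assumes the `CUDNN_ATTENTION` backend selector exposed by `torch.nn.attention`, which shipped with the forward-pass support:

```python
import torch
from torch.nn.attention import SDPBackend, sdpa_kernel

def make():
    return torch.randn(2, 8, 128, 64, device="cuda", dtype=torch.half, requires_grad=True)

q, k, v = make(), make(), make()
# Pinning the cuDNN backend routes the forward (run_cudnn_SDP_fprop) and,
# with this patch, the backward (run_cudnn_SDP_bprop) through cuDNN.
with sdpa_kernel(SDPBackend.CUDNN_ATTENTION):
    out = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True)
    out.sum().backward()
print(q.grad.shape, k.grad.shape, v.grad.shape)
```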
|
diff --git a/aten/src/ATen/native/cudnn/MHA.cpp b/aten/src/ATen/native/cudnn/MHA.cpp
index c3f5f63af6..1f6bdbf530 100644
--- a/aten/src/ATen/native/cudnn/MHA.cpp
+++ b/aten/src/ATen/native/cudnn/MHA.cpp
@@ -29,6 +29,30 @@ void run_cudnn_SDP_fprop(
false, "PyTorch was not compiled with cuDNN Flash Attention enabled!");
}
+void run_cudnn_SDP_bprop(
+ int64_t b,
+ int64_t h,
+ int64_t s_q,
+ int64_t s_kv,
+ int64_t d,
+ float scaling_factor,
+ bool is_causal,
+ float dropout_probability,
+ const Tensor& q,
+ const Tensor& k,
+ const Tensor& v,
+ const Tensor& o,
+ const Tensor& dO,
+ const Tensor& softmaxstats,
+ Tensor& dQ,
+ Tensor& dK,
+ Tensor& dV,
+ const Tensor& dropoutseed,
+ const Tensor& dropoutoffset) {
+ TORCH_CHECK(
+ false, "PyTorch was not compiled with cuDNN Flash Attention enabled!");
+}
+
} // namespace native
} // namespace at
@@ -73,6 +97,22 @@ using graph_and_tensors = std::tuple<
std::shared_ptr<fe::graph::Tensor_attributes> // Stats
>;
+using graph_and_tensors_backward = std::tuple<
+ std::shared_ptr<fe::graph::Graph>,
+ std::shared_ptr<fe::graph::Tensor_attributes>, // Q,
+ std::shared_ptr<fe::graph::Tensor_attributes>, // K,
+ std::shared_ptr<fe::graph::Tensor_attributes>, // V,
+ std::shared_ptr<fe::graph::Tensor_attributes>, // Attn_scale
+ std::shared_ptr<fe::graph::Tensor_attributes>, // Seed,
+ std::shared_ptr<fe::graph::Tensor_attributes>, // Offset,
+ std::shared_ptr<fe::graph::Tensor_attributes>, // O,
+ std::shared_ptr<fe::graph::Tensor_attributes>, // dO,
+ std::shared_ptr<fe::graph::Tensor_attributes>, // stats,
+ std::shared_ptr<fe::graph::Tensor_attributes>, // dQ,
+ std::shared_ptr<fe::graph::Tensor_attributes>, // dK,,
+ std::shared_ptr<fe::graph::Tensor_attributes> // dV,
+ >;
+
#define MAX_MHA_DIM 4
struct MHAParams {
@@ -178,8 +218,7 @@ struct MHACacheKeyWrapper : ParamsWrapper<MHAParams> {
template <typename T, typename KeyType>
struct MHAGraphCache {
- std::unordered_map<KeyType, graph_and_tensors, ParamsWrapperHash<KeyType>>
- engine_cache;
+ std::unordered_map<KeyType, T, ParamsWrapperHash<KeyType>> engine_cache;
// no mutexes here as caches are now thread local for v8, can also return a
// pointer to the Execution Plan if we know it will not be invalidated by
@@ -202,6 +241,8 @@ struct MHAGraphCache {
// be thread safe across all engines see Limitations in
// https://docs.nvidia.com/deeplearning/cudnn/release-notes/index.html
thread_local MHAGraphCache<graph_and_tensors, MHACacheKeyWrapper> mhagraphcache;
+thread_local MHAGraphCache<graph_and_tensors_backward, MHACacheKeyWrapper>
+ mhagraphbackwardcache;
auto build_graph_and_tensors(
int64_t b,
@@ -227,10 +268,12 @@ auto build_graph_and_tensors(
dtype = fe::DataType_t::BFLOAT16;
}
auto mha_graph = std::make_shared<fe::graph::Graph>();
+ // We're baking in float accumulation and scale types
+ // in theory the graph may support other types, but they
+ // have not been tested
mha_graph->set_io_data_type(dtype)
.set_intermediate_data_type(fe::DataType_t::FLOAT)
.set_compute_data_type(fe::DataType_t::FLOAT);
-
auto Q = mha_graph->tensor(
fe::graph::Tensor_attributes()
.set_name("Q")
@@ -254,7 +297,7 @@ auto build_graph_and_tensors(
params.v_stride.begin(), params.v_stride.end())));
auto attn_scale =
mha_graph->tensor(fe::graph::Tensor_attributes()
- .set_name("attn_scale")
+ .set_name("Attn_scale")
.set_dim({1, 1, 1, 1})
.set_stride({1, 1, 1, 1})
.set_is_pass_by_value(true)
@@ -276,7 +319,7 @@ auto build_graph_and_tensors(
.set_data_type(fe::DataType_t::INT32));
auto scaled_dot_product_flash_attention_options =
fe::graph::SDPA_attributes()
- .set_name("flash_attention")
+ .set_name("CUDNN_SDPA")
.set_is_inference(return_softmaxstats == false)
.set_causal_mask(is_causal)
.set_attn_scale(attn_scale)
@@ -287,12 +330,12 @@ auto build_graph_and_tensors(
}
auto seq_q = mha_graph->tensor(fe::graph::Tensor_attributes()
- .set_name("seq_q")
+ .set_name("Seq_q")
.set_dim({b, 1, 1, 1})
.set_stride({1, 1, 1, 1})
.set_data_type(fe::DataType_t::INT32));
auto seq_kv = mha_graph->tensor(fe::graph::Tensor_attributes()
- .set_name("seq_kv")
+ .set_name("Seq_kv")
.set_dim({b, 1, 1, 1})
.set_stride({1, 1, 1, 1})
.set_data_type(fe::DataType_t::INT32));
@@ -324,7 +367,146 @@ auto build_graph_and_tensors(
AT_CUDNN_FRONTEND_CHECK(mha_graph->build_plans(handle));
return std::make_tuple(
- mha_graph, Q, K, V, attn_scale, seed, offset, O, Stats);
+ std::move(mha_graph),
+ std::move(Q),
+ std::move(K),
+ std::move(V),
+ std::move(attn_scale),
+ std::move(seed),
+ std::move(offset),
+ std::move(O),
+ std::move(Stats));
+}
+
+auto build_graph_and_tensors_backward(
+ int64_t b,
+ int64_t h,
+ int64_t s_q,
+ int64_t s_kv,
+ int64_t d,
+ float scaling_factor,
+ bool is_causal,
+ float dropout_probability,
+ const Tensor& q,
+ const Tensor& k,
+ const Tensor& v,
+ const Tensor& o,
+ const Tensor& dO,
+ const Tensor& softmaxstats,
+ Tensor& dQ,
+ Tensor& dK,
+ Tensor& dV,
+ const Tensor& dropoutseed,
+ const Tensor& dropoutoffset,
+ cudnnHandle_t& handle,
+ MHAParams& params) {
+ auto dtype = fe::DataType_t::HALF;
+ if (q.scalar_type() == kBFloat16) {
+ dtype = fe::DataType_t::BFLOAT16;
+ }
+ auto mha_graph = std::make_shared<fe::graph::Graph>();
+ // We're baking in float accumulation and scale types
+ // in theory the graph may support other types, but they
+ // have not been tested
+ mha_graph->set_io_data_type(dtype)
+ .set_intermediate_data_type(fe::DataType_t::FLOAT)
+ .set_compute_data_type(fe::DataType_t::FLOAT);
+ auto Q = mha_graph->tensor(
+ fe::graph::Tensor_attributes()
+ .set_name("Q")
+ .set_dim(std::vector<int64_t>(q.sizes().begin(), q.sizes().end()))
+ .set_stride(
+ std::vector<int64_t>(q.strides().begin(), q.strides().end())));
+ auto K = mha_graph->tensor(
+ fe::graph::Tensor_attributes()
+ .set_name("K")
+ .set_dim(std::vector<int64_t>(k.sizes().begin(), k.sizes().end()))
+ .set_stride(
+ std::vector<int64_t>(k.strides().begin(), k.strides().end())));
+ auto V = mha_graph->tensor(
+ fe::graph::Tensor_attributes()
+ .set_name("V")
+ .set_dim(std::vector<int64_t>(v.sizes().begin(), v.sizes().end()))
+ .set_stride(
+ std::vector<int64_t>(v.strides().begin(), v.strides().end())));
+ auto attn_scale =
+ mha_graph->tensor(fe::graph::Tensor_attributes()
+ .set_name("Attn_scale")
+ .set_dim({1, 1, 1, 1})
+ .set_stride({1, 1, 1, 1})
+ .set_is_pass_by_value(true)
+ .set_data_type(fe::DataType_t::FLOAT));
+ auto Seed = mha_graph->tensor(fe::graph::Tensor_attributes()
+ .set_name("Seed")
+ .set_dim({1, 1, 1, 1})
+ .set_stride({1, 1, 1, 1})
+ .set_data_type(fe::DataType_t::INT32));
+ auto Offset = mha_graph->tensor(fe::graph::Tensor_attributes()
+ .set_name("Offset")
+ .set_dim({1, 1, 1, 1})
+ .set_stride({1, 1, 1, 1})
+ .set_data_type(fe::DataType_t::INT32));
+ auto O = mha_graph->tensor(
+ fe::graph::Tensor_attributes()
+ .set_name("O")
+ .set_dim(std::vector<int64_t>(o.sizes().begin(), o.sizes().end()))
+ .set_stride(
+ std::vector<int64_t>(o.strides().begin(), o.strides().end())));
+ auto STATS = mha_graph->tensor(
+ fe::graph::Tensor_attributes()
+ .set_name("Stats")
+ .set_dim(std::vector<int64_t>(
+ softmaxstats.sizes().begin(), softmaxstats.sizes().end()))
+ .set_stride(std::vector<int64_t>(
+ softmaxstats.strides().begin(), softmaxstats.strides().end()))
+ .set_data_type(fe::DataType_t::FLOAT));
+ auto DO = mha_graph->tensor(
+ fe::graph::Tensor_attributes()
+ .set_name("DO")
+ .set_dim(std::vector<int64_t>(dO.sizes().begin(), dO.sizes().end()))
+ .set_stride(
+ std::vector<int64_t>(dO.strides().begin(), dO.strides().end())));
+ auto sdpa_backward_options = fe::graph::SDPA_backward_attributes()
+ .set_name("CUDNN_SDPA_BACKWARD")
+ .set_causal_mask(is_causal)
+ .set_attn_scale(attn_scale);
+ if (dropout_probability != 0.0f) {
+ sdpa_backward_options.set_dropout(dropout_probability, Seed, Offset);
+ }
+ auto [DQ, DK, DV] =
+ mha_graph->sdpa_backward(Q, K, V, O, DO, STATS, sdpa_backward_options);
+ DQ->set_output(true)
+ .set_dim(std::vector<int64_t>(dQ.sizes().begin(), dQ.sizes().end()))
+ .set_stride(
+ std::vector<int64_t>(dQ.strides().begin(), dQ.strides().end()));
+ DK->set_output(true)
+ .set_dim(std::vector<int64_t>(dK.sizes().begin(), dK.sizes().end()))
+ .set_stride(
+ std::vector<int64_t>(dK.strides().begin(), dK.strides().end()));
+ DV->set_output(true)
+ .set_dim(std::vector<int64_t>(dV.sizes().begin(), dV.sizes().end()))
+ .set_stride(
+ std::vector<int64_t>(dV.strides().begin(), dV.strides().end()));
+ AT_CUDNN_FRONTEND_CHECK(mha_graph->validate());
+ AT_CUDNN_FRONTEND_CHECK(mha_graph->build_operation_graph(handle));
+ AT_CUDNN_FRONTEND_CHECK(
+ mha_graph->create_execution_plans({fe::HeurMode_t::A}));
+ AT_CUDNN_FRONTEND_CHECK(mha_graph->check_support(handle));
+ AT_CUDNN_FRONTEND_CHECK(mha_graph->build_plans(handle));
+ return std::make_tuple(
+ std::move(mha_graph),
+ std::move(Q),
+ std::move(K),
+ std::move(V),
+ std::move(attn_scale),
+ std::move(Seed),
+ std::move(Offset),
+ std::move(O),
+ std::move(DO),
+ std::move(STATS),
+ std::move(DQ),
+ std::move(DK),
+ std::move(DV));
}
void run_cudnn_SDP_fprop(
@@ -407,11 +589,92 @@ void run_cudnn_SDP_fprop(
auto workspace_size = mha_graph->get_workspace_size();
auto workspace_ptr =
c10::cuda::CUDACachingAllocator::get()->allocate(workspace_size);
- TORCH_INTERNAL_ASSERT(
+ TORCH_CHECK(
mha_graph->execute(handle, variant_pack, workspace_ptr.get()).is_good());
mhagraphcache.update(key, graph_and_tensors_values);
}
+void run_cudnn_SDP_bprop(
+ int64_t b,
+ int64_t h,
+ int64_t s_q,
+ int64_t s_kv,
+ int64_t d,
+ float scaling_factor,
+ bool is_causal,
+ float dropout_probability,
+ const Tensor& q,
+ const Tensor& k,
+ const Tensor& v,
+ const Tensor& o,
+ const Tensor& dO,
+ const Tensor& softmaxstats,
+ Tensor& dQ,
+ Tensor& dK,
+ Tensor& dV,
+ const Tensor& dropoutseed,
+ const Tensor& dropoutoffset) {
+ cudnnHandle_t handle = getCudnnHandle();
+ auto key = MHACacheKeyWrapper(
+ b, h, s_q, s_kv, d, q, k, v, dropout_probability, is_causal, true);
+ auto graph_and_tensors_backward_ptr = mhagraphbackwardcache.find(key);
+ graph_and_tensors_backward graph_and_tensors_backward_values;
+ if (graph_and_tensors_backward_ptr) {
+ graph_and_tensors_backward_values = *graph_and_tensors_backward_ptr;
+ } else {
+ graph_and_tensors_backward_values = build_graph_and_tensors_backward(
+ b,
+ h,
+ s_q,
+ s_kv,
+ d,
+ scaling_factor,
+ is_causal,
+ dropout_probability,
+ q,
+ k,
+ v,
+ o,
+ dO,
+ softmaxstats,
+ dQ,
+ dK,
+ dV,
+ dropoutseed,
+ dropoutoffset,
+ handle,
+ key.pod);
+ }
+ auto
+ [mha_graph, Q, K, V, attn_scale, Seed, Offset, O, Do, Stats, Dq, Dk, Dv] =
+ graph_and_tensors_backward_values;
+ std::unordered_map<std::shared_ptr<fe::graph::Tensor_attributes>, void*>
+ variant_pack = {// inputs
+ {Q, q.data_ptr()},
+ {K, k.data_ptr()},
+ {V, v.data_ptr()},
+ {O, o.data_ptr()},
+ {Do, dO.data_ptr()},
+ {Stats, softmaxstats.data_ptr()},
+ // outputs
+ {Dq, dQ.data_ptr()},
+ {Dk, dK.data_ptr()},
+ {Dv, dV.data_ptr()},
+ // pass by value
+ {attn_scale, &scaling_factor}};
+ if (dropout_probability != 0.0f) {
+ variant_pack[Seed] = dropoutseed.data_ptr();
+ variant_pack[Offset] = dropoutoffset.data_ptr();
+ }
+ auto workspace_size = mha_graph->get_workspace_size();
+ auto workspace_ptr =
+ c10::cuda::CUDACachingAllocator::get()->allocate(workspace_size);
+ TORCH_CHECK(!workspace_size || workspace_ptr.get());
+ TORCH_CHECK(
+ mha_graph->execute(handle, variant_pack, workspace_ptr.get()).is_good());
+ mhagraphbackwardcache.update(key, graph_and_tensors_backward_values);
+}
+
} // namespace native
} // namespace at
diff --git a/aten/src/ATen/native/cudnn/MHA.h b/aten/src/ATen/native/cudnn/MHA.h
index 6b3b9db862..0406cf783d 100644
--- a/aten/src/ATen/native/cudnn/MHA.h
+++ b/aten/src/ATen/native/cudnn/MHA.h
@@ -21,5 +21,27 @@ void run_cudnn_SDP_fprop(
Tensor& o,
Tensor& dropoutseed,
Tensor& dropoutoffset);
-}
+
+void run_cudnn_SDP_bprop(
+ int64_t b,
+ int64_t h,
+ int64_t s_q,
+ int64_t s_kv,
+ int64_t d,
+ float scaling_factor,
+ bool is_causal,
+ float dropout_probability,
+ const Tensor& q,
+ const Tensor& k,
+ const Tensor& v,
+ const Tensor& o,
+ const Tensor& dO,
+ const Tensor& softmaxstats,
+ Tensor& dQ,
+ Tensor& dK,
+ Tensor& dV,
+ const Tensor& dropoutseed,
+ const Tensor& dropoutoffset);
+
+} // namespace native
} // namespace at
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 517a65fa0e..7b48d2116f 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -14700,11 +14700,16 @@
CUDA: _scaled_dot_product_efficient_attention_backward_cuda
tags: nondeterministic_seeded
|
+- func: _scaled_dot_product_cudnn_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)
|
e503c1b40207dab770c28cbd4568cd9e105277b
|
Sat, 27 Apr 2024 04:57:13 +0000
|
[PATCH 0755/1000] Dynamo x autograd.Function supports setup_context (#124802)
|
Fixes part of #118397 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124802 Approved by: https://github.com/zou3519
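A minimal sketch of the newly supported pattern, a `torch.autograd.Function` with a custom `setup_context` compiled with Dynamo; the class below is illustrative, not the `ModuleLinear` used in the test:

```python
import torch

class ScaleBy2(torch.autograd.Function):
    # With setup_context defined, forward takes no ctx argument.
    @staticmethod
    def forward(x):
        return x * 2

    @staticmethod
    def setup_context(ctx, inputs, output):
        (x,) = inputs
        ctx.save_for_backward(x)

    @staticmethod
    def backward(ctx, grad_out):
        return grad_out * 2

def fn(x):
    return ScaleBy2.apply(x).sum()

x = torch.randn(4, requires_grad=True)
# Previously Dynamo raised "NYI - autograd.Function with custom setup_context
# method"; now forward and setup_context are fused into a rewritten forward
# (autograd_function_forward_rewritten) and traced without falling back.
torch.compile(fn, fullgraph=True)(x).backward()
print(x.grad)  # all 2s
```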
|
diff --git a/test/dynamo/test_autograd_function.py b/test/dynamo/test_autograd_function.py
index 492936d0a9..e30bb4bf17 100644
--- a/test/dynamo/test_autograd_function.py
+++ b/test/dynamo/test_autograd_function.py
@@ -253,11 +253,11 @@ class AutogradFunctionTests(torch._dynamo.test_case.TestCase):
def test_linear_setup_context(self):
model = ModuleLinear()
- opt_model = torch._dynamo.optimize("eager")(model)
+ opt_model = torch._dynamo.optimize("eager", nopython=True)(model)
input = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
weight = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
- optim_result = opt_model(input, weight)
eager_result = model(input, weight)
+ optim_result = opt_model(input, weight)
self.assertEqual(optim_result, eager_result)
def test_materialize_grad(self):
diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py
index 2fb8dc7241..2a940eb600 100644
--- a/torch/_dynamo/trace_rules.py
+++ b/torch/_dynamo/trace_rules.py
@@ -3205,6 +3205,7 @@ MOD_INLINELIST = {
"torch._dynamo.comptime",
"torch._dynamo.polyfill",
"torch._functorch.vmap",
+ "torch._functorch.autograd_function",
"torch._library.custom_ops",
"torch._functorch.eager_transforms",
"torch._inductor.test_operators",
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index 26f1eeb91c..e3a6ece18d 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -1628,13 +1628,12 @@ class AutogradFunctionApplyVariable(VariableTracker):
fwd_src = AttrSource(self.parent_source, member="forward")
ctx = AutogradFunctionContextVariable.create(tx, args, kwargs)
if isinstance(self.fwd_graph, types.FunctionType):
- fwd_fn = UserFunctionVariable(self.fwd_graph, source=fwd_src)
+ fwd_fn = UserFunctionVariable(self.fwd_graph)
fwd_args = [ctx, *args]
elif isinstance(self.fwd_graph, types.MethodType):
fwd_fn = UserMethodVariable(
self.fwd_graph.__func__,
UserDefinedClassVariable(self.fwd_graph.__class__),
- source=fwd_src,
)
fwd_args = [fwd_fn.obj, ctx, *args]
else:
diff --git a/torch/_dynamo/variables/misc.py b/torch/_dynamo/variables/misc.py
index 83ddc372bd..39de1c4a50 100644
--- a/torch/_dynamo/variables/misc.py
+++ b/torch/_dynamo/variables/misc.py
@@ -357,14 +357,19 @@ class AutogradFunctionVariable(VariableTracker):
and torch.is_grad_enabled()
and config.capture_autograd_function
):
- # Note - this is the same check used in autograd/function.py, except inverted.
- # If we want to support functorch transforms here, we will need to enable this.
- if (
- self.fn_cls.setup_context
- != torch.autograd.function._SingleLevelFunction.setup_context
- ):
- unimplemented(
- "NYI - autograd.Function with custom setup_context method"
+ from torch._functorch.autograd_function import (
+ autograd_function_forward_rewritten,
+ )
+ from torch.autograd.function import _is_setup_context_defined
+
+ forward_fn = self.fn_cls.forward
+
+ is_setup_ctx_defined = _is_setup_context_defined(self.fn_cls.setup_context)
+ if is_setup_ctx_defined:
+ # If setup_context is defined, we generate a new forward function which includes
+ # the original forward and setup_context function, and trace the new forward function.
+ forward_fn = autograd_function_forward_rewritten(
+ self.fn_cls.forward, self.fn_cls.setup_context
)
vjp_fn = self.fn_cls.vjp # type: ignore[attr-defined]
@@ -383,12 +388,25 @@ class AutogradFunctionVariable(VariableTracker):
tx.import_source(self.fn_cls.__module__), self.fn_cls.__name__
)
- return AutogradFunctionApplyVariable(
- self.fn_cls.forward,
+ val = AutogradFunctionApplyVariable(
+ forward_fn,
self.fn_cls.backward,
source,
source=AttrSource(source, member="apply"),
).call_function(tx, args, kwargs)
+ # Inside of AutogradFunctionApplyVariable.call_function, we use sourceless variable wrapping
+ # the forward function, as we don't want to generate guards for new_forward.__closure__
+ # if forward is rewritten by autograd_function_forward_rewritten.
+ # But we still need to generate correct guards for the original forward and setup_context
+ # functions, so we have to add guards manually.
+ if self.source:
+ fwd_src = AttrSource(self.source, "forward")
+ install_guard(fwd_src.make_guard(GuardBuilder.FUNCTION_MATCH))
+ if is_setup_ctx_defined:
+ setup_ctx_src = AttrSource(self.source, "setup_context")
+ install_guard(setup_ctx_src.make_guard(GuardBuilder.FUNCTION_MATCH))
+
+ return val
if self.source:
source = AttrSource(self.source, "forward")
@@ -443,7 +461,32 @@ class AutogradFunctionVariable(VariableTracker):
return self.call_apply(tx, args, kwargs)
else:
- unimplemented(f"Unsupported method: {name}")
+ from .. import trace_rules
+
+ source = AttrSource(self.source, name) if self.source is not None else None
+ try:
+ obj = inspect.getattr_static(self.fn_cls, name)
+ except AttributeError:
+ obj = None
+
+ if isinstance(obj, staticmethod):
+ func = obj.__get__(self.fn_cls)
+ if source is not None:
+ return (
+ trace_rules.lookup(func)
+ .create_with_source(func, source=source)
+ .call_function(tx, args, kwargs)
+ )
+ else:
+ return trace_rules.lookup(func)(func).call_function(
+ tx, args, kwargs
+ )
+ elif isinstance(obj, classmethod):
+ return variables.UserMethodVariable(
+ obj.__func__, self, source=source
+ ).call_function(tx, args, kwargs)
+ else:
+ unimplemented(f"Unsupported method: {name}")
@dataclasses.dataclass
diff --git a/torch/_dynamo/variables/user_defined.py b/torch/_dynamo/variables/user_defined.py
index 9db504cd16..544773f08a 100644
--- a/torch/_dynamo/variables/user_defined.py
+++ b/torch/_dynamo/variables/user_defined.py
@@ -2,6 +2,7 @@
import collections
import contextlib
+import enum
import functools
import importlib
import inspect
@@ -107,7 +108,7 @@ class UserDefinedClassVariable(UserDefinedVariable):
def var_getattr(self, tx, name: str) -> "VariableTracker":
from .. import trace_rules
- from . import ConstantVariable
+ from . import ConstantVariable, EnumVariable
from .builder import VariableBuilder
if name == "__name__":
@@ -144,14 +145,16 @@ class UserDefinedClassVariable(UserDefinedVariable):
if self.value is collections.OrderedDict and name == "fromkeys":
return super().var_getattr(tx, name)
- if name in getattr(self.value, "__dict__", {}) or (
+ if ConstantVariable.is_literal(obj):
+ return ConstantVariable.create(obj)
+ elif isinstance(obj, enum.Enum):
+ return EnumVariable(obj)
+ elif name in getattr(self.value, "__dict__", {}) or (
self.value.__module__.startswith("torch.")
or self.value.__module__ == "torch"
):
if source:
return VariableBuilder(tx, source)(obj)
- elif ConstantVariable.is_literal(obj):
- return ConstantVariable.create(obj)
return super().var_getattr(tx, name)
diff --git a/torch/_functorch/autograd_function.py b/torch/_functorch/autograd_function.py
index 98ffe6dd16..03bfd710ae 100644
--- a/torch/_functorch/autograd_function.py
+++ b/torch/_functorch/autograd_function.py
@@ -682,6 +682,15 @@ def reductify_leaf(
return grad_input
+def autograd_function_forward_rewritten(original_forward, original_setup_context):
+ def new_forward(ctx, *args, **kwargs):
+ output = original_forward(*args, **kwargs)
+ original_setup_context(ctx, args, output)
+ return output
+
+ return new_forward
+
+
class AutogradFunctionApply(HigherOrderOperator):
def __init__(self):
super().__init__("autograd_function_apply")
diff --git a/torch/autograd/function.py b/torch/autograd/function.py
index 3ff96953b2..9c624ce5d1 100644
--- a/torch/autograd/function.py
+++ b/torch/autograd/function.py
@@ -561,7 +561,7 @@ class Function(_SingleLevelFunction):
return bound_args.args
- is_setup_ctx_defined = cls.setup_context != _SingleLevelFunction.setup_context
+ is_setup_ctx_defined = _is_setup_context_defined(cls.setup_context)
if is_setup_ctx_defined:
args = bind_default_args(cls.forward, *args, **kwargs)
@@ -585,6 +585,10 @@ class Function(_SingleLevelFunction):
return (ctx._autograd_function_id,)
+def _is_setup_context_defined(fn):
+ return fn != _SingleLevelFunction.setup_context
+
+
def once_differentiable(fn):
@functools.wraps(fn)
def wrapper(ctx, *args):
|
2.41.0
|
3fd94d15ef49c99ffa32a8226d1f00b0cc26f68
|
Sat, 27 Apr 2024 07:22:27 +0000
|
[PATCH 0756/1000] [Distributed] [7/N] Fix clang-tidy warnings in torch/csrc/distributed/c10d (#124987)
|
This PR continues to clean up clang-tidy warnings in torch/csrc/distributed/c10d, following #124701. In addition, a libfmt dependency is added in the CMake code to enable using it in the headers. libfmt has to be added as a private dependency of torch_cuda and torch_hip because they include torch/csrc/distributed/c10d/Utils.hpp, which uses libfmt. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124987 Approved by: https://github.com/malfet
|
diff --git a/caffe2/CMakeLists.txt b/caffe2/CMakeLists.txt
index 83bf15893c..c7c0e8ad4e 100644
--- a/caffe2/CMakeLists.txt
+++ b/caffe2/CMakeLists.txt
@@ -1033,7 +1033,7 @@ elseif(USE_CUDA)
target_compile_definitions(torch_cuda PRIVATE USE_CUSPARSELT)
endif()
if(USE_NCCL)
- target_link_libraries(torch_cuda PRIVATE __caffe2_nccl)
+ target_link_libraries(torch_cuda PRIVATE __caffe2_nccl fmt::fmt-header-only)
target_compile_definitions(torch_cuda PRIVATE USE_NCCL)
endif()
if(USE_UCC)
@@ -1776,7 +1776,7 @@ if(USE_ROCM)
target_link_libraries(torch_hip PRIVATE ATEN_CUDA_FILES_GEN_LIB)
endif()
target_link_libraries(torch_hip PUBLIC torch_cpu_library ${Caffe2_PUBLIC_HIP_DEPENDENCY_LIBS})
- target_link_libraries(torch_hip PRIVATE ${Caffe2_HIP_DEPENDENCY_LIBS})
+ target_link_libraries(torch_hip PRIVATE ${Caffe2_HIP_DEPENDENCY_LIBS} fmt::fmt-header-only)
# Since PyTorch files contain HIP headers, this is also needed to capture the includes.
target_include_directories(torch_hip PRIVATE ${Caffe2_HIP_INCLUDE})
diff --git a/test/cpp/c10d/CMakeLists.txt b/test/cpp/c10d/CMakeLists.txt
index 5c8974836d..b7524da4f4 100644
--- a/test/cpp/c10d/CMakeLists.txt
+++ b/test/cpp/c10d/CMakeLists.txt
@@ -13,6 +13,7 @@ function(c10d_add_test test_src)
if(NOT WIN32)
target_link_libraries(${test_name} pthread)
endif()
+ target_link_libraries(${test_name} fmt::fmt-header-only)
add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
endfunction()
@@ -92,4 +93,5 @@ if(LINUX AND USE_GLOO AND USE_C10D_GLOO)
if(USE_CUDA)
target_link_libraries(example_allreduce torch_cuda)
endif()
+ target_link_libraries(example_allreduce fmt::fmt-header-only)
endif()
diff --git a/test/cpp/rpc/CMakeLists.txt b/test/cpp/rpc/CMakeLists.txt
index 6834b428ff..a430291f9d 100644
--- a/test/cpp/rpc/CMakeLists.txt
+++ b/test/cpp/rpc/CMakeLists.txt
@@ -5,7 +5,7 @@ set(TORCH_RPC_TEST_SOURCES
${TORCH_RPC_TEST_DIR}/test_wire_serialization.cpp
)
set(TORCH_RPC_TEST_DEPENDENCY_LIBS
- torch gtest
+ torch gtest fmt::fmt-header-only
)
if(USE_GLOO)
diff --git a/torch/csrc/distributed/c10d/Backend.hpp b/torch/csrc/distributed/c10d/Backend.hpp
index c9e8aec439..05a39ddc90 100644
--- a/torch/csrc/distributed/c10d/Backend.hpp
+++ b/torch/csrc/distributed/c10d/Backend.hpp
@@ -33,6 +33,7 @@ class TORCH_API Backend : public torch::CustomClassHolder {
std::chrono::milliseconds timeout;
// backend name
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::string backend;
};
@@ -397,7 +398,9 @@ class TORCH_API Backend : public torch::CustomClassHolder {
// appropriate logging etc.
void init();
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const int rank_;
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const int size_;
// Debug level setting. It is parsed once when ProcessGroup is constructed and
// remains the same across use of this process group.
diff --git a/torch/csrc/distributed/c10d/ProcessGroup.hpp b/torch/csrc/distributed/c10d/ProcessGroup.hpp
index dcb6d15547..8c805020e8 100644
--- a/torch/csrc/distributed/c10d/ProcessGroup.hpp
+++ b/torch/csrc/distributed/c10d/ProcessGroup.hpp
@@ -59,10 +59,11 @@ class TORCH_API ProcessGroup : public torch::CustomClassHolder {
std::chrono::milliseconds timeout;
// backend name
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::string backend;
};
- enum BackendType {
+ enum BackendType : uint8_t {
UNDEFINED = 0,
GLOO = 1,
NCCL = 2,
@@ -719,9 +720,13 @@ class TORCH_API ProcessGroup : public torch::CustomClassHolder {
void init();
c10::intrusive_ptr<c10d::Store> store_;
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const int rank_;
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const int size_;
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const c10::intrusive_ptr<Options> options_;
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const BackendType backendType_;
std::string pg_desc_;
diff --git a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp
index e95191436b..ada56cbee1 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupGloo.cpp
@@ -975,7 +975,8 @@ c10::intrusive_ptr<Work> ProcessGroupGloo::broadcast(
};
assertRootRank(invalidArgument, opts.rootRank, size_);
- assertRootTensor(invalidArgument, opts.rootTensor, inputs.size());
+ assertRootTensor(
+ invalidArgument, opts.rootTensor, static_cast<int64_t>(inputs.size()));
assertDense(invalidArgument, inputs);
assertTypeAndSizesMatch(invalidArgument, inputs);
@@ -1300,7 +1301,9 @@ class AsyncSparseAllreduceWork : public ProcessGroupGloo::AsyncWork {
// Allgatherv indices.
gloo::AllgathervOptions opts(context);
opts.setInput(
- const_cast<int64_t*>(input.const_data_ptr<int64_t>()), input.numel());
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
+ const_cast<int64_t*>(input.const_data_ptr<int64_t>()),
+ input.numel());
opts.setOutput(output.mutable_data_ptr<int64_t>(), counts);
opts.setTag(tag);
gloo::allgatherv(opts);
@@ -1308,7 +1311,7 @@ class AsyncSparseAllreduceWork : public ProcessGroupGloo::AsyncWork {
// Compile indices tensor per rank.
std::vector<at::Tensor> indices;
indices.reserve(metadata.size());
- size_t offset = 0;
+ int64_t offset = 0;
for (const auto& i : metadata) {
const auto nnz = i.nnz();
const auto numel = sparseDim * nnz;
@@ -1325,7 +1328,7 @@ class AsyncSparseAllreduceWork : public ProcessGroupGloo::AsyncWork {
const std::vector<SparseTensorMetadata>& metadata) {
// There are nnz #dense_dim()-dimensional tensors per rank.
const auto valueShape = tensor.sizes().slice(tensor.sparse_dim());
- size_t denseNumel = 1;
+ int64_t denseNumel = 1;
for (auto dim : valueShape) {
denseNumel *= dim;
}
@@ -1334,7 +1337,7 @@ class AsyncSparseAllreduceWork : public ProcessGroupGloo::AsyncWork {
int64_t totalSize = 0;
for (const auto i : c10::irange(metadata.size())) {
counts[i] = metadata[i].nnz() * denseNumel;
- totalSize += counts[i];
+ totalSize += static_cast<int64_t>(counts[i]);
}
auto output = at::empty({totalSize}, tensor.scalar_type());
@@ -1353,7 +1356,7 @@ class AsyncSparseAllreduceWork : public ProcessGroupGloo::AsyncWork {
// Compile values tensor per rank.
std::vector<at::Tensor> values;
values.reserve(metadata.size());
- size_t offset = 0;
+ int64_t offset = 0;
for (const auto& i : metadata) {
const auto nnz = i.nnz();
const auto numel = denseNumel * nnz;
@@ -1740,7 +1743,8 @@ c10::intrusive_ptr<Work> ProcessGroupGloo::reduce(
};
assertRootRank(invalidArgument, opts.rootRank, size_);
- assertRootTensor(invalidArgument, opts.rootTensor, inputs.size());
+ assertRootTensor(
+ invalidArgument, opts.rootTensor, static_cast<int64_t>(inputs.size()));
assertSingleElement(invalidArgument, inputs);
assertDense(invalidArgument, inputs);
@@ -1832,7 +1836,7 @@ class AsyncAllgatherWork : public ProcessGroupGloo::AsyncWork {
// Unflatten into output tensors.
for (auto& outputgroup : outputs) {
for (const auto j : c10::irange(outputgroup.size())) {
- outputgroup[j].copy_(flatOutputTensor[j]);
+ outputgroup[j].copy_(flatOutputTensor[static_cast<int64_t>(j)]);
}
}
}
@@ -2102,7 +2106,7 @@ class AsyncAllgatherCoalescedWork : public ProcessGroupGloo::AsyncWork {
for (const auto& t : output_lists[0]) {
output_numel += t.numel();
}
- output_numel *= output_lists.size();
+ output_numel *= static_cast<int64_t>(output_lists.size());
// Use single flat output tensor.
at::Tensor flatOutputTensor =
at::empty({output_numel}, output_lists[0][0].options());
@@ -2251,7 +2255,7 @@ class AsyncGatherWork : public ProcessGroupGloo::AsyncWork {
// Unflatten into output tensors on root process.
if (context->rank == root) {
for (const auto i : c10::irange(outputs[0].size())) {
- outputs[0][i].copy_(flatOutputTensor[i]);
+ outputs[0][i].copy_(flatOutputTensor[static_cast<int64_t>(i)]);
}
}
}
@@ -2805,6 +2809,7 @@ c10::intrusive_ptr<Work> ProcessGroupGloo::send(
// Construct unbound buffer.
auto context = getContext(tag);
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
auto buf = context->createUnboundBuffer(const_cast<void*>(ptr), size);
buf->send(dstRank, utag);
++seq_;
@@ -2945,8 +2950,8 @@ void ProcessGroupGloo::monitoredBarrier(
// only enforce timeout on rank 0. This is so that other ranks aren't timed
// out first, bringing down the job without reporting which rank timed out.
if (rank != 0) {
- auto sendWork = send(commTensor, 0, t1);
- auto recvWork = recv(commTensor, 0, t2);
+ auto sendWork = send(commTensor, 0, static_cast<int>(t1));
+ auto recvWork = recv(commTensor, 0, static_cast<int>(t2));
try {
sendWork->wait();
recvWork->wait();
@@ -2970,7 +2975,8 @@ void ProcessGroupGloo::monitoredBarrier(
// Failed/hanging ranks will not ack this call, letting rank 0 know about the
// failure.
for (const auto dstRank : c10::irange(1, worldSize)) {
- recvWorkMap.insert({dstRank, recv(commTensor, dstRank, t1)});
+ recvWorkMap.emplace(
+ dstRank, recv(commTensor, dstRank, static_cast<int>(t1)));
}
auto waitLoop = [&](const std::map<int, c10::intrusive_ptr<Work>>& works) {
@@ -3042,7 +3048,8 @@ void ProcessGroupGloo::monitoredBarrier(
// ensures that this is a true barrier in that all ranks exit it successfully
// or none of them do.
for (const auto dstRank : c10::irange(1, worldSize)) {
- sendWorkMap.insert({dstRank, send(commTensor, dstRank, t2)});
+ sendWorkMap.emplace(
+ dstRank, send(commTensor, dstRank, static_cast<int>(t2)));
}
waitLoop(sendWorkMap);
diff --git a/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp b/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp
index 94d7cd9cca..29d05a9693 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupMPI.cpp
@@ -514,7 +514,7 @@ c10::intrusive_ptr<Work> ProcessGroupMPI::allgather(
pgComm_));
for (const auto i : c10::irange(outputDataVec.size())) {
- outputDataVec[i].copy_(flatOutputTensor[i]);
+ outputDataVec[i].copy_(flatOutputTensor[static_cast<int64_t>(i)]);
}
};
auto entry = std::make_unique<WorkEntry>(
@@ -586,7 +586,8 @@ c10::intrusive_ptr<Work> ProcessGroupMPI::gather(
const std::vector<at::Tensor>& outputDataVec = entry->dst;
// copy the flattened output tensors to the outputs
for (const auto i : c10::irange(outputDataVec.size())) {
- outputDataVec.at(i).copy_(flatOutputTensor[i]);
+ outputDataVec.at(i).copy_(
+ flatOutputTensor[static_cast<int64_t>(i)]);
}
}
};
@@ -647,7 +648,7 @@ c10::intrusive_ptr<Work> ProcessGroupMPI::scatter(
// copy the input tensors to the flatten large send buffer
for (const auto i : c10::irange(inputDataVec.size())) {
- flatInputTensor[i].copy_(inputDataVec.at(i));
+ flatInputTensor[static_cast<int64_t>(i)].copy_(inputDataVec.at(i));
}
}
@@ -793,16 +794,18 @@ c10::intrusive_ptr<Work> ProcessGroupMPI::alltoall(
std::vector<int> recv_offsets(size_);
auto srcdata = entry->src;
auto dstdata = entry->dst;
- int64_t src_len = c10d::computeLengthsAndOffsets(
+ auto src_len = c10d::computeLengthsAndOffsets(
srcdata, &send_lengths, &send_offsets);
- int64_t dst_len = c10d::computeLengthsAndOffsets(
+ auto dst_len = c10d::computeLengthsAndOffsets(
dstdata, &recv_lengths, &recv_offsets);
std::vector<int64_t> send_lengthsL(
send_lengths.begin(), send_lengths.end());
std::vector<int64_t> recv_lengthsL(
recv_lengths.begin(), recv_lengths.end());
- at::Tensor srcFlatData = at::empty({src_len}, srcdata[0].options());
- at::Tensor dstFlatData = at::empty({dst_len}, dstdata[0].options());
+ at::Tensor srcFlatData =
+ at::empty({static_cast<int64_t>(src_len)}, srcdata[0].options());
+ at::Tensor dstFlatData =
+ at::empty({static_cast<int64_t>(dst_len)}, dstdata[0].options());
auto srcFlatDataSplits =
srcFlatData.split_with_sizes(c10::IntArrayRef(send_lengthsL), 0);
for (const auto i : c10::irange(size_)) {
diff --git a/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp b/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp
index a6086d28e9..e1b9900938 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp
@@ -31,12 +31,12 @@ struct CollectiveFingerPrint {
std::vector<int8_t> tensor_device_types_;
// input tensor sizes
std::vector<std::vector<int64_t>> tensor_sizes_;
- int sequence_number_;
+ uint64_t sequence_number_;
CollectiveFingerPrint(
OpType op_type,
const std::vector<at::Tensor>& input_tensors,
- int sequence_number)
+ uint64_t sequence_number)
: op_type_(op_type),
num_tensors_(input_tensors.size()),
sequence_number_(sequence_number) {
@@ -57,7 +57,7 @@ struct CollectiveFingerPrint {
std::vector<int8_t> tensor_dtypes,
std::vector<int8_t> tensor_device_types,
std::vector<std::vector<int64_t>> tensor_sizes,
- int sequence_number)
+ uint64_t sequence_number)
: op_type_(op_type),
num_tensors_(num_tensors),
tensor_dtypes_(std::move(tensor_dtypes)),
@@ -296,7 +296,7 @@ struct CollectiveFingerPrint {
// 1. OpType
data->push_back(static_cast<int64_t>(op_type_));
// sequence number
- data->push_back(sequence_number_);
+ data->push_back(static_cast<int64_t>(sequence_number_));
// 2. Num tensors
data->push_back(static_cast<int64_t>(num_tensors_));
// 3. Tensor dtypes
@@ -309,13 +309,13 @@ struct CollectiveFingerPrint {
}
// 5. Shapes
for (const auto& sizes : tensor_sizes_) {
- data->push_back(sizes.size());
+ data->push_back(static_cast<int64_t>(sizes.size()));
for (const auto& s : sizes) {
data->push_back(s);
}
}
// Serialize data into tensor
- int64_t data_size = data->size();
+ int64_t data_size = static_cast<int64_t>(data->size());
// Need to release here and get the ptr due to C++ parameter evaluation
// order.
auto d = data.release();
diff --git a/torch/csrc/distributed/c10d/PyProcessGroup.hpp b/torch/csrc/distributed/c10d/PyProcessGroup.hpp
index 684af0c2ec..1b1d307ea8 100644
--- a/torch/csrc/distributed/c10d/PyProcessGroup.hpp
+++ b/torch/csrc/distributed/c10d/PyProcessGroup.hpp
@@ -207,7 +207,7 @@ class TORCH_PYTHON_API PythonOnCompletionHook {
hook_.ptr() = nullptr;
}
- void operator()(std::shared_ptr<WorkInfo> workInfo) const {
+ void operator()(const std::shared_ptr<WorkInfo>& workInfo) const {
std::exception_ptr eptr;
{
py::gil_scoped_acquire acquire;
diff --git a/torch/csrc/distributed/c10d/TCPStore.hpp b/torch/csrc/distributed/c10d/TCPStore.hpp
index 91ed895266..03a7f124ca 100644
--- a/torch/csrc/distributed/c10d/TCPStore.hpp
+++ b/torch/csrc/distributed/c10d/TCPStore.hpp
@@ -30,10 +30,10 @@ class Counter {
return count_;
}
double variance() const noexcept {
- return m2_ / count_;
+ return m2_ / static_cast<double>(count_);
}
double sample_variance() const noexcept {
- return m2_ / (count_ - 1);
+ return m2_ / static_cast<double>(count_ - 1);
}
private:
diff --git a/torch/csrc/distributed/c10d/Utils.cpp b/torch/csrc/distributed/c10d/Utils.cpp
index 170ed8f8b5..c35c99d008 100644
--- a/torch/csrc/distributed/c10d/Utils.cpp
+++ b/torch/csrc/distributed/c10d/Utils.cpp
@@ -1,10 +1,6 @@
#include <torch/csrc/distributed/c10d/Utils.hpp>
-#include <algorithm>
#include <cstring>
-#include <memory>
-#include <string>
-#include <thread>
namespace c10d {
diff --git a/torch/csrc/distributed/c10d/Utils.hpp b/torch/csrc/distributed/c10d/Utils.hpp
index 36add3ad15..f0dfd562dd 100644
--- a/torch/csrc/distributed/c10d/Utils.hpp
+++ b/torch/csrc/distributed/c10d/Utils.hpp
@@ -4,6 +4,7 @@
#include <c10/util/Exception.h>
#include <c10/util/accumulate.h>
#include <c10/util/irange.h>
+#include <fmt/format.h>
#include <torch/csrc/distributed/c10d/Types.hpp>
#ifdef _WIN32
@@ -66,7 +67,7 @@ inline void assertSameType(
const std::string expected = type.toString();
const std::string actual = tensors[i].toString();
throw std::invalid_argument(
- "mixed types (" + expected + " and " + actual + ")");
+ fmt::format("mixed types ({} and {})", expected, actual));
}
}
}
@@ -96,7 +97,7 @@ inline std::string getCvarString(
/* parse environment variable in reverse order, so the early
* versions of a variable get higher priority than the latter
* versions of the same variable */
- for (int i = env.size() - 1; i >= 0; i--) {
+ for (ssize_t i = static_cast<ssize_t>(env.size()) - 1; i >= 0; i--) {
const char* val = std::getenv(env[i].c_str());
if (val == nullptr) {
continue;
@@ -123,7 +124,7 @@ inline int getCvarInt(const std::vector<std::string>& env, int def) {
/* parse environment variable in reverse order, so the early
* versions of a variable get higher priority than the latter
* versions of the same variable */
- for (int i = env.size() - 1; i >= 0; i--) {
+ for (ssize_t i = static_cast<ssize_t>(env.size()) - 1; i >= 0; i--) {
char* val = std::getenv(env[i].c_str());
if (val == nullptr) {
continue;
@@ -154,7 +155,7 @@ inline bool getCvarBool(const std::vector<std::string>& env, bool def) {
/* parse environment variable in reverse order, so the early
* versions of a variable get higher priority than the latter
* versions of the same variable */
- for (int i = env.size() - 1; i >= 0; i--) {
+ for (ssize_t i = static_cast<ssize_t>(env.size()) - 1; i >= 0; i--) {
char* val_ = std::getenv(env[i].c_str());
if (val_ == nullptr) {
continue;
@@ -166,6 +167,7 @@ inline bool getCvarBool(const std::vector<std::string>& env, bool def) {
std::string val = std::string(val_);
for (auto& x : val) {
+ // NOLINTNEXTLINE(*-narrowing-conversions)
x = std::tolower(x);
}
@@ -193,7 +195,7 @@ inline void assertSameSizes(
const auto expected = toString(sizes);
const auto actual = toString(tensors[i].sizes());
throw std::invalid_argument(
- "mixed sizes (" + expected + " and " + actual + ")");
+ fmt::format("mixed sizes ({} and {})", expected, actual));
}
}
}
@@ -211,22 +213,20 @@ inline void assertSameSizeAndType(const std::vector<at::Tensor>& tensors) {
if (!tensors[i].options().type_equal(options)) {
const auto expected = toString(options);
const auto actual = toString(tensors[i].options());
- throw std::invalid_argument(
- "argument contains mixed types (" + expected + " and " + actual +
- ")");
+ throw std::invalid_argument(fmt::format(
+ "argument contains mixed types ({} and {})", expected, actual));
}
if (!tensors[i].sizes().equals(sizes)) {
const auto expected = toString(sizes);
const auto actual = toString(tensors[i].sizes());
- throw std::invalid_argument(
- "argument contains mixed sizes (" + expected + " and " + actual +
- ")");
+ throw std::invalid_argument(fmt::format(
+          "argument contains mixed sizes ({} and {})", expected, actual));
}
}
}
inline void assertTypeMatch(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::DeprecatedTypeProperties& type,
const at::ArrayRef<at::Tensor> tensors,
size_t index) {
@@ -237,7 +237,7 @@ inline void assertTypeMatch(
}
inline void assertTypeMatch(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::TensorOptions& options,
const at::ArrayRef<at::Tensor> tensors,
size_t index) {
@@ -248,7 +248,7 @@ inline void assertTypeMatch(
}
inline void assertSizesMatch(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::IntArrayRef& sizes,
const at::ArrayRef<at::Tensor> tensors,
size_t index) {
@@ -259,7 +259,7 @@ inline void assertSizesMatch(
}
inline void assertLayoutMatch(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const c10::Layout& expected,
const at::ArrayRef<at::Tensor> tensors,
size_t index) {
@@ -271,7 +271,7 @@ inline void assertLayoutMatch(
}
inline void assertLayoutMatch(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors) {
const auto& layout = tensors[0].layout();
for (const auto i : c10::irange(1, tensors.size())) {
@@ -362,7 +362,7 @@ inline void assertSameDevice(
}
inline void assertTypeAndSizesMatch(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors,
const at::DeprecatedTypeProperties& type,
const at::IntArrayRef& sizes) {
@@ -373,7 +373,7 @@ inline void assertTypeAndSizesMatch(
}
inline void assertTypeAndSizesMatch(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors,
const at::TensorOptions& options,
const at::IntArrayRef& sizes) {
@@ -384,7 +384,7 @@ inline void assertTypeAndSizesMatch(
}
inline void assertTypeAndSizesMatch(
- std::function<void(const std::string&)> fn,
+ const std::function<void(const std::string&)>& fn,
const at::ArrayRef<at::Tensor> tensors) {
const auto& options = tensors[0].options();
const auto sizes = tensors[0].sizes();
@@ -463,6 +463,7 @@ inline std::vector<int> getDevices(const std::vector<at::Tensor>& tensors) {
std::vector<int> devices(tensors.size(), -1);
if (tensors[0].device().is_cuda()) {
for (const auto i : c10::irange(tensors.size())) {
+ // NOLINTNEXTLINE(bugprone-signed-char-misuse)
devices[i] = tensors[i].storage().device().index();
}
}
@@ -620,8 +621,7 @@ void sendBytes(
return;
}
- auto bytes = reinterpret_cast<const uint8_t*>(buffer);
- uint8_t* currentBytes = const_cast<uint8_t*>(bytes);
+ auto currentBytes = reinterpret_cast<const char*>(buffer);
int flags = 0;
@@ -637,10 +637,9 @@ void sendBytes(
#endif
while (bytesToSend > 0) {
- ssize_t bytesSent;
+ ssize_t bytesSent = 0;
SYSCHECK_ERR_RETURN_NEG1(
- bytesSent =
- ::send(socket, (const char*)currentBytes, bytesToSend, flags))
+ bytesSent = ::send(socket, currentBytes, bytesToSend, flags))
if (bytesSent == 0) {
C10_THROW_ERROR(DistNetworkError, std::strerror(ECONNRESET));
}
@@ -657,13 +656,12 @@ void recvBytes(int socket, T* buffer, size_t length) {
return;
}
- auto bytes = reinterpret_cast<uint8_t*>(buffer);
- uint8_t* currentBytes = bytes;
+ auto currentBytes = reinterpret_cast<char*>(buffer);
while (bytesToReceive > 0) {
- ssize_t bytesReceived;
+ ssize_t bytesReceived = 0;
SYSCHECK_ERR_RETURN_NEG1(
- bytesReceived = recv(socket, (char*)currentBytes, bytesToReceive, 0))
+ bytesReceived = recv(socket, currentBytes, bytesToReceive, 0))
if (bytesReceived == 0) {
C10_THROW_ERROR(DistNetworkError, std::strerror(ECONNRESET));
}
@@ -684,7 +682,7 @@ void sendVector(int socket, const std::vector<T>& vec, bool moreData = false) {
// receive a vector as sent in sendVector
template <typename T>
std::vector<T> recvVector(int socket) {
- SizeType valueSize;
+ SizeType valueSize = 0;
recvBytes<SizeType>(socket, &valueSize, 1);
std::vector<T> value(valueSize);
recvBytes<T>(socket, value.data(), value.size());
@@ -716,7 +714,7 @@ inline void sendString(
// receive a string as sent in sendString
inline std::string recvString(int socket) {
- SizeType valueSize;
+ SizeType valueSize = 0;
recvBytes<SizeType>(socket, &valueSize, 1);
std::vector<char> value(valueSize);
recvBytes<char>(socket, value.data(), value.size());
diff --git a/torch/csrc/distributed/c10d/comm.hpp b/torch/csrc/distributed/c10d/comm.hpp
index ee8db21c17..d2c608532b 100644
--- a/torch/csrc/distributed/c10d/comm.hpp
+++ b/torch/csrc/distributed/c10d/comm.hpp
@@ -87,6 +87,7 @@ class TORCH_API GradBucket {
std::vector<c10::IntArrayRef> sizes_vec_;
// Model parameters for this bucket.
+ // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
const std::vector<at::Tensor> parameters_;
// Predefined sparse indices for this bucket (only used for sparse tensors).
diff --git a/torch/csrc/distributed/c10d/default_comm_hooks.hpp b/torch/csrc/distributed/c10d/default_comm_hooks.hpp
index 683841f3ba..a4f47f13bb 100644
--- a/torch/csrc/distributed/c10d/default_comm_hooks.hpp
+++ b/torch/csrc/distributed/c10d/default_comm_hooks.hpp
@@ -5,7 +5,7 @@
namespace c10d {
-enum class BuiltinCommHookType {
+enum class BuiltinCommHookType : uint8_t {
ALLREDUCE = 1,
FP16_COMPRESS = 2,
};
diff --git a/torch/csrc/distributed/c10d/intra_node_comm.hpp b/torch/csrc/distributed/c10d/intra_node_comm.hpp
index 0e65ebf8d6..ab27ecef97 100644
--- a/torch/csrc/distributed/c10d/intra_node_comm.hpp
+++ b/torch/csrc/distributed/c10d/intra_node_comm.hpp
@@ -14,9 +14,18 @@ constexpr size_t kDefaultBufferSize = 10ull * 1024 * 1024;
using NvlMesh = std::array<std::array<size_t, kMaxDevices>, kMaxDevices>;
using HybridCubeMesh = std::array<std::array<int, 4>, kMaxDevices>;
-enum class Topology { UNKNOWN = 0, FULLY_CONNECTED = 1, HYBRID_CUBE_MESH = 2 };
+enum class Topology : uint8_t {
+ UNKNOWN = 0,
+ FULLY_CONNECTED = 1,
+ HYBRID_CUBE_MESH = 2
+};
-enum class AllReduceAlgo { NONE = 0, ONE_SHOT = 1, TWO_SHOT = 2, HCM = 3 };
+enum class AllReduceAlgo : uint8_t {
+ NONE = 0,
+ ONE_SHOT = 1,
+ TWO_SHOT = 2,
+ HCM = 3
+};
class TORCH_API IntraNodeComm : public c10::intrusive_ptr_target {
public:
diff --git a/torch/csrc/distributed/c10d/reducer_timer.hpp b/torch/csrc/distributed/c10d/reducer_timer.hpp
index acd8975c4d..5f57051455 100644
--- a/torch/csrc/distributed/c10d/reducer_timer.hpp
+++ b/torch/csrc/distributed/c10d/reducer_timer.hpp
@@ -23,7 +23,7 @@ class TORCH_API Timer {
int64_t backward_comm_end_time = kUnsetTime;
public:
- enum class Event {
+ enum class Event : uint8_t {
kForwardStart,
kBackwardComputeStart,
kBackwardComputeEnd,
|
2.41.0
|
1a4740e72fd954fa18b8e2737029447f5781bda
|
Fri, 26 Apr 2024 15:37:09 -0700
|
[PATCH 0757/1000] Disable the CUDA fast path for split_with_sizes_copy when capturing (#125052)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125052 Approved by: https://github.com/awgu, https://github.com/eellison, https://github.com/eqy
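A small sketch (condensed from the test added below) of capturing `split_with_sizes_copy` in a CUDA graph; the shapes are illustrative:

```python
import torch

x = torch.randn(8, 4, device="cuda")
split_sizes = [3, 5]
out = [torch.empty(s, 4, device="cuda") for s in split_sizes]

# While a stream is capturing, the CUDA fast path is skipped (see the
# is_capturing check in the patch) and the composite fallback runs instead,
# so capture and replay produce correct results.
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
    torch.split_with_sizes_copy(x, split_sizes, dim=0, out=out)

x.copy_(torch.randn(8, 4, device="cuda"))  # x is a static input to the graph
g.replay()
for expect, got in zip(torch.split(x, split_sizes, dim=0), out):
    torch.testing.assert_close(expect, got)
```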
|
diff --git a/aten/src/ATen/native/cuda/TensorShape.cu b/aten/src/ATen/native/cuda/TensorShape.cu
index 97cf4dade1..d82901ef94 100644
--- a/aten/src/ATen/native/cuda/TensorShape.cu
+++ b/aten/src/ATen/native/cuda/TensorShape.cu
@@ -4,6 +4,7 @@
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/Resize.h>
#include <ATen/native/TensorShape.h>
+#include <c10/cuda/CUDAGraphsC10Utils.h>
#include <c10/util/TypeCast.h>
#ifndef AT_PER_OPERATOR_HEADERS
@@ -703,12 +704,15 @@ void split_with_sizes_copy_out_cuda(
IntArrayRef split_sizes,
int64_t dim,
TensorList out) {
+ const bool is_capturing = at::cuda::currentStreamCaptureStatusMayInitCtx() !=
+ at::cuda::CaptureStatus::None;
bool contiguous_no_cast = self.is_non_overlapping_and_dense();
for (const auto& t : out) {
contiguous_no_cast &= t.is_non_overlapping_and_dense();
contiguous_no_cast &= (t.dtype() == self.dtype());
}
- if (contiguous_no_cast) {
+ // TODO(yifu): make the fast path work for CUDA graph
+ if (!is_capturing && contiguous_no_cast) {
// Perform equivalent checks performed by the composite impl
if (dim < 0) {
dim = at::maybe_wrap_dim(dim, self.dim());
diff --git a/test/test_torch.py b/test/test_torch.py
index 641dac417f..21318f3b16 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -9399,6 +9399,23 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j],
for expect, t in zip(expects, out):
self.assertTrue(expect.eq(t).all().item())
+ if not torch.cuda.is_available():
+ continue
+
+ # Test with cuda graph
+ out = [torch.zeros_like(v) for v in views]
+ for expect, t in zip(expects, out):
+ if expect.numel() != 0:
+ self.assertFalse(expect.eq(t).all().item())
+
+ g = torch.cuda.CUDAGraph()
+ with torch.cuda.graph(g):
+ torch.split_with_sizes_copy(x, split_sizes, dim=dim, out=out)
+
+ g.replay()
+ for expect, t in zip(expects, out):
+ self.assertTrue(expect.eq(t).all().item())
+
def test_type(self):
x = torch.randn(3, 3).double()
self.assertEqual(x.type('torch.FloatTensor').dtype, torch.float32)
|
2.41.0
|
4a1b3e09349de3a6a2dd43020e265a88965e4d7
|
Fri, 26 Apr 2024 15:11:14 -0700
|
[PATCH 0758/1000] Make c10d_functional ops call into _c10d_functional ops (#124979)
|
This PR removes the legacy impls of c10d_functional ops, which are now irrelevant. For backward compatibility purposes, c10d_functional ops now call into _c10d_functional ops. We also changed c10d_functional ops to be CompositeExplicitAutograd, so that when traced, only _c10d_functional ops appear in the graph. After this, we'll be able to remove the Inductor IR for the legacy functional collectives. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124979 Approved by: https://github.com/wanchaol
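A rough sketch of the new behavior: tracing a call to a legacy `c10d_functional` op should now yield only `_c10d_functional` ops in the graph. It assumes a process group has already been initialized (e.g. a single-rank group via `dist.init_process_group`), and the `(tag, ranks, group_size)` trailing arguments mirror the legacy schema:

```python
import torch
import torch.distributed as dist
from torch.fx.experimental.proxy_tensor import make_fx

def legacy_allreduce(x):
    ranks = list(range(dist.get_world_size()))
    y = torch.ops.c10d_functional.all_reduce(x, "sum", "", ranks, dist.get_world_size())
    return torch.ops.c10d_functional.wait_tensor(y)

gm = make_fx(legacy_allreduce)(torch.ones(4))
gm.graph.print_tabular()
# With this patch the legacy entry points are CompositeExplicitAutograd wrappers,
# so the printed graph should reference torch.ops._c10d_functional.all_reduce /
# wait_tensor rather than the old c10d_functional nodes.
```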
|
diff --git a/test/distributed/test_inductor_collectives.py b/test/distributed/test_inductor_collectives.py
index 84db55d055..19853718b9 100644
--- a/test/distributed/test_inductor_collectives.py
+++ b/test/distributed/test_inductor_collectives.py
@@ -429,100 +429,16 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
-
- FileCheck().check_regex(
- "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=\\[u\\d+, u\\d+\\], input_split_sizes=\\[u\\d+, u\\d+\\]" # noqa: B950
- ).run(code)
-
- eager_out = example(*inputs, **trs)
- inductor_out = compiled_fn(*inputs, **trs)
- self.assertTrue(same(eager_out, inductor_out, tol=0.001))
-
- @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
- @skip_if_lt_x_gpu(2)
- @patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
- # TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
- @patch.object(torch._inductor.config, "compile_threads", 1)
- def test_all_to_all_single_inductor_output_split_sizes_none(self):
- def example(inp, input_split_sizes_tensor, *, tag, ranks, group_size):
- input_split_sizes = _tolist_with_constrain_as_size(input_split_sizes_tensor)
- a2a = torch.ops.c10d_functional.all_to_all_single(
- inp,
- None,
- input_split_sizes,
- tag,
- ranks,
- group_size,
- )
- a2a = torch.ops.c10d_functional.wait_tensor(a2a)
- out = a2a / a2a.sum(dim=0)
- return out
-
- with _dynamo_dist_per_rank_init(self.rank, self.world_size):
- input_split_sizes_tensor = torch.tensor(
- [1] * self.world_size, dtype=torch.int64
+ (
+ FileCheck()
+ .check_regex(
+ "torch.ops._c10d_functional.all_to_all_single.default\\("
+ "arg\\d+_\\d+, "
+ "\\[u\\d+, u\\d+\\], "
+ "\\[u\\d+, u\\d+\\]"
+ )
+ .run(code)
)
- inputs = (
- torch.ones(self.world_size, self.world_size, device="cuda")
- * (self.rank + 1),
- input_split_sizes_tensor,
- )
- trs = self.get_world_trs()
-
- compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
- code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck().check_regex(
- "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=None, input_split_sizes=\\[u\\d+, u\\d+\\]" # noqa: B950
- ).run(code)
-
- eager_out = example(*inputs, **trs)
- inductor_out = compiled_fn(*inputs, **trs)
- self.assertTrue(same(eager_out, inductor_out, tol=0.001))
-
- @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
- @skip_if_lt_x_gpu(2)
- @patch.object(torch._dynamo.config, "capture_scalar_outputs", True)
- # TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
- @patch.object(torch._inductor.config, "compile_threads", 1)
- def test_all_to_all_single_inductor_input_split_sizes_none(self):
- def example(inp, output_split_sizes_tensor, *, tag, ranks, group_size):
- output_split_sizes = _tolist_with_constrain_as_size(
- output_split_sizes_tensor
- )
- a2a = torch.ops.c10d_functional.all_to_all_single(
- inp,
- output_split_sizes,
- None,
- tag,
- ranks,
- group_size,
- )
- a2a = torch.ops.c10d_functional.wait_tensor(a2a)
- out = a2a / a2a.sum(dim=0)
- return out
-
- with _dynamo_dist_per_rank_init(
- self.rank, self.world_size
- ), torch._dynamo.config.patch(
- dynamic_shapes=True,
- capture_dynamic_output_shape_ops=True,
- capture_scalar_outputs=True,
- ):
- output_split_sizes_tensor = torch.tensor(
- [1] * self.world_size, dtype=torch.int64
- )
- inputs = (
- torch.ones(self.world_size, self.world_size, device="cuda")
- * (self.rank + 1),
- output_split_sizes_tensor,
- )
- trs = self.get_world_trs()
-
- compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
- code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck().check_regex(
- "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=\\[u\\d+, u\\d+\\], input_split_sizes=None" # noqa: B950
- ).run(code)
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
@@ -555,11 +471,16 @@ class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase):
compiled_fn = torch.compile(example, fullgraph=True, dynamic=True)
code = run_and_get_triton_code(compiled_fn, *inputs, **trs)
- FileCheck().check_regex(
- "all_to_all_single\\(buf\\d+\\[0\\], buf\\d+_inputs\\[0\\], output_split_sizes=None, input_split_sizes=None"
- ).run(
- code
- ) # noqa: B950
+ (
+ FileCheck()
+ .check_regex(
+ "torch.ops._c10d_functional.all_to_all_single.default\\("
+ "arg\\d+_\\d+, "
+ "\\[\\(s\\d+ // \\d\\), \\(s\\d+ // \\d\\)\\], "
+ "\\[\\(s\\d+ // \\d\\), \\(s\\d+ // \\d\\)\\]"
+ )
+ .run(code)
+ )
eager_out = example(*inputs, **trs)
inductor_out = compiled_fn(*inputs, **trs)
@@ -598,16 +519,14 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck().check("buf0 = empty").check("buf0.copy_(arg0_1)").check(
- "buf1 = buf0"
- ).check("buf1_work = dist.all_reduce(buf1").check(
- "fun_col_impl._register_tensor_work(buf1, buf1_work)"
- ).check(
- "buf0 = _wait_tensor(buf0)"
- ).check(
- "return (buf0, )"
- ).run(
- code
+ (
+ FileCheck()
+ .check("buf0 = empty_strided")
+ .check(".run(arg0_1, buf0, 16")
+ .check("buf1 = torch.ops._c10d_functional.all_reduce_.default(buf0")
+ .check("buf3 = torch.ops._c10d_functional.wait_tensor.default(buf0")
+ .check("return (buf0")
+ .run(code)
)
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
@@ -632,22 +551,16 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
- # NOTE: Make sure we are not unneccessarily copying the outputs of
- # wait_tensors before they are returned from the graph.
- FileCheck().check("buf1 = buf0; del buf0 # reuse").check_not(
- "buf1.copy_("
- ).check("buf2 = buf1").check("buf2_work = dist.all_reduce(buf2").check(
- "fun_col_impl._register_tensor_work(buf2, buf2_work)"
- ).check(
- "buf1 = _wait_tensor(buf1)"
- ).check(
- "buf4 = buf1"
- ).check(
- "buf5 = empty"
- ).check(
- "return (buf1, buf5"
- ).run(
- code
+ (
+ FileCheck()
+ .check("buf0 = empty_strided")
+ .check(".run(arg0_1, buf0")
+ .check("buf1 = torch.ops._c10d_functional.all_reduce_.default(buf0")
+ .check("buf3 = torch.ops._c10d_functional.wait_tensor.default(buf0")
+ .check("buf5 = empty_strided")
+ .check(".run(buf5, 16")
+ .check("return (buf0, buf5")
+ .run(code)
)
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
@@ -675,22 +588,17 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck().check("buf0 = empty").check("buf5 = empty").check(
- "triton_poi__0.run(arg0_1, buf0, buf5"
- ).check_not("copy_(").check("buf1 = buf0; del buf0 # reuse").check(
- "buf2 = buf1"
- ).check(
- "buf2_work = dist.all_reduce(buf2"
- ).check(
- "fun_col_impl._register_tensor_work(buf2, buf2_work)"
- ).check(
- "buf1 = _wait_tensor(buf1)"
- ).check(
- "buf4 = buf1"
- ).check(
- "return (buf1, buf5, buf6"
- ).run(
- code
+ (
+ FileCheck()
+ .check("buf0 = empty_strided")
+ .check("buf5 = empty_strided")
+ .check(".run(arg0_1, buf0, buf5, 16")
+ .check("buf1 = torch.ops._c10d_functional.all_reduce_.default(buf0")
+ .check("buf3 = torch.ops._c10d_functional.wait_tensor.default(buf0")
+ .check("buf6 = empty_strided")
+ .check(".run(buf6, 16")
+ .check("return (buf0, buf5, buf6")
+ .run(code)
)
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
@@ -1136,31 +1044,22 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: Make sure we are not unneccessarily copying the outputs of
# wait_tensors before they are returned from the graph.
- FileCheck().check("buf0 = empty").check("buf5 = empty").check(
- "triton_poi__0.run(arg0_1, buf0, buf5"
- ).check("buf1 = empty").check("buf2 = empty").check_not("copy_(").check(
- "buf3_inputs = [buf0,arg0_1]"
- ).check(
- "buf3 = [buf1,buf2]"
- ).check(
- "buf3_work = fun_col_impl._all_gather_into_tensor_coalesced_fallback("
- "output_tensors=buf3, input_tensors=buf3_inputs"
- ).check(
- "fun_col_impl._register_tensor_work(buf3, buf3_work)"
- ).check(
- "buf1 = _wait_tensor(buf1)"
- ).check(
- "buf4 = buf1"
- ).check(
- "buf6 = buf0; del buf0 # reuse"
- ).check(
- "buf2 = _wait_tensor(buf2)"
- ).check(
- "buf7 = buf2"
- ).check(
- "return (buf1, buf5, buf6, buf2"
- ).run(
- code
+ (
+ FileCheck()
+ .check("buf0 = empty_strided")
+ .check("buf6 = empty_strided")
+ .check(".run(arg0_1, buf0, buf6, 16")
+ .check(
+ "buf1 = torch.ops._c10d_functional.all_gather_into_tensor_coalesced.default([buf0, arg0_1]"
+ )
+ .check("buf2 = buf1[0]")
+ .check("buf3 = buf1[1]")
+ .check("buf4 = torch.ops._c10d_functional.wait_tensor.default(buf2")
+ .check("buf7 = buf0; del buf0 # reuse")
+ .check(".run(buf7, 16")
+ .check("buf8 = torch.ops._c10d_functional.wait_tensor.default(buf3")
+ .check("return (buf2, buf6, buf7, buf3")
+ .run(code)
)
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
@@ -1191,29 +1090,22 @@ class TestCollectivesInductor(DynamoDistributedSingleProcTestCase):
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# NOTE: The first return value should be the output of the first wait_tensor.
# We want to make sure no unneccessary copy is made.
- FileCheck().check("buf0 = empty").check("buf5 = empty").check(
- "triton_poi__0.run(arg0_1, buf0, buf5"
- ).check("buf1 = empty").check("buf2 = empty").check_not("copy_(").check(
- "buf3 = [buf1,buf2]"
- ).check(
- "buf3_work = fun_col_impl._reduce_scatter_tensor_coalesced_fallback("
- "output_tensors=buf3, input_tensors=buf3_inputs"
- ).check(
- "fun_col_impl._register_tensor_work(buf3, buf3_work)"
- ).check(
- "buf1 = _wait_tensor(buf1)"
- ).check(
- "buf4 = buf1"
- ).check(
- "buf6 = buf0; del buf0 # reuse"
- ).check(
- "buf2 = _wait_tensor(buf2)"
- ).check(
- "buf7 = buf2"
- ).check(
- "return (buf1, buf5, buf6, buf2"
- ).run(
- code
+ (
+ FileCheck()
+ .check("buf0 = empty_strided")
+ .check("buf6 = empty_strided")
+ .check(".run(arg0_1, buf0, buf6, 16")
+ .check(
+ "buf1 = torch.ops._c10d_functional.reduce_scatter_tensor_coalesced.default([buf0, arg0_1]"
+ )
+ .check("buf2 = buf1[0]")
+ .check("buf3 = buf1[1]")
+ .check("buf4 = torch.ops._c10d_functional.wait_tensor.default(buf2")
+ .check("buf7 = buf0; del buf0 # reuse")
+ .check(".run(buf7, 16")
+ .check("buf8 = torch.ops._c10d_functional.wait_tensor.default(buf3")
+ .check("return (buf2, buf6, buf7, buf3")
+ .run(code)
)
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
diff --git a/torch/distributed/_functional_collectives.py b/torch/distributed/_functional_collectives.py
index afd2b382f9..93aa88d9e7 100644
--- a/torch/distributed/_functional_collectives.py
+++ b/torch/distributed/_functional_collectives.py
@@ -5,12 +5,10 @@ from typing import cast, List, Optional, Tuple, TYPE_CHECKING, Union
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
-from torch._custom_ops import impl_abstract
from torch.distributed.device_mesh import DeviceMesh
from torch.fx.experimental.proxy_tensor import get_innermost_proxy_mode
from . import _functional_collectives_impl as fun_col_impl
-from ._functional_collectives_impl import _register_tensor_wrapper # noqa: F401
try:
from torch.utils._cxx_pytree import tree_map_only
@@ -626,7 +624,6 @@ class AsyncCollectiveTensor(torch.Tensor):
# eventually, this avoids pytree slowdown
res = func(args[0].elem, args[1])
wrapper_res = AsyncCollectiveTensor(res)
- _register_tensor_wrapper(wrapper_res)
return wrapper_res
is_view_op = _is_view_op(func)
@@ -641,7 +638,6 @@ class AsyncCollectiveTensor(torch.Tensor):
# wait_tensor is idepotent and will do stream sync only once
assert not isinstance(e, AsyncCollectiveTensor)
res = AsyncCollectiveTensor(e)
- _register_tensor_wrapper(res)
return res
unwrapped_args = tree_map_only(AsyncCollectiveTensor, unwrap, args)
@@ -927,7 +923,36 @@ def _reduce_scatter_tensor_coalesced_native_meta(
]
-def _register_ops():
+if not torch._running_with_deploy():
+ # Library MUST be defined at module scope or it doesn't work
+ # Creating a "DEF" Library always crashes torch::deploy so we create our
+ # Library instances here guarded against running inside it
+ lib_impl = torch.library.Library("_c10d_functional", "IMPL")
+ lib_impl.impl("all_reduce", _all_reduce_meta, "Meta")
+ lib_impl.impl("all_reduce_", _all_reduce__meta, "Meta")
+ lib_impl.impl("all_reduce_coalesced", _all_reduce_coalesced_meta, "Meta")
+ lib_impl.impl("all_reduce_coalesced_", _all_reduce_coalesced__meta, "Meta")
+ lib_impl.impl("wait_tensor", _wait_tensor_meta, "Meta")
+ lib_impl.impl("all_gather_into_tensor", _all_gather_into_tensor_native_meta, "Meta")
+ lib_impl.impl(
+ "all_gather_into_tensor_coalesced",
+ _all_gather_into_tensor_coalesced_native_meta,
+ "Meta",
+ )
+ lib_impl.impl("reduce_scatter_tensor", _reduce_scatter_tensor_native_meta, "Meta")
+ lib_impl.impl(
+ "reduce_scatter_tensor_coalesced",
+ _reduce_scatter_tensor_coalesced_native_meta,
+ "Meta",
+ )
+ lib_impl.impl("all_to_all_single", _all_to_all_single_meta, "Meta")
+ lib_impl.impl("broadcast", _broadcast_meta, "Meta")
+ lib_impl.impl("broadcast_", _broadcast__meta, "Meta")
+
+ # Register legacy ops for backward compatibility
+ # TODO(yifu): remove these in functional collective beta release
+ legacy_lib = torch.library.Library("c10d_functional", "DEF")
+ legacy_lib_impl = torch.library.Library("c10d_functional", "IMPL")
ops_defs = [
"broadcast(Tensor self, int src, str tag, int[] ranks, int group_size) -> Tensor",
"all_reduce(Tensor self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor",
@@ -944,45 +969,9 @@ def _register_ops():
for op_def in ops_defs:
op_name = op_def[0 : op_def.index("(")]
backend_impl = getattr(fun_col_impl, f"_{op_name}")
- meta_impl = getattr(my_module, f"_{op_name}_meta")
- c10_lib.define(op_def, tags=torch.Tag.pt2_compliant_tag)
- c10_lib_impl.impl(op_name, backend_impl, "CompositeExplicitAutograd")
- impl_abstract(f"c10d_functional::{op_name}")(meta_impl)
-
+ legacy_lib.define(op_def, tags=torch.Tag.pt2_compliant_tag)
+ legacy_lib_impl.impl(op_name, backend_impl, "CompositeImplicitAutograd")
-if not torch._running_with_deploy():
- # Library MUST be defined at module scope or it doesn't work
- # Creating a "DEF" Library always crashes torch::deploy so we create our Library instances here
- # guarded against running inside it
- c10_lib = torch.library.Library("c10d_functional", "DEF")
- c10_lib_impl = torch.library.Library("c10d_functional", "IMPL")
- _register_ops()
-
- _c10_lib_impl = torch.library.Library("_c10d_functional", "IMPL")
- _c10_lib_impl.impl("all_reduce", _all_reduce_meta, "Meta")
- _c10_lib_impl.impl("all_reduce_", _all_reduce__meta, "Meta")
- _c10_lib_impl.impl("all_reduce_coalesced", _all_reduce_coalesced_meta, "Meta")
- _c10_lib_impl.impl("all_reduce_coalesced_", _all_reduce_coalesced__meta, "Meta")
- _c10_lib_impl.impl("wait_tensor", _wait_tensor_meta, "Meta")
- _c10_lib_impl.impl(
- "all_gather_into_tensor", _all_gather_into_tensor_native_meta, "Meta"
- )
- _c10_lib_impl.impl(
- "all_gather_into_tensor_coalesced",
- _all_gather_into_tensor_coalesced_native_meta,
- "Meta",
- )
- _c10_lib_impl.impl(
- "reduce_scatter_tensor", _reduce_scatter_tensor_native_meta, "Meta"
- )
- _c10_lib_impl.impl(
- "reduce_scatter_tensor_coalesced",
- _reduce_scatter_tensor_coalesced_native_meta,
- "Meta",
- )
- _c10_lib_impl.impl("all_to_all_single", _all_to_all_single_meta, "Meta")
- _c10_lib_impl.impl("broadcast", _broadcast_meta, "Meta")
- _c10_lib_impl.impl("broadcast_", _broadcast__meta, "Meta")
else:
warnings.warn(
"PyTorch Distributed functional collectives do not work with torch::deploy."
diff --git a/torch/distributed/_functional_collectives_impl.py b/torch/distributed/_functional_collectives_impl.py
index 308b317f52..7abd33e42a 100644
--- a/torch/distributed/_functional_collectives_impl.py
+++ b/torch/distributed/_functional_collectives_impl.py
@@ -1,289 +1,73 @@
-import logging
-import warnings
-import weakref
-from typing import cast, Dict, List, Optional
+from typing import List, Optional
import torch
-import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d
"""
-Moved eager kernel implementations to a separate file partly for readability and partly as it is currently
-easier in dynamo to set tracing policy on a file-by-file level.
-
-Do not put code in this file that Dynamo is expected to trace into, as dynamo may disallow this whole file.
-
-DEBUG/TESTING HELPERS:
-
-This module includes some helpers that are quite useful when debugging or testing functional collectives:
-
-_tensor_needs_wait
-_outstanding_wait_count
-_wait_all
-
-"""
-
-logger = logging.getLogger(__name__)
-
-_use_native_funcol: Optional[bool] = None
-
-
-data_ptr_to_work: Dict[int, "_WaitRegistration"] = dict()
-work_version = 0
-
-
-class _WaitRegistration:
- def __init__(self, work):
- global work_version
- self.work = work
- self.version = work_version
- self.ptrs = set()
- self.ptr_alias_count = {}
- self.cleanup_count = 0
- work_version += 1
-
- def _register_tensor_ptr(self, data_ptr):
- global data_ptr_to_work
- data_ptr_to_work[data_ptr] = self
- self.ptrs.add(data_ptr)
-
- def _record_wrapper(self, ptr):
- self._register_tensor_ptr(ptr)
- self.ptr_alias_count.setdefault(ptr, 0)
- self.ptr_alias_count[ptr] += 1
- self.cleanup_count += 1
-
- def wait(self):
- if self.work is not None:
- self.work.wait()
- self.work = None
- self.cleanup()
-
- def decrement_live_tensor(self, ptr):
- self.cleanup_count -= 1
- if self.cleanup_count == 0:
- self.wait()
- else:
- self.ptr_alias_count[ptr] -= 1
- if (
- self.ptr_alias_count[ptr] < 1
- and data_ptr_to_work.get(ptr, None) == self
- ):
- del data_ptr_to_work[ptr]
-
- def cleanup(self):
- for ptr in self.ptrs:
- if data_ptr_to_work.get(ptr, None) == self:
- del data_ptr_to_work[ptr]
-
-
-def _register_tensor_work(tensor_or_list, work_or_list):
- if not isinstance(tensor_or_list, list):
- tensor_or_list = [tensor_or_list]
- if not isinstance(work_or_list, list):
- reg = _WaitRegistration(work_or_list)
- for tensor in tensor_or_list:
- reg._register_tensor_ptr(tensor.data_ptr())
- else:
- for tensor, work in zip(tensor_or_list, work_or_list):
- reg = _WaitRegistration(work)
- reg._register_tensor_ptr(tensor.data_ptr())
-
-
-def _wait_reg_dec(ptr, wait_reg):
- wait_reg.decrement_live_tensor(ptr)
-
-
-def _register_tensor_wrapper(tensor) -> None:
- global data_ptr_to_work
-
- # FIXME: This is almost definitely a bug.
- if isinstance(
- tensor.elem,
- (
- torch._subclasses.fake_tensor.FakeTensor,
- torch._subclasses.functional_tensor.FunctionalTensor,
- ),
- ):
- data_ptr = 0
- else:
- data_ptr = tensor.elem.data_ptr()
-
- # Note: we should NEVER try to trace this, bc it registers runtime stuff during trace.
- # Instead, backends must call this themselves when implementing traced collectives.
- wait_reg = data_ptr_to_work.get(data_ptr, None)
- if wait_reg is None:
- warnings.warn(
- "Trying to register finalizer to AsyncCollectiveTensor but the inner tensor is already gone"
- )
- else:
- # We force the collective to be waited in the case this tensor goes away to reduce the change of deadlocks.
- # NOTE: we register the callback to the ACT wrapper class, for the following reasons:
- # 1. The inner tensor is referenced by the associated Work object, so it's uncollective until we release the
- # associated work object
- # 2. There's a n-to-1 relationship between wrappers and inner tensor due to non-waitable ops like view()
- wait_reg._record_wrapper(data_ptr)
- weakref.finalize(tensor, _wait_reg_dec, data_ptr, wait_reg)
-
-
-def _wait_tensor(tensor: torch.Tensor) -> torch.Tensor:
- global data_ptr_to_work
- data_ptr = tensor.data_ptr()
- wait_reg = data_ptr_to_work.get(data_ptr)
- if wait_reg is not None:
- wait_reg.wait()
- return tensor
-
-
-def _tensor_needs_wait(tensor: torch.Tensor) -> bool:
- """Returns true if ```tensor``` needs to be waited. Works with ACS and inner tensors."""
- if hasattr(tensor, "_get_acs_underlying_tensor"):
- tensor = tensor._get_acs_underlying_tensor()
- data_ptr = tensor.data_ptr()
- wait_reg = data_ptr_to_work.get(data_ptr)
- return wait_reg is not None and wait_reg.work is not None
-
-
-def _outstanding_wait_count() -> int:
- """Returns the number of outstanding work objects waiting to be waited (sic)."""
- return len(data_ptr_to_work)
-
-
-def _wait_all() -> None:
- """Wait for all outstanding collectives."""
- for work_reg in list(data_ptr_to_work.values()):
- work_reg.wait()
-
-
-def _str_to_reduce_op(reduceOp: str) -> dist.ReduceOp:
- reduceOp = reduceOp.upper()
- op = dist.ReduceOp.RedOpType.__members__.get(reduceOp)
- if op is None:
- raise ValueError(f"Invalid reduce operation {reduceOp}")
- return cast(dist.ReduceOp, op)
-
-
-"""
-Kernel implementations (for eager runtime only) - should never be traced by torch.compile
-
-These functions should all be bound to dispatcher ops. During tracing, the op itself should be
-captured in the graph and the backend should implement the op however it prefers.
+This file contains the op impls for the legacy (c10d_functional) functional collectives.
+These impls simply call into the native (_c10d_functional) functional collectives.
"""
-def _broadcast(self, src, tag, ranks, group_size):
- group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
- assert group is not None
-
- inplace_tensor = self.clone(memory_format=torch.contiguous_format)
- work = dist.broadcast(inplace_tensor, src, group=group, async_op=True)
- _register_tensor_work(inplace_tensor, work)
-
- return inplace_tensor
-
-
-# TODO assert if ranks has duplicated entries
-def _all_reduce(self, reduceOp, tag, ranks, group_size):
- op = _str_to_reduce_op(reduceOp)
- group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
- assert group is not None
-
- inplace_tensor = self.clone(memory_format=torch.contiguous_format)
- work = dist.all_reduce(inplace_tensor, op=op, group=group, async_op=True)
- _register_tensor_work(inplace_tensor, work)
-
- return inplace_tensor
-
-
-def _all_reduce_coalesced(self, reduceOp, tag, ranks, group_size):
- op = _str_to_reduce_op(reduceOp)
- group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
- assert group is not None
-
- inplace_tensor_list = [t.clone(memory_format=torch.contiguous_format) for t in self]
- work = dist.all_reduce_coalesced(
- inplace_tensor_list, op=op, group=group, async_op=True
+def _broadcast(input, src, tag, ranks, group_size):
+ group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
+ return torch.ops._c10d_functional.broadcast(
+ input,
+ src,
+ group_name,
)
- _register_tensor_work(inplace_tensor_list, work)
- return inplace_tensor_list
+def _all_reduce(input, reduce_op, tag, ranks, group_size):
+ group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
+ return torch.ops._c10d_functional.all_reduce(
+ input,
+ reduce_op,
+ group_name,
+ )
-def _all_gather_into_tensor(shard, tag, ranks, group_size):
- # TODO add dim support?
- group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
- assert group is not None
- out_size = list(shard.size())
- out_size[0] *= group_size
- out_tensor = shard.new_empty(out_size)
- assert out_tensor.is_contiguous()
- # FIXME gloo doesn't support _allgather_base
- if dist.get_backend(group) == dist.Backend.GLOO or shard.is_cpu:
- tensor_list = list(torch.chunk(out_tensor, group_size))
- work = dist.all_gather(tensor_list, shard, group=group, async_op=True)
- else:
- work = dist.all_gather_into_tensor(
- out_tensor, shard, group=group, async_op=True
- )
- _register_tensor_work(out_tensor, work)
-
- return out_tensor
+def _all_reduce_coalesced(inputs, reduce_op, tag, ranks, group_size):
+ group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
+ return torch.ops._c10d_functional.all_reduce_coalesced(
+ inputs,
+ reduce_op,
+ group_name,
+ )
-def _all_gather_into_tensor_coalesced(self, tag, rankset, group_size):
- group = c10d._find_or_create_pg_by_ranks_and_tag(tag, rankset, group_size)
- assert group is not None
- def mk_out_tensor(shard):
- out_size = list(shard.size())
- out_size[0] *= group_size
- out_tensor = shard.new_empty(out_size)
- assert out_tensor.is_contiguous()
- return out_tensor
+def _all_gather_into_tensor(input, tag, ranks, group_size):
+ group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
+ return torch.ops._c10d_functional.all_gather_into_tensor(
+ input,
+ group_size,
+ group_name,
+ )
- out_tensors = [mk_out_tensor(t) for t in self]
- work_list = _all_gather_into_tensor_coalesced_fallback(
- output_tensors=out_tensors, input_tensors=self, group=group, async_op=True
+def _all_gather_into_tensor_coalesced(input, tag, ranks, group_size):
+ group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
+ return torch.ops._c10d_functional.all_gather_into_tensor_coalesced(
+ input,
+ group_size,
+ group_name,
)
- _register_tensor_work(out_tensors, work_list)
- return out_tensors
-
def _reduce_scatter_tensor(
input: torch.Tensor,
- reduceOp: str,
+ reduce_op: str,
tag: str,
ranks: List[int],
group_size: int,
):
- # TODO add dim support?
- group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
- assert group is not None
- op = _str_to_reduce_op(reduceOp)
-
- if dist.get_backend(group) == dist.Backend.GLOO or input.is_cpu:
- # cpu::gloo backend does not have reduce_scatter we fallback to do all_reduce
- # + local chunk
- logger.warning(
- "ProcessGroupGloo does not support reduce_scatter, falling back with all reduce!"
- )
- reduction_input = input.clone()
- group_rank = dist.get_rank(group)
- work = dist.all_reduce(reduction_input, op=op, group=group, async_op=True)
- out_tensor = reduction_input.chunk(group_size, dim=0)[group_rank]
- _register_tensor_work(out_tensor, work)
- else:
- out_size = list(input.size())
- out_size[0] //= group_size
- out_tensor = input.new_empty(out_size)
- work = dist.reduce_scatter_tensor(
- out_tensor, input, op=op, group=group, async_op=True
- )
- _register_tensor_work(out_tensor, work)
-
- return out_tensor
+ group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
+ return torch.ops._c10d_functional.reduce_scatter_tensor(
+ input,
+ reduce_op,
+ group_size,
+ group_name,
+ )
def _reduce_scatter_tensor_coalesced(
@@ -293,68 +77,14 @@ def _reduce_scatter_tensor_coalesced(
ranks: List[int],
group_size: int,
):
- group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
- assert group is not None
- op = _str_to_reduce_op(reduce_op)
-
- def mk_out_tensor(shard):
- out_size = list(shard.size())
- out_size[0] //= group_size
- out_tensor = shard.new_empty(out_size)
- assert out_tensor.is_contiguous()
- return out_tensor
-
- out_tensors = [mk_out_tensor(t) for t in inputs]
-
- work_list = _reduce_scatter_tensor_coalesced_fallback(
- output_tensors=out_tensors,
- input_tensors=inputs,
- op=op,
- group=group,
- async_op=False,
+ group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
+ return torch.ops._c10d_functional.reduce_scatter_tensor_coalesced(
+ inputs,
+ reduce_op,
+ group_size,
+ group_name,
)
- _register_tensor_work(out_tensors, work_list)
- return out_tensors
-
-
-def _all_gather_into_tensor_coalesced_fallback(
- output_tensors, input_tensors, group, async_op=False
-):
- # all_gather_coalesced is useless, it doesn't work under NCCL and does lots of copies under Gloo
- # all_gather is useless too because it's single tensor
- # NCCL's PG::all_gather with multiple tensors is broken, it only works for the multi-device setting
- # and fails if you mix same-size with different-size tensor lists.
- # _coalescing_manager crashed NCCL when used with all_gather_into_tensor.
- if input_tensors[0].is_cpu or not async_op:
- work_list = []
- out_tensors_sliced = [
- list(torch.chunk(out_tensor, dist.get_world_size(group)))
- for out_tensor in output_tensors
- ]
- for shard, out_tensor in zip(input_tensors, out_tensors_sliced):
- work = c10d.all_gather(out_tensor, shard, group=group, async_op=async_op)
- work_list.append(work)
- return work_list
- else:
- with c10d._coalescing_manager(group=group, async_ops=True) as cm:
- for in_t, out_t in zip(input_tensors, output_tensors):
- dist.all_gather_into_tensor(out_t, in_t, group=group, async_op=True)
- return cm
-
-
-def _reduce_scatter_tensor_coalesced_fallback(
- output_tensors, input_tensors, op, group, async_op=False
-):
- # All the same reasons as the all_gather fallback
- work_list = []
- for shard, out_tensor in zip(input_tensors, output_tensors):
- work = c10d.reduce_scatter_tensor(
- out_tensor, shard, op=op, group=group, async_op=async_op
- )
- work_list.append(work)
- return work_list
-
def _all_to_all_single(
input: torch.Tensor,
@@ -364,27 +94,22 @@ def _all_to_all_single(
ranks: List[int],
group_size: int,
):
- group = c10d._find_or_create_pg_by_ranks_and_tag(tag, ranks, group_size)
-
- if output_split_sizes is not None:
- torch._check(
- input.dim() >= 1,
- lambda: f"Expected input to have at least 1 dim but got {input.dim()} dim",
+ if output_split_sizes is None or input_split_sizes is None:
+ assert output_split_sizes is None and input_split_sizes is None, (
+ "output_split_sizes and input_split_sizes must either be "
+ "specified together or both set to None"
)
- out_size = list(input.size())
- out_size[0] = sum(output_split_sizes)
- out_tensor = input.new_empty(out_size)
- else:
- out_tensor = input.new_empty(input.size())
+ output_split_sizes = [input.shape[0] // group_size] * group_size
+ input_split_sizes = output_split_sizes
- work = c10d.all_to_all_single(
- out_tensor,
+ group_name = c10d._resolve_group_name_by_ranks_and_tag(ranks, tag)
+ return torch.ops._c10d_functional.all_to_all_single(
input,
- output_split_sizes=output_split_sizes,
- input_split_sizes=input_split_sizes,
- group=group,
- async_op=True,
+ output_split_sizes,
+ input_split_sizes,
+ group_name,
)
- _register_tensor_work(out_tensor, work)
- return out_tensor
+
+def _wait_tensor(tensor: torch.Tensor) -> torch.Tensor:
+ return torch.ops._c10d_functional.wait_tensor(tensor)
diff --git a/torch/distributed/_spmd/api.py b/torch/distributed/_spmd/api.py
index bf0ebd58d1..000b28bc25 100644
--- a/torch/distributed/_spmd/api.py
+++ b/torch/distributed/_spmd/api.py
@@ -294,8 +294,8 @@ SPMD_DECOMP_TABLE = {
DEDUP_TARGETS: Set[torch._ops.OpOverload] = {
- torch.ops.c10d_functional.all_reduce.default,
- torch.ops.c10d_functional.wait_tensor.default,
+ torch.ops._c10d_functional.all_reduce.default,
+ torch.ops._c10d_functional.wait_tensor.default,
}
|
2.41.0
|
1e937f3d6b904d6706594c1b3cfd7d0e56f9663
|
Sat, 27 Apr 2024 05:10:45 +0000
|
[PATCH 0760/1000] Add registration API for torch.compile-eager (#121387)
|
This PR is a follow-up of RFC https://github.com/pytorch/pytorch/issues/115545. In this PR, we intend to provide a registration API dedicated to eager-through-torch.compile. The major workflow of this API is as follows:
- Load cache
- Check cache according to the input tensors
- Cache hit: run the cached kernel directly
- Cache miss: run AOTI to produce the kernel and run the produced kernel; if AOTI fails to produce the kernel, invoke the Python fallback function

Currently, this PR always falls back to the Python kernel; the cache mechanism will be implemented in another PR - https://github.com/pytorch/pytorch/pull/116368
Pull Request resolved: https://github.com/pytorch/pytorch/pull/121387 Approved by: https://github.com/desertfire, https://github.com/jansel, https://github.com/zou3519, https://github.com/jgong5
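As a rough usage sketch (mirroring the test and the `torch/library.py` docstring added below; the choice of `aten::abs` and the input shape are arbitrary, and actual compilation requires a build that ships the new AOTI kernel holder):
```python
import torch
from torch.library import _scoped_library

x = torch.randn(3, 4)  # illustrative input

# Route the CPU kernel of an existing aten op through the new AOTI-eager
# holder for the duration of the scope. On a cache miss the holder asks AOT
# Inductor to produce a kernel library (falling back to the Python kernel if
# compilation fails) and then runs the produced kernel.
with _scoped_library("aten", "IMPL") as lib:
    lib._impl_with_aoti_compile("aten::abs", "CPU")
    y = torch.abs(x)

print(torch.allclose(y, x.abs()))  # matches the regular eager kernel
```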
|
diff --git a/build_variables.bzl b/build_variables.bzl
index 5939da825c..f28131023c 100644
--- a/build_variables.bzl
+++ b/build_variables.bzl
@@ -824,6 +824,7 @@ libtorch_python_core_sources = [
"torch/csrc/mps/Module.cpp",
"torch/csrc/mtia/Module.cpp",
"torch/csrc/inductor/aoti_runner/pybind.cpp",
+ "torch/csrc/inductor/aoti_eager/kernel_holder.cpp",
"torch/csrc/jit/backends/backend_init.cpp",
"torch/csrc/jit/python/init.cpp",
"torch/csrc/jit/passes/onnx.cpp",
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 8928fad6bd..dd28449093 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -46,6 +46,7 @@ from torch._inductor.utils import (
from torch._inductor.virtualized import V
from torch._prims_common import is_integer_dtype
from torch.fx.experimental.proxy_tensor import make_fx
+from torch.library import _scoped_library
from torch.nn import functional as F
from torch.testing import FileCheck, make_tensor
from torch.testing._internal.common_cuda import (
@@ -759,6 +760,70 @@ class CommonTemplate:
),
)
+ @skipCUDAIf(not SM80OrLater, "Requires sm80")
+ def test_torch_compile_override_registration(self):
+ dynamic = False
+ namespace_name = "aten"
+ dispatch_key = "CPU"
+ device = torch.device("cpu")
+ if self.device.lower() == "cuda":
+ dispatch_key = "CUDA"
+ device = torch.device("cuda")
+
+ unary_op_set = ["abs", "acos"]
+
+ def fn(x, op_name=""):
+ return getattr(torch, op_name)(x)
+
+ # Invoke torch.compile directly to get referent results
+ x = torch.randn(3, 4, device=device)
+
+ ref_array = []
+ for unary_op_name in unary_op_set:
+ opt_fn = torch.compile(functools.partial(fn, op_name=unary_op_name))
+ ref = opt_fn(x)
+ ref_array.append(ref)
+
+ def register_ops(op_set, dispatch_key, torch_compile_op_lib_impl):
+ for _op_name in op_set:
+ qualified_op_name = f"{namespace_name}::{_op_name}"
+ _, overload_names = torch._C._jit_get_operation(qualified_op_name)
+ for overload_name in overload_names:
+ try:
+ reg_op_name = qualified_op_name
+ schema = torch._C._get_schema(qualified_op_name, overload_name)
+ if schema.overload_name:
+ reg_op_name = f"{qualified_op_name}.{schema.overload_name}"
+ torch_compile_op_lib_impl._impl_with_aoti_compile( # noqa: F821
+ reg_op_name, dispatch_key
+ )
+ except Exception as e:
+ continue
+
+ with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
+ register_ops(unary_op_set, dispatch_key, torch_compile_op_lib_impl)
+
+ res_array = []
+ for unary_op_name in unary_op_set:
+ res_array.append(getattr(torch, unary_op_name)(x))
+
+ for ref, res in zip(ref_array, res_array):
+ self.assertEqual(ref, res)
+
+ a = torch.randn(128, device=device)
+ min_tensor = torch.randn(128, device=device)
+ max_tensor = min_tensor + 0.5
+
+ ref_with_min = torch.ops.aten.clamp(a, min_tensor)
+ ref_with_min_max = torch.ops.aten.clamp(a, min_tensor, max_tensor)
+
+ with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
+ register_ops(["clamp"], dispatch_key, torch_compile_op_lib_impl)
+ res_with_min = torch.ops.aten.clamp(a, min_tensor)
+ res_with_min_max = torch.ops.aten.clamp(a, min_tensor, max_tensor)
+ self.assertEqual(ref_with_min, res_with_min)
+ self.assertEqual(ref_with_min_max, res_with_min_max)
+
def test_add_const_int(self):
def fn(a):
return (a + 1, torch.add(a, 1, alpha=2))
diff --git a/torch/csrc/inductor/aoti_eager/kernel_holder.cpp b/torch/csrc/inductor/aoti_eager/kernel_holder.cpp
new file mode 100644
index 0000000000..55c0d71c55
--- /dev/null
+++ b/torch/csrc/inductor/aoti_eager/kernel_holder.cpp
@@ -0,0 +1,246 @@
+#if !defined(C10_MOBILE) && !defined(ANDROID)
+#include <torch/csrc/inductor/aoti_eager/kernel_holder.h>
+
+#include <ATen/ATen.h>
+
+#include <ATen/core/dispatch/Dispatcher.h>
+#include <torch/csrc/PyInterpreter.h>
+#include <torch/csrc/autograd/python_variable.h>
+#include <torch/csrc/inductor/aoti_runner/model_container_runner_cpu.h>
+#ifdef USE_CUDA
+#include <torch/csrc/inductor/aoti_runner/model_container_runner_cuda.h>
+#endif
+#include <torch/csrc/jit/frontend/function_schema_parser.h>
+
+namespace torch::inductor {
+
+namespace {
+
+inline void unpack_tensor_ivalue(
+ const c10::IValue& ivalue,
+ const c10::Device& device,
+ std::vector<at::Tensor>& inputs) {
+ inputs.push_back(ivalue.toTensor());
+}
+
+inline void unpack_optional_tensor_ivalue(
+ const c10::IValue& ivalue,
+ const c10::Device& device,
+ std::vector<at::Tensor>& inputs) {
+ auto ivalue_opt_tensor = ivalue.toOptional<at::Tensor>();
+ if (ivalue_opt_tensor.has_value()) {
+ inputs.push_back(ivalue_opt_tensor.value());
+ }
+}
+
+inline void unpack_tensor_list_ivalue(
+ const c10::IValue& ivalue,
+ const c10::Device& device,
+ std::vector<at::Tensor>& inputs) {
+ for (const auto& item : ivalue.toListRef()) {
+ inputs.push_back(item.toTensor());
+ }
+}
+
+inline void unpack_optional_tensor_list_ivalue(
+ const c10::IValue& ivalue,
+ const c10::Device& device,
+ std::vector<at::Tensor>& inputs) {
+ for (const auto& item : ivalue.toListRef()) {
+ unpack_optional_tensor_ivalue(item, device, inputs);
+ }
+}
+
+inline void unpack_scalar_ivalue(
+ const c10::IValue& ivalue,
+ const c10::Device& device,
+ std::vector<at::Tensor>& inputs) {
+ inputs.push_back(at::scalar_tensor(
+ ivalue.toScalar(),
+ c10::TensorOptions().device(device).dtype(ivalue.toScalar().type())));
+}
+
+bool unpack_ivalue(
+ const c10::Argument& argument,
+ const c10::IValue& ivalue,
+ const c10::Device& device,
+ std::vector<at::Tensor>& inputs) {
+ if (ivalue.isTensor()) {
+ unpack_tensor_ivalue(ivalue, device, inputs);
+ } else if (ivalue.isTensorList()) {
+ unpack_tensor_list_ivalue(ivalue, device, inputs);
+ } else if (ivalue.isOptionalTensorList()) {
+ unpack_optional_tensor_list_ivalue(ivalue, device, inputs);
+ } else if (ivalue.isScalar()) {
+ // ivalue is scalar
+ unpack_scalar_ivalue(ivalue, device, inputs);
+ } else if (
+ *argument.real_type() == *c10::getTypePtr<c10::optional<at::Tensor>>()) {
+ // ivalue is c10::optional<at::Tensor>
+ unpack_optional_tensor_ivalue(ivalue, device, inputs);
+ } else {
+ // Unsupported IValue type.
+ return false;
+ }
+
+ return true;
+}
+
+bool unpack_tensors(
+ const std::vector<c10::Argument>& arguments,
+ const torch::jit::Stack& stack,
+ const c10::Device& device,
+ std::vector<at::Tensor>& inputs) {
+ for (size_t idx = 0; idx < stack.size(); idx++) {
+ if (!unpack_ivalue(arguments[idx], stack[idx], device, inputs)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace
+
+AOTIPythonKernelHolder::AOTIPythonKernelHolder(
+ c10::DispatchKey dispatch_key,
+ c10::string_view ns,
+ c10::string_view op_name_with_overload)
+ : dispatch_key_(dispatch_key),
+ ns_(std::string(ns)),
+ op_name_with_overload_(std::string(op_name_with_overload)),
+ device_(c10::dispatchKeyToDeviceType(dispatch_key_), 0),
+ pyinterpreter_(getPyInterpreter()) {
+ TORCH_CHECK(
+ (device_.type() == c10::DeviceType::CPU) ||
+ (device_.type() == c10::DeviceType::CUDA),
+ "Unsupported device type");
+}
+
+void AOTIPythonKernelHolder::operator()(
+ const c10::OperatorHandle& op,
+ c10::DispatchKeySet keyset,
+ torch::jit::Stack* stack) {
+ if (cache_lookup(op, keyset, stack)) {
+ cache_hit(op, keyset, stack);
+ } else {
+ cache_miss(op, keyset, stack);
+ }
+}
+
+bool AOTIPythonKernelHolder::cache_lookup(
+ const c10::OperatorHandle& op,
+ c10::DispatchKeySet keyset,
+ torch::jit::Stack* stack) {
+ // TODO: Always return false now to implement cache_miss. Later, we will add
+ // cache lookup and implement cache hit.
+ return false;
+}
+
+void AOTIPythonKernelHolder::cache_hit(
+ const c10::OperatorHandle& op,
+ c10::DispatchKeySet keyset,
+ torch::jit::Stack* stack) {
+ TORCH_INTERNAL_ASSERT(false);
+}
+
+void AOTIPythonKernelHolder::cache_miss(
+ const c10::OperatorHandle& op,
+ c10::DispatchKeySet keyset,
+ torch::jit::Stack* stack) {
+ auto kernel_lib_path = produce_aoti_kernel_lib(op, keyset, stack);
+ std::shared_ptr<AOTIModelContainerRunner> kernel = nullptr;
+ // TODO: To enable the plugin mechanism to allow registration for other
+ // backends
+ if (device_.type() == c10::DeviceType::CPU) {
+ kernel = std::make_shared<AOTIModelContainerRunnerCpu>(kernel_lib_path);
+ } else {
+#ifdef USE_CUDA
+ kernel = std::make_shared<AOTIModelContainerRunnerCuda>(kernel_lib_path);
+#else
+ TORCH_CHECK(false, "Unsupported CUDA device type");
+#endif
+ }
+
+ std::vector<at::Tensor> inputs;
+ TORCH_INTERNAL_ASSERT(
+ unpack_tensors(op.schema().arguments(), *stack, device_, inputs),
+ "Failed to unpack tensors for the stack to run the AOTI kernel.");
+ auto outputs = kernel->run(inputs);
+ if (outputs.size() > 0) {
+ torch::jit::drop(*stack, op.schema().arguments().size());
+ // TODO: Get the output type of this operation and then convert to the
+ // output type.
+ for (auto& output : outputs) {
+ torch::jit::push(*stack, std::move(output));
+ }
+ }
+}
+
+std::string AOTIPythonKernelHolder::produce_aoti_kernel_lib(
+ const c10::OperatorHandle& op,
+ c10::DispatchKeySet keyset,
+ torch::jit::Stack* stack) {
+ auto arguments = torch::jit::last(*stack, op.schema().arguments().size());
+
+ py::gil_scoped_acquire gil;
+
+ // Get the corresponding python operation for the current operator and the
+ // python operation will pass to the AOT Inductor to generate the kernel
+ // library.
+ const auto& schema = op.schema();
+ const auto& qualified_name = op.operator_name().name;
+ const auto& overload_name =
+ schema.overload_name().empty() ? "default" : schema.overload_name();
+ auto pos = qualified_name.find("::");
+ TORCH_INTERNAL_ASSERT(pos != std::string::npos, qualified_name);
+ // Make me some null terminated strings
+ std::string ns_str = qualified_name.substr(0, pos);
+ const char* ns = ns_str.c_str();
+ const char* func_name = qualified_name.c_str() + pos + strlen("::");
+ py::handle op_py_func = op.getPythonOp(pyinterpreter_, [&]() -> PyObject* {
+ py::handle torch_api_function =
+ py::module::import("torch").attr("ops").attr(ns).attr(func_name);
+ return torch_api_function.attr(overload_name.c_str()).ptr();
+ });
+
+ TORCH_INTERNAL_ASSERT(
+ op_py_func.ptr() != nullptr && op_py_func.ptr() != Py_None,
+ "Failed to get python operation. Operator Name is ",
+ op.operator_name().name,
+ ", Overload Name is ",
+ overload_name);
+
+ py::handle aot_compile_function =
+ py::module::import("torch._export").attr("aot_compile");
+ TORCH_INTERNAL_ASSERT(
+ aot_compile_function.ptr() != nullptr &&
+ aot_compile_function.ptr() != Py_None,
+ "Failed to import - torch._export.aot_compile");
+
+ // Pass the python operation to the AOT Inductor to generate the kernel
+ // library.
+ auto args_kwargs = parseIValuesToPyArgsKwargs(op, arguments.vec());
+ auto result = py::reinterpret_steal<py::object>(PyObject_CallFunctionObjArgs(
+ aot_compile_function.ptr(),
+ op_py_func.ptr(),
+ args_kwargs.first.ptr(),
+ args_kwargs.second.ptr(),
+ nullptr));
+ TORCH_INTERNAL_ASSERT(result.ptr() != nullptr && result.ptr() != Py_None);
+
+ auto kernel_lib_path = py::cast<std::string>(result);
+ TORCH_CHECK(
+ !kernel_lib_path.empty(),
+ "Failed to produce kernel libarary by using AOTI for ",
+ c10::DeviceTypeName(device_.type()),
+ ". Operator Name is ",
+ op.operator_name().name,
+ ", Overload Name is ",
+ op.schema().overload_name());
+
+ return kernel_lib_path;
+}
+
+} // namespace torch::inductor
+#endif
diff --git a/torch/csrc/inductor/aoti_eager/kernel_holder.h b/torch/csrc/inductor/aoti_eager/kernel_holder.h
new file mode 100644
index 0000000000..f7a886eb26
--- /dev/null
+++ b/torch/csrc/inductor/aoti_eager/kernel_holder.h
@@ -0,0 +1,65 @@
+#if !defined(C10_MOBILE) && !defined(ANDROID)
+#pragma once
+
+#include <ATen/ATen.h>
+#include <ATen/core/boxing/KernelFunction.h>
+
+#include <torch/csrc/inductor/aoti_runner/model_container_runner.h>
+#include <torch/csrc/utils/pybind.h>
+
+#include <string>
+
+namespace torch::inductor {
+
+// The AOTIPythonKernelHolder class uses the AOT Inductor to generate a kernel
+// for a specified operation. To speed up this process, the generated kernel
+// library is cached on disk. Detailed information from the input tensors is
+// used as the key for caching the kernel library. On subsequent runs, these
+// input tensors are used to search the cache. If a cache hit occurs, the cached
+// kernel library is loaded and executed. If a cache miss occurs, the AOT
+// Inductor is called again to generate the kernel library.
+class AOTIPythonKernelHolder : public c10::OperatorKernel {
+ // A DispatchKey object that represents the dispatch key for the kernel.
+ c10::DispatchKey dispatch_key_;
+ // Namespace of the kernel.
+ std::string ns_;
+ // Name of the operation the kernel performs.
+ std::string op_name_with_overload_;
+ // The device on which the kernel is to be executed.
+ c10::Device device_;
+ // The Python interpreter to get OpOverload object with the given op_name and
+ // op_overload_name.
+ c10::impl::PyInterpreter* pyinterpreter_;
+
+ public:
+ AOTIPythonKernelHolder(
+ c10::DispatchKey dispatch_key,
+ c10::string_view ns,
+ c10::string_view op_name_with_overload);
+
+ void operator()(
+ const c10::OperatorHandle& op,
+ c10::DispatchKeySet keyset,
+ torch::jit::Stack* stack);
+
+ private:
+ bool cache_lookup(
+ const c10::OperatorHandle& op,
+ c10::DispatchKeySet keyset,
+ torch::jit::Stack* stack);
+ void cache_miss(
+ const c10::OperatorHandle& op,
+ c10::DispatchKeySet keyset,
+ torch::jit::Stack* stack);
+ void cache_hit(
+ const c10::OperatorHandle& op,
+ c10::DispatchKeySet keyset,
+ torch::jit::Stack* stack);
+ std::string produce_aoti_kernel_lib(
+ const c10::OperatorHandle& op,
+ c10::DispatchKeySet keyset,
+ torch::jit::Stack* stack);
+};
+
+} // namespace torch::inductor
+#endif
diff --git a/torch/csrc/utils/python_dispatch.cpp b/torch/csrc/utils/python_dispatch.cpp
index 2d115a8228..a3e71a2542 100644
--- a/torch/csrc/utils/python_dispatch.cpp
+++ b/torch/csrc/utils/python_dispatch.cpp
@@ -21,6 +21,7 @@
#include <c10/util/flat_hash_map.h>
#include <pybind11/operators.h>
#include <pybind11/stl.h>
+#include <torch/csrc/inductor/aoti_eager/kernel_holder.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/utils/python_raii.h>
@@ -372,6 +373,32 @@ void initDispatchBindings(PyObject* module) {
py::arg("name"),
py::arg("dispatch") = "",
py::arg("debug") = "impl_t_t")
+ .def(
+ "impl_with_aoti_compile",
+ [](const py::object& self,
+ const char* ns,
+ const char* op_name_with_overload,
+ c10::DispatchKey dispatch) {
+ HANDLE_TH_ERRORS
+ std::string reg_op_name =
+ std::string(ns).append("::").append(op_name_with_overload);
+
+ auto& lib = self.cast<torch::Library&>();
+ lib.impl(
+ reg_op_name.c_str(),
+ torch::dispatch(
+ dispatch,
+ CppFunction::makeFromBoxedFunctor(
+ std::make_unique<
+ torch::inductor::AOTIPythonKernelHolder>(
+ dispatch, ns, op_name_with_overload))),
+ register_or_verify());
+ END_HANDLE_TH_ERRORS_PYBIND
+ },
+ "",
+ py::arg("ns"),
+ py::arg("op_name_with_overload"),
+ py::arg("dispatch"))
.def(
"impl",
[](const py::object& self,
diff --git a/torch/library.py b/torch/library.py
index 6bd4bd8110..1d6886aeb6 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -139,6 +139,48 @@ class Library:
handle = entry.abstract_impl.register(func_to_register, source)
self._registration_handles.append(handle)
+ def _impl_with_aoti_compile(self, op_name, dispatch_key=''):
+ r'''Register the operator to use the AOTI-compiled implementation.
+
+ Args:
+ op_name: operator name (along with the overload) or OpOverload object.
+ dispatch_key: dispatch key that the input function should be registered for. By default, it uses
+ the dispatch key that the library was created with.
+
+ Example::
+ >>> my_lib = Library("aten", "IMPL")
+ >>> my_lib._impl_with_aoti_compile("div.Tensor", "CPU")
+ '''
+ if dispatch_key == '':
+ dispatch_key = self.dispatch_key
+ assert torch.DispatchKeySet(dispatch_key).has(torch._C.DispatchKey.Dense)
+
+ if isinstance(op_name, str):
+ name = op_name
+ elif isinstance(op_name, OpOverload):
+ name = op_name._schema.name
+ overload_name = op_name._schema.overload_name
+ if overload_name != '':
+ name = name + '.' + overload_name
+ else:
+ raise RuntimeError("_impl_with_aoti_compile should be passed either a name or an OpOverload object "
+ "as the first argument")
+
+ key = self.ns + "/" + name.split("::")[-1] + "/" + dispatch_key
+ if key in _impls:
+ # TODO: in future, add more info about where the existing function is registered (this info is
+ # today already returned by the C++ warning when _impl_with_aoti_compile is called but we error out before that)
+ raise RuntimeError("This is not allowed since there's already a kernel registered from python overriding {}"
+ "'s behavior for {} dispatch key and {} namespace.".
+ format(name.split("::")[-1], dispatch_key, self.ns))
+
+ assert self.m is not None
+ impl_fn: Callable = self.m.impl_with_aoti_compile
+ impl_fn(self.ns, name.split("::")[-1], dispatch_key)
+
+ _impls.add(key)
+ self._op_impls.add(key)
+
def impl(self, op_name, fn, dispatch_key='', *, with_keyset=False):
r'''Registers the function implementation for an operator defined in the library.
|
2.41.0
|
bf53b128ca1888d3b7b91551d492b685839eda1
|
Sat, 27 Apr 2024 15:53:14 +0000
|
[PATCH 0762/1000] [codemod] Remove unused variables in caffe2/aten/src/ATen/test/scalar_test.cpp (#125041)
|
Summary: LLVM-15 has a warning `-Wunused-but-set-variable` which we treat as an error because it's so often diagnostic of a code issue. Unused variables can compromise readability or, worse, performance. This diff either (a) removes an unused variable and, possibly, its associated code, or (b) qualifies the variable with `[[maybe_unused]]`, mostly in cases where the variable _is_ used, but, e.g., in an `assert` statement that isn't present in production code. - If you approve of this diff, please use the "Accept & Ship" button :-) Test Plan: Sandcastle Reviewed By: palmje Differential Revision: D56587751 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125041 Approved by: https://github.com/Skylion007
|
diff --git a/aten/src/ATen/test/scalar_test.cpp b/aten/src/ATen/test/scalar_test.cpp
index b6762e1739..c10e8386d6 100644
--- a/aten/src/ATen/test/scalar_test.cpp
+++ b/aten/src/ATen/test/scalar_test.cpp
@@ -28,8 +28,6 @@
using std::cout;
using namespace at;
-constexpr auto Float = ScalarType::Float;
-
template<typename scalar_type>
struct Foo {
static void apply(Tensor a, Tensor b) {
diff --git a/caffe2/utils/math_test.cc b/caffe2/utils/math_test.cc
index d29860bd3a..0389a10f29 100644
--- a/caffe2/utils/math_test.cc
+++ b/caffe2/utils/math_test.cc
@@ -166,8 +166,6 @@ TEST(MathTest, GemmNoTransTrans) {
namespace {
-constexpr float kEps = 1e-5;
-
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
class GemmBatchedTest
: public testing::TestWithParam<testing::tuple<bool, bool>> {
|
2.41.0
|
246f4286403abeac9ff518c5974041680ae2541
|
Sat, 27 Apr 2024 16:40:47 +0000
|
[PATCH 0763/1000] Export torch.newaxis=None for Python Array API/Numpy consistency (#125026)
|
Fixes #65307. For consistency with the Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and NumPy (https://numpy.org/devdocs/reference/constants.html), I added `torch.newaxis = None`. Note that this consistency is also mentioned directly in `__init__.py`, right above the added export. `torch.newaxis` is also mentioned in #110636. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125026 Approved by: https://github.com/lezcano
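A quick usage sketch (shapes are arbitrary), showing that the new constant is just `None` and therefore inserts a length-1 dimension when used in indexing, exactly like `numpy.newaxis`:
```python
import torch

x = torch.arange(6).reshape(2, 3)

# torch.newaxis is simply None, so indexing with it adds a new axis,
# matching numpy.newaxis and the Array API constant.
assert torch.newaxis is None
a = x[:, torch.newaxis, :]  # shape (2, 1, 3)
b = x[torch.newaxis, ...]   # shape (1, 2, 3)
print(a.shape, b.shape)
```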
|
diff --git a/torch/__init__.py b/torch/__init__.py
index 846038e351..eee311f604 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -1267,7 +1267,8 @@ def _check_tensor_all(cond, message=None): # noqa: F811
# For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and
# NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
from math import e , nan , inf , pi
-__all__.extend(['e', 'pi', 'nan', 'inf'])
+newaxis: None = None
+__all__.extend(['e', 'pi', 'nan', 'inf', 'newaxis'])
################################################################################
# Define Storage and Tensor classes
|
2.41.0
|
0a5a0d29890c8bddffe15f3b9d569937c314186
|
Sat, 27 Apr 2024 18:01:15 +0000
|
[PATCH 0764/1000] OSS: Capture triton kernel in ET (#124775)
|
This diff captures Triton kernels in the execution trace. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124775 Approved by: https://github.com/briancoutinho, https://github.com/aaronenyeshi
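A minimal sketch of the intended usage, closely following the updated test below; the toy compiled function, tensor sizes, and schedule values are illustrative, and capturing Triton kernel nodes requires CUDA and Triton:
```python
import tempfile
import torch
from torch.profiler import ExecutionTraceObserver, profile, record_function, schedule

@torch.compile  # Inductor emits Triton kernels for this region on CUDA
def fn(a, b, c):
    x = torch.nn.functional.linear(a, b)
    return (x + c).cos()

a, b, c = (torch.randn(4, 4, device="cuda") for _ in range(3))
fn(a, b, c)  # compile once before profiling

fp = tempfile.NamedTemporaryFile("w+t", suffix="_et.json", delete=False)
fp.close()

with profile(
    activities=torch.profiler.supported_activities(),
    record_shapes=True,
    schedule=schedule(skip_first=3, wait=1, warmup=1, active=2, repeat=1),
    execution_trace_observer=ExecutionTraceObserver().register_callback(fp.name),
) as p:
    for idx in range(10):
        with record_function(f"## LOOP {idx} ##"):
            fn(a, b, c)
        p.step()

# fp.name now holds the execution trace, including triton_* kernel nodes
# carrying the new kernel_backend / kernel_file attributes.
```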
|
diff --git a/test/profiler/test_execution_trace.py b/test/profiler/test_execution_trace.py
index 3d31ee20a1..dd8299c920 100644
--- a/test/profiler/test_execution_trace.py
+++ b/test/profiler/test_execution_trace.py
@@ -21,6 +21,7 @@ from typing import Any, Dict, List
import torch
import torch.nn as nn
+from torch import _dynamo as torchdynamo
from torch.autograd import (
_record_function_with_args_enter,
_record_function_with_args_exit,
@@ -198,6 +199,7 @@ class TestExecutionTrace(TestCase):
expected_loop_events = 0
et = ExecutionTraceObserver().register_callback(fp.name)
+
et.start()
for idx in range(5):
expected_loop_events += 1
@@ -231,41 +233,48 @@ class TestExecutionTrace(TestCase):
)
@unittest.skipIf(not TEST_CUDA or not has_triton(), "need CUDA and triton to run")
def test_execution_trace_with_pt2(self):
- class ConvAndRelu(nn.Module):
- def __init__(self) -> None:
- super().__init__()
- self.linear = nn.Linear(4096, 4096)
- self.relu = nn.ReLU(inplace=True)
+ @torchdynamo.optimize("inductor")
+ def fn(a, b, c):
+ x = torch.nn.functional.linear(a, b)
+ x = x + c
+ return x.cos()
+
+ a, b, c = (torch.randn(4, 4, requires_grad=True).to("cuda") for _ in range(3))
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- x = self.linear(x)
- x = self.relu(x)
- return x
+ inputs = [a, b, c]
+ with torch._inductor.config.patch(compile_threads=1):
+ fn(*inputs)
# Create a temp file to save execution trace data.
fp = tempfile.NamedTemporaryFile("w+t", suffix="_et.json", delete=False)
fp.close()
- with torch._inductor.config.patch(compile_threads=1):
- test_module = torch.compile(ConvAndRelu())
-
- x = torch.rand(128, 4096)
- et = ExecutionTraceObserver().register_callback(fp.name)
- et.start()
- test_module.forward(x)
- et.stop()
+ with profile(
+ activities=torch.profiler.supported_activities(),
+ record_shapes=True,
+ schedule=torch.profiler.schedule(
+ skip_first=3, wait=1, warmup=1, active=2, repeat=1
+ ),
+ execution_trace_observer=(
+ ExecutionTraceObserver().register_callback(fp.name)
+ ),
+ ) as p:
+ for idx in range(10):
+ with record_function(f"## LOOP {idx} ##"):
+ fn(*inputs)
+ p.step()
- assert fp.name == et.get_output_file_path()
- et.unregister_callback()
nodes = self.get_execution_trace_root(fp.name)
-
- found_root_node = False
+ found_captured_triton_kernel_node = False
for n in nodes:
assert "name" in n
- if "[pytorch|profiler|execution_trace|process]" in n["name"]:
- found_root_node = True
-
- assert found_root_node
+ if "triton_" in n["name"]:
+ for attr in n["attrs"]:
+ if attr["name"] == "kernel_file" and attr["value"] != "":
+ found_captured_triton_kernel_node = True
+ assert len(n["inputs"]["values"]) > 0
+ assert len(n["outputs"]["values"]) == 0
+ assert found_captured_triton_kernel_node
def test_execution_trace_start_stop(self):
use_cuda = torch.profiler.ProfilerActivity.CUDA in supported_activities()
@@ -273,8 +282,7 @@ class TestExecutionTrace(TestCase):
fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()
expected_loop_events = 0
- et = ExecutionTraceObserver()
- et.register_callback(fp.name)
+ et = ExecutionTraceObserver().register_callback(fp.name)
for idx in range(10):
if idx == 3:
et.start()
@@ -314,8 +322,7 @@ class TestExecutionTrace(TestCase):
fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()
output_files.append(fp.name)
- et = ExecutionTraceObserver()
- et.register_callback(fp.name)
+ et = ExecutionTraceObserver().register_callback(fp.name)
et.start()
with record_function(f"## LOOP {idx} ##"):
self.payload(use_cuda=use_cuda)
@@ -340,8 +347,7 @@ class TestExecutionTrace(TestCase):
def test_execution_trace_no_capture(self):
fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()
- et = ExecutionTraceObserver()
- et.register_callback(fp.name)
+ et = ExecutionTraceObserver().register_callback(fp.name)
assert fp.name == et.get_output_file_path()
et.unregister_callback()
@@ -357,8 +363,7 @@ class TestExecutionTrace(TestCase):
fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False)
fp.close()
- et = ExecutionTraceObserver()
- observer = et.register_callback(fp.name)
+ observer = ExecutionTraceObserver().register_callback(fp.name)
def fn(nt):
return nt.sin().cos()
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index 02e29c431b..916ea8b930 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -805,7 +805,7 @@ class CachingAutotuner(KernelInterface):
args,
{
"kernel_file": self.filename,
- "kernel_type": "triton",
+ "kernel_backend": "triton",
"grid": grid_info,
"stream": stream,
},
diff --git a/torch/csrc/profiler/standalone/execution_trace_observer.cpp b/torch/csrc/profiler/standalone/execution_trace_observer.cpp
index 763f449c23..3ebd8618bd 100644
--- a/torch/csrc/profiler/standalone/execution_trace_observer.cpp
+++ b/torch/csrc/profiler/standalone/execution_trace_observer.cpp
@@ -236,6 +236,8 @@ const ExecutionTraceObserver::ID root_id{1};
struct FunctionCallContext : public ObserverContext {
std::string name;
+ std::string kernel_backend;
+ std::string kernel_file;
ExecutionTraceObserver::ID op_id{uninitialized_id};
ExecutionTraceObserver::ID parent_id{uninitialized_id};
ExecutionTraceObserver::ID fw_parent_id{uninitialized_id};
@@ -273,14 +275,16 @@ static void writeJsonNode(
const std::string& outputs = "[]",
const std::string& output_shapes = "[]",
const std::string& output_types = "[]",
- const std::string& operator_schema = "") {
+ const std::string& operator_schema = "",
+ const std::string& kernel_backend = "",
+ const std::string& kernel_file = "") {
out << fmt::format(
R"JSON(
{{
"id": {}, "name": "{}", "ctrl_deps": {},
"inputs": {{"values": {}, "shapes": {}, "types": {}}},
"outputs": {{"values": {}, "shapes": {}, "types": {}}},
- "attrs": [{{"name": "rf_id", "type": "uint64", "value": {}}}, {{"name": "fw_parent", "type": "uint64", "value": {}}}, {{"name": "seq_id", "type": "int64", "value": {}}}, {{"name": "scope", "type": "uint64", "value": {}}}, {{"name": "tid", "type": "uint64", "value": {}}}, {{"name": "fw_tid", "type": "uint64", "value": {}}}, {{"name": "op_schema", "type": "string", "value": "{}"}}]
+ "attrs": [{{"name": "rf_id", "type": "uint64", "value": {}}},{{"name": "fw_parent", "type": "uint64", "value": {}}},{{"name": "seq_id", "type": "int64", "value": {}}},{{"name": "scope", "type": "uint64", "value": {}}},{{"name": "tid", "type": "uint64", "value": {}}},{{"name": "fw_tid", "type": "uint64", "value": {}}},{{"name": "op_schema", "type": "string", "value": "{}"}},{{"name": "kernel_backend", "type": "string", "value": "{}"}},{{"name": "kernel_file", "type": "string", "value": "{}"}}]
}})JSON",
id,
name,
@@ -297,7 +301,9 @@ static void writeJsonNode(
scope,
tid,
fw_tid,
- operator_schema);
+ operator_schema,
+ kernel_backend,
+ kernel_file);
}
inline std::string timeString(const std::time_t timepoint) {
@@ -326,7 +332,7 @@ static bool initExecutionTraceStart(ExecutionTraceObserver& ob) {
ob.out << fmt::format(
R"JSON({{
- "schema": "1.0.3-chakra.0.0.4", "pid": {}, "time": "{}", "start_ts": {},
+ "schema": "1.0.4-chakra.0.0.4", "pid": {}, "time": "{}", "start_ts": {},
"nodes": [)JSON",
ob.pid,
ob.record_time,
@@ -442,6 +448,44 @@ inline void appendValueInfo(
shapes.push_back(getValueShape(val));
}
+inline void handleKernelBackendInfo(
+ FunctionCallContext& fc,
+ const RecordFunction& fn) {
+ // triton kernel related information are in kwinputs
+ const auto& kwinputs = fn.kwinputs();
+ if (kwinputs.find("kernel_backend") != kwinputs.end()) {
+ fc.kernel_backend = kwinputs.at("kernel_backend").toStringRef();
+ if (fc.kernel_backend == "triton") {
+ fc.kernel_file = kwinputs.at("kernel_file").toStringRef();
+ TORCH_INTERNAL_ASSERT(
+ kwinputs.find("kernel_file") != kwinputs.end(),
+ "kernel file is missing in triton kernel");
+ // Remove the path of the file name
+ if (fc.kernel_file.find_last_of('/') != std::string::npos)
+ fc.kernel_file =
+ fc.kernel_file.substr(fc.kernel_file.find_last_of('/') + 1);
+
+ // get grid information
+ TORCH_INTERNAL_ASSERT(
+ kwinputs.find("grid") != kwinputs.end(),
+ "grid is missing in triton kernel");
+ fc.input_values.emplace_back(
+ "\"" + kwinputs.at("grid").toStringRef() + "\"");
+ fc.input_types.emplace_back("\"String\"");
+ fc.input_shapes.emplace_back("[]");
+
+ // get stream information
+ TORCH_INTERNAL_ASSERT(
+ kwinputs.find("stream") != kwinputs.end(),
+ "stream is missing in triton kernel");
+ fc.input_values.emplace_back(
+ std::to_string(kwinputs.at("stream").toInt()));
+ fc.input_types.emplace_back("\"Int\"");
+ fc.input_shapes.emplace_back("[]");
+ }
+ }
+}
+
static void recordOperatorStart(
ExecutionTraceObserver& ob,
FunctionCallContext& fc,
@@ -491,6 +535,9 @@ static void recordOperatorStart(
appendValueInfo(
ob, inputs[i], fc.input_values, fc.input_types, fc.input_shapes);
}
+
+ handleKernelBackendInfo(fc, fn);
+
fc.parent_id = ob.op_stack[tid].top();
// get parent id from the forward stack, this can be different for
// autograd ops, which may execute on a different thread than the original
@@ -615,7 +662,9 @@ static void onFunctionExit(const RecordFunction& fn, ObserverContext* ctx_ptr) {
vectorToString(output_values),
vectorToString(output_shapes),
vectorToString(output_types),
- op_schema_str);
+ op_schema_str,
+ fc.kernel_backend,
+ fc.kernel_file);
ob->out << ",";
} catch (const std::exception& e) {
LOG(WARNING) << "Exception in execution trace observer: [" << fc.name
diff --git a/torch/profiler/profiler.py b/torch/profiler/profiler.py
index 82daffcdcb..81f1d2c2f1 100644
--- a/torch/profiler/profiler.py
+++ b/torch/profiler/profiler.py
@@ -1,6 +1,7 @@
import gzip
import json
import os
+import shutil
import tempfile
from abc import ABC, abstractmethod
from enum import Enum
@@ -792,8 +793,36 @@ class ExecutionTraceObserver(_ITraceObserver):
"""
Removes ET observer from record function callbacks.
"""
+
+ def _save_triton_kernels():
+ # Save the kernel paths for the generated kernels
+ from torch._inductor.codecache import PyCodeCache as PyCodeCache
+
+ kernel_files = [
+ v.__file__
+ for v in PyCodeCache.cache.values()
+ if getattr(v, "__file__", None) is not None
+ ]
+ work_dir, file_name = os.path.split(self._output_file_path)
+ resource_dir = os.path.join(
+ work_dir, os.path.splitext(file_name)[0] + "_resources"
+ )
+ if not os.path.exists(resource_dir):
+ os.mkdir(resource_dir)
+
+ for kernel_file in kernel_files:
+ if kernel_file is None:
+ continue
+ path, name = os.path.split(kernel_file)
+ dst = os.path.join(resource_dir, name)
+ shutil.copyfile(kernel_file, dst)
+
if self._registered:
self.stop()
+ try:
+ _save_triton_kernels()
+ except Exception as e:
+ warn(f"Execution trace failed to save kernels: {e}")
_remove_execution_trace_observer()
self._registered = False
|
2.41.0
|
aa6bd7fa01256dbc80247dc538ade79a8170884
|
Fri, 26 Apr 2024 06:39:13 -0700
|
[PATCH 0767/1000] Refactor all top level usages of record_shapeenv_event to ShapeEnv class (#123735)
|
This ensures that the first argument to record_shapeenv_event is a ShapeEnv, so we can appropriately short-circuit when recording is not in progress. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/123735 Approved by: https://github.com/ysiraichi, https://github.com/zou3519, https://github.com/albanD
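To picture the dispatch pattern this enforces, here is a minimal, self-contained sketch (not the actual PyTorch decorator): `record_event` and `FakeShapeEnv` are hypothetical stand-ins showing how requiring `self` as the first parameter lets the wrapper short-circuit on `self.is_recording` and roll back the event list when the wrapped call raises.
```python
import functools
import inspect


def record_event(fn):
    # Check at decoration time that fn is a method whose first parameter is
    # `self`, so the wrapper can always reach the (fake) ShapeEnv cheaply.
    params = inspect.getfullargspec(fn).args
    assert params and params[0] == "self", "record_event must wrap methods"

    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        if self.is_recording:  # fast path: already replaying a recorded event
            return fn(self, *args, **kwargs)
        self.events.append((fn.__name__, args, kwargs))
        try:
            return fn(self, *args, **kwargs)
        except Exception:
            self.events.pop()  # do not keep events for calls that raised
            raise

    return wrapper


class FakeShapeEnv:
    def __init__(self):
        self.is_recording = False
        self.events = []

    @record_event
    def constrain(self, sym, lo, hi):
        return (sym, lo, hi)


env = FakeShapeEnv()
env.constrain("s0", 0, 128)
assert env.events == [("constrain", ("s0", 0, 128), {})]
```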
|
diff --git a/torch/_export/serde/serialize.py b/torch/_export/serde/serialize.py
index 77f0cf3d3c..ffa4adbd21 100644
--- a/torch/_export/serde/serialize.py
+++ b/torch/_export/serde/serialize.py
@@ -1456,8 +1456,7 @@ class GraphModuleDeserializer(metaclass=Final):
self.shape_env.add_var_to_val(sym, hint)
if vr := self.symbol_name_to_range.get(val.expr_str):
- symbolic_shapes._constrain_symbol_range(
- self.shape_env,
+ self.shape_env.constrain_symbol_range(
sym,
compiler_min=vr.lower, # type: ignore[arg-type]
compiler_max=vr.upper, # type: ignore[arg-type]
@@ -1472,8 +1471,7 @@ class GraphModuleDeserializer(metaclass=Final):
if s.name not in self.symbol_name_to_symbol:
self.symbol_name_to_symbol[s.name] = s
if vr := self.symbol_name_to_range.get(s.name):
- symbolic_shapes._constrain_symbol_range(
- self.shape_env,
+ self.shape_env.constrain_symbol_range(
s,
compiler_min=vr.lower, # type: ignore[arg-type]
compiler_max=vr.upper, # type: ignore[arg-type]
diff --git a/torch/_logging/_registrations.py b/torch/_logging/_registrations.py
index 509d6961b1..912f283040 100644
--- a/torch/_logging/_registrations.py
+++ b/torch/_logging/_registrations.py
@@ -1,7 +1,11 @@
# flake8: noqa: B950
from ._internal import register_artifact, register_log
-DYNAMIC = ["torch.fx.experimental.symbolic_shapes", "torch.fx.experimental.sym_node"]
+DYNAMIC = [
+ "torch.fx.experimental.symbolic_shapes",
+ "torch.fx.experimental.sym_node",
+ "torch.fx.experimental.recording",
+]
DISTRIBUTED = [
"torch.distributed",
"torch._dynamo.backends.distributed",
diff --git a/torch/fx/experimental/recording.py b/torch/fx/experimental/recording.py
index c200c10e6f..4bf9ebab17 100644
--- a/torch/fx/experimental/recording.py
+++ b/torch/fx/experimental/recording.py
@@ -1,4 +1,5 @@
import functools
+import inspect
import itertools
import logging
from dataclasses import dataclass
@@ -220,52 +221,64 @@ def _extract_shape_env_and_assert_equal(args, kwargs):
def record_shapeenv_event(*, save_tracked_fakes: bool = False) -> Callable:
def decorator(fn: Callable) -> Callable:
assert callable(fn)
+ args = inspect.getfullargspec(fn).args
+ assert args and args[0] == "self", (
+ "record_shapeenv_event should only wrap methods on ShapeEnv; refactor your "
+ "code so that it calls into a method on ShapeEnv"
+ )
name = fn.__name__
@functools.wraps(fn)
def wrapper(*args, **kwargs):
from torch.fx.experimental.symbolic_shapes import ShapeEnv
- if isinstance(args[0], ShapeEnv) and args[0].is_recording: # type: ignore[has-type]
- # If ShapeEnv is already recording an event, call the wrapped
- # function directly.
- #
- # NB: here, we skip the check of whether all ShapeEnv instances
- # are equal, in favor of a faster dispatch.
- return fn(*args, **kwargs)
-
- # Retrieve an instance of ShapeEnv.
- # Assumption: the collection of args and kwargs may not reference
- # different ShapeEnv instances.
- self = _extract_shape_env_and_assert_equal(args, kwargs)
-
- # If we are calling this function without any ShapeEnv instance
- # alive in its arguments, we don't record and call the original.
- if self is None:
- return fn(*args, **kwargs)
-
- # Otherwise, start recording and call the function.
- with self._recording():
- # Take a snapshot of the current tracked_fakes.
- tracked_fakes = (
- self._snapshot_tracked_fakes() if save_tracked_fakes else None
- )
- # Record the event for 'fn'.
- event = ShapeEnvEvent(
- fn, list(args), kwargs, tracked_fakes, name=fn.__name__
- )
- # Play the event on this ShapeEnv.
- # NB: It's important to put the event first, because running
- # the event can trigger internal events that must be ordered
- # after this event. However, if an exception happens, we do
- # NOT want to have the event in the list, so pop it off from
- # the record if an error happened
- self.events.append(event)
- try:
- return event.run(self)
- except Exception:
- self.events.pop()
- raise
+ assert isinstance(args[0], ShapeEnv)
+
+ try:
+ if args[0].is_recording: # type: ignore[has-type]
+ # If ShapeEnv is already recording an event, call the wrapped
+ # function directly.
+ #
+ # NB: here, we skip the check of whether all ShapeEnv instances
+ # are equal, in favor of a faster dispatch.
+ return fn(*args, **kwargs)
+
+ # Retrieve an instance of ShapeEnv.
+ # Assumption: the collection of args and kwargs may not reference
+ # different ShapeEnv instances.
+ self = _extract_shape_env_and_assert_equal(args, kwargs)
+
+ # If we are calling this function without any ShapeEnv instance
+ # alive in its arguments, we don't record and call the original.
+ if self is None:
+ return fn(*args, **kwargs)
+
+ # Otherwise, start recording and call the function.
+ with self._recording():
+ # Take a snapshot of the current tracked_fakes.
+ tracked_fakes = (
+ self._snapshot_tracked_fakes() if save_tracked_fakes else None
+ )
+ # Record the event for 'fn'.
+ event = ShapeEnvEvent(
+ fn, list(args), kwargs, tracked_fakes, name=fn.__name__
+ )
+ # Play the event on this ShapeEnv.
+ # NB: It's important to put the event first, because running
+ # the event can trigger internal events that must be ordered
+ # after this event. However, if an exception happens, we do
+ # NOT want to have the event in the list, so pop it off from
+ # the record if an error happened
+ self.events.append(event)
+ try:
+ return event.run(self)
+ except Exception:
+ self.events.pop()
+ raise
+
+ except Exception:
+ log.error("failed while running %s(*%s, **%s)", name, args[1:], kwargs)
+ raise
return wrapper
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index d1c434a8dc..6c17b5d870 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -770,7 +770,6 @@ def _advise_is_size(a):
):
_constrain_range_for_size(a)
-@record_shapeenv_event()
def _constrain_range_for_size(a, min: Optional[int] = None, max: Optional[int] = None):
"""
This function is NOT INTENDED to be used by itself.
@@ -782,27 +781,10 @@ def _constrain_range_for_size(a, min: Optional[int] = None, max: Optional[int] =
assert isinstance(a, SymInt), "can only constrain range for SymInt"
assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
- if min is None:
- min = 0
- if max is None:
- max = sys.maxsize - 1
-
- if max < min:
- raise ValueError(
- "Maximum value to constrain_as_size can't be less than the specified min value, "
- "received min={min} and max={max}"
- )
-
- a.node.shape_env.constrain_symbol_range(
- a.node.expr,
- compiler_min=min,
- compiler_max=max,
- )
- a.node.shape_env.size_like.add(a.node.expr)
+ a.node.shape_env._constrain_range_for_size(a.node.expr, min, max)
# inclusive both ways
-@record_shapeenv_event()
def constrain_range(a, *, min: Optional[int], max: Optional[int] = None):
"""
Applies a constraint that the passed in SymInt must lie between min-max
@@ -844,54 +826,24 @@ def constrain_range(a, *, min: Optional[int], max: Optional[int] = None):
raise ValueError(f"Invalid value {a} for range [{min}:{max}]")
return
- if isinstance(a.node.expr, sympy.Integer):
- if not (min <= int(a.node.expr) <= max):
- raise ValueRangeError(f"Invalid value {int(a.node.expr)} for range [{min}:{max}]")
- return
- assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
-
- # TODO: Shouldn't we install a guard if the symbol is backed? Or is the
- # semantics that this is an "unchecked" assert (but it this actually
- # something useful? Might be better to restrict only for unbacked
- # SymInt).
- _constrain_symbol_range(
- a.node.shape_env,
- a.node.expr,
- compiler_min=min,
- compiler_max=max,
- )
+ a.node.shape_env._constrain_range(a.node.expr, min, max)
-
-@record_shapeenv_event()
-def constrain_unify(a, b):
+def constrain_unify(a: torch.SymInt, b: torch.SymInt) -> None:
"""
Given two SymInts, constrain them so that they must be equal. NB:
this will not work with SymInts that represent nontrivial expressions
(yet!)
"""
- # TODO: this does not install a deferred runtime assert yet
-
- # TODO: Maybe dedupe this with _maybe_guard_rel?
if not isinstance(a, SymInt):
if not isinstance(b, SymInt):
assert a == b
+ return
else:
- assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
shape_env = b.node.shape_env
- shape_env.replacements[b.node.expr] = sympy.Integer(a)
else:
- # TODO: Actually, we can support this as long as one of them is a symbol.
- # NB: We can't actually do "unification" as our operators are not
- # injective
- assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
shape_env = a.node.shape_env
- if not isinstance(b, SymInt):
- shape_env.replacements[a.node.expr] = sympy.Integer(b)
- else:
- assert a.node.shape_env is b.node.shape_env
- assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
- new_var = shape_env._find(a.node.expr)
- shape_env.replacements[b.node.expr] = new_var
+
+ shape_env._constrain_unify(a, b)
# Assume that a boolean is true for the purposes of subsequent symbolic
# reasoning. This will keep track of corresponding runtime checks to verify
@@ -2470,6 +2422,78 @@ class ShapeEnv:
if dest is not None:
self._set_replacement(new_s, dest, "rename_unbacked_to_dest")
+ @record_shapeenv_event()
+ def _constrain_range_for_size(self, a: sympy.Symbol, min: Optional[int] = None, max: Optional[int] = None):
+ if min is None:
+ min = 0
+ if max is None:
+ max = sys.maxsize - 1
+
+ if max < min:
+ raise ValueError(
+ "Maximum value to constrain_as_size can't be less than the specified min value, "
+ "received min={min} and max={max}"
+ )
+
+ self.constrain_symbol_range(
+ a,
+ compiler_min=min,
+ compiler_max=max,
+ )
+ self.size_like.add(a)
+
+ @record_shapeenv_event()
+ def _constrain_range(self, a: sympy.Expr, min: int, max: int):
+ if isinstance(a, sympy.Integer):
+ if not (min <= int(a) <= max):
+ raise ValueRangeError(f"Invalid value {int(a)} for range [{min}:{max}]")
+ return
+ assert isinstance(a, sympy.Symbol), "constraining non-Symbols NYI"
+
+ # TODO: Shouldn't we install a guard if the symbol is backed? Or is the
+ # semantics that this is an "unchecked" assert (but it this actually
+ # something useful? Might be better to restrict only for unbacked
+ # SymInt).
+ self.constrain_symbol_range(
+ a,
+ compiler_min=min,
+ compiler_max=max,
+ )
+
+ @record_shapeenv_event()
+ def _constrain_unify(self, a, b):
+ """
+ Given two SymInts, constrain them so that they must be equal. NB:
+ this will not work with SymInts that represent nontrivial expressions
+ (yet!)
+ """
+ # TODO: this does not install a deferred runtime assert yet
+
+ # TODO: Maybe dedupe this with _maybe_guard_rel?
+ # Update Feb 2024: this is extra important to do, this doesn't handle
+ # unbacked replacements properly nor does it generate deferred runtime
+ # asserts
+ if not isinstance(a, SymInt):
+ if not isinstance(b, SymInt):
+ assert a == b
+ else:
+ assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
+ assert b.node.shape_env is self
+ self.replacements[b.node.expr] = sympy.Integer(a)
+ else:
+ # TODO: Actually, we can support this as long as one of them is a symbol.
+ # NB: We can't actually do "unification" as our operators are not
+ # injective
+ assert isinstance(a.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
+ assert a.node.shape_env is self
+ if not isinstance(b, SymInt):
+ self.replacements[a.node.expr] = sympy.Integer(b)
+ else:
+ assert a.node.shape_env is b.node.shape_env
+ assert isinstance(b.node.expr, sympy.Symbol), "constraining non-Symbols NYI"
+ new_var = self._find(a.node.expr)
+ self.replacements[b.node.expr] = new_var
+
def _ignore_fresh_unbacked_symbols_tls(self):
return getattr(TLS, "ignore_fresh_unbacked_symbols", False)
|
2.41.0
|
f08140de2ad1b0eeee6255924109a6425b6a9e2
|
Fri, 26 Apr 2024 22:40:48 -0700
|
[PATCH 0768/1000] [dynamo] Collect cell_and_freevars correctly (#125097)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125097 Approved by: https://github.com/Skylion007
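For background on what the fix addresses (not part of the patch itself), a small CPython-introspection sketch: a variable captured by a nested function shows up in the parent's `co_cellvars` and in the child's `co_freevars`, which is why an inliner that only looks at the child's own code object must also consult the parent's cell and free variables. The `outer`/`inner` names are purely illustrative.
```python
def outer(x):
    scale = 10  # captured by `inner`, so it becomes a cell variable of `outer`

    def inner(y):
        return (x + y) * scale

    return inner


inner = outer(1)
# The captured names live on the parent as cell variables ...
assert set(outer.__code__.co_cellvars) == {"x", "scale"}
# ... and on the child only as free variables; the child has no cells of its own.
assert set(inner.__code__.co_freevars) == {"x", "scale"}
assert inner.__code__.co_cellvars == ()
```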
|
diff --git a/test/inductor/test_cpu_repro.py b/test/inductor/test_cpu_repro.py
index fea142e6f6..004db97b8a 100644
--- a/test/inductor/test_cpu_repro.py
+++ b/test/inductor/test_cpu_repro.py
@@ -1,6 +1,7 @@
# Owner(s): ["oncall: cpu inductor"]
import contextlib
import copy
+import functools
import itertools
import math
import platform
@@ -3048,6 +3049,38 @@ class CPUReproTests(TestCase):
v2 = jit_func(input_tensor)
self.assertEqual(v1, v2)
+ def test_nn_param_assign_wrapped(self):
+ class Model2(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.conv = nn.Conv2d(in_channels=3, out_channels=5, kernel_size=3)
+ self.batchnorm = nn.BatchNorm2d(num_features=5)
+ self.conv_weight = torch.randn(5, 3, 3, 3)
+ self.conv_bias = torch.randn(5)
+
+ def forward(self, x):
+ self.conv.weight = nn.Parameter(self.conv_weight)
+ self.conv.bias = nn.Parameter(self.conv_bias, requires_grad=False)
+ self.conv.eval()
+ x = self.conv(x)
+ x = self.batchnorm(x)
+ x = F.relu(x)
+ return x
+
+ input_tensor = torch.randn(1, 3, 10, 10)
+ func = Model2().to("cpu")
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ return func(*args, **kwargs)
+
+ with torch.no_grad():
+ func.train(False)
+ v1 = func(input_tensor)
+ jit_func = torch.compile(wrapper, fullgraph=True)
+ v2 = jit_func(input_tensor)
+ self.assertEqual(v1, v2)
+
@config.patch(inplace_buffers=True)
def test_in_out_buffer(self):
def fn(x, y):
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index 78d5b08e68..4e52c5fdad 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -694,6 +694,11 @@ class InstructionTranslatorBase(
self._cell_and_freevars = tuple(
self.code_options["co_cellvars"] or []
) + tuple(self.code_options["co_freevars"] or [])
+
+ # An inlined function might depend on the freevar of the parent
+ # function. So, recursively obtain parent cell and freevars.
+ if isinstance(self, InliningInstructionTranslator):
+ self._cell_and_freevars += self.parent.cell_and_freevars()
return self._cell_and_freevars
def prune_dead_locals(self):
|
2.41.0
|
478b7f1cac9686f00edf3db4667cf86d2421531
|
Sat, 27 Apr 2024 21:04:52 +0000
|
[PATCH 0769/1000] Add commonly used score_mod functions for templated attention (#124670)
|
Fixes #ISSUE_NUMBER Pull Request resolved: https://github.com/pytorch/pytorch/pull/124670 Approved by: https://github.com/Chillee
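As a rough illustration of the score_mod calling convention these helpers follow, here is a plain-tensor sketch evaluated outside the templated-attention kernel; the toy sizes and the `causal` name are only for the example.
```python
import torch


def causal(score, batch, head, token_q, token_kv):
    # Keep scores where the query position is at or after the key position.
    return torch.where(token_q >= token_kv, score, float("-inf"))


# Toy scores for a single (batch, head): 4 query tokens x 4 key tokens.
scores = torch.randn(4, 4)
q_idx = torch.arange(4).view(4, 1)   # broadcasts over the key dimension
kv_idx = torch.arange(4).view(1, 4)  # broadcasts over the query dimension

masked = causal(scores, batch=0, head=0, token_q=q_idx, token_kv=kv_idx)
attn = masked.softmax(dim=-1)  # each row attends only to positions <= its own
assert torch.allclose(attn.triu(1), torch.zeros(4, 4))
```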
|
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_templated_attention.py
index a0d70902c7..9c665d5b88 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_templated_attention.py
@@ -13,7 +13,15 @@ from torch._higher_order_ops.templated_attention import (
)
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_code
-from torch.nn.attention._templated_attention import _compose, _templated_attention
+from torch.nn.attention._templated_attention import (
+ _causal,
+ _compose,
+ _generate_alibi_bias,
+ _identity,
+ _rel_bias,
+ _rel_causal,
+ _templated_attention,
+)
from torch.testing import FileCheck
from torch.testing._internal import common_utils
from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_BF16
@@ -48,9 +56,13 @@ test_dtypes_fast = [torch.float16]
if common_utils.TEST_WITH_ROCM:
test_dtypes = [torch.float32]
-
-def _identity_mod(score, b, h, m, n):
- return score
+test_score_mods = [
+ _identity,
+ _causal,
+ _rel_bias,
+ _rel_causal,
+ _generate_alibi_bias(8),
+]
def _causal_mod(score, b, h, token_q, token_kv):
@@ -90,58 +102,8 @@ class TestTemplatedSDPA(InductorTestCase):
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
- def test_identity(self, dtype: torch.dtype):
- def score_mod(score, b, h, m, n):
- return score
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_causal_mask(self, dtype: torch.dtype):
- def score_mod(score, b, h, token_q, token_kv):
- return torch.where(token_q >= token_kv, score, float("-inf"))
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_rel_bias(self, dtype: torch.dtype):
- def score_mod(score, b, h, m, n):
- return score + (m - n)
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_alibi_bias(self, dtype: torch.dtype):
- def score_mod(score, b, h, m, n):
- return score + (m - n) * h
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_rel_causal(self, dtype: torch.dtype):
- def score_mod(score, b, h, m, n):
- return torch.where(m <= n, score + (m - n), float("-inf"))
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_skip_odd_keys(self, dtype: torch.dtype):
- def score_mod(score, b, h, q, kv):
- return torch.where(kv % 2 == 0, score, float("-inf"))
-
- self.run_test(score_mod, dtype)
-
- @supported_platform
- @common_utils.parametrize("dtype", test_dtypes)
- def test_alibi_causal(self, dtype: torch.dtype):
- def score_mod(score, b, h, m, n):
- return torch.where(m <= n, score + (m - n) * h, float("-inf"))
-
+ @common_utils.parametrize("score_mod", test_score_mods)
+ def test_builtin_score_mods(self, dtype: torch.dtype, score_mod: Callable):
self.run_test(score_mod, dtype)
@supported_platform
@@ -302,7 +264,7 @@ class TestTemplatedSDPA(InductorTestCase):
requires_grad=True,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
- out = _templated_attention(q, k, v, _identity_mod)
+ out = _templated_attention(q, k, v, _identity)
with self.assertRaisesRegex(
RuntimeError, "Autograd not implemented for templated_attention"
):
@@ -316,7 +278,7 @@ class TestTemplatedSDPA(InductorTestCase):
with self.assertRaisesRegex(
ValueError, "Expected query, key, and value to have the same dtype"
):
- _templated_attention(query, key, value, _identity_mod)
+ _templated_attention(query, key, value, _identity)
@supported_platform
def test_different_sequence_length_fails(self):
@@ -324,7 +286,7 @@ class TestTemplatedSDPA(InductorTestCase):
key = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
value = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
with self.assertRaisesRegex(ValueError, "NYI: The target sequence length"):
- _templated_attention(query, key, value, _identity_mod)
+ _templated_attention(query, key, value, _identity)
@supported_platform
@patch.object(torch._inductor.config, "max_autotune", True)
@@ -351,7 +313,7 @@ class TestTemplatedSDPA(InductorTestCase):
@supported_platform
@common_utils.parametrize("dtype", test_dtypes)
- @common_utils.parametrize("score_mod", [_identity_mod, _causal_mod])
+ @common_utils.parametrize("score_mod", [_identity, _causal])
def test_logsumexp_correctness(self, dtype, score_mod):
@torch.compile
def sdpa_hop(q, k, v, score_mod):
@@ -414,7 +376,7 @@ class TestTemplatedSDPA(InductorTestCase):
lse_2 = lse * 2
return lse_2
- _, code = run_and_get_code(func, q, k, v, _identity_mod)
+ _, code = run_and_get_code(func, q, k, v, _identity)
# Ensure that two kernels are generated
FileCheck().check_count(".run(", 2, True).run(code[0])
@@ -435,7 +397,7 @@ class TestTemplatedSDPA(InductorTestCase):
lse_2 = lse * 2
return out, lse_2
- _, code = run_and_get_code(func, q, k, v, _identity_mod)
+ _, code = run_and_get_code(func, q, k, v, _identity)
# Ensure that two kernels are generated
FileCheck().check_count(".run(", 2, True).run(code[0])
diff --git a/torch/_inductor/kernel/templated_attention.py b/torch/_inductor/kernel/templated_attention.py
index e0adf25322..0b62dfbd68 100644
--- a/torch/_inductor/kernel/templated_attention.py
+++ b/torch/_inductor/kernel/templated_attention.py
@@ -3,7 +3,7 @@ import logging
from typing import Any, List
import torch
-from .. import config
+from .. import config, utils
from ..lowering import empty_strided, lowerings, register_lowering
from ..select_algorithm import autotune_select_algorithm, TritonTemplate
@@ -173,6 +173,24 @@ sdpa_template = TritonTemplate(
)
+def _get_default_config(query):
+ default_config = None
+ is_big_shared_mem = utils.get_gpu_shared_memory() > 128 * 1024
+
+ if is_big_shared_mem:
+ if query.get_dtype() == torch.float32:
+ default_config = (64, 64, 4, 3)
+ else:
+ default_config = (128, 64, 4, 3)
+ else:
+ if query.get_dtype() == torch.float32:
+ default_config = (32, 32, 4, 3)
+ else:
+ default_config = (64, 32, 4, 3)
+
+ return default_config
+
+
# TODO: We probably also need a layout constraint?
@register_lowering(torch.ops.higher_order.templated_attention, type_promotion_kind=None)
def templated_attention(*args, **kwargs):
@@ -274,10 +292,7 @@ def templated_attention(*args, **kwargs):
)
choices: List[Any] = []
configs: List[Any] = []
- if query.get_dtype() == torch.float32:
- configs.append((64, 64, 4, 3))
- else:
- configs.append((128, 64, 4, 3))
+ configs.append(_get_default_config(query))
if config.max_autotune:
configs += [
(128, 64, 4, 3),
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index 14478b0cb7..f54e81e1bd 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -1197,6 +1197,12 @@ def get_gpu_dram_gbps():
return get_dram_gbps()
+def get_gpu_shared_memory():
+ from triton.runtime import driver
+
+ return driver.active.utils.get_device_properties(0).get("max_shared_mem", 0)
+
+
def is_welford_reduction(reduction_type):
return reduction_type.startswith("welford")
diff --git a/torch/nn/attention/_templated_attention.py b/torch/nn/attention/_templated_attention.py
index 0e614c8a4e..00183a2e31 100644
--- a/torch/nn/attention/_templated_attention.py
+++ b/torch/nn/attention/_templated_attention.py
@@ -90,3 +90,60 @@ def _templated_attention(
# Drop the logsumexp value since this is only needed for backwards
return out
+
+
+"""Some common used score_mod functions for templated attention in PyTorch."""
+
+
+def _identity(
+ score: torch.Tensor,
+ batch: torch.Tensor,
+ head: torch.Tensor,
+ token_q: torch.Tensor,
+ token_kv: torch.Tensor,
+) -> torch.Tensor:
+ return score
+
+
+def _causal(
+ score: torch.Tensor,
+ batch: torch.Tensor,
+ head: torch.Tensor,
+ token_q: torch.Tensor,
+ token_kv: torch.Tensor,
+) -> torch.Tensor:
+ return torch.where(token_q >= token_kv, score, float("-inf"))
+
+
+def _rel_bias(
+ score: torch.Tensor,
+ batch: torch.Tensor,
+ head: torch.Tensor,
+ token_q: torch.Tensor,
+ token_kv: torch.Tensor,
+) -> torch.Tensor:
+ return score + (token_q - token_kv)
+
+
+def _rel_causal(
+ score: torch.Tensor,
+ batch: torch.Tensor,
+ head: torch.Tensor,
+ token_q: torch.Tensor,
+ token_kv: torch.Tensor,
+) -> torch.Tensor:
+ return torch.where(token_q <= token_kv, score + (token_q - token_kv), float("-inf"))
+
+
+def _generate_alibi_bias(num_heads: int):
+ def _alibi_bias(
+ score: torch.Tensor,
+ batch: torch.Tensor,
+ head: torch.Tensor,
+ token_q: torch.Tensor,
+ token_kv: torch.Tensor,
+ ) -> torch.Tensor:
+ scale = torch.exp2(-((head + 1) * 8.0 / num_heads))
+ return score + (token_kv - token_q) * scale
+
+ return _alibi_bias
|
2.41.0
|
7790fd088af40d9ab44c66def34db23f9d2879f
|
Thu, 25 Apr 2024 16:24:24 +0800
|
[PATCH 0770/1000] [inductor] share cse cache during vectorized indirect load (#124597)
|
Fix https://github.com/pytorch/pytorch/issues/123502 `swap_buffer` is not needed in vectorized indirect load; remove it so the cse buffer can be shared. ```
auto tmp8 = [&]
{
    __at_align__ std::array<int64_t, 16> tmpbuf;
    tmp7.store(tmpbuf.data());
    return tmpbuf;
}
()
;
// ... other code ...
// tmp7 is stored again here (redundant tmp16)
auto tmp16 = [&]
{
    __at_align__ std::array<int64_t, 16> tmpbuf;
    tmp7.store(tmpbuf.data());
    return tmpbuf;
}
()
;
``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/124597 Approved by: https://github.com/jgong5, https://github.com/jansel
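A stand-alone sketch of the common-subexpression-elimination caching idea behind this fix (the `CSECache` class below is illustrative, not Inductor's actual CSE implementation): when the cache survives across the indirect-load lambda, the second request for the same expression reuses the first temporary instead of emitting a redundant store.
```python
import itertools


class CSECache:
    """Map already-generated expressions to the temporary that holds them."""

    def __init__(self):
        self.cache = {}
        self.lines = []
        self._counter = itertools.count()

    def generate(self, expr: str) -> str:
        if expr in self.cache:  # cache hit: reuse the existing temporary
            return self.cache[expr]
        var = f"tmp{next(self._counter)}"
        self.lines.append(f"auto {var} = {expr};")
        self.cache[expr] = var
        return var


cse = CSECache()
a = cse.generate("store_to_tmpbuf(tmp7)")
b = cse.generate("store_to_tmpbuf(tmp7)")  # shared cache: no second store emitted
assert a == b and len(cse.lines) == 1
```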
|
diff --git a/test/inductor/test_cpu_repro.py b/test/inductor/test_cpu_repro.py
index 004db97b8a..fbbdb8ee96 100644
--- a/test/inductor/test_cpu_repro.py
+++ b/test/inductor/test_cpu_repro.py
@@ -3683,6 +3683,39 @@ class CPUReproTests(TestCase):
x = torch.randn(1, 4, 2, 2)
self.common(fn, (x,))
+ @requires_vectorization
+ def test_vec_indirect_load_cse_cache(self):
+ # https://github.com/pytorch/pytorch/issues/123502
+ from math import inf
+
+ def fn(arg0_1):
+ full_default = torch.ops.aten.full.default([209985], 1)
+ select = torch.ops.aten.select.int(arg0_1, 0, 0)
+ select_1 = torch.ops.aten.select.int(arg0_1, 0, 1)
+ view = torch.ops.aten.reshape.default(select_1, [-1])
+ expand = torch.ops.aten.expand.default(view, [209985])
+ full_default_1 = torch.ops.aten.full.default([10000], 0)
+ scatter_add = torch.ops.aten.scatter_add.default(
+ full_default_1, 0, expand, full_default
+ )
+ pow_1 = torch.ops.aten.pow.Tensor_Scalar(scatter_add, -0.5)
+ eq = torch.ops.aten.eq.Scalar(pow_1, inf)
+ full_default_2 = torch.ops.aten.full.default([], 0.0)
+ where = torch.ops.aten.where.self(eq, full_default_2, pow_1)
+ index = torch.ops.aten.index.Tensor(where, [select])
+ index_1 = torch.ops.aten.index.Tensor(where, [select_1])
+ mul_1 = torch.ops.aten.mul.Tensor(index, index_1)
+ return (mul_1,)
+
+ x = torch.zeros(2, 209985).to(torch.int64)
+ opt_fn = torch._dynamo.optimize("inductor")(fn)
+ _, code = run_and_get_cpp_code(opt_fn, x)
+ FileCheck().check_count(
+ "return at::vec::VectorizedN<int64_t,2>::loadu(tmpbuf.data(),",
+ 2,
+ exactly=True,
+ ).run(code)
+
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
diff --git a/torch/_inductor/codegen/cpp.py b/torch/_inductor/codegen/cpp.py
index f8f18a80b0..42b1aebe20 100644
--- a/torch/_inductor/codegen/cpp.py
+++ b/torch/_inductor/codegen/cpp.py
@@ -2336,7 +2336,7 @@ class CppVecKernel(CppKernel):
assert vec_var.is_vec
code = BracesBuffer()
code.writeline("[&]")
- with self.swap_buffers(code), code.indent():
+ with code.indent():
vec_dtype = vec_var.dtype
assert vec_dtype is not None
if vec_dtype == torch.bool:
@@ -2357,7 +2357,7 @@ class CppVecKernel(CppKernel):
assert opt_ctx is not None
code = BracesBuffer()
code.writeline("[&]")
- with self.swap_buffers(code), code.indent():
+ with code.indent():
result_size = get_result_size(dtype)
result_declare = (
f"__at_align__ std::array<{DTYPE_TO_CPP[dtype]}, {result_size}> tmpbuf;"
|
2.41.0
|
5b1a4c269b3b9587233eaca4b7acd54c4a64f48
|
Thu, 25 Apr 2024 16:24:34 +0800
|
[PATCH 0771/1000] [inductor] share more cse cache during swap buffer (#124921)
|
`swap_buffer` will make the `cse_cache` cannot be shared inside/outside of the lambda function scope. For example, ``` auto tmp8 = -std::numeric_limits<float>::infinity(); auto tmp9 = [&] { auto tmp12 = -std::numeric_limits<float>::infinity(); return tmp12; } ``` `tmp12` should not be created since it is same with `tmp8`. We make the `cse_cache` as a read only cache inside the scope (because it is unsafe to expose cache inside the scope,the outside scope cannot use it.) **Test Plan** ``` python test/inductor/test_torchinductor.py -k test_AllenaiLongformerBase_repro_cpu ``` the `static_cast<int>(256)` will only occur once after this PR since the inside scope can share the cse buffer outside the scope. Before this PR, ``` cpp_fused_copy_full_like_0 = async_compile.cpp_pybinding(['const float*', 'float*'], ''' #include "/tmp/torchinductor_root/ub/cub6x5nmhqhp7xapkb3dlgjxef3t2bnkx7y7n4z2f4z5obnecxpy.h" extern "C" void kernel(const float* in_ptr0, float* out_ptr1) { #pragma omp parallel num_threads(128) { int tid = omp_get_thread_num(); { #pragma omp for collapse(2) for(long x0=static_cast<long>(0L); x0<static_cast<long>(4L); x0+=static_cast<long>(1L)) { for(long x1=static_cast<long>(0L); x1<static_cast<long>(1024L); x1+=static_cast<long>(1L)) { #pragma GCC ivdep for(long x2=static_cast<long>(0L); x2<static_cast<long>(12L); x2+=static_cast<long>(1L)) { for(long x3=static_cast<long>(0L); x3<static_cast<long>(512L); x3+=static_cast<long>(16L)) { auto tmp0 = c10::convert<int>(x1); auto tmp1 = static_cast<int>(256); auto tmp2 = tmp0 < tmp1; auto tmp3 = [&] { auto tmp4 = c10::convert<int>(x3); auto tmp5 = at::vec::Vectorized<int>::arange(tmp4, 1); auto tmp6 = static_cast<int>(257); auto tmp7 = at::vec::Vectorized<int>(tmp6); auto tmp8 = at::vec::VecMask<int,1>(tmp5 < tmp7); auto tmp10 = at::vec::VecMask<float,1>::from(tmp2); auto tmp11 = tmp8 & tmp10; auto tmp9 = [&] { auto tmp12 = -std::numeric_limits<float>::infinity(); return tmp12; } ; auto tmp13 = [&] { if (tmp11.all_zero()) { return at::vec::Vectorized<float>(static_cast<float>(0.0)); } else { return decltype(at::vec::Vectorized<float>(tmp9()))::blendv(at::vec::Vectorized<float>(static_cast<float>(0.0)), at::vec::Vectorized<float>(tmp9()), tmp11.template cast<float,1>()); } } () ; auto tmp14 = c10::convert<int>(c10::div_floor_integer(x1, 256L)); auto tmp15 = static_cast<int>(3); auto tmp16 = tmp14 < tmp15; auto tmp18 = tmp16 & tmp2; auto tmp17 = [&] { auto tmp19 = c10::convert<int>(x3); auto tmp20 = at::vec::Vectorized<int>::arange(tmp19, 1); auto tmp21 = static_cast<int>(256); auto tmp22 = at::vec::Vectorized<int>(tmp21); auto tmp23 = at::vec::VecMask<int,1>(tmp20 >= tmp22); auto tmp25 = at::vec::VecMask<float,1>::from(tmp18); auto tmp26 = tmp23 & tmp25; auto tmp24 = [&] { auto tmp27 = tmp26.template cast<float,1>().template loadu<float,1>(in_ptr0 + static_cast<long>((-256L) + x3 + (513L*(static_cast<long>(x1) % static_cast<long>(256L))) + (262656L*(c10::div_floor_integer(x1, 256L))) + (787968L*x2) + (9455616L*x0))); return tmp27; } ; auto tmp28 = [&] { if (tmp26.all_zero()) { return at::vec::Vectorized<float>(static_cast<float>(0.0)); } else { return decltype(tmp24())::blendv(at::vec::Vectorized<float>(static_cast<float>(0.0)), tmp24(), tmp26.template cast<float,1>()); } } () ; auto tmp29 = static_cast<float>(0.0); auto tmp30 = at::vec::Vectorized<float>(tmp29); auto tmp31 = decltype(tmp28)::blendv(tmp30, tmp28, tmp23.template cast<float,1>()); return tmp31; } ; auto tmp32 = tmp16 ? 
tmp17() : at::vec::Vectorized<float>(static_cast<float>(0.0)); auto tmp33 = static_cast<float>(0.0); auto tmp34 = at::vec::VecMask<float,1>::from(tmp16); auto tmp35 = at::vec::Vectorized<float>(tmp33); auto tmp36 = decltype(tmp32)::blendv(tmp35, tmp32, tmp34.template cast<float,1>()); auto tmp37 = decltype(tmp13)::blendv(tmp36, tmp13, tmp8.template cast<float,1>()); return tmp37; } ; auto tmp38 = tmp2 ? tmp3() : at::vec::Vectorized<float>(static_cast<float>(0.0)); auto tmp39 = c10::convert<int>(c10::div_floor_integer(x1, 256L)); auto tmp40 = static_cast<int>(3); auto tmp41 = tmp39 < tmp40; auto tmp42 = [&] { auto tmp43 = c10::convert<int>(x3); auto tmp44 = at::vec::Vectorized<int>::arange(tmp43, 1); auto tmp45 = static_cast<int>(256); auto tmp46 = at::vec::Vectorized<int>(tmp45); auto tmp47 = at::vec::VecMask<int,1>(tmp44 >= tmp46); auto tmp49 = at::vec::VecMask<float,1>::from(tmp41); auto tmp50 = tmp47 & tmp49; auto tmp48 = [&] { auto tmp51 = tmp50.template cast<float,1>().template loadu<float,1>(in_ptr0 + static_cast<long>((-256L) + x3 + (513L*(static_cast<long>(x1) % static_cast<long>(256L))) + (262656L*(c10::div_floor_integer(x1, 256L))) + (787968L*x2) + (9455616L*x0))); return tmp51; } ; auto tmp52 = [&] { if (tmp50.all_zero()) { return at::vec::Vectorized<float>(static_cast<float>(0.0)); } else { return decltype(tmp48())::blendv(at::vec::Vectorized<float>(static_cast<float>(0.0)), tmp48(), tmp50.template cast<float,1>()); } } () ; auto tmp53 = static_cast<float>(0.0); auto tmp54 = at::vec::Vectorized<float>(tmp53); auto tmp55 = decltype(tmp52)::blendv(tmp54, tmp52, tmp47.template cast<float,1>()); return tmp55; } ; auto tmp56 = tmp41 ? tmp42() : at::vec::Vectorized<float>(static_cast<float>(0.0)); auto tmp57 = static_cast<float>(0.0); auto tmp58 = at::vec::VecMask<float,1>::from(tmp41); auto tmp59 = at::vec::Vectorized<float>(tmp57); auto tmp60 = decltype(tmp56)::blendv(tmp59, tmp56, tmp58.template cast<float,1>()); auto tmp61 = at::vec::VecMask<float,1>::from(tmp2); auto tmp62 = decltype(tmp38)::blendv(tmp60, tmp38, tmp61.template cast<float,1>()); tmp62.store(out_ptr1 + static_cast<long>(x3 + (513L*x1) + (525312L*x2) + (6303744L*x0))); } #pragma omp simd simdlen(8) for(long x3=static_cast<long>(512L); x3<static_cast<long>(513L); x3+=static_cast<long>(1L)) { auto tmp0 = c10::convert<int64_t>(x1); auto tmp1 = static_cast<int64_t>(256); auto tmp2 = tmp0 < tmp1; auto tmp3 = [&] { auto tmp4 = c10::convert<int64_t>(x3); auto tmp5 = static_cast<int64_t>(257); auto tmp6 = tmp4 < tmp5; auto tmp7 = [&] { auto tmp8 = -std::numeric_limits<float>::infinity(); return tmp8; } ; auto tmp9 = tmp6 ? tmp7() : static_cast<decltype(tmp7())>(0.0); auto tmp10 = c10::convert<int64_t>(c10::div_floor_integer(x1, 256L)); auto tmp11 = static_cast<int64_t>(3); auto tmp12 = tmp10 < tmp11; auto tmp13 = [&] { auto tmp14 = c10::convert<int64_t>(x3); auto tmp15 = static_cast<int64_t>(256); auto tmp16 = tmp14 >= tmp15; auto tmp17 = [&] { auto tmp18 = in_ptr0[static_cast<long>((-256L) + x3 + (513L*(static_cast<long>(x1) % static_cast<long>(256L))) + (262656L*(c10::div_floor_integer(x1, 256L))) + (787968L*x2) + (9455616L*x0))]; return tmp18; } ; auto tmp19 = tmp16 ? tmp17() : static_cast<decltype(tmp17())>(0.0); auto tmp20 = static_cast<float>(0.0); auto tmp21 = tmp16 ? tmp19 : tmp20; return tmp21; } ; auto tmp22 = tmp12 ? tmp13() : static_cast<decltype(tmp13())>(0.0); auto tmp23 = static_cast<float>(0.0); auto tmp24 = tmp12 ? tmp22 : tmp23; auto tmp25 = tmp6 ? tmp9 : tmp24; return tmp25; } ; auto tmp26 = tmp2 ? 
tmp3() : static_cast<decltype(tmp3())>(0.0); auto tmp27 = c10::convert<int64_t>(c10::div_floor_integer(x1, 256L)); auto tmp28 = static_cast<int64_t>(3); auto tmp29 = tmp27 < tmp28; auto tmp30 = [&] { auto tmp31 = c10::convert<int64_t>(x3); auto tmp32 = static_cast<int64_t>(256); auto tmp33 = tmp31 >= tmp32; auto tmp34 = [&] { auto tmp35 = in_ptr0[static_cast<long>((-256L) + x3 + (513L*(static_cast<long>(x1) % static_cast<long>(256L))) + (262656L*(c10::div_floor_integer(x1, 256L))) + (787968L*x2) + (9455616L*x0))]; return tmp35; } ; auto tmp36 = tmp33 ? tmp34() : static_cast<decltype(tmp34())>(0.0); auto tmp37 = static_cast<float>(0.0); auto tmp38 = tmp33 ? tmp36 : tmp37; return tmp38; } ; auto tmp39 = tmp29 ? tmp30() : static_cast<decltype(tmp30())>(0.0); auto tmp40 = static_cast<float>(0.0); auto tmp41 = tmp29 ? tmp39 : tmp40; auto tmp42 = tmp2 ? tmp26 : tmp41; out_ptr1[static_cast<long>(x3 + (513L*x1) + (525312L*x2) + (6303744L*x0))] = tmp42; } } } } } } } ''') ``` After this PR, ``` cpp_fused_copy_full_like_0 = async_compile.cpp_pybinding(['const float*', 'float*'], ''' #include "/tmp/torchinductor_root/ub/cub6x5nmhqhp7xapkb3dlgjxef3t2bnkx7y7n4z2f4z5obnecxpy.h" extern "C" void kernel(const float* in_ptr0, float* out_ptr1) { #pragma omp parallel num_threads(128) { int tid = omp_get_thread_num(); { #pragma omp for collapse(2) for(long x0=static_cast<long>(0L); x0<static_cast<long>(4L); x0+=static_cast<long>(1L)) { for(long x1=static_cast<long>(0L); x1<static_cast<long>(1024L); x1+=static_cast<long>(1L)) { #pragma GCC ivdep for(long x2=static_cast<long>(0L); x2<static_cast<long>(12L); x2+=static_cast<long>(1L)) { for(long x3=static_cast<long>(0L); x3<static_cast<long>(512L); x3+=static_cast<long>(16L)) { auto tmp0 = c10::convert<int>(x1); auto tmp1 = static_cast<int>(256); auto tmp2 = tmp0 < tmp1; auto tmp3 = [&] { auto tmp4 = c10::convert<int>(x3); auto tmp5 = at::vec::Vectorized<int>::arange(tmp4, 1); auto tmp6 = static_cast<int>(257); auto tmp7 = at::vec::Vectorized<int>(tmp6); auto tmp8 = at::vec::VecMask<int,1>(tmp5 < tmp7); auto tmp10 = at::vec::VecMask<float,1>::from(tmp2); auto tmp11 = tmp8 & tmp10; auto tmp9 = [&] { auto tmp12 = -std::numeric_limits<float>::infinity(); return tmp12; } ; auto tmp13 = [&] { if (tmp11.all_zero()) { return at::vec::Vectorized<float>(static_cast<float>(0.0)); } else { return decltype(at::vec::Vectorized<float>(tmp9()))::blendv(at::vec::Vectorized<float>(static_cast<float>(0.0)), at::vec::Vectorized<float>(tmp9()), tmp11.template cast<float,1>()); } } () ; auto tmp14 = c10::convert<int>(c10::div_floor_integer(x1, 256L)); auto tmp15 = static_cast<int>(3); auto tmp16 = tmp14 < tmp15; auto tmp18 = tmp16 & tmp2; auto tmp17 = [&] { auto tmp19 = at::vec::Vectorized<int>(tmp1); auto tmp20 = at::vec::VecMask<int,1>(tmp5 >= tmp19); auto tmp22 = at::vec::VecMask<float,1>::from(tmp18); auto tmp23 = tmp20 & tmp22; auto tmp21 = [&] { auto tmp24 = tmp23.template cast<float,1>().template loadu<float,1>(in_ptr0 + static_cast<long>((-256L) + x3 + (513L*(static_cast<long>(x1) % static_cast<long>(256L))) + (262656L*(c10::div_floor_integer(x1, 256L))) + (787968L*x2) + (9455616L*x0))); return tmp24; } ; auto tmp25 = [&] { if (tmp23.all_zero()) { return at::vec::Vectorized<float>(static_cast<float>(0.0)); } else { return decltype(tmp21())::blendv(at::vec::Vectorized<float>(static_cast<float>(0.0)), tmp21(), tmp23.template cast<float,1>()); } } () ; auto tmp26 = static_cast<float>(0.0); auto tmp27 = at::vec::Vectorized<float>(tmp26); auto tmp28 = 
decltype(tmp25)::blendv(tmp27, tmp25, tmp20.template cast<float,1>()); return tmp28; } ; auto tmp29 = tmp16 ? tmp17() : at::vec::Vectorized<float>(static_cast<float>(0.0)); auto tmp30 = static_cast<float>(0.0); auto tmp31 = at::vec::VecMask<float,1>::from(tmp16); auto tmp32 = at::vec::Vectorized<float>(tmp30); auto tmp33 = decltype(tmp29)::blendv(tmp32, tmp29, tmp31.template cast<float,1>()); auto tmp34 = decltype(tmp13)::blendv(tmp33, tmp13, tmp8.template cast<float,1>()); return tmp34; } ; auto tmp35 = tmp2 ? tmp3() : at::vec::Vectorized<float>(static_cast<float>(0.0)); auto tmp36 = c10::convert<int>(c10::div_floor_integer(x1, 256L)); auto tmp37 = static_cast<int>(3); auto tmp38 = tmp36 < tmp37; auto tmp39 = [&] { auto tmp40 = c10::convert<int>(x3); auto tmp41 = at::vec::Vectorized<int>::arange(tmp40, 1); auto tmp42 = at::vec::Vectorized<int>(tmp1); auto tmp43 = at::vec::VecMask<int,1>(tmp41 >= tmp42); auto tmp45 = at::vec::VecMask<float,1>::from(tmp38); auto tmp46 = tmp43 & tmp45; auto tmp44 = [&] { auto tmp47 = tmp46.template cast<float,1>().template loadu<float,1>(in_ptr0 + static_cast<long>((-256L) + x3 + (513L*(static_cast<long>(x1) % static_cast<long>(256L))) + (262656L*(c10::div_floor_integer(x1, 256L))) + (787968L*x2) + (9455616L*x0))); return tmp47; } ; auto tmp48 = [&] { if (tmp46.all_zero()) { return at::vec::Vectorized<float>(static_cast<float>(0.0)); } else { return decltype(tmp44())::blendv(at::vec::Vectorized<float>(static_cast<float>(0.0)), tmp44(), tmp46.template cast<float,1>()); } } () ; auto tmp49 = static_cast<float>(0.0); auto tmp50 = at::vec::Vectorized<float>(tmp49); auto tmp51 = decltype(tmp48)::blendv(tmp50, tmp48, tmp43.template cast<float,1>()); return tmp51; } ; auto tmp52 = tmp38 ? tmp39() : at::vec::Vectorized<float>(static_cast<float>(0.0)); auto tmp53 = static_cast<float>(0.0); auto tmp54 = at::vec::VecMask<float,1>::from(tmp38); auto tmp55 = at::vec::Vectorized<float>(tmp53); auto tmp56 = decltype(tmp52)::blendv(tmp55, tmp52, tmp54.template cast<float,1>()); auto tmp57 = at::vec::VecMask<float,1>::from(tmp2); auto tmp58 = decltype(tmp35)::blendv(tmp56, tmp35, tmp57.template cast<float,1>()); tmp58.store(out_ptr1 + static_cast<long>(x3 + (513L*x1) + (525312L*x2) + (6303744L*x0))); } #pragma omp simd simdlen(8) for(long x3=static_cast<long>(512L); x3<static_cast<long>(513L); x3+=static_cast<long>(1L)) { auto tmp0 = c10::convert<int64_t>(x1); auto tmp1 = static_cast<int64_t>(256); auto tmp2 = tmp0 < tmp1; auto tmp3 = [&] { auto tmp4 = c10::convert<int64_t>(x3); auto tmp5 = static_cast<int64_t>(257); auto tmp6 = tmp4 < tmp5; auto tmp7 = [&] { auto tmp8 = -std::numeric_limits<float>::infinity(); return tmp8; } ; auto tmp9 = tmp6 ? tmp7() : static_cast<decltype(tmp7())>(0.0); auto tmp10 = c10::convert<int64_t>(c10::div_floor_integer(x1, 256L)); auto tmp11 = static_cast<int64_t>(3); auto tmp12 = tmp10 < tmp11; auto tmp13 = [&] { auto tmp14 = tmp4 >= tmp1; auto tmp15 = [&] { auto tmp16 = in_ptr0[static_cast<long>((-256L) + x3 + (513L*(static_cast<long>(x1) % static_cast<long>(256L))) + (262656L*(c10::div_floor_integer(x1, 256L))) + (787968L*x2) + (9455616L*x0))]; return tmp16; } ; auto tmp17 = tmp14 ? tmp15() : static_cast<decltype(tmp15())>(0.0); auto tmp18 = static_cast<float>(0.0); auto tmp19 = tmp14 ? tmp17 : tmp18; return tmp19; } ; auto tmp20 = tmp12 ? tmp13() : static_cast<decltype(tmp13())>(0.0); auto tmp21 = static_cast<float>(0.0); auto tmp22 = tmp12 ? tmp20 : tmp21; auto tmp23 = tmp6 ? tmp9 : tmp22; return tmp23; } ; auto tmp24 = tmp2 ? 
tmp3() : static_cast<decltype(tmp3())>(0.0); auto tmp25 = c10::convert<int64_t>(c10::div_floor_integer(x1, 256L)); auto tmp26 = static_cast<int64_t>(3); auto tmp27 = tmp25 < tmp26; auto tmp28 = [&] { auto tmp29 = c10::convert<int64_t>(x3); auto tmp30 = tmp29 >= tmp1; auto tmp31 = [&] { auto tmp32 = in_ptr0[static_cast<long>((-256L) + x3 + (513L*(static_cast<long>(x1) % static_cast<long>(256L))) + (262656L*(c10::div_floor_integer(x1, 256L))) + (787968L*x2) + (9455616L*x0))]; return tmp32; } ; auto tmp33 = tmp30 ? tmp31() : static_cast<decltype(tmp31())>(0.0); auto tmp34 = static_cast<float>(0.0); auto tmp35 = tmp30 ? tmp33 : tmp34; return tmp35; } ; auto tmp36 = tmp27 ? tmp28() : static_cast<decltype(tmp28())>(0.0); auto tmp37 = static_cast<float>(0.0); auto tmp38 = tmp27 ? tmp36 : tmp37; auto tmp39 = tmp2 ? tmp24 : tmp38; out_ptr1[static_cast<long>(x3 + (513L*x1) + (525312L*x2) + (6303744L*x0))] = tmp39; } } } } } } } ''') ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/124921 Approved by: https://github.com/jgong5, https://github.com/jansel ghstack dependencies: #124597
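The scoped-cache semantics can be pictured with this small sketch (it mirrors the `ScopedDict` added in the diff below, but is simplified and not the Inductor class itself): lookups fall through to the enclosing cache, while writes stay local so they disappear when the scope ends.
```python
class MiniScopedDict:
    """Reads fall through to the outer dict; writes stay local to the scope."""

    def __init__(self, outer):
        self.outer = outer
        self.local = {}

    def __getitem__(self, key):
        return self.local[key] if key in self.local else self.outer[key]

    def __setitem__(self, key, value):
        self.local[key] = value

    def __contains__(self, key):
        return key in self.local or key in self.outer


outer_cache = {"-inf": "tmp8"}       # expression already materialized outside the lambda
scoped = MiniScopedDict(outer_cache)
assert scoped["-inf"] == "tmp8"      # reused inside the lambda: no duplicate tmp12
scoped["x + 1"] = "tmp9"             # new temporaries stay private to the scope
assert "x + 1" not in outer_cache    # the enclosing cache is untouched afterwards
```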
|
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index dd28449093..8adf431771 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -8822,6 +8822,17 @@ class CommonTemplate:
]
args = [rand_strided(sh, st) for (sh, st) in args]
args.append(256)
+
+ if self.device == "cpu":
+ opt_fn = torch._dynamo.optimize("inductor")(fn)
+ _, code = run_and_get_cpp_code(opt_fn, *args)
+ print(code)
+ FileCheck().check_count(
+ "static_cast<int>(256)",
+ 1,
+ exactly=True,
+ ).run(code)
+
self.common(fn, args)
def test_cumsum_pattern_matcher_issue(self):
diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index 256e16b68d..7126d565cf 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -1296,6 +1296,28 @@ class CodeGen:
self.exit_stack.__exit__(exc_type, exc_val, exc_tb)
+class ScopedDict:
+ def __init__(self, original_dict):
+ self.original_dict = original_dict
+ self.new_items = {}
+
+ def __getitem__(self, key):
+ if key in self.new_items:
+ return self.new_items[key]
+ return self.original_dict[key]
+
+ def __setitem__(self, key, value):
+ self.new_items[key] = value
+
+ def __contains__(self, key):
+ return key in self.new_items or key in self.original_dict
+
+ def get(self, key, default=None):
+ if key in self.new_items:
+ return self.new_items[key]
+ return self.original_dict.get(key, default)
+
+
class Kernel(CodeGen):
newvar_prefix = ""
suffix = ""
@@ -1349,6 +1371,13 @@ class Kernel(CodeGen):
@contextlib.contextmanager
def swap_buffers(self, lb, cb=None, sb=None):
+ def scope_cse(cse):
+ new_cse = cse.clone()
+ new_cse.cache = ScopedDict(cse.cache)
+ new_cse.reduction_cache = ScopedDict(cse.reduction_cache)
+ new_cse.store_cache = ScopedDict(cse.store_cache)
+ return new_cse
+
if cb is None:
cb = lb
loads = self.loads
@@ -1358,7 +1387,7 @@ class Kernel(CodeGen):
self.loads = lb
self.compute = cb
self.stores = sb
- self.cse = cse.clone()
+ self.cse = scope_cse(cse)
try:
yield
finally:
|
2.41.0
|
2a192db0f064ff122fd7b9f6418f6f48ecd03ea
|
Sat, 27 Apr 2024 19:09:44 -0700
|
[PATCH 0774/1000] Fix Conv BN folding with deadcode (#124808)
|
**Summary** Fix issue: https://github.com/pytorch/pytorch/issues/124286 The TorchBenchmark includes a method called `run_n_iterations` which runs model multiple times. https://github.com/pytorch/pytorch/blob/43f4e71daa6dce6014d30da046d28f14cf30d5a4/benchmarks/dynamo/common.py#L2272-L2276 https://github.com/pytorch/pytorch/pull/123399 enables tracing into a `UserDefinedObjectVariable` that's an instance method. It will trace the model into FX graph multiple times within `run_n_iterations`. Then, in the Inductor, `Conv-BN folding` at the module level will fuse the same Conv-BN module multiple times in this case, which leads to accuracy failures. This PR addresses the issue by ensuring that each Conv-BN module is fused only once. **TestPlan** ``` python -u -m pytest -s -v test/inductor/test_inductor_freezing.py -k test_folded_conv_bn_with_module_sharing python -u -m pytest -s -v test/inductor/test_inductor_freezing.py -k test_folded_conv_functional_bn_with_module_sharing python -u -m pytest -s -v test/inductor/test_inductor_freezing.py -k test_conv_bn_with_multi_bn_share_conv python -u -m pytest -s -v test/inductor/test_inductor_freezing.py -k test_conv_functional_bn_with_multi_bn_share_conv ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/124808 Approved by: https://github.com/jansel, https://github.com/jgong5
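For reference, the folding itself relies on the usual algebra for merging eval-mode batch norm into the preceding convolution; below is a minimal plain-tensor sketch of that identity (the helper name `fold_bn_into_conv` is illustrative, not Inductor's `fuse_conv_bn_eval`). It also shows why a conv shared by BN layers with different statistics must not be folded more than once: the fold bakes one specific set of statistics into the weights.
```python
import torch


def fold_bn_into_conv(conv_w, conv_b, bn_mean, bn_var, bn_w, bn_b, eps=1e-5):
    # bn(conv(x)) = bn_w * (conv(x) - bn_mean) / sqrt(bn_var + eps) + bn_b,
    # so each output channel of the conv is rescaled and its bias shifted once.
    scale = bn_w / torch.sqrt(bn_var + eps)          # one factor per output channel
    folded_w = conv_w * scale.reshape(-1, 1, 1, 1)
    folded_b = (conv_b - bn_mean) * scale + bn_b
    return folded_w, folded_b


conv = torch.nn.Conv2d(3, 8, kernel_size=3, bias=True).eval()
bn = torch.nn.BatchNorm2d(8).eval()
x = torch.randn(1, 3, 16, 16)

w, b = fold_bn_into_conv(
    conv.weight, conv.bias, bn.running_mean, bn.running_var, bn.weight, bn.bias, bn.eps
)
fused = torch.nn.Conv2d(3, 8, kernel_size=3, bias=True).eval()
with torch.no_grad():
    fused.weight.copy_(w)
    fused.bias.copy_(b)
    torch.testing.assert_close(fused(x), bn(conv(x)))
```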
|
diff --git a/test/inductor/test_inductor_freezing.py b/test/inductor/test_inductor_freezing.py
index 44b76383fc..7d1688b366 100644
--- a/test/inductor/test_inductor_freezing.py
+++ b/test/inductor/test_inductor_freezing.py
@@ -90,6 +90,102 @@ class ConvBN(torch.nn.Module):
return self.bn(self.conv(x))
+class ConvFunctionalBN(torch.nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ bias=False,
+ kernel_size=3,
+ stride=2,
+ running_mean=None,
+ running_var=None,
+ weight=None,
+ bn_bias=None,
+ ):
+ super().__init__()
+ self.conv = torch.nn.Conv2d(
+ in_channels, out_channels, bias=bias, kernel_size=kernel_size, stride=stride
+ )
+ self.running_mean = running_mean
+ self.running_var = running_var
+ self.weight = weight
+ self.bias = bn_bias
+
+ def forward(self, x):
+ return torch.nn.functional.batch_norm(
+ self.conv(x),
+ self.running_mean,
+ self.running_var,
+ self.weight,
+ self.bias,
+ False,
+ 0.1,
+ 1e-5,
+ )
+
+
+class ConvMultiBN(torch.nn.Module):
+ def __init__(self, in_channels, out_channels, bias=False, **kwargs):
+ super().__init__()
+ self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
+ self.bn = torch.nn.BatchNorm2d(out_channels, eps=0.001, dtype=torch.float)
+ self.bn2 = torch.nn.BatchNorm2d(out_channels, eps=0.1, dtype=torch.float)
+
+ def forward(self, x):
+ tmp = self.bn(self.conv(x))
+ tmp2 = self.bn2(self.conv(x))
+ return tmp + tmp2
+
+
+class ConvMultiFunctionalBN(torch.nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ bias=False,
+ kernel_size=3,
+ stride=2,
+ running_mean=None,
+ running_var=None,
+ weight=None,
+ bn_bias=None,
+ running_mean2=None,
+ ):
+ super().__init__()
+ self.conv = torch.nn.Conv2d(
+ in_channels, out_channels, bias=bias, kernel_size=kernel_size, stride=stride
+ )
+ self.running_mean = running_mean
+ self.running_var = running_var
+ self.weight = weight
+ self.bias = bn_bias
+ self.running_mean2 = running_mean2
+
+ def forward(self, x):
+ tmp = torch.nn.functional.batch_norm(
+ self.conv(x),
+ self.running_mean,
+ self.running_var,
+ self.weight,
+ self.bias,
+ False,
+ 0.1,
+ 1e-5,
+ )
+ tmp2 = torch.nn.functional.batch_norm(
+ self.conv(x),
+ self.running_mean2,
+ self.running_var,
+ self.weight,
+ self.bias,
+ False,
+ 0.1,
+ 1e-5,
+ )
+ return tmp + tmp2
+
+
class OptimizeForInferenceTemplate(TestCase):
def test_mutation(self):
class Mod(torch.nn.Module):
@@ -360,11 +456,6 @@ class OptimizeForInferenceTemplate(TestCase):
out_eager = mod(x)
out_optimized_for_infernece, code = run_and_get_code(foo, mod, x)
- self.assertNotIn(
- "aten._native_batch_norm_legit_no_training(",
- code[0],
- )
-
# we unfuse the conv bias, but it should only have one constant in the kernel
if self.device == "cuda":
FileCheck().check_not(".run(").check("conv").check(".run(").check_same(
@@ -375,6 +466,129 @@ class OptimizeForInferenceTemplate(TestCase):
out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2
)
+ @torch._inductor.config.patch(layout_optimization=False)
+ def test_folded_conv_bn_with_module_sharing(self):
+ mod = (
+ ConvBN(32, 32, bias=True, kernel_size=3, stride=2)
+ .to(self.device)
+ .to(torch.float32)
+ )
+
+ # Update the default parameters of BN module
+ for _ in range(10):
+ mod(torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32))
+
+ mod.eval()
+ x = torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32)
+
+ def foo(mod, x):
+ mod(x)
+ return mod(x)
+
+ with torch.no_grad():
+ out_eager = foo(mod, x)
+ out_optimized_for_infernece, _ = run_and_get_code(
+ torch.compile(foo), mod, x
+ )
+
+ self.assertEqual(out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2)
+
+ @torch._inductor.config.patch(layout_optimization=False)
+ def test_folded_conv_functional_bn_with_module_sharing(self):
+ x = torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32)
+ running_mean = torch.mean(x, dim=(0, 2, 3)).to(self.device)
+ running_var = torch.var(x, dim=(0, 2, 3)).to(self.device)
+
+ mod = (
+ ConvFunctionalBN(
+ 32,
+ 32,
+ bias=True,
+ kernel_size=3,
+ stride=2,
+ running_mean=running_mean,
+ running_var=running_var,
+ weight=torch.ones(32).to(self.device),
+ bn_bias=torch.zeros(32).to(self.device),
+ )
+ .eval()
+ .to(self.device)
+ .to(torch.float32)
+ )
+
+ def foo(mod, x):
+ mod(x)
+ return mod(x)
+
+ with torch.no_grad():
+ out_eager = foo(mod, x)
+ out_optimized_for_infernece, _ = run_and_get_code(
+ torch.compile(foo), mod, x
+ )
+
+ self.assertEqual(out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2)
+
+ @torch._inductor.config.patch(layout_optimization=False)
+ def test_conv_bn_with_multi_bn_share_conv(self):
+ mod = (
+ ConvMultiBN(32, 32, bias=True, kernel_size=3, stride=2)
+ .to(self.device)
+ .to(torch.float32)
+ )
+
+ # Update the default parameters of BN module
+ for _ in range(10):
+ mod(torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32))
+
+ mod.eval()
+ x = torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32)
+
+ def foo(mod, x):
+ return mod(x)
+
+ with torch.no_grad():
+ out_eager = foo(mod, x)
+ out_optimized_for_infernece, _ = run_and_get_code(
+ torch.compile(foo), mod, x
+ )
+
+ self.assertEqual(out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2)
+
+ @torch._inductor.config.patch(layout_optimization=False)
+ def test_conv_functional_bn_with_multi_bn_share_conv(self):
+ x = torch.rand(3, 32, 32, 32).to(self.device).to(torch.float32)
+ running_mean = torch.mean(x, dim=(0, 2, 3)).to(self.device)
+ running_var = torch.var(x, dim=(0, 2, 3)).to(self.device)
+ running_mean2 = torch.mean(x, dim=(0, 2, 3)).to(self.device)
+
+ mod = (
+ ConvMultiFunctionalBN(
+ 32,
+ 32,
+ bias=True,
+ kernel_size=3,
+ stride=2,
+ running_mean=running_mean,
+ running_var=running_var,
+ weight=torch.ones(32).to(self.device),
+ bn_bias=torch.zeros(32).to(self.device),
+ running_mean2=running_mean2,
+ )
+ .eval()
+ .to(self.device)
+ .to(torch.float32)
+ )
+
+ def foo(mod, x):
+ return mod(x)
+
+ with torch.no_grad():
+ out_eager = foo(mod, x)
+ out_optimized_for_infernece, _ = run_and_get_code(
+ torch.compile(foo), mod, x
+ )
+ self.assertEqual(out_optimized_for_infernece, out_eager, atol=1e-2, rtol=1e-2)
+
@torch._inductor.config.patch(layout_optimization=False)
def test_dont_change_dtype_folding(self):
dtype = torch.float16 if self.device == "cuda" else torch.bfloat16
diff --git a/torch/_inductor/fx_passes/pre_grad.py b/torch/_inductor/fx_passes/pre_grad.py
index 091de51c2d..dadede9f0f 100644
--- a/torch/_inductor/fx_passes/pre_grad.py
+++ b/torch/_inductor/fx_passes/pre_grad.py
@@ -1,7 +1,7 @@
import copy
import itertools
import logging
-from typing import List, Optional
+from typing import Dict, List, Optional
import torch
import torch.nn as nn
@@ -313,7 +313,43 @@ def fuse_conv_bn(gm: torch.fx.GraphModule, inplace=False) -> torch.fx.GraphModul
(torch.nn.Conv3d, F.batch_norm),
]
modules = dict(gm.named_modules())
+
+ class ConvBNFusion:
+ def __init__(
+ self,
+ bn_node,
+ conv_module,
+ bn_module=None, # For BN Module
+ bn_running_mean=None, # For Functional BN
+ bn_running_var=None,
+ bn_eps=None,
+ bn_weight=None,
+ bn_bias=None,
+ ):
+ self.bn_nodes = [
+ bn_node,
+ ]
+ self.conv_module = conv_module
+ self.bn_module = bn_module
+ self.bn_running_mean = bn_running_mean
+ self.bn_running_var = bn_running_var
+ self.bn_eps = bn_eps
+ self.bn_weight = bn_weight
+ self.bn_bias = bn_bias
+ self.fusion_enabled = True
+
+ def add_bn_node(self, bn_node):
+ self.bn_nodes.append(bn_node)
+
+ def disable_fusion(self):
+ self.fusion_enabled = False
+
+ def is_fusion_enabled(self):
+ return self.fusion_enabled
+
+ conv_bn_to_fuse: Dict[int, ConvBNFusion] = {}
for pattern in modules_patterns:
+ conv_bn_to_fuse.clear()
for node in gm.graph.nodes:
if matches_module_pattern(pattern, node, modules):
if len(node.args[0].users) > 1: # Output of conv is used by other nodes
@@ -325,12 +361,34 @@ def fuse_conv_bn(gm: torch.fx.GraphModule, inplace=False) -> torch.fx.GraphModul
continue
if not bn.track_running_stats:
continue
+
+ # Do hash based on the module name of conv
+ hash_id = hash(node.args[0].target)
+ if hash_id not in conv_bn_to_fuse:
+ conv_bn_to_fuse[hash_id] = ConvBNFusion(node, conv, bn)
+ else:
+ if bn == conv_bn_to_fuse[hash_id].bn_module:
+ # Do fusion if same bn module
+ conv_bn_to_fuse[hash_id].add_bn_node(node)
+ else:
+ # Disable the conv bn folding if conv shared by different bn
+ conv_bn_to_fuse[hash_id].disable_fusion()
+
+ for conv_bn_fusion in conv_bn_to_fuse.values():
+ if conv_bn_fusion.is_fusion_enabled():
+ bn_nodes = conv_bn_fusion.bn_nodes
+ conv = conv_bn_fusion.conv_module
+ bn = conv_bn_fusion.bn_module
+
fused_conv = fuse_conv_bn_eval(conv, bn)
- replace_node_module(node.args[0], modules, fused_conv)
- node.replace_all_uses_with(node.args[0])
- gm.graph.erase_node(node)
+ for bn_node in bn_nodes:
+ replace_node_module(bn_node.args[0], modules, fused_conv)
+ bn_node.replace_all_uses_with(bn_node.args[0])
+ gm.graph.erase_node(bn_node)
+
gm.graph.lint()
for pattern in module_function_patterns:
+ conv_bn_to_fuse.clear()
for node in gm.graph.nodes:
if matches_module_function_pattern(pattern, node, modules):
# TODO: support kwargs.
@@ -343,8 +401,17 @@ def fuse_conv_bn(gm: torch.fx.GraphModule, inplace=False) -> torch.fx.GraphModul
continue
if type(bn_eps) is not float:
continue
+
+ def _used_by_same_conv_module(users):
+ conv_module_name = users[0].args[0].target
+ return all(
+ conv_module_name == user.args[0].target for user in users
+ )
+
bn_args_is_constant = all(
- n.op == "get_attr" and len(n.users) == 1 for n in node.args[1:5]
+ n.op == "get_attr"
+ and (len(n.users) == 1 or _used_by_same_conv_module(list(n.users)))
+ for n in node.args[1:5]
)
if not bn_args_is_constant:
continue
@@ -354,6 +421,48 @@ def fuse_conv_bn(gm: torch.fx.GraphModule, inplace=False) -> torch.fx.GraphModul
bn_bias = fetch_attr(node.args[4].target, gm)
if bn_running_mean is None or bn_running_var is None:
continue
+
+ # Do hash based on the module name of conv
+ hash_id = hash(node.args[0].target)
+ if hash_id not in conv_bn_to_fuse:
+ conv_bn_to_fuse[hash_id] = ConvBNFusion(
+ node,
+ conv,
+ bn_running_mean=bn_running_mean,
+ bn_running_var=bn_running_var,
+ bn_eps=bn_eps,
+ bn_weight=bn_weight,
+ bn_bias=bn_bias,
+ )
+ else:
+ if (
+ hash(bn_running_mean)
+ == hash(conv_bn_to_fuse[hash_id].bn_running_mean)
+ and hash(bn_running_var)
+ == hash(conv_bn_to_fuse[hash_id].bn_running_var)
+ and torch.allclose(
+ torch.tensor(bn_eps),
+ torch.tensor(conv_bn_to_fuse[hash_id].bn_eps),
+ )
+ and hash(bn_weight) == hash(conv_bn_to_fuse[hash_id].bn_weight)
+ and hash(bn_bias) == hash(conv_bn_to_fuse[hash_id].bn_bias)
+ ):
+ # Do fusion if same functional bn
+ conv_bn_to_fuse[hash_id].add_bn_node(node)
+ else:
+ # Disable the conv bn folding if conv shared by different bn
+ conv_bn_to_fuse[hash_id].disable_fusion()
+
+ for conv_bn_fusion in conv_bn_to_fuse.values():
+ if conv_bn_fusion.is_fusion_enabled():
+ bn_nodes = conv_bn_fusion.bn_nodes
+ conv = conv_bn_fusion.conv_module
+ bn_running_mean = conv_bn_fusion.bn_running_mean
+ bn_running_var = conv_bn_fusion.bn_running_var
+ bn_eps = conv_bn_fusion.bn_eps
+ bn_weight = conv_bn_fusion.bn_weight
+ bn_bias = conv_bn_fusion.bn_bias
+
fused_conv = copy.deepcopy(conv)
fused_conv.weight, fused_conv.bias = fuse_conv_bn_weights(
fused_conv.weight,
@@ -364,9 +473,10 @@ def fuse_conv_bn(gm: torch.fx.GraphModule, inplace=False) -> torch.fx.GraphModul
bn_weight,
bn_bias,
)
- replace_node_module(node.args[0], modules, fused_conv)
- node.replace_all_uses_with(node.args[0])
- gm.graph.erase_node(node)
+ for bn_node in bn_nodes:
+ replace_node_module(bn_node.args[0], modules, fused_conv)
+ bn_node.replace_all_uses_with(bn_node.args[0])
+ gm.graph.erase_node(bn_node)
gm.graph.lint()
gm.recompile()
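
For reference, a minimal sketch (not part of the patch) of the batch-norm folding arithmetic that `fuse_conv_bn_eval`/`fuse_conv_bn_weights` apply once the pass has confirmed a conv is consumed only by the same BN; the helper name and toy shapes below are illustrative, assuming eval-mode (running) statistics.

```python
import torch
import torch.nn.functional as F

def fold_bn_into_conv(conv_w, conv_b, bn_mean, bn_var, bn_eps, bn_gamma, bn_beta):
    # Per output channel: scale = gamma / sqrt(running_var + eps)
    scale = bn_gamma / torch.sqrt(bn_var + bn_eps)
    # W' = W * scale, broadcast over the output-channel dimension
    fused_w = conv_w * scale.reshape(-1, 1, 1, 1)
    # b' = (b - running_mean) * scale + beta
    fused_b = (conv_b - bn_mean) * scale + bn_beta
    return fused_w, fused_b

# Tiny eval-mode check: conv followed by batch_norm matches the fused conv.
x = torch.randn(1, 3, 8, 8)
conv = torch.nn.Conv2d(3, 4, kernel_size=3)
mean, var = torch.zeros(4), torch.full((4,), 0.5)
gamma, beta, eps = torch.rand(4), torch.rand(4), 1e-5
ref = F.batch_norm(conv(x), mean, var, gamma, beta, False, 0.1, eps)
w, b = fold_bn_into_conv(conv.weight, conv.bias, mean, var, eps, gamma, beta)
torch.testing.assert_close(F.conv2d(x, w, b), ref, atol=1e-4, rtol=1e-4)
```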
|
2.41.0
|
4b328ee4592605f490d422f57ad4747a92ac339
|
Sun, 28 Apr 2024 07:03:12 +0000
|
[PATCH 0775/1000] add likely/unlikely macros for compilers without C++20 support. (#124997)
|
# Issue: Intel's validation team found that some older gcc versions without C++20 support hit the error below: ```cmd [2024-04-13T08:03:25.142Z] g++ /tmp/torchinductor_root/vd/cvdytwwwlhi63ofh3pwzqfpjga4w4xe7bjfdoavpblbo5khzf3b2.cpp -shared -fPIC -Wall -std=c++17 -Wno-unused-variable -Wno-unknown-pragmas -D_GLIBCXX_USE_CXX11_ABI=0 -I/root/anaconda3/envs/pytorch/lib/python3.8/site-packages/torch/include -I/root/anaconda3/envs/pytorch/lib/python3.8/site-packages/torch/include/torch/csrc/api/include -I/root/anaconda3/envs/pytorch/lib/python3.8/site-packages/torch/include/TH -I/root/anaconda3/envs/pytorch/lib/python3.8/site-packages/torch/include/THC -I/root/anaconda3/envs/pytorch/include/python3.8 -L/root/anaconda3/envs/pytorch/lib/python3.8/site-packages/torch/lib -L/root/anaconda3/envs/pytorch/lib -L/root/anaconda3/envs/pytorch/lib/python3.8/site-packages/torch/lib -ltorch -ltorch_cpu -lgomp -ltorch_python -lc10 -mavx2 -mfma -DCPU_CAPABILITY_AVX2 -O3 -DNDEBUG -ffast-math -fno-finite-math-only -fno-unsafe-math-optimizations -ffp-contract=off -march=native -fopenmp -D C10_USING_CUSTOM_GENERATED_MACROS -o /tmp/torchinductor_root/vd/cvdytwwwlhi63ofh3pwzqfpjga4w4xe7bjfdoavpblbo5khzf3b2.so [2024-04-13T08:03:25.142Z] [2024-04-13T08:03:25.142Z] Output: [2024-04-13T08:03:25.142Z] /tmp/torchinductor_root/vd/cvdytwwwlhi63ofh3pwzqfpjga4w4xe7bjfdoavpblbo5khzf3b2.cpp: In function ‘T parse_arg(PyObject*, size_t) [with T = long int; PyObject = _object; size_t = long unsigned int]’: [2024-04-13T08:03:25.142Z] /tmp/torchinductor_root/vd/cvdytwwwlhi63ofh3pwzqfpjga4w4xe7bjfdoavpblbo5khzf3b2.cpp:117:10: error: expected identifier before ‘[’ token [2024-04-13T08:03:25.142Z] [[unlikely]] throw std::runtime_error("expected int arg"); [2024-04-13T08:03:25.142Z] ^ ``` The reason is that `[[unlikely]]` is a C++20 attribute; ref: https://en.cppreference.com/w/cpp/language/attributes/likely # Solution: Add macros so that GNU compilers without C++20 attribute support fall back to `__builtin_expect`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124997 Approved by: https://github.com/jgong5, https://github.com/jansel
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 2eb677b4d6..1d148f9d99 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -2210,6 +2210,15 @@ class CppPythonBindingsCodeCache(CppCodeCache):
#include <sstream>
#include <cstdlib>
+ #ifndef _MSC_VER
+ #if __cplusplus < 202002L
+ // C++20 earlier code
+ // https://en.cppreference.com/w/cpp/language/attributes/likely
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+ #endif
+ #endif
+
// This is defined in guards.cpp so we don't need to import PyTorch headers that are slooow.
// We manually link it below to workaround issues with fbcode build.
static void* (*_torchinductor_pyobject_tensor_data_ptr)(PyObject* obj);
|
2.41.0
|
9ca2b3429961d5c90a3669b8e1aaacbf45be964
|
Sun, 28 Apr 2024 15:09:21 +0000
|
[PATCH 0776/1000] [BE]: Apply RUF025 perf fixups (#125104)
|
Uses `dict.fromkeys()` for more efficient dict construction. Automatically generated by RUF025 (prev). Pull Request resolved: https://github.com/pytorch/pytorch/pull/125104 Approved by: https://github.com/ezyang
|
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index f709aab575..36eb5e223d 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -1138,7 +1138,7 @@ class WrapperCodeGen(CodeGen):
# https://github.com/openai/triton/blob/231efe9ed2d200be0f69a07c298e4342b08efe3d/python/triton/runtime/jit.py#L384
"constants": {
**constants,
- **{idx: 1 for idx in equal_to_1_arg_idx},
+ **dict.fromkeys(equal_to_1_arg_idx, 1),
},
"configs": [
config_of(
diff --git a/torch/_inductor/fx_passes/group_batch_fusion.py b/torch/_inductor/fx_passes/group_batch_fusion.py
index 9ad1e39ced..880ba9df98 100644
--- a/torch/_inductor/fx_passes/group_batch_fusion.py
+++ b/torch/_inductor/fx_passes/group_batch_fusion.py
@@ -858,7 +858,7 @@ class BatchMulPostGradFusion(BatchPointwiseOpsPostGradFusion):
class _OrderedSet:
def __init__(self, param=None):
if param:
- self.rep = OrderedDict({k: None for k in param})
+ self.rep = OrderedDict(dict.fromkeys(param))
else:
self.rep = OrderedDict()
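
For reference, a small sketch (not part of the patch) of why the RUF025 rewrite is behavior-preserving: `dict.fromkeys` builds the same mapping as a comprehension with a constant value and keeps first-occurrence insertion order, which is what both the `constants` dict above and `_OrderedSet` rely on.

```python
from collections import OrderedDict

keys = ["c", "a", "b", "a"]  # duplicates collapse; first-occurrence order is kept

assert {k: None for k in keys} == dict.fromkeys(keys)  # default value is None
assert list(dict.fromkeys(keys)) == ["c", "a", "b"]

# With an explicit value, as in the wrapper codegen change:
assert dict.fromkeys((3, 5), 1) == {3: 1, 5: 1}

# Wrapped in OrderedDict, as _OrderedSet does:
assert list(OrderedDict(dict.fromkeys(keys))) == ["c", "a", "b"]
```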
|
2.41.0
|
f139b04b39c12413059db35925e29fd49c87483
|
Sat, 27 Apr 2024 11:38:32 -0700
|
[PATCH 0777/1000] [dynamo] Fix test (#125107)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125107 Approved by: https://github.com/jansel ghstack dependencies: #125097
|
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index 5d0e2c2ae6..1c6b2fada9 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -10158,21 +10158,36 @@ fn
def test_outside_linear_module_free(self):
# Compared to test_linear_module_free, the linear
# layer is not the code object that is directly compiled.
- def model_inp_ctr():
- fc = torch.nn.Linear(100, 100)
- class Mod(torch.nn.Module):
- def __init__(self):
- super().__init__()
- self.fc_ref = fc
+ # This test does not use _test_compile_model_free because of difficulty
+ # in handling variable fc.
- def forward(self, x):
- return fc(x[0])
+ fc = torch.nn.Linear(100, 100)
+
+ class Mod(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.fc_ref = fc
+
+ def forward(self, x):
+ return fc(x[0])
- # return fc to keep it alive in _test_compile_model_free
- return Mod(), (torch.randn(100, 100), fc)
+ cleared = False
+
+ def finalize():
+ nonlocal cleared
+ cleared = True
- self._test_compile_model_free(model_inp_ctr, lambda mod: mod.fc_ref)
+ def run():
+ mod = Mod()
+ inp = torch.randn(100, 100)
+ weakref.finalize(mod.fc_ref, finalize)
+ torch.compile(mod, backend="eager")(inp)
+
+ run()
+ del fc # This should delete all the references
+ gc.collect()
+ self.assertTrue(cleared)
@unittest.skipIf(sys.version_info >= (3, 12), "leaks in 3.12+")
def test_parameter_free(self):
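
For reference, a standalone sketch (not part of the patch) of the `weakref.finalize` pattern the rewritten test uses to assert that the captured `fc` module is really freed once the last reference is dropped; the toy class below is illustrative.

```python
import gc
import weakref

class Payload:
    """Stand-in for the nn.Linear captured by the compiled module."""

collected = False

def mark_collected():
    global collected
    collected = True

def build_and_register():
    obj = Payload()
    weakref.finalize(obj, mark_collected)  # fires once obj is garbage collected
    return obj

ref = build_and_register()
assert not collected  # still referenced, so the finalizer has not run
del ref               # drop the last strong reference
gc.collect()          # make collection deterministic for this sketch
assert collected      # the finalizer ran, so the object really was freed
```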
|
2.41.0
|
761b49551f8bedd4cc01fbf90962d3fb1ec1a7c
|
Sun, 28 Apr 2024 16:27:27 +0000
|
[PATCH 0779/1000] Ensure autocast device_type is a string + Unit test (#125014)
|
Reviving #124873 (already approved) to resolve CLA issues. Fixes #124738. (Marked as draft until I get local unit tests to run.) Edit: tests passing. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125014 Approved by: https://github.com/mikaylagawarecki, https://github.com/soulitzer
|
diff --git a/test/test_autocast.py b/test/test_autocast.py
index 5b1e38eff0..b87942c858 100644
--- a/test/test_autocast.py
+++ b/test/test_autocast.py
@@ -343,6 +343,13 @@ class TestTorchAutocast(TestCase):
with self.assertRaisesRegex(RuntimeError, msg):
assert torch.amp.is_autocast_available(device_type=dev)
+ def test_non_string_device(self):
+ """Test that `autocast` throws a ValueError when provided a `torch.device` object for `device_type` instead of a string"""
+ dev = torch.device("cpu")
+ msg = f"Expected `device_type` of type `str`, got: `{type(dev)}`"
+ with self.assertRaisesRegex(expected_exception=ValueError, expected_regex=msg):
+ torch.autocast(device_type=dev)
+
if __name__ == "__main__":
run_tests()
diff --git a/torch/amp/autocast_mode.py b/torch/amp/autocast_mode.py
index 523d8dc34d..41ceaee839 100644
--- a/torch/amp/autocast_mode.py
+++ b/torch/amp/autocast_mode.py
@@ -203,6 +203,10 @@ class autocast:
enabled: bool = True,
cache_enabled: Optional[bool] = None,
):
+ if not isinstance(device_type, str):
+ raise ValueError(
+ f"Expected `device_type` of type `str`, got: `{type(device_type)}`"
+ )
if torch._jit_internal.is_scripting():
self._enabled = enabled
self.device = device_type
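
For reference, a small usage sketch (not part of the patch) of the behavior this check enforces: `device_type` must be a plain string, and passing a `torch.device` object now raises `ValueError`; one straightforward workaround is to pass the device's `.type` string instead.

```python
import torch

# Correct: device_type is a string
with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
    x = torch.randn(4, 4)
    y = x @ x  # runs under autocast

# After this patch, a torch.device object is rejected up front
dev = torch.device("cpu")
try:
    torch.autocast(device_type=dev)  # type: ignore[arg-type]
except ValueError as err:
    print(err)  # Expected `device_type` of type `str`, got: `<class 'torch.device'>`

# A torch.device in hand can be converted via its .type attribute
with torch.autocast(device_type=dev.type, dtype=torch.bfloat16):
    pass
```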
|
2.41.0
|
18ab48e8538eb84e89b7a8da523464f188be444
|
Sun, 28 Apr 2024 17:02:32 +0000
|
[PATCH 0781/1000] Enable UFMT on test/test_functionalization.py (#123926)
|
Part of #123062 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123926 Approved by: https://github.com/ezyang, https://github.com/statelesshz
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index 7ea6532ccf..c73334a5a6 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1065,7 +1065,6 @@ exclude_patterns = [
'test/test_function_schema.py',
'test/test_functional_autograd_benchmark.py',
'test/test_functional_optim.py',
- 'test/test_functionalization.py',
'test/test_functionalization_of_rng_ops.py',
'test/test_futures.py',
'test/test_fx.py',
diff --git a/test/test_functionalization.py b/test/test_functionalization.py
index ac2443823f..978b58b492 100644
--- a/test/test_functionalization.py
+++ b/test/test_functionalization.py
@@ -1,31 +1,46 @@
# Owner(s): ["module: codegen"]
-import torch
+import unittest
from contextlib import nullcontext
-from torch.testing._internal.common_utils import (
- TestCase, run_tests, skipIfTorchDynamo, TEST_WITH_TORCHDYNAMO, IS_WINDOWS,
- xfail_inherited_tests
+
+import torch
+from torch._dispatch.python import (
+ enable_crossref_functionalize,
+ enable_python_dispatcher,
+)
+from torch._subclasses.functional_tensor import (
+ dispatch_functionalize,
+ FunctionalTensor,
+ FunctionalTensorMode,
)
-from torch._subclasses.functional_tensor import FunctionalTensor, FunctionalTensorMode, dispatch_functionalize
-from torch.testing._internal.logging_tensor import LoggingTensor, capture_logs
-from torch.utils._pytree import tree_map_only
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.passes.reinplace import reinplace
-from torch._dispatch.python import enable_crossref_functionalize, enable_python_dispatcher
from torch.multiprocessing.reductions import StorageWeakRef
+from torch.testing._internal.common_utils import (
+ IS_WINDOWS,
+ run_tests,
+ skipIfTorchDynamo,
+ TEST_WITH_TORCHDYNAMO,
+ TestCase,
+ xfail_inherited_tests,
+)
+from torch.testing._internal.logging_tensor import capture_logs, LoggingTensor
from torch.utils import _pytree as pytree
+from torch.utils._pytree import tree_map_only
-import unittest
def are_aliased(x, y):
x_storage = StorageWeakRef(x.storage())
y_storage = StorageWeakRef(y.storage())
return x_storage == y_storage
+
# We can unify testing and use functionalize() here instead
# if/when functorch moves into core.
# This is basically a crappy version of `functionalize()`.
-def _functionalize(f, *, reapply_views: bool, crossref: bool, skip_input_mutations: bool = False):
+def _functionalize(
+ f, *, reapply_views: bool, crossref: bool, skip_input_mutations: bool = False
+):
def to_fun(t: torch.Tensor):
func_t = torch._to_functional_tensor(t)
func_t.requires_grad = t.requires_grad
@@ -54,34 +69,46 @@ def _functionalize(f, *, reapply_views: bool, crossref: bool, skip_input_mutatio
if inpt_new.shape == inpt.shape:
inpt.copy_(inpt_new)
tree_map_only(torch.Tensor, torch._sync, out)
- out_unwrapped = tree_map_only(torch.Tensor, torch._from_functional_tensor, out)
+ out_unwrapped = tree_map_only(
+ torch.Tensor, torch._from_functional_tensor, out
+ )
return out_unwrapped
return wrapped
-@unittest.skipIf(TEST_WITH_TORCHDYNAMO, "https://github.com/pytorch/pytorch/issues/81457")
-class TestFunctionalization(TestCase):
+@unittest.skipIf(
+ TEST_WITH_TORCHDYNAMO, "https://github.com/pytorch/pytorch/issues/81457"
+)
+class TestFunctionalization(TestCase):
crossref = False
def get_logs(self, func, *inpts, reapply_views=False, run_reinplace=False):
inpts_clone = tree_map_only(torch.Tensor, torch.clone, inpts)
- traced_f = make_fx(_functionalize(func, reapply_views=reapply_views, crossref=self.crossref))(*inpts)
+ traced_f = make_fx(
+ _functionalize(func, reapply_views=reapply_views, crossref=self.crossref)
+ )(*inpts)
if run_reinplace:
traced_f = reinplace(traced_f, *inpts_clone)
return traced_f.code
- def assert_functionalization(self, func, *inpts, reapply_views=False, mutated_input_metadata=False):
+ def assert_functionalization(
+ self, func, *inpts, reapply_views=False, mutated_input_metadata=False
+ ):
clones1 = tree_map_only(torch.Tensor, torch.clone, inpts)
clones2 = tree_map_only(torch.Tensor, torch.clone, inpts)
clones3 = tree_map_only(torch.Tensor, torch.clone, inpts)
# Compare outputs (and mutated inputs), with and without functionalization.
out_ref = func(*inpts)
- out_functional = _functionalize(func, reapply_views=reapply_views, crossref=self.crossref)(*clones1)
+ out_functional = _functionalize(
+ func, reapply_views=reapply_views, crossref=self.crossref
+ )(*clones1)
# The reinplacing pass is only valid to run with reapply_views=True.
- functional_func = make_fx(_functionalize(func, reapply_views=True, crossref=self.crossref))(*clones2)
+ functional_func = make_fx(
+ _functionalize(func, reapply_views=True, crossref=self.crossref)
+ )(*clones2)
reinplace_func = reinplace(functional_func, *clones2)
# NOTE: for now, need to pass in fresh inputs here, because make_fx
@@ -95,22 +122,38 @@ class TestFunctionalization(TestCase):
flat_inpts = pytree.tree_leaves(inpts)
flat_clones1 = pytree.tree_leaves(clones1)
flat_clones3 = pytree.tree_leaves(clones3)
- for inpt, input_clone, input_clone3 in zip(flat_inpts, flat_clones1, flat_clones3):
- self.assertEqual(inpt, input_clone) # input mutations should still occur
+ for inpt, input_clone, input_clone3 in zip(
+ flat_inpts, flat_clones1, flat_clones3
+ ):
+ self.assertEqual(
+ inpt, input_clone
+ ) # input mutations should still occur
self.assertEqual(inpt, input_clone3)
# Handle tests with multi-tensor outputs
if isinstance(out_ref, tuple):
- out_refs, out_functionals, out_reinplaces = list(out_ref), list(out_functional), list(out_reinplace)
+ out_refs, out_functionals, out_reinplaces = (
+ list(out_ref),
+ list(out_functional),
+ list(out_reinplace),
+ )
else:
- out_refs, out_functionals, out_reinplaces = [out_ref], [out_functional], [out_reinplace]
+ out_refs, out_functionals, out_reinplaces = (
+ [out_ref],
+ [out_functional],
+ [out_reinplace],
+ )
- for out_ref_, out_functional_, out_reinplace_ in zip(out_refs, out_functionals, out_reinplaces):
+ for out_ref_, out_functional_, out_reinplace_ in zip(
+ out_refs, out_functionals, out_reinplaces
+ ):
self.assertEqual(out_ref_, out_functional_)
self.assertEqual(out_ref_, out_reinplace_)
def test_save_for_backwards_segfault(self):
- inp = torch._to_functional_tensor(LoggingTensor(torch.randn(2, 2))).requires_grad_(True)
+ inp = torch._to_functional_tensor(
+ LoggingTensor(torch.randn(2, 2))
+ ).requires_grad_(True)
inp.exp()
def test_multiple_views_of_same_base(self):
@@ -123,6 +166,7 @@ class TestFunctionalization(TestCase):
# z should have been updated too.
z2 = z + 1
return z2
+
self.assert_functionalization(f, torch.ones(4))
def test_freeze(self):
@@ -143,7 +187,9 @@ class TestFunctionalization(TestCase):
y.copy_(x)
return y
- r = _functionalize(f, reapply_views=True, crossref=self.crossref)(torch.ones(2, 2))
+ r = _functionalize(f, reapply_views=True, crossref=self.crossref)(
+ torch.ones(2, 2)
+ )
self.assertEqual(r.stride(), (5, 1))
def test_set_(self):
@@ -155,7 +201,7 @@ class TestFunctionalization(TestCase):
# We should probaby get the crossref test to work,
# but fixing it for Storage() objects is annoying.
r = _functionalize(f, reapply_views=True, crossref=False)(torch.ones(2))
- self.assertEqual(str(r.device), 'cpu')
+ self.assertEqual(str(r.device), "cpu")
def test_advanced_indexing(self):
def f():
@@ -178,8 +224,11 @@ class TestFunctionalization(TestCase):
def g(x):
loss = f(x).sum()
- from torch._functorch.aot_autograd import setup_stacktrace_preservation_hooks
import torch.fx.traceback as fx_traceback
+ from torch._functorch.aot_autograd import (
+ setup_stacktrace_preservation_hooks,
+ )
+
setup_stacktrace_preservation_hooks([loss.grad_fn])
with fx_traceback.preserve_node_meta():
loss.backward()
@@ -187,7 +236,9 @@ class TestFunctionalization(TestCase):
with torch.autograd.detect_anomaly(check_nan=False):
logs = self.get_logs(g, torch.ones(16, 64, 128, 128, requires_grad=True))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -217,7 +268,8 @@ def forward(self, arg0_1):
view_copy_11 = torch.ops.aten.view_copy.default(view_copy_8, [16, 64, 128, 128]); view_copy_8 = None
detach_copy_1 = torch.ops.aten.detach_copy.default(view_copy_11); view_copy_11 = None
return detach_copy_1
- """) # noqa: B950
+ """,
+ ) # noqa: B950
def test_simple(self):
def f(x):
@@ -227,9 +279,12 @@ def forward(self, arg0_1):
y.add_(tmp)
z = x * x
return y
+
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -242,10 +297,15 @@ def forward(self, arg0_1):
mul = torch.ops.aten.mul.Tensor(view_copy_1, view_copy_1)
copy_ = torch.ops.aten.copy_.default(arg0_1, view_copy_1); arg0_1 = view_copy_1 = None
return view_copy_2
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(4, 2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -258,7 +318,8 @@ def forward(self, arg0_1):
mul = torch.ops.aten.mul.Tensor(view_1, view_1)
copy_ = torch.ops.aten.copy_.default(arg0_1, view_1); arg0_1 = view_1 = None
return view_2
- """)
+ """,
+ )
def test_simple_out(self):
def f(x):
@@ -269,9 +330,12 @@ def forward(self, arg0_1):
torch.add(y, tmp, out=z)
w = z * z
return w
+
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -282,10 +346,15 @@ def forward(self, arg0_1):
add = torch.ops.aten.add.Tensor(view_copy, ones); view_copy = ones = None
mul = torch.ops.aten.mul.Tensor(add, add); add = None
return mul
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(4, 2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -296,7 +365,8 @@ def forward(self, arg0_1):
add = torch.ops.aten.add.Tensor(view, ones); view = ones = None
mul = torch.ops.aten.mul.Tensor(add, add); add = None
return mul
- """)
+ """,
+ )
def test_multi_out(self):
def f(x):
@@ -306,9 +376,12 @@ def forward(self, arg0_1):
out_max = torch.empty(4)
torch.aminmax(x, dim=0, out=(out_max, out_min))
return out_max
+
self.assert_functionalization(f, torch.arange(8, dtype=torch.float32))
logs = self.get_logs(f, torch.arange(8, dtype=torch.float32))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -319,10 +392,18 @@ def forward(self, arg0_1):
getitem = aminmax[0]
getitem_1 = aminmax[1]; aminmax = None
return getitem
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.arange(8, dtype=torch.float32), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f,
+ torch.arange(8, dtype=torch.float32),
+ reapply_views=True,
+ run_reinplace=True,
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -333,7 +414,8 @@ def forward(self, arg0_1):
getitem = aminmax[0]
getitem_1 = aminmax[1]; aminmax = None
return getitem
- """)
+ """,
+ )
def test_tensor_ctr(self):
def f(x):
@@ -346,7 +428,9 @@ def forward(self, arg0_1):
self.assert_functionalization(f, inpt)
logs = self.get_logs(f, inpt)
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -358,10 +442,13 @@ def forward(self, arg0_1):
view_copy_1 = torch.ops.aten.view_copy.default(add, [3]); add = None
view_copy_2 = torch.ops.aten.view_copy.default(view_copy_1, [-1])
return view_copy_1
- """)
+ """,
+ )
reinplaced_logs = self.get_logs(f, inpt, reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -373,7 +460,8 @@ def forward(self, arg0_1):
view_1 = torch.ops.aten.view.default(view, [3]); view = None
view_2 = torch.ops.aten.view.default(view_1, [-1])
return view_1
- """)
+ """,
+ )
def test_advanced_indexing_correct_strides(self):
def f(a):
@@ -383,6 +471,7 @@ def forward(self, arg0_1):
c = torch.ones_like(b, dtype=torch.bool)
d = b.masked_fill_(c, 0)
return d
+
self.assert_functionalization(f, torch.ones(2, 2), reapply_views=True)
def test_tensor_list_mixed_functional_nonfunctional(self):
@@ -393,8 +482,11 @@ def forward(self, arg0_1):
functional_tensor = torch.ones(2, dtype=torch.long)
out = x[functional_tensor, nonfunctional_tensor]
return out
+
out = f(torch.ones(2, 2))
- out_functional = _functionalize(f, reapply_views=True, crossref=self.crossref)(torch.ones(2, 2))
+ out_functional = _functionalize(f, reapply_views=True, crossref=self.crossref)(
+ torch.ones(2, 2)
+ )
self.assertEqual(out, out_functional)
def test_inplace_on_non_view(self):
@@ -405,9 +497,12 @@ def forward(self, arg0_1):
y = x.view(4, 2)
x.add_(tmp)
return y
+
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -418,10 +513,15 @@ def forward(self, arg0_1):
copy_ = torch.ops.aten.copy_.default(arg0_1, add); arg0_1 = None
view_copy_1 = torch.ops.aten.view_copy.default(add, [4, 2]); add = None
return view_copy_1
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(4, 2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -432,16 +532,21 @@ def forward(self, arg0_1):
copy_ = torch.ops.aten.copy_.default(arg0_1, add); arg0_1 = None
view_1 = torch.ops.aten.view.default(add, [4, 2]); add = None
return view_1
- """)
+ """,
+ )
# Some ops that are mutable are neither inplace nor out= ops.
# They also need special handling.
def test_mutable_op_not_inplace_or_other(self):
def f(x):
- return torch._fused_moving_avg_obs_fq_helper(x, x, x, x, x, x, x, 1.0, 0, 1, 0)
+ return torch._fused_moving_avg_obs_fq_helper(
+ x, x, x, x, x, x, x, 1.0, 0, 1, 0
+ )
logs = self.get_logs(f, torch.ones(1))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -455,16 +560,20 @@ def forward(self, arg0_1):
getitem_5 = _fused_moving_avg_obs_fq_helper_functional[5]; _fused_moving_avg_obs_fq_helper_functional = None
copy_ = torch.ops.aten.copy_.default(arg0_1, getitem_5); arg0_1 = getitem_5 = None
return (getitem, getitem_1)
- """) # noqa: B950
+ """, # noqa: B950
+ )
def test_as_strided(self):
def f(x):
y = x.as_strided((2,), (2,), 1)
y.add_(1)
return x
+
self.assert_functionalization(f, torch.ones(9))
logs = self.get_logs(f, torch.ones(9))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -475,11 +584,16 @@ def forward(self, arg0_1):
as_strided_copy_1 = torch.ops.aten.as_strided_copy.default(as_strided_scatter, [2], [2], 1)
copy_ = torch.ops.aten.copy_.default(arg0_1, as_strided_scatter); arg0_1 = None
return as_strided_scatter
- """)
+ """,
+ )
# NB: even with reapply_views=True, we expect to see scatter op
- reinplaced_logs = self.get_logs(f, torch.ones(2, 2), reapply_views=True, run_reinplace=False)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(2, 2), reapply_views=True, run_reinplace=False
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -490,32 +604,40 @@ def forward(self, arg0_1):
as_strided_1 = torch.ops.aten.as_strided.default(as_strided_scatter, [2], [2], 1)
copy_ = torch.ops.aten.copy_.default(arg0_1, as_strided_scatter); arg0_1 = None
return as_strided_scatter
- """)
+ """,
+ )
def test_tensor_list_composite(self):
def f(x):
# Test an op with TensorList input
y = torch.block_diag(x, x)
return y
+
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
def forward(self, arg0_1):
block_diag = torch.ops.aten.block_diag.default([arg0_1, arg0_1]); arg0_1 = None
return block_diag
- """)
+ """,
+ )
def test_cat(self):
def f(x):
out = torch.empty(0)
torch.cat((x,), out=out)
return out
+
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -523,10 +645,15 @@ def forward(self, arg0_1):
empty = torch.ops.aten.empty.memory_format([0], device = device(type='cpu'), pin_memory = False)
cat = torch.ops.aten.cat.default([arg0_1]); arg0_1 = None
return cat
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(2, 2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(2, 2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -534,8 +661,8 @@ def forward(self, arg0_1):
empty = torch.ops.aten.empty.memory_format([0], device = device(type='cpu'), pin_memory = False)
cat = torch.ops.aten.cat.default([arg0_1]); arg0_1 = None
return cat
- """)
-
+ """,
+ )
def test_diagonal(self):
def f(x):
@@ -545,9 +672,12 @@ def forward(self, arg0_1):
y.add_(tmp)
z = x * x
return z
+
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -560,10 +690,15 @@ def forward(self, arg0_1):
diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(diagonal_scatter); diagonal_scatter = None
mul = torch.ops.aten.mul.Tensor(arg0_1, arg0_1); arg0_1 = None
return mul
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(2, 2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(2, 2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -575,7 +710,8 @@ def forward(self, arg0_1):
diagonal_1 = torch.ops.aten.diagonal.default(clone); clone = None
mul = torch.ops.aten.mul.Tensor(arg0_1, arg0_1); arg0_1 = None
return mul
- """)
+ """,
+ )
def test_diagonal_mutated_input(self):
def f(x):
@@ -584,10 +720,13 @@ def forward(self, arg0_1):
y = x.diagonal()
y.add_(tmp)
return x
+
x = torch.ones(2, 2)
self.assert_functionalization(f, x)
logs = self.get_logs(f, torch.ones(2, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -599,11 +738,16 @@ def forward(self, arg0_1):
diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(diagonal_scatter)
copy_ = torch.ops.aten.copy_.default(arg0_1, diagonal_scatter); arg0_1 = None
return diagonal_scatter
- """)
+ """,
+ )
# NB: even with reapply_views=True, we expect to see scatter op
- reinplaced_logs = self.get_logs(f, torch.ones(2, 2), reapply_views=True, run_reinplace=False)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(2, 2), reapply_views=True, run_reinplace=False
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -615,7 +759,8 @@ def forward(self, arg0_1):
diagonal_1 = torch.ops.aten.diagonal.default(diagonal_scatter)
copy_ = torch.ops.aten.copy_.default(arg0_1, diagonal_scatter); arg0_1 = None
return diagonal_scatter
- """)
+ """,
+ )
def test_channels_last_contiguous(self):
def f(x):
@@ -624,13 +769,17 @@ def forward(self, arg0_1):
y = x.diagonal()
y.add_(tmp)
return x
+
x = torch.randn(4, 8, 8, 3).permute(0, 3, 1, 2)
self.assert_functionalization(f, x)
logs = self.get_logs(f, x).strip()
# There should be no clone in the graph
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
def forward(self, arg0_1):
- return arg0_1""")
+ return arg0_1""",
+ )
def test_split(self):
def f(x):
@@ -641,9 +790,12 @@ def forward(self, arg0_1):
y3.add_(tmp)
z = x * x
return y3
+
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -666,11 +818,16 @@ def forward(self, arg0_1):
mul = torch.ops.aten.mul.Tensor(slice_scatter, slice_scatter)
copy_ = torch.ops.aten.copy_.default(arg0_1, slice_scatter); arg0_1 = slice_scatter = None
return diagonal_copy_1
- """) # noqa: B950
+ """,
+ ) # noqa: B950
# NB: even with reapply_views=True, we expect to see scatter op
- reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=False)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(4, 2), reapply_views=True, run_reinplace=False
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -693,7 +850,8 @@ def forward(self, arg0_1):
mul = torch.ops.aten.mul.Tensor(slice_scatter, slice_scatter)
copy_ = torch.ops.aten.copy_.default(arg0_1, slice_scatter); arg0_1 = slice_scatter = None
return diagonal_1
- """) # noqa: B950
+ """,
+ ) # noqa: B950
def test_split_with_sizes(self):
def f(x):
@@ -704,9 +862,12 @@ def forward(self, arg0_1):
y3.add_(tmp)
z = x * x
return y3
+
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -729,11 +890,16 @@ def forward(self, arg0_1):
mul = torch.ops.aten.mul.Tensor(slice_scatter, slice_scatter)
copy_ = torch.ops.aten.copy_.default(arg0_1, slice_scatter); arg0_1 = slice_scatter = None
return diagonal_copy_1
- """) # noqa: B950
+ """,
+ ) # noqa: B950
# NB: even with reapply_views=True, we expect to see scatter op
- reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=False)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(4, 2), reapply_views=True, run_reinplace=False
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -756,7 +922,8 @@ def forward(self, arg0_1):
mul = torch.ops.aten.mul.Tensor(slice_scatter, slice_scatter)
copy_ = torch.ops.aten.copy_.default(arg0_1, slice_scatter); arg0_1 = slice_scatter = None
return diagonal_1
- """) # noqa: B950
+ """,
+ ) # noqa: B950
def test_slice(self):
def f(x):
@@ -765,9 +932,12 @@ def forward(self, arg0_1):
y = x[0:2]
y.add_(tmp)
return x
+
self.assert_functionalization(f, torch.ones(4, 2), mutated_input_metadata=True)
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -783,11 +953,16 @@ def forward(self, arg0_1):
slice_copy_1 = torch.ops.aten.slice_copy.Tensor(transpose_copy_3, 0, 0, 2); transpose_copy_3 = None
transpose_copy_4 = torch.ops.aten.transpose_copy.int(transpose_copy_2, 1, 0); transpose_copy_2 = None
return transpose_copy_4
- """) # noqa: B950
+ """,
+ ) # noqa: B950
# NB: even with reapply_views=True, we expect to see scatter op
- reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=False)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(4, 2), reapply_views=True, run_reinplace=False
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -803,7 +978,8 @@ def forward(self, arg0_1):
slice_2 = torch.ops.aten.slice.Tensor(transpose_3, 0, 0, 2); transpose_3 = None
transpose_4 = torch.ops.aten.transpose.int(transpose_2, 1, 0); transpose_2 = None
return transpose_4
- """) # noqa: B950
+ """,
+ ) # noqa: B950
def test_view_inplace(self):
def f(x):
@@ -813,9 +989,12 @@ def forward(self, arg0_1):
y = x[0]
y.add_(tmp)
return x
+
self.assert_functionalization(f, torch.ones(4, 2), mutated_input_metadata=True)
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -831,11 +1010,16 @@ def forward(self, arg0_1):
select_copy_1 = torch.ops.aten.select_copy.int(transpose_copy_3, 0, 0); transpose_copy_3 = None
transpose_copy_4 = torch.ops.aten.transpose_copy.int(transpose_copy_2, 1, 0); transpose_copy_2 = None
return transpose_copy_4
- """) # noqa: B950
+ """,
+ ) # noqa: B950
# NB: even with reapply_views=True, we expect to see scatter op
- reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=False)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(4, 2), reapply_views=True, run_reinplace=False
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -851,7 +1035,8 @@ def forward(self, arg0_1):
select_1 = torch.ops.aten.select.int(transpose_3, 0, 0); transpose_3 = None
transpose_4 = torch.ops.aten.transpose.int(transpose_2, 1, 0); transpose_2 = None
return transpose_4
- """) # noqa: B950
+ """,
+ ) # noqa: B950
def test_unbind(self):
def f(x):
@@ -861,9 +1046,12 @@ def forward(self, arg0_1):
y, _ = x.unbind(0)
y.add_(tmp)
return x
+
self.assert_functionalization(f, torch.ones(4, 2), mutated_input_metadata=True)
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -883,11 +1071,16 @@ def forward(self, arg0_1):
getitem_3 = unbind_copy_1[1]; unbind_copy_1 = None
transpose_copy_4 = torch.ops.aten.transpose_copy.int(transpose_copy_2, 1, 0); transpose_copy_2 = None
return transpose_copy_4
- """) # noqa: B950
+ """,
+ ) # noqa: B950
# NB: even with reapply_views=True, we expect to see scatter op
- reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=False)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(4, 2), reapply_views=True, run_reinplace=False
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -907,7 +1100,8 @@ def forward(self, arg0_1):
getitem_3 = unbind_1[1]; unbind_1 = None
transpose_4 = torch.ops.aten.transpose.int(transpose_2, 1, 0); transpose_2 = None
return transpose_4
- """) # noqa: B950
+ """,
+ ) # noqa: B950
def test_optional_tensor_list(self):
def f(x):
@@ -918,9 +1112,12 @@ def forward(self, arg0_1):
values = torch.arange(4, dtype=y.dtype)
y.index_put_((indices,), values, accumulate=False)
return y
+
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -933,7 +1130,8 @@ def forward(self, arg0_1):
view_copy_2 = torch.ops.aten.view_copy.default(view_copy_1, [8])
copy_ = torch.ops.aten.copy_.default(arg0_1, view_copy_1); arg0_1 = view_copy_1 = None
return view_copy_2
- """) # noqa: B950
+ """,
+ ) # noqa: B950
def test_scalars(self):
def f(x):
@@ -944,9 +1142,12 @@ def forward(self, arg0_1):
z = 2 * y
z.div_(1)
return z
+
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -960,7 +1161,8 @@ def forward(self, arg0_1):
div = torch.ops.aten.div.Tensor(mul, 1); mul = None
copy_ = torch.ops.aten.copy_.default(arg0_1, view_copy_1); arg0_1 = view_copy_1 = None
return div
- """)
+ """,
+ )
@skipIfTorchDynamo("Test does not work with TorchDynamo")
def test_metadata_change(self):
@@ -970,9 +1172,12 @@ def forward(self, arg0_1):
y = x.clone()
out = y.ge_(0)
return out
+
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -981,10 +1186,15 @@ def forward(self, arg0_1):
ge = torch.ops.aten.ge.Scalar(clone, 0); clone = None
_to_copy = torch.ops.aten._to_copy.default(ge, dtype = torch.float32, layout = torch.strided); ge = None
return _to_copy
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(2, 2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(2, 2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -993,7 +1203,8 @@ def forward(self, arg0_1):
ge = torch.ops.aten.ge.Scalar(clone, 0); clone = None
_to_copy = torch.ops.aten._to_copy.default(ge, dtype = torch.float32, layout = torch.strided); ge = None
return _to_copy
- """) # noqa: B950
+ """,
+ ) # noqa: B950
@skipIfTorchDynamo("Test does not work with TorchDynamo")
def test_metadata_change_out_op(self):
@@ -1002,7 +1213,9 @@ def forward(self, arg0_1):
return torch.add(t, y, out=out_1)
inpt1, inpt2 = torch.tensor([1]), torch.tensor([1])
- inpt1_func, inpt2_func = torch._to_functional_tensor(inpt1), torch._to_functional_tensor(inpt2)
+ inpt1_func, inpt2_func = torch._to_functional_tensor(
+ inpt1
+ ), torch._to_functional_tensor(inpt2)
out_ref = f(inpt1, inpt2)
torch._enable_functionalization(reapply_views=True)
@@ -1012,22 +1225,25 @@ def forward(self, arg0_1):
torch._disable_functionalization()
self.assertEqual(out_ref, torch._from_functional_tensor(out_functional))
-
def test_only_one_view(self):
def f(x):
# This tests that we don't have any unnecessary views in the trace.
# If the input wasn't mutated, we don't need to regenerate it,
# so there should be a total of 1 op in the output trace.
return x.view(4, 2)
+
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
def forward(self, arg0_1):
view_copy = torch.ops.aten.view_copy.default(arg0_1, [4, 2]); arg0_1 = None
return view_copy
- """)
+ """,
+ )
def test_everything(self):
def f(x):
@@ -1043,9 +1259,12 @@ def forward(self, arg0_1):
z2.add_(tmp)
z4 = z0[0] + z2.reshape(4)
return z2
+
self.assert_functionalization(f, torch.ones(4, 2))
logs = self.get_logs(f, torch.ones(4, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1096,10 +1315,15 @@ def forward(self, arg0_1):
view_copy_13 = torch.ops.aten.view_copy.default(getitem_4, [4]); getitem_4 = None
add_2 = torch.ops.aten.add.Tensor(select_copy_1, view_copy_13); select_copy_1 = view_copy_13 = None
return getitem_2
- """) # noqa: B950
+ """,
+ ) # noqa: B950
- reinplaced_logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(4, 2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1141,7 +1365,8 @@ def forward(self, arg0_1):
select_1 = torch.ops.aten.select.int(view_9, 0, 0); view_9 = None
add_2 = torch.ops.aten.add.Tensor(select_1, _unsafe_view); select_1 = _unsafe_view = None
return getitem_2
- """)
+ """,
+ )
def test_reapply_views_simple(self):
def f(x):
@@ -1150,9 +1375,12 @@ def forward(self, arg0_1):
y.add_(tmp)
z = x * x
return y
+
self.assert_functionalization(f, torch.ones(4, 2), reapply_views=True)
logs = self.get_logs(f, torch.ones(4, 2), reapply_views=True)
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1165,7 +1393,8 @@ def forward(self, arg0_1):
mul = torch.ops.aten.mul.Tensor(view_1, view_1)
copy_ = torch.ops.aten.copy_.default(arg0_1, view_1); arg0_1 = view_1 = None
return view_2
- """)
+ """,
+ )
def test_aliases_maintained_after_pass_when_reapplying_views(self):
def f(x):
@@ -1203,7 +1432,9 @@ def forward(self, arg0_1):
# to() is a composite op that noops when the dtype/shape match, so nothing gets logged.
# self.assert_functionalization(f, torch.ones(2))
logs = self.get_logs(f, torch.ones(2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1217,10 +1448,15 @@ def forward(self, arg0_1):
diagonal_scatter_1 = torch.ops.aten.diagonal_scatter.default(diagonal_scatter, add); diagonal_scatter = add = None
diagonal_copy_2 = torch.ops.aten.diagonal_copy.default(diagonal_scatter_1); diagonal_scatter_1 = None
return diagonal_copy_2
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1232,12 +1468,15 @@ def forward(self, arg0_1):
add = torch.ops.aten.add_.Tensor(diagonal_1, arg0_1); diagonal_1 = arg0_1 = None
diagonal_2 = torch.ops.aten.diagonal.default(zeros); zeros = None
return diagonal_2
- """)
+ """,
+ )
# Test 2: copy_() with same dtype, different shape
self.assert_functionalization(f, torch.ones(1))
logs = self.get_logs(f, torch.ones(1))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1251,10 +1490,15 @@ def forward(self, arg0_1):
diagonal_scatter_1 = torch.ops.aten.diagonal_scatter.default(diagonal_scatter, add); diagonal_scatter = add = None
diagonal_copy_2 = torch.ops.aten.diagonal_copy.default(diagonal_scatter_1); diagonal_scatter_1 = None
return diagonal_copy_2
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(1), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(1), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1266,12 +1510,15 @@ def forward(self, arg0_1):
add = torch.ops.aten.add_.Tensor(diagonal_1, arg0_1); diagonal_1 = arg0_1 = None
diagonal_2 = torch.ops.aten.diagonal.default(zeros); zeros = None
return diagonal_2
- """)
+ """,
+ )
# Test 3: copy_() with different dtype, same shape
self.assert_functionalization(f, torch.ones(2, dtype=torch.long))
logs = self.get_logs(f, torch.ones(2, dtype=torch.long))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1285,10 +1532,15 @@ def forward(self, arg0_1):
diagonal_scatter_1 = torch.ops.aten.diagonal_scatter.default(diagonal_scatter, add); diagonal_scatter = add = None
diagonal_copy_2 = torch.ops.aten.diagonal_copy.default(diagonal_scatter_1); diagonal_scatter_1 = None
return diagonal_copy_2
- """) # noqa: B950
+ """,
+ ) # noqa: B950
- reinplaced_logs = self.get_logs(f, torch.ones(2, dtype=torch.long), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(2, dtype=torch.long), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1300,12 +1552,15 @@ def forward(self, arg0_1):
add = torch.ops.aten.add_.Tensor(diagonal_1, arg0_1); diagonal_1 = arg0_1 = None
diagonal_2 = torch.ops.aten.diagonal.default(zeros); zeros = None
return diagonal_2
- """) # noqa: B950
+ """,
+ ) # noqa: B950
# Test 4: copy_() with different dtype, different shape
self.assert_functionalization(f, torch.ones(1, dtype=torch.long))
logs = self.get_logs(f, torch.ones(1, dtype=torch.long))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1319,10 +1574,15 @@ def forward(self, arg0_1):
diagonal_scatter_1 = torch.ops.aten.diagonal_scatter.default(diagonal_scatter, add); diagonal_scatter = add = None
diagonal_copy_2 = torch.ops.aten.diagonal_copy.default(diagonal_scatter_1); diagonal_scatter_1 = None
return diagonal_copy_2
- """) # noqa: B950
+ """,
+ ) # noqa: B950
- reinplaced_logs = self.get_logs(f, torch.ones(1, dtype=torch.long), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(1, dtype=torch.long), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1334,7 +1594,8 @@ def forward(self, arg0_1):
add = torch.ops.aten.add_.Tensor(diagonal_1, arg0_1); diagonal_1 = arg0_1 = None
diagonal_2 = torch.ops.aten.diagonal.default(zeros); zeros = None
return diagonal_2
- """) # noqa: B950
+ """,
+ ) # noqa: B950
def test_expand_symint(self):
# Once some existing SymInt bugs are ironed out, we should update
@@ -1344,14 +1605,17 @@ def forward(self, arg0_1):
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
def forward(self, arg0_1):
expand_copy = torch.ops.aten.expand_copy.default(arg0_1, [2, 2]); arg0_1 = None
return expand_copy
- """)
+ """,
+ )
def test_fill_(self):
def f(x):
@@ -1362,7 +1626,9 @@ def forward(self, arg0_1):
self.assert_functionalization(f, torch.ones(2, 2))
logs = self.get_logs(f, torch.ones(2, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1373,10 +1639,15 @@ def forward(self, arg0_1):
diagonal_scatter = torch.ops.aten.diagonal_scatter.default(add, fill); add = fill = None
diagonal_copy_1 = torch.ops.aten.diagonal_copy.default(diagonal_scatter)
return diagonal_scatter
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(2, 2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(2, 2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1386,7 +1657,8 @@ def forward(self, arg0_1):
fill = torch.ops.aten.fill_.Scalar(diagonal, 0); diagonal = None
diagonal_1 = torch.ops.aten.diagonal.default(add)
return add
- """)
+ """,
+ )
def test_resize_smaller(self):
def f(w):
@@ -1401,7 +1673,9 @@ def forward(self, arg0_1):
self.assert_functionalization(f, torch.ones(8, 2))
logs = self.get_logs(f, torch.ones(8, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1424,10 +1698,15 @@ def forward(self, arg0_1):
as_strided_copy_3 = torch.ops.aten.as_strided_copy.default(view_copy_7, [3, 3], [3, 1]); view_copy_7 = None
add_2 = torch.ops.aten.add.Tensor(as_strided_copy_3, 1); as_strided_copy_3 = None
return add_2
- """) # noqa: B950
+ """, # noqa: B950
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(8, 2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(8, 2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1449,7 +1728,8 @@ def forward(self, arg0_1):
as_strided_3 = torch.ops.aten.as_strided.default(view_7, [3, 3], [3, 1]); view_7 = None
add_2 = torch.ops.aten.add_.Tensor(as_strided_3, 1)
return as_strided_3
- """)
+ """,
+ )
def test_resize_same_size_diff_rank(self):
def f(x):
@@ -1478,7 +1758,9 @@ def forward(self, arg0_1):
self.assert_functionalization(f, torch.ones(8, 2))
logs = self.get_logs(f, torch.ones(8, 2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1491,10 +1773,15 @@ def forward(self, arg0_1):
view_copy_2 = torch.ops.aten.view_copy.default(view_copy_1, [25])
add_1 = torch.ops.aten.add.Tensor(view_copy_1, 1)
return (view_copy_1, add_1)
- """)
+ """,
+ )
- reinplaced_logs = self.get_logs(f, torch.ones(8, 2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(8, 2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1507,7 +1794,8 @@ def forward(self, arg0_1):
view_2 = torch.ops.aten.view.default(view_1, [25])
add_1 = torch.ops.aten.add.Tensor(view_1, 1)
return (view_1, add_1)
- """)
+ """,
+ )
def test_resize_larger_invalid(self):
def f(x):
@@ -1524,8 +1812,9 @@ def forward(self, arg0_1):
return y, out
with self.assertRaisesRegex(
- RuntimeError,
- r'Attempted to resize a view tensor to a larger size. This is not allowed in the functionalization pass'):
+ RuntimeError,
+ r"Attempted to resize a view tensor to a larger size. This is not allowed in the functionalization pass",
+ ):
self.assert_functionalization(f, torch.ones(8, 2))
def test_nested_functions_propagate_updates(self):
@@ -1558,9 +1847,12 @@ def forward(self, arg0_1):
# Make sure that functionalization ran the "+" kernel
# with a functional + non-functional tensor, and wrapped the output appropriately.
- self.assertExpectedInline('\n'.join(logs), """\
+ self.assertExpectedInline(
+ "\n".join(logs),
+ """\
$2: f32[4] = torch._ops.aten.add.Tensor($0, $1)
-$3: f32[4] = torch._ops.aten.add.Tensor($2, 1)""")
+$3: f32[4] = torch._ops.aten.add.Tensor($2, 1)""",
+ )
def test_mixed_wrappers_invalid(self):
x1_not_functional = torch.ones(4)
@@ -1577,9 +1869,12 @@ $3: f32[4] = torch._ops.aten.add.Tensor($2, 1)""")
tmp = torch.zeros(10)
tmp[5].fill_(1)
return tmp
+
self.assert_functionalization(f, torch.ones(2))
logs = self.get_logs(f, torch.ones(2))
- self.assertExpectedInline(logs, """\
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1590,10 +1885,15 @@ def forward(self, arg0_1):
select_scatter = torch.ops.aten.select_scatter.default(zeros, fill, 0, 5); zeros = fill = None
select_copy_1 = torch.ops.aten.select_copy.int(select_scatter, 0, 5)
return select_scatter
- """) # noqa: B950
+ """,
+ ) # noqa: B950
- reinplaced_logs = self.get_logs(f, torch.ones(2), reapply_views=True, run_reinplace=True)
- self.assertExpectedInline(reinplaced_logs, """\
+ reinplaced_logs = self.get_logs(
+ f, torch.ones(2), reapply_views=True, run_reinplace=True
+ )
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1603,23 +1903,39 @@ def forward(self, arg0_1):
fill = torch.ops.aten.fill_.Scalar(select, 1); select = None
select_1 = torch.ops.aten.select.int(zeros, 0, 5)
return zeros
- """)
-
+ """,
+ )
def test_instance_norm(self):
size = 100
def f(x, running_mean, running_var):
with enable_python_dispatcher():
- return torch.instance_norm(x, None, None, running_mean, running_var,
- use_input_stats=True, momentum=0.1, eps=1e-5, cudnn_enabled=False)
- self.assert_functionalization(f, torch.randn(20, size, 35, 45), torch.zeros(size), torch.ones(size))
+ return torch.instance_norm(
+ x,
+ None,
+ None,
+ running_mean,
+ running_var,
+ use_input_stats=True,
+ momentum=0.1,
+ eps=1e-5,
+ cudnn_enabled=False,
+ )
+
+ self.assert_functionalization(
+ f, torch.randn(20, size, 35, 45), torch.zeros(size), torch.ones(size)
+ )
# On Windows, for instance_norm, the alias_copy's are reordered to come right before they need to be used
# whereas on other platforms, the alias_copy's are before the view_copy's.
# e.g., the alias_copy after the getitem_4 assignment would be moved to be right before the copy assignment.
if not IS_WINDOWS:
- logs = self.get_logs(f, torch.randn(20, size, 35, 45), torch.zeros(size), torch.ones(size))
- self.assertExpectedInline(logs, """\
+ logs = self.get_logs(
+ f, torch.randn(20, size, 35, 45), torch.zeros(size), torch.ones(size)
+ )
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1652,13 +1968,20 @@ def forward(self, arg0_1, arg1_1, arg2_1):
copy_ = torch.ops.aten.copy_.default(arg1_1, alias_copy_1); arg1_1 = alias_copy_1 = None
copy__1 = torch.ops.aten.copy_.default(arg2_1, alias_copy_4); arg2_1 = alias_copy_4 = None
return view_copy_5
- """) # noqa: B950
+ """, # noqa: B950
+ )
reinplaced_logs = self.get_logs(
- f, torch.randn(20, size, 35, 45), torch.zeros(size), torch.ones(size),
- reapply_views=True, run_reinplace=True
+ f,
+ torch.randn(20, size, 35, 45),
+ torch.zeros(size),
+ torch.ones(size),
+ reapply_views=True,
+ run_reinplace=True,
)
- self.assertExpectedInline(reinplaced_logs, """\
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1691,8 +2014,8 @@ def forward(self, arg0_1, arg1_1, arg2_1):
copy_ = torch.ops.aten.copy_.default(arg1_1, alias_1); arg1_1 = alias_1 = None
copy__1 = torch.ops.aten.copy_.default(arg2_1, alias_4); arg2_1 = alias_4 = None
return view_5
- """) # noqa: B950
-
+ """, # noqa: B950
+ )
def test_mutation_overlapping_mem(self):
def fn(x):
@@ -1702,19 +2025,29 @@ def forward(self, arg0_1, arg1_1, arg2_1):
t3 = t2.abs_()
return t3
- with self.assertRaisesRegex(RuntimeError, r'encountered a tensor being mutated that has internal overlap'):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"encountered a tensor being mutated that has internal overlap",
+ ):
x = torch.ones(1, 5)
out = _functionalize(fn, reapply_views=True, crossref=False)(x)
-
def test_batch_norm(self):
def f(x, running_mean, running_var):
with enable_python_dispatcher():
- return torch.batch_norm(x, None, None, running_mean, running_var, True, 0.1, 1e-5, False)
+ return torch.batch_norm(
+ x, None, None, running_mean, running_var, True, 0.1, 1e-5, False
+ )
- self.assert_functionalization(f, torch.randn(20, 100, 35, 45), torch.zeros(100), torch.ones(100))
- logs = self.get_logs(f, torch.randn(20, 100, 35, 45), torch.zeros(100), torch.ones(100))
- self.assertExpectedInline(logs, """\
+ self.assert_functionalization(
+ f, torch.randn(20, 100, 35, 45), torch.zeros(100), torch.ones(100)
+ )
+ logs = self.get_logs(
+ f, torch.randn(20, 100, 35, 45), torch.zeros(100), torch.ones(100)
+ )
+ self.assertExpectedInline(
+ logs,
+ """\
@@ -1729,12 +2062,20 @@ def forward(self, arg0_1, arg1_1, arg2_1):
copy_ = torch.ops.aten.copy_.default(arg1_1, getitem_3); arg1_1 = getitem_3 = None
copy__1 = torch.ops.aten.copy_.default(arg2_1, getitem_4); arg2_1 = getitem_4 = None
return getitem
- """) # noqa: B950
+ """, # noqa: B950
+ )
reinplaced_logs = self.get_logs(
- f, torch.randn(20, 100, 35, 45), torch.zeros(100), torch.ones(100), reapply_views=True, run_reinplace=True
+ f,
+ torch.randn(20, 100, 35, 45),
+ torch.zeros(100),
+ torch.ones(100),
+ reapply_views=True,
+ run_reinplace=True,
)
- self.assertExpectedInline(reinplaced_logs, """\
+ self.assertExpectedInline(
+ reinplaced_logs,
+ """\
@@ -1749,7 +2090,8 @@ def forward(self, arg0_1, arg1_1, arg2_1):
copy_ = torch.ops.aten.copy_.default(arg1_1, getitem_3); arg1_1 = getitem_3 = None
copy__1 = torch.ops.aten.copy_.default(arg2_1, getitem_4); arg2_1 = getitem_4 = None
return getitem
- """) # noqa: B950
+ """, # noqa: B950
+ )
# This tests our python shims around C++ Functionalization: FunctionalTensor and FunctionalTensorMode
def test_python_functionalization(self):
@@ -1768,7 +2110,9 @@ def forward(self, arg0_1, arg1_1, arg2_1):
# our FunctionalTensor will inherit the same keyset.
# We don't have an easy way of directly mutating a tensor's keyset from python,
# so globally disabling functionalization here is easier.
- maybe_disable = torch._C._ExcludeDispatchKeyGuard(torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize))
+ maybe_disable = torch._C._ExcludeDispatchKeyGuard(
+ torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
+ )
with maybe_disable, FunctionalTensorMode():
x_wrapped = FunctionalTensor.to_functional(x)
out_wrapped = f(x_wrapped)
@@ -1781,14 +2125,17 @@ def forward(self, arg0_1, arg1_1, arg2_1):
fx_g = make_fx(f_functionalized)(x)
# NB: view_1 below is expected (though unused) due to view replay. AOTAutograd runs a
# DCE pass that will remove nodes like this later on.
- self.assertExpectedInline(fx_g.code.strip(), """\
+ self.assertExpectedInline(
+ fx_g.code.strip(),
+ """\
def forward(self, x_1):
view = torch.ops.aten.view.default(x_1, [-1])
mul = torch.ops.aten.mul.Tensor(x_1, 2); x_1 = None
view_1 = torch.ops.aten.view.default(mul, [-1])
view_2 = torch.ops.aten.view.default(mul, [-1]); mul = None
add = torch.ops.aten.add.Tensor(view_2, 1); view_2 = None
- return add""")
+ return add""",
+ )
def test_python_functionalization_zero_tensor(self):
def f(x):
@@ -1796,14 +2143,21 @@ def forward(self, x_1):
out = x + y
out.mul_(2)
return out
+
x = torch.randn(4)
out_ref = f(x)
out_test = dispatch_functionalize(f)(x)
- out_test_cpp = _functionalize(f, reapply_views=True, crossref=False, skip_input_mutations=True)(x)
+ out_test_cpp = _functionalize(
+ f, reapply_views=True, crossref=False, skip_input_mutations=True
+ )(x)
self.assertEqual(out_ref, out_test)
self.assertEqual(out_ref, out_test_cpp)
fx_g = make_fx(dispatch_functionalize(f))(x)
- fx_g_cpp = make_fx(_functionalize(f, reapply_views=True, crossref=False, skip_input_mutations=True))(x)
+ fx_g_cpp = make_fx(
+ _functionalize(
+ f, reapply_views=True, crossref=False, skip_input_mutations=True
+ )
+ )(x)
self.assertEqual(fx_g_cpp.code.strip(), fx_g.code.strip())
def test_python_functionalization_is_conj(self):
@@ -1834,7 +2188,6 @@ def forward(self, x_1):
self.assertEqual(out_ref[0], out_test_cpp[0])
self.assertEqual(out_ref[1], out_test_cpp[1])
-
def test_python_functionalization_conj(self):
def f(x):
y = x.clone().conj()
@@ -1844,12 +2197,20 @@ def forward(self, x_1):
x = torch.randn(4, dtype=torch.complex64)
out_ref = f(x)
out_test = dispatch_functionalize(f)(x)
- out_test_cpp = _functionalize(f, reapply_views=True, crossref=False, skip_input_mutations=True)(x)
+ out_test_cpp = _functionalize(
+ f, reapply_views=True, crossref=False, skip_input_mutations=True
+ )(x)
self.assertEqual(out_ref, out_test)
self.assertEqual(out_test, out_test_cpp)
fx_g = make_fx(dispatch_functionalize(f))(x)
- fx_g_cpp = make_fx(_functionalize(f, reapply_views=True, crossref=False, skip_input_mutations=True))(x)
- self.assertExpectedInline(fx_g.code.strip(), """\
+ fx_g_cpp = make_fx(
+ _functionalize(
+ f, reapply_views=True, crossref=False, skip_input_mutations=True
+ )
+ )(x)
+ self.assertExpectedInline(
+ fx_g.code.strip(),
+ """\
def forward(self, arg0_1):
clone = torch.ops.aten.clone.default(arg0_1); arg0_1 = None
_conj = torch.ops.aten._conj.default(clone); clone = None
@@ -1861,7 +2222,8 @@ def forward(self, arg0_1):
_conj_2 = torch.ops.aten._conj.default(_conj_1); _conj_1 = None
clone_3 = torch.ops.aten.clone.default(_conj_2); _conj_2 = None
view_as_real = torch.ops.aten.view_as_real.default(clone_3); clone_3 = None
- return view_as_real""")
+ return view_as_real""",
+ )
self.assertEqual(fx_g_cpp.code.strip(), fx_g.code.strip())
def test_python_functionalization_neg(self):
@@ -1873,23 +2235,34 @@ def forward(self, arg0_1):
x = torch.randn(4)
out_ref = f(x)
out_test = dispatch_functionalize(f)(x)
- out_test_cpp = _functionalize(f, reapply_views=True, crossref=False, skip_input_mutations=True)(x)
+ out_test_cpp = _functionalize(
+ f, reapply_views=True, crossref=False, skip_input_mutations=True
+ )(x)
self.assertEqual(out_ref, out_test)
self.assertEqual(out_ref, out_test_cpp)
fx_g = make_fx(dispatch_functionalize(f))(x)
- fx_g_cpp = make_fx(_functionalize(f, reapply_views=True, crossref=False, skip_input_mutations=True))(x)
- self.assertExpectedInline(fx_g.code.strip(), """\
+ fx_g_cpp = make_fx(
+ _functionalize(
+ f, reapply_views=True, crossref=False, skip_input_mutations=True
+ )
+ )(x)
+ self.assertExpectedInline(
+ fx_g.code.strip(),
+ """\
def forward(self, arg0_1):
_neg_view = torch.ops.aten._neg_view.default(arg0_1); arg0_1 = None
clone = torch.ops.aten.clone.default(_neg_view); _neg_view = None
add = torch.ops.aten.add.Tensor(clone, 1); clone = None
- return add""")
+ return add""",
+ )
self.assertEqual(fx_g_cpp.code.strip(), fx_g.code.strip())
def test_python_functionalization_lift_fresh_storage(self):
unlifted = torch.tensor([0.0])
- maybe_disable = torch._C._ExcludeDispatchKeyGuard(torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize))
+ maybe_disable = torch._C._ExcludeDispatchKeyGuard(
+ torch._C.DispatchKeySet(torch._C.DispatchKey.Functionalize)
+ )
with maybe_disable, FunctionalTensorMode():
lifted = torch.ops.aten.lift_fresh.default(unlifted)
@@ -1903,36 +2276,51 @@ def forward(self, arg0_1):
x = torch.randn(4)
out_ref = f(x)
out_test = dispatch_functionalize(f)(x)
- out_test_cpp = _functionalize(f, reapply_views=True, crossref=False, skip_input_mutations=True)(x)
+ out_test_cpp = _functionalize(
+ f, reapply_views=True, crossref=False, skip_input_mutations=True
+ )(x)
self.assertEqual(out_ref, out_test)
self.assertEqual(out_ref, out_test_cpp)
fx_g = make_fx(dispatch_functionalize(f))(x)
- fx_g_cpp = make_fx(_functionalize(f, reapply_views=True, crossref=False, skip_input_mutations=True))(x)
- self.assertExpectedInline(fx_g.code.strip(), """\
+ fx_g_cpp = make_fx(
+ _functionalize(
+ f, reapply_views=True, crossref=False, skip_input_mutations=True
+ )
+ )(x)
+ self.assertExpectedInline(
+ fx_g.code.strip(),
+ """\
def forward(self, arg0_1):
_tensor_constant0 = self._tensor_constant0
lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None
add = torch.ops.aten.add.Tensor(lift_fresh_copy, arg0_1); lift_fresh_copy = arg0_1 = None
- return add""")
+ return add""",
+ )
self.assertEqual(fx_g_cpp.code.strip(), fx_g.code.strip())
-@xfail_inherited_tests([
- "test_as_strided",
- "test_copy_",
- "test_diagonal",
- "test_diagonal_mutated_input",
- "test_everything",
- "test_fill_",
- "test_slice",
- "test_split",
- "test_split_with_sizes",
- "test_unbind",
- "test_view_clone_view_inplace",
- "test_view_inplace",
-])
-@unittest.skipIf(TEST_WITH_TORCHDYNAMO, "dynamo-ing code with proxy + fake doesnt work well")
+
+@xfail_inherited_tests(
+ [
+ "test_as_strided",
+ "test_copy_",
+ "test_diagonal",
+ "test_diagonal_mutated_input",
+ "test_everything",
+ "test_fill_",
+ "test_slice",
+ "test_split",
+ "test_split_with_sizes",
+ "test_unbind",
+ "test_view_clone_view_inplace",
+ "test_view_inplace",
+ ]
+)
+@unittest.skipIf(
+ TEST_WITH_TORCHDYNAMO, "dynamo-ing code with proxy + fake doesnt work well"
+)
class TestCrossRefFunctionalization(TestFunctionalization):
crossref = True
-if __name__ == '__main__':
+
+if __name__ == "__main__":
run_tests()
|
2.41.0
|
e1fb9696491b6b70060672b83797f892042f4e5
|
Sun, 28 Apr 2024 21:41:34 +0000
|
[PATCH 0783/1000] [BE]: RUF018 - ban assignment in assert (#125125)
|
Ban assignment inside assert statements. Python code should ideally not break when assertions are disabled (e.g., under `python -O`); an assignment that happens only inside an assert disappears along with it. Adds a ruff lint rule to enforce this. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125125 Approved by: https://github.com/ezyang
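
For illustration, a minimal sketch (not part of this patch; the dict and key names are made up) of the hazard the rule guards against:

```python
def bad(opset_versions: dict) -> int:
    # Under `python -O` this assert is compiled out together with the
    # assignment it hides, and the return below raises NameError.
    assert isinstance(version := opset_versions["aten"], int)
    return version


def good(opset_versions: dict) -> int:
    # Assign first, then assert: the value exists even when asserts are stripped.
    version = opset_versions["aten"]
    assert isinstance(version, int), f"expected int, got {version!r}"
    return version
```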
|
diff --git a/pyproject.toml b/pyproject.toml
index 5d749c3462..5867deccd6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -131,6 +131,7 @@ select = [
"RUF015", # access first ele in constant time
"RUF016", # type error non-integer index
"RUF017",
+ "RUF018", # no assignment in assert
"TRY002", # ban vanilla raise (todo fix NOQAs)
"TRY200", # TODO: migrate from deprecated alias
"TRY302",
diff --git a/torch/_export/serde/serialize.py b/torch/_export/serde/serialize.py
index ffa4adbd21..be4d33eea3 100644
--- a/torch/_export/serde/serialize.py
+++ b/torch/_export/serde/serialize.py
@@ -2265,13 +2265,11 @@ class ExportedProgramDeserializer(metaclass=Final):
key for key in model_opset_version if key in self.expected_opset_version
}
for namespace in common_namespaces:
- assert isinstance(
- model_version := model_opset_version[namespace], int
- ), f"model_opset_version value should be int, got {model_opset_version[namespace]}"
+ model_version = model_opset_version[namespace]
+ assert isinstance(model_version, int), f"model_opset_version value should be int, got {model_version}"
- assert isinstance(
- compiler_version := self.expected_opset_version[namespace], int
- ), f"expected_opset_version value should be int, got {self.expected_opset_version[namespace]}"
+ compiler_version = self.expected_opset_version[namespace]
+ assert isinstance(compiler_version, int), f"expected_opset_version value should be int, got {compiler_version}"
# TODO(larryliu0820): Add support for upgrader & downgrader
if model_version != compiler_version:
diff --git a/torch/onnx/_internal/fx/passes/type_promotion.py b/torch/onnx/_internal/fx/passes/type_promotion.py
index 47ddd2d1dc..944cad4acf 100644
--- a/torch/onnx/_internal/fx/passes/type_promotion.py
+++ b/torch/onnx/_internal/fx/passes/type_promotion.py
@@ -292,9 +292,11 @@ class ReductionTypePromotionRule(TypePromotionRule):
def preview_type_promotion(
self, args: tuple, kwargs: dict
) -> TypePromotionSnapshot:
- assert len(args) >= 1 and isinstance(
- arg := args[0], torch.Tensor
+ assert (
+ len(args) >= 1
), f"Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument"
+ arg = args[0]
+ assert isinstance(arg, torch.Tensor), f"{type(arg)=} is not torch.Tensor"
dtype: Optional[torch.dtype] = kwargs.get("dtype", None)
computation_dtype, result_dtype = _prims_common.reduction_dtypes(
@@ -329,9 +331,11 @@ class AllOrAnyReductionTypePromotionRule(ReductionTypePromotionRule):
def preview_type_promotion(
self, args: tuple, kwargs: dict
) -> TypePromotionSnapshot:
- assert len(args) >= 1 and isinstance(
- arg := args[0], torch.Tensor
+ assert (
+ len(args) >= 1
), f"Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument"
+ arg = args[0]
+ assert isinstance(arg, torch.Tensor), f"{type(arg)=} is not torch.Tensor"
computation_dtype = torch.bool
# Preserves uint8 -- probably a legacy mask thing
result_dtype = torch.uint8 if arg.dtype == torch.uint8 else torch.bool
@@ -352,9 +356,11 @@ class SumLikeReductionTypePromotionRule(ReductionTypePromotionRule):
def preview_type_promotion(
self, args: tuple, kwargs: dict
) -> TypePromotionSnapshot:
- assert len(args) >= 1 and isinstance(
- arg := args[0], torch.Tensor
+ assert (
+ len(args) >= 1
), f"Reduction op torch.ops.{self.namespace}.{self.op_name} expects at least one argument"
+ arg = args[0]
+ assert isinstance(arg, torch.Tensor), f"{type(arg)=} is not torch.Tensor"
dtype: Optional[torch.dtype] = kwargs.get("dtype", None)
# The below logic is copied from `torch/_refs/__init__.py` reduction ops impl.
if dtype is None:
|
2.41.0
|
3b9b71684ae4f81d6128854b12771f83064c5ce
|
Sun, 28 Apr 2024 21:44:30 +0000
|
[PATCH 0784/1000] [BE]: Ruff - TRY401 - Avoid verbose exception logging (#125126)
|
Don't log the exception object explicitly with the logger; `logger.exception()` already captures the active exception and its traceback, so passing it only produces verbose, duplicated output. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125126 Approved by: https://github.com/ezyang
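
A minimal standalone sketch (not from the patch) of the before/after pattern:

```python
import logging

logging.basicConfig()
log = logging.getLogger(__name__)

try:
    1 / 0
except Exception:
    # Preferred: the traceback (including the exception text) is appended
    # automatically by log.exception().
    log.exception("division failed")

try:
    1 / 0
except Exception as e:
    # Flagged by TRY401: the "%s" renders text the traceback already contains.
    log.exception("division failed: %s", e)
```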
|
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index 1c3dcd9348..2ecadef60e 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -2478,7 +2478,7 @@ class BenchmarkRunner:
if isinstance(e, torch.cuda.OutOfMemoryError)
else "eager_1st_run_fail"
)
- log.exception(e)
+ log.exception("")
return record_status(accuracy_status, dynamo_start_stats=start_stats)
finally:
del model_copy
@@ -2499,7 +2499,7 @@ class BenchmarkRunner:
if isinstance(e, torch.cuda.OutOfMemoryError)
else "eager_2nd_run_fail"
)
- log.exception(e)
+ log.exception("")
return record_status(accuracy_status, dynamo_start_stats=start_stats)
finally:
del model_copy
@@ -2551,7 +2551,7 @@ class BenchmarkRunner:
with maybe_enable_compiled_autograd(self.args.compiled_autograd):
new_result = optimized_model_iter_fn(model_copy, example_inputs)
except Exception as e:
- log.exception(e)
+ log.exception("")
print(
"TorchDynamo optimized model failed to run because of following error"
)
@@ -2653,7 +2653,7 @@ class BenchmarkRunner:
optimized_model_iter_fn = optimize_ctx(self.run_n_iterations)
new_result = optimized_model_iter_fn(model, example_inputs)
except Exception as e:
- log.exception(e)
+ log.exception("")
print(
"TorchDynamo optimized model failed to run because of following error"
)
diff --git a/benchmarks/dynamo/runner.py b/benchmarks/dynamo/runner.py
index 9c169b75db..2a21613874 100755
--- a/benchmarks/dynamo/runner.py
+++ b/benchmarks/dynamo/runner.py
@@ -1452,7 +1452,7 @@ class DashboardUpdater:
try:
RegressionTracker(self.args).diff()
except Exception as e:
- logging.exception(e)
+ logging.exception("")
with open(f"{self.args.output_dir}/gh_regression.txt", "w") as gh_fh:
gh_fh.write("")
diff --git a/pyproject.toml b/pyproject.toml
index 5867deccd6..3d0749d6b5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -135,6 +135,7 @@ select = [
"TRY002", # ban vanilla raise (todo fix NOQAs)
"TRY200", # TODO: migrate from deprecated alias
"TRY302",
+ "TRY401", # verbose-log-message
"UP",
]
diff --git a/torch/_dynamo/debug_utils.py b/torch/_dynamo/debug_utils.py
index 38326734e8..67dd492fe8 100644
--- a/torch/_dynamo/debug_utils.py
+++ b/torch/_dynamo/debug_utils.py
@@ -282,7 +282,7 @@ def helper_for_dump_minify(contents):
fd.write(contents)
except OSError as e:
- log.exception(e)
+ log.exception("")
raise NotImplementedError("Could not write to {minified_repro_path}") from e
diff --git a/torch/_inductor/autotune_process.py b/torch/_inductor/autotune_process.py
index bab2af3cb8..035961c311 100644
--- a/torch/_inductor/autotune_process.py
+++ b/torch/_inductor/autotune_process.py
@@ -102,7 +102,7 @@ class TuningProcess:
try:
TuningProcess.workloop(request_queue, response_queue)
except Exception as ex:
- log.exception("Exception in TuningProcess: %s", ex)
+ log.exception("Exception in TuningProcess")
@staticmethod
def workloop(request_queue: Queue[Any], response_queue: Queue[Any]) -> None:
diff --git a/torch/_inductor/fx_passes/numeric_utils.py b/torch/_inductor/fx_passes/numeric_utils.py
index b4baf12d4e..44d0564fe3 100644
--- a/torch/_inductor/fx_passes/numeric_utils.py
+++ b/torch/_inductor/fx_passes/numeric_utils.py
@@ -149,8 +149,8 @@ def run_model(
_ = pred_control[0].sum().backward(retain_graph=True)
res = compare_gradients(model_base, model_control, precision)
logger.info("compare param grad. Numerical result : %s", res)
- except Exception as e:
- logger.exception("Exception %s when compare gradients", e)
+ except Exception:
+ logger.exception("Exception when comparing gradients")
traceback.print_exc()
if config.fx_passes_numeric_check["requires_optimizer"]:
@@ -172,7 +172,7 @@ def run_model(
)
except Exception as e:
logger.exception(
- "Exception %s when optimizer is added to check parameter names", e
+ "Exception when optimizer is added to check parameter names"
)
traceback.print_exc()
else:
|
2.41.0
|
6b845dedca77ed3be756efc1176c4594da2fa80
|
Mon, 29 Apr 2024 02:11:40 +0000
|
[PATCH 0786/1000] Make metadata serialization more strict (#124411)
|
Summary: When I was debugging an issue, this silent error path made the debugging harder. It is better to error earlier with a more descriptive error message. Test Plan: None Differential Revision: D56312433 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124411 Approved by: https://github.com/zhxchen17
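
Roughly, the pattern applied here (a simplified sketch; helper names are illustrative, the real code builds a `ConstantArgument` inside `_export_non_strict`) is to replace the silent catch-all branch with an explicit error naming the supported constant types:

```python
SUPPORTED_CONSTANT_TYPES = (int, bool, str, float, type(None))


def serialize_constant(name, val):
    # Only plain Python constants may appear in exported-program metadata.
    if isinstance(val, SUPPORTED_CONSTANT_TYPES):
        return {"name": name, "value": val}
    # Fail loudly instead of silently writing metadata the deserializer
    # cannot reconstruct.
    raise AssertionError(
        f"Encountered an unsupported object of type {type(val)} "
        f"while writing the metadata for exported program"
    )
```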
|
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 48caa86549..0143d0ab15 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -589,10 +589,13 @@ def _export_non_strict(
return CustomObjArgument(
name=node.name, class_fqn=val._type().qualified_name() # type: ignore[attr-defined]
)
- else:
- # TODO: this branch is likely wrong, all permissible ConstantArgument type
- # should have been handled already
+ elif isinstance(val, (int, bool, str, float, type(None))):
return ConstantArgument(name=node.name, value=val)
+ else:
+ raise AssertionError(
+ f"Encountered an unsupported object of type {type(val)} "
+ f"while writing the metadata for exported program"
+ )
input_specs, output_specs = _sig_to_specs(
user_inputs=set(graph_signature.user_inputs),
diff --git a/torch/export/graph_signature.py b/torch/export/graph_signature.py
index 3e246e2ef0..ecfd785340 100644
--- a/torch/export/graph_signature.py
+++ b/torch/export/graph_signature.py
@@ -41,7 +41,7 @@ class CustomObjArgument:
@dataclasses.dataclass
class ConstantArgument:
name: str
- value: Union[int, float, bool, None]
+ value: Union[int, float, bool, str, None]
ArgumentSpec = Union[
|
2.41.0
|
55f1aeb02d04b57d384066335030118a102441d
|
Mon, 29 Apr 2024 06:05:12 +0000
|
[PATCH 0787/1000] Fix module buffer mutation (#124586)
|
Fixes #124583 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124586 Approved by: https://github.com/leslie-fang-intel, https://github.com/desertfire
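
For context, the guard added in torch/_inductor/graph.py also compares `storage_offset()` before reusing a cached constant; slices of the same buffer share one storage (and data pointer) but differ in offset, as this small standalone sketch shows:

```python
import torch

foo = torch.rand(3, 10)
row0, row1 = foo[0], foo[1]

# Same underlying storage / data pointer ...
assert row0.untyped_storage().data_ptr() == row1.untyped_storage().data_ptr()
# ... but different storage offsets, so they are not interchangeable constants.
assert row0.storage_offset() == 0
assert row1.storage_offset() == 10
```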
|
diff --git a/test/inductor/test_cpu_repro.py b/test/inductor/test_cpu_repro.py
index fbbdb8ee96..dfc453220a 100644
--- a/test/inductor/test_cpu_repro.py
+++ b/test/inductor/test_cpu_repro.py
@@ -343,6 +343,24 @@ class CPUReproTests(TestCase):
]
self.common(fn, inps)
+ @config.patch(freezing=True)
+ def test_module_buffer_mutation(self):
+ class Model(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.register_buffer("foo", torch.rand((3, 10)))
+
+ def forward(self, x):
+ lx = [x, x.clone(), x.clone()]
+ y = []
+ for i in range(3):
+ y.append(lx[i] + self.foo[i])
+ return torch.cat(y, 1)
+
+ with torch.no_grad():
+ example_inputs = (torch.rand(1, 10),)
+ self.common(Model(), example_inputs)
+
@unittest.skipIf(not torch.backends.mkldnn.is_available(), "MKLDNN is not enabled")
@patch("torch.cuda.is_available", lambda: False)
def test_linear_packed(self):
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index c6d363ab4b..de17bf4308 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -786,6 +786,7 @@ class GraphLowering(torch.fx.Interpreter):
and data.device == value.device
and data.untyped_storage().data_ptr()
== value.untyped_storage().data_ptr()
+ and data.storage_offset() == value.storage_offset()
):
return constant_name
|
2.41.0
|
03880e16bb3d765f6539c031315ca80dd343dbe
|
Mon, 29 Apr 2024 08:25:16 +0000
|
[PATCH 0789/1000] Update gen.py aoti_fm install dir (#125087)
|
Summary: make the AOTInductor install dir consistent with all the other install dirs. Test Plan: Sandcastle Differential Revision: D56660301 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125087 Approved by: https://github.com/frank-wei
|
diff --git a/torchgen/gen.py b/torchgen/gen.py
index 5c9b156b50..78699bcac1 100644
--- a/torchgen/gen.py
+++ b/torchgen/gen.py
@@ -2710,6 +2710,12 @@ def main() -> None:
help="output directory",
default="build/aten/src/ATen",
)
+ parser.add_argument(
+ "--aoti-install-dir",
+ "--aoti_install_dir",
+ help="output directory for AOTInductor shim",
+ default="torch/csrc/inductor/aoti_torch/generated",
+ )
parser.add_argument(
"--rocm",
action="store_true",
@@ -2830,15 +2836,15 @@ def main() -> None:
pathlib.Path(core_install_dir).mkdir(parents=True, exist_ok=True)
ops_install_dir = f"{options.install_dir}/ops"
pathlib.Path(ops_install_dir).mkdir(parents=True, exist_ok=True)
+ aoti_install_dir = f"{options.aoti_install_dir}"
+ pathlib.Path(aoti_install_dir).mkdir(parents=True, exist_ok=True)
core_fm = make_file_manager(options=options, install_dir=core_install_dir)
cpu_fm = make_file_manager(options=options)
cpu_vec_fm = make_file_manager(options=options)
cuda_fm = make_file_manager(options=options)
ops_fm = make_file_manager(options=options, install_dir=ops_install_dir)
- aoti_fm = make_file_manager(
- options=options, install_dir="torch/csrc/inductor/aoti_torch/generated"
- )
+ aoti_fm = make_file_manager(options=options, install_dir=aoti_install_dir)
# Only a limited set of dispatch keys get CPUFunctions.h headers generated
# for them; this is the set
|
2.41.0
|
498e28b2fb4a2cc5ca395dc4bc938f0731933d9
|
Sun, 28 Apr 2024 22:13:30 -0700
|
[PATCH 0790/1000] Remove API that allows for extra deferred runtime asserts during lowering (#124864)
|
I want to generate runtime assert nodes during lowering, which means that I need a finalized list of asserts by the time I start lowering. This means the runtime assert introduced in https://github.com/pytorch/pytorch/pull/113839 must go. Fortunately, that runtime assert was apparently never exercisable, and the test still "passes" without it. I replace it with a compile-time test. We can revisit if this assert fails in practice. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124864 Approved by: https://github.com/jansel
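
The replacement pattern, taken from the updated test, is to refine the unbacked size with `torch._check` up front so the expand is known to be valid at compile time, instead of relying on an assert deferred to lowering; roughly:

```python
import torch

# The test enables this so nonzero() can be captured with a dynamic output shape.
torch._dynamo.config.capture_dynamic_output_shape_ops = True


def fn(x):
    nz = x.nonzero()                 # nz.size(0) is a data-dependent (unbacked) size
    torch._check(nz.size(0) == 128)  # refinement replaces the old deferred assert
    return nz.expand([128, -1, 2])


x = torch.ones(32, 4)                # exactly 128 nonzero elements
out = torch.compile(fn, fullgraph=True)(x)
```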
|
diff --git a/test/inductor/test_unbacked_symints.py b/test/inductor/test_unbacked_symints.py
index 8d7228e0d4..43d1307fcf 100644
--- a/test/inductor/test_unbacked_symints.py
+++ b/test/inductor/test_unbacked_symints.py
@@ -38,14 +38,14 @@ class TestUnbackedSymints(InductorTestCase):
@skipCUDAIf(not HAS_CUDA, "requires cuda")
@dynamo_config.patch({"capture_dynamic_output_shape_ops": True})
- def test_expand_mismatch(self, device):
+ def test_expand_ok_with_runtime_assert(self, device):
def fn(x):
nz = x.nonzero()
- return nz.expand([-1, 128])
+ torch._check(nz.size(0) == 128)
+ return nz.expand([128, -1, 2])
x = make_tensor(32, 4, device=device, dtype=torch.float32, exclude_zero=True)
- with self.assertRaises(torch._dynamo.exc.TorchRuntimeError):
- actual = torch.compile(fn, fullgraph=True)(x)
+ actual = torch.compile(fn, fullgraph=True)(x)
@skipCUDAIf(not HAS_CUDA, "requires cuda")
@dynamo_config.patch({"capture_dynamic_output_shape_ops": True})
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 64a83e31e1..3fcbe514ed 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -1948,12 +1948,12 @@ class ExpandView(BaseView):
elif old_size[i] is None or old_size[i] == 1:
pass
else:
- # Expect broadcast compatibility
- new_size[i] = V.graph.sizevars.expect_equals(
- new_size[i],
- old_size[i],
- msg=f"Broadcast failed in ExpandView({x.get_size()}, {new_size}) on dimension {i}",
- )
+ # NB: new_size[i] == old_size[i] is known because the meta
+ # formula was expected to have taught us this equality.
+ # We can't conveniently check it right now because
+ # statically_known_equals doesn't know to consult preexisting
+ # guards
+ pass
return new_size
@classmethod
diff --git a/torch/_inductor/sizevars.py b/torch/_inductor/sizevars.py
index 7da7172b5f..fcf5c57a25 100644
--- a/torch/_inductor/sizevars.py
+++ b/torch/_inductor/sizevars.py
@@ -328,21 +328,6 @@ class SizeVarAllocator:
def guard_lt(self, left: Expr, right: Expr) -> None:
assert self.shape_env.evaluate_expr(sympy.Lt(left, right))
- def expect_true(self, expr: Expr, *, msg: str) -> None:
- expr = sympy_subs(expr, self.inv_precomputed_replacements) # type: ignore[arg-type]
- self.shape_env.defer_runtime_assert(expr, msg, fx_node=None)
-
- def expect_equals(self, left: Expr, right: Expr, *, msg: str) -> Expr:
- # Prefer returning the expression without unbacked symints
- if self.shape_env.is_unbacked_symint(left):
- self.expect_true(sympy.Eq(left, right), msg=msg) # type: ignore[arg-type]
- return right
- elif self.shape_env.is_unbacked_symint(right):
- self.expect_true(sympy.Eq(left, right), msg=msg) # type: ignore[arg-type]
- return left
- else:
- return self.guard_equals(left, right)
-
def guarded_order(self, seq):
"""
Return the order of a sequence as a permutation of range(len(seq)) and guard on that order not changing.
|
2.41.0
|
5e623af4bfe0390403e94def7506111e66eb695
|
Sun, 28 Apr 2024 22:13:31 -0700
|
[PATCH 0791/1000] Codegen runtime asserts in Inductor (#124874)
|
This completely subsumes https://github.com/pytorch/pytorch/pull/120816. This makes use of the unbacked binding machinery to teach Inductor how to generate deferred runtime asserts directly. There is some back story about why I did it this way, let me explain.

Previously, our strategy for generating runtime asserts was that Dynamo would insert them into the FX graph after finishing tracing, and we would attempt to code generate them based on the FX graph. This is a good strategy for export, where we immediately export the graph. However, this strategy was afflicted by problems in eager, where we reuse the same ShapeEnv as before. In particular, on subsequent graph passes, we would immediately turn all of these assertions into noops, because when we evaluated their expressions, we would see that because we had a deferred runtime assert in the ShapeEnv, we know "oh, of course this expression is True" already. Oops!

So, with this PR, we take the attitude that as long as the ShapeEnv sticks around, the ShapeEnv's list of deferred runtime asserts is the source of truth, and we don't put anything in the graph. So we just need to decide when to actually generate asserts, and the place I picked was Inductor lowering, since we already have an AssertScalar buffer concept, and so I just need to insert them at this point. AssertScalar also uses raw sympy.Expr rather than SymInt/Bool, so it is easier to prevent unrestricted simplification at this point.

There are a few things jumbled together in this PR. I can split them if you want, but some of the changes are from before I changed my strategy, and they're useful changes anyway.

**torch/_dynamo/output_graph.py** and **torch/_inductor/lowering.py** - Here, we stop putting deferred runtime asserts in the graph. I also have to make sure we don't DCE unused symbol arguments; we're going to get some goofy graph arguments this way, and it will be good to restore that optimization eventually. We also just disable codegen for `_assert_scalar` entirely; we assume that ShapeEnv will be good enough to capture all of these.

**torch/_inductor/codegen/wrapper.py** and **torch/_inductor/ir.py** - Add a way to codegen sizevars without forcing simplification.

**torch/_inductor/graph.py** - The main logic. Our strategy is to interpose in the same place we are testing that unbacked SymInts are properly showing up in lowered code. The logic is directly analogous to the logic in the existing insert deferred runtime asserts FX pass, but it's simpler because sympy expressions can be directly stored on inductor IR nodes.

**torch/fx/experimental/symbolic_shapes.py** - For extra safety, we have a way of freezing runtime asserts, so that if you try to add more we error. This prevents us from adding runtime asserts after we've done lowering. There is a funny interaction with backwards, for which there is a comment in graph.py.

**torch/fx/passes/runtime_assert.py** - This is not really needed in this PR, but I rewrote the runtime assert logic to use unbacked_bindings rather than inferring it by looking for unbacked SymInts. Now, keypaths are translated into FX node accessors. Unfortunately, I couldn't delete the old inference code, because you still need it to find backed SymInts from arguments (as this pass may be used on graphs which don't explicitly bind all their shape variables as arguments). There are some new tests exercising this.

TODO: I think we need to generate asserts for replacements too. This is a preexisting problem that the old FX pass had too.

Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124874 Approved by: https://github.com/jansel ghstack dependencies: #124864
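
Concretely (mirroring the new `test_nonzero_unbacked_refinement` test added below), a `torch._check` refinement on an unbacked size is now code-generated by Inductor as a runtime assert, so violating it raises from the compiled function at run time:

```python
import torch

torch._dynamo.config.capture_dynamic_output_shape_ops = True


def fn(x):
    z = x.nonzero()
    torch._check(z.size(0) == 4)   # lowered to an AssertScalar-style runtime check
    return z + 3


compiled = torch.compile(fn)
compiled(torch.tensor([0, 1, 3, 4, 2, 0, 0]))   # 4 nonzeros: check passes

try:
    compiled(torch.tensor([0, 0, 0, 0]))        # 0 nonzeros: the generated assert fires
except RuntimeError as err:
    print("runtime assert fired:", err)
```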
|
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index 1c6b2fada9..c23673389c 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -8493,8 +8493,6 @@ def ___make_guard_fn():
f(torch.tensor([2, 3, 4]), torch.randn(9))
- # See https://github.com/pytorch/pytorch/issues/119689
- @unittest.expectedFailure
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_runtime_assert_replacement(self):
@torch.compile(backend="aot_eager")
diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py
index 293def3b96..ce4f9ad193 100644
--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py
@@ -1504,6 +1504,27 @@ class AOTInductorTestsTemplate:
example_inputs = (torch.randn(4, 4, 4, 4).to(self.device),)
self.check_model(Model(), example_inputs)
+ # This exercises _eliminate_unbacked path in ShapeEnv
+ @unittest.skipIf(IS_FBCODE, "Not runnable in fbcode")
+ def test_dup_unbacked_sym_decl_with_refinement(self):
+ class Model(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, x):
+ abs_1 = torch.ops.aten.abs.default(x)
+ lt = torch.ops.aten.lt.Scalar(abs_1, 0.001)
+ eq = torch.ops.aten.eq.Scalar(lt, 0)
+ index_1 = torch.ops.aten.index.Tensor(x, [eq])
+ torch._check(index_1.size(0) == 4**4)
+ sin = torch.ops.aten.sin.default(index_1)
+ index_2 = torch.ops.aten.index.Tensor(x, [eq])
+ div_3 = torch.ops.aten.div.Tensor(sin, index_2)
+ return div_3
+
+ example_inputs = (torch.ones(4, 4, 4, 4).to(self.device),)
+ self.check_model(Model(), example_inputs)
+
def test_run_with_grad_enabled(self):
class Model(torch.nn.Module):
def forward(self, x, weight, bias):
@@ -2735,6 +2756,7 @@ CPU_TEST_FAILURES = {
is_skip=True
),
"test_dup_unbacked_sym_decl": fail_with_and_without_stack_allocation(),
+ "test_dup_unbacked_sym_decl_with_refinement": fail_with_and_without_stack_allocation(),
"test_dynamic_cat": fail_minimal_arrayref_interface(),
# https://github.com/pytorch/pytorch/issues/122978
"test_dynamic_scalar": fail_stack_allocation(is_skip=True),
@@ -2812,6 +2834,7 @@ CPU_TEST_FAILURES = {
CUDA_TEST_FAILURES = {
# test_failures, xfail by default, set is_skip=True to skip
"test_dup_unbacked_sym_decl": fail_abi_compatible_cuda(),
+ "test_dup_unbacked_sym_decl_with_refinement": fail_abi_compatible_cuda(),
"test_normal_functional": fail_abi_compatible_cuda(),
# There is a double-free issue which will be fixed in another PR
# no ABI shim fn for torch.sort; remove this when adding one
@@ -2830,6 +2853,7 @@ if TEST_WITH_ROCM:
CUDA_TEST_FAILURES.update(
{
"test_dup_unbacked_sym_decl": fail_cuda(is_skip=True),
+ "test_dup_unbacked_sym_decl_with_refinement": fail_cuda(is_skip=True),
"test_addmm_multiple_dynamic": fail_cuda(is_skip=True),
"test_bmm_multiple_dynamic": fail_cuda(is_skip=True),
"test_convolution": fail_cuda(is_skip=True),
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 8adf431771..fe1cf93768 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -4488,6 +4488,56 @@ class CommonTemplate:
(torch.randn([8, 16, 8, 8]),),
)
+ @torch._dynamo.config.patch(capture_dynamic_output_shape_ops=True)
+ def test_nonzero_unbacked_refinement(self):
+ def fn(x):
+ z = x.nonzero()
+ torch._check(z.size(0) == 4)
+ return z + 3
+
+ self.common(
+ fn,
+ (torch.tensor([0, 1, 3, 4, 2, 0, 0]),),
+ )
+
+ with self.assertRaises(RuntimeError):
+ torch.compile(fn)(torch.tensor([0, 0, 0, 0]))
+
+ @torch._dynamo.config.patch(capture_scalar_outputs=True)
+ def test_unbacked_floordiv_simplify(self):
+ def fn(x, y):
+ z = y.item()
+ torch._check(z // 2 == 3)
+ return x + x.new_zeros(z)
+
+ self.common(
+ fn,
+ (
+ torch.randn(6),
+ torch.tensor([6]),
+ ),
+ )
+
+ self.common(
+ fn,
+ (
+ torch.randn(7),
+ torch.tensor([7]),
+ ),
+ )
+
+ @torch._dynamo.config.patch(capture_scalar_outputs=True)
+ def test_unbacked_floordiv_simplify_errors(self):
+ def fn(x, y):
+ z = y.item()
+ torch._check(z // 2 == 3)
+ return x + x.new_zeros(z)
+
+ # This is a little suboptimal: we actually fail /in the compiler/ but
+ # not in a way that causes Dynamo to graph break
+ with self.assertRaises(RuntimeError):
+ torch.compile(fn)(torch.randn(8), torch.tensor(8))
+
def test_cat(self):
def fn(a):
tmp = a * 2
diff --git a/test/inductor/test_torchinductor_codegen_dynamic_shapes.py b/test/inductor/test_torchinductor_codegen_dynamic_shapes.py
index abac142ec3..c8f554b77c 100644
--- a/test/inductor/test_torchinductor_codegen_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_codegen_dynamic_shapes.py
@@ -333,6 +333,9 @@ test_failures = {
"test_mutations_loop_fusion_dynamic_shapes": TestFailure(
("cpu", "cuda"), is_skip=True
),
+ # Refinement means we don't actually generate dynamic shapes (but only on
+ # cpu apparently?!)
+ "test_nonzero_unbacked_refinement_dynamic_shapes": TestFailure(("cpu",)),
}
if TEST_WITH_ROCM:
diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py
index fea87eebaa..6c7454656b 100644
--- a/test/test_proxy_tensor.py
+++ b/test/test_proxy_tensor.py
@@ -24,6 +24,7 @@ import torch.testing._internal.optests as optests
from torch._C import _disabled_torch_function_impl
from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter, get_isolated_graphmodule
from torch.utils._pytree import tree_map
+from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts
from torch import nn
import re
@@ -1449,11 +1450,6 @@ def forward(self, x_1, y_1):
add = torch.ops.aten.add.Tensor(y_1, 2); y_1 = None
return add""") # noqa: B950
- # This is due to https://github.com/pytorch/pytorch/pull/124316 which bans
- # i0 = i1 refinement. To work around it, you should assert i1 = s0 by
- # hand. This particular example the refinement is OK because i0 is always
- # available when i1 and vice versa, but it is difficult to tell if it
- # is safe in general.
@unittest.expectedFailure
def test_unbacked_unify_guard_transitivity(self):
def f(x1, x2, y):
@@ -1466,7 +1462,36 @@ def forward(self, x_1, y_1):
else:
return y + 2
- make_fx(f, tracing_mode="symbolic")(torch.tensor(10), torch.tensor(10), torch.randn(10))
+ gm = make_fx(f, tracing_mode="symbolic")(torch.tensor(10), torch.tensor(10), torch.randn(10))
+ insert_deferred_runtime_asserts(gm, gm.shape_env, "test")
+ gm.recompile()
+ r = str(gm.code).strip()
+ # self.assertExpectedInline(
+ # r, """""" # noqa: B950
+ # )
+
+ def test_unbacked_unify_dependency_violation(self):
+ def f(x1, x2, x3, y):
+ z1 = x1.item()
+ torch._check(z1 // 9 == 1)
+ z2 = x2.item()
+ z3 = x3.item()
+ torch._check(z1 == z2 + z3)
+ return y * 2
+ if z2 + z3 == z1:
+ return y * 2
+ else:
+ return y + 3
+
+ # NB:
+
+ gm = make_fx(f, tracing_mode="symbolic")(torch.tensor(10), torch.tensor(5), torch.tensor(5), torch.randn(1))
+ insert_deferred_runtime_asserts(gm, gm.shape_env, "test")
+ gm.recompile()
+ self.assertEqual(gm(torch.tensor(12), torch.tensor(6), torch.tensor(6), torch.tensor([1.0])), torch.tensor([2.0]))
+ with self.assertRaises(RuntimeError):
+ gm(torch.tensor(20), torch.tensor(10), torch.tensor(10), torch.tensor([1.0]))
+
def test_split_unbacked_sizes(self):
def f(lengths, values):
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index 36eb5e223d..cc282d1005 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -918,8 +918,11 @@ class WrapperCodeGen(CodeGen):
def finalize_prefix(self):
pass
- def codegen_python_sizevar(self, x: Expr) -> str:
- return pexpr(V.graph.sizevars.simplify(x))
+ def codegen_python_sizevar(self, x: Expr, *, simplify: bool = True) -> str:
+ if simplify:
+ return pexpr(V.graph.sizevars.simplify(x))
+ else:
+ return pexpr(x)
def codegen_sizevar(self, x: Expr) -> str:
return self.codegen_python_sizevar(x)
diff --git a/torch/_inductor/graph.py b/torch/_inductor/graph.py
index de17bf4308..014f334f22 100644
--- a/torch/_inductor/graph.py
+++ b/torch/_inductor/graph.py
@@ -26,6 +26,7 @@ from torch.fx.experimental.symbolic_shapes import (
free_unbacked_symbols,
has_free_symbols,
resolve_unbacked_bindings,
+ RuntimeAssert,
ShapeEnv,
SymTypes,
)
@@ -316,6 +317,14 @@ class GraphLowering(torch.fx.Interpreter):
self._shape_env = shape_env
self.reuse_shape_env = True
self._shape_env = shape_env
+ # We are going to start code generating runtime asserts, so make sure
+ # you don't start adding new ones in the lowering process
+ shape_env.freeze_runtime_asserts()
+ # We're going to mutate ras_by_symbol as we finish generating them
+ self.ras_by_symbol: Dict[
+ sympy.Symbol, List[RuntimeAssert]
+ ] = shape_env.deferred_runtime_asserts.copy()
+ self.bound_unbacked_symbols: Set[sympy.Symbol] = set()
self.sizevars = SizeVarAllocator(shape_env)
self.graph_input_names: List[str] = []
self.graph_inputs: Dict[str, TensorBox] = {}
@@ -705,7 +714,7 @@ class GraphLowering(torch.fx.Interpreter):
def run(self, *args):
return super().run(*args)
- def register_buffer(self, buffer: ir.Buffer):
+ def register_buffer(self, buffer: ir.Buffer, *, set_name: bool = False):
name = self.qualify_name(f"buf{len(self.buffers)}")
self.buffers.append(buffer)
self.name_to_buffer[name] = buffer
@@ -716,6 +725,8 @@ class GraphLowering(torch.fx.Interpreter):
):
self.add_device_info(buffer.get_device())
+ if set_name:
+ buffer.name = name
return name
def register_list(self, buffer_names: List[str]):
@@ -1357,6 +1368,64 @@ class GraphLowering(torch.fx.Interpreter):
return "***\n".join(r)
if n.op != "placeholder":
+ # Note [Backwards runtime asserts]
+ # Backwards poses an interesting problem for deferred runtime
+ # asserts. In the easy case, we may solely close over data
+ # dependent sized tensors, and there are no binding sites for
+ # unbacked SymInts. In this case, we can just drop all the
+ # runtime asserts on the floor: no non-placeholder bindings, no
+ # problem.
+ #
+ # However, it is *possible* for a fresh runtime assert to show up
+ # between forwards and backwards. Right now, the freezing process
+ # that happens when we lower forwards means that we will freeze
+ # runtime asserts, and then the moment the backwards lowering
+ # process attempts to add a new deferred runtime assert, we will
+ # fail. Let's say you remove that assert. Now when we get here,
+ # we need to make sure we actually emit these asserts (because we
+ # can't emit them in forwards, we already compiled it). So we
+ # have to do something here. But we don't want to reemit ALL
+ # deferred runtime asserts, we only want to emit the NEW ones.
+ # Therefore needing some sort of stratification in the ShapeEnv.
+ # This is all doable, it just hasn't been done yet.
+ shape_env = V.graph.sizevars.shape_env
+
+ for i0 in new_unbacked_defs:
+ ras = self.ras_by_symbol.pop(i0, [])
+ # NB: size-like not needed, we won't retrace
+ vr = shape_env.var_to_range[i0]
+ if not shape_env._default_unspecified_value_range().issubset(vr):
+
+ def convert(s):
+ try:
+ return int(s)
+ except TypeError:
+ return None
+
+ if (lower := convert(vr.lower)) is not None:
+ self.register_buffer(
+ ir.AssertScalar(i0 >= vr.lower, f"{i0} >= {vr.lower}"),
+ set_name=True,
+ )
+ if (upper := convert(vr.upper)) is not None:
+ self.register_buffer(
+ ir.AssertScalar(i0 <= vr.upper, f"{i0} <= {vr.upper}"),
+ set_name=True,
+ )
+
+ for ra in ras:
+ fvs = free_unbacked_symbols(ra.expr)
+ missing = fvs - self.bound_unbacked_symbols
+ if missing:
+ i1 = sorted(missing, key=lambda x: str(x))[0]
+ self.ras_by_symbol.setdefault(i1, []).append(ra)
+ else:
+ self.register_buffer(
+ ir.AssertScalar(ra.expr, f"{ra.expr}"), set_name=True
+ )
+
+ self.bound_unbacked_symbols |= new_unbacked_defs
+
unbacked_bindings = resolve_unbacked_bindings(
V.graph.sizevars.shape_env, n.meta.get("unbacked_bindings", {})
)
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 3fcbe514ed..80b33026c3 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -3018,7 +3018,7 @@ class Buffer(IRNode):
return self.layout.make_indexer()
def get_name(self) -> str:
- assert self.name
+ assert self.name, self
return self.name
def get_device(self):
@@ -5073,8 +5073,15 @@ class AssertScalar(ExternKernel):
if V.graph.cpp_wrapper:
pass
else:
+ # NB: It is EXTREMELY important not to simplify the scalar under
+ # assertion here, because simplify is done with respect to
+ # runtime asserts. So if you have "u0 == 0" in the runtime
+ # asserts, if you subsequently try to simplify(u0 == 0), you will
+ # get True (because we've already runtime assert'ed that it's
+ # true). But we're code generating the actual runtime assert
+ # here!!
wrapper.writeline(
- f"if not {V.graph.wrapper_code.codegen_python_sizevar(self.scalar)}:"
+ f"if not {V.graph.wrapper_code.codegen_python_sizevar(self.scalar, simplify=False)}:"
)
wrapper.writeline(f" raise RuntimeError({repr(self.msg)})")
# No one should ever use this buffer, but for uniformity
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index 0e579feb6b..a1e5de75f9 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -2395,10 +2395,11 @@ def _local_scalar_dense(data):
@register_lowering(aten._assert_scalar)
def _assert_scalar(data, msg):
- buffer = ir.AssertScalar(data, msg)
- # This buffer isn't used by anyone (it returns None), so we must explicitly register it
- buffer.name = V.graph.register_buffer(buffer)
- return buffer
+ # NB: These will be handled at codegen time
+ # Not sure if we are guaranteed to be able to serve out truth from the
+ # deferred_runtime_asserts, TODO: try this assert out
+ # assert bool(data.scalar), data
+ return None
def _full(fill_value, device, dtype, size):
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 6c17b5d870..0630614fb3 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -592,24 +592,35 @@ def compute_unbacked_bindings(shape_env, example_value, old_example_value=None):
else ""
)
)
- # TODO: This is pretty fragile
- # Normally, the equality test is supposed to be a no-op here, because
- # you've already called rebind_unbacked first which takes all the old
- # binding sites and discovers how they are newly bound. But this does
- # not always work. For example, if the original FX node wasn't a
- # binding site because you had a memo hit, but post translation you
- # aren't a memo hit anymore, there's now a new binding site... but we
- # know (because it's the same FX node) that the value is actually the
- # same, they're just not obviously equal anymore. So we just insert
- # a runtime assert in this case.
+ # Why do we have to do some rebinding here? If the original FX node
+ # wasn't a binding site because you had a memo hit, but post
+ # translation you aren't a memo hit anymore, there's now a new binding
+ # site... but we know (because it's the same FX node) that the value
+ # is actually the same, they're just not obviously equal anymore.
#
- # This is very fragile, because u0 == u1 assertion does not generate
- # a replacement. Here, I think it might be acceptable to do a
- # replacement, so long as we replace the newer thing with the older
- # thing. Fix this if it becomes an issue.
+ # The logic here is written carefully, because unlike the
+ # bind_unbacked case, we are not guaranteed to have a symbol for
+ # old_sym. If we have a symbol, do regular rename unbacked to; but if
+ # we don't, we need to specially eliminate the fresh unbacked symbol
+ # (NB: we are /trusting/ that the memoization is correct, and that we
+ # don't need to generate a new runtime assert. This is load bearing,
+ # as repropagation can happen after we've frozen runtime asserts.)
if old_example_value is not None:
for keypath in symbol_to_path.values():
- torch._check(pytree.key_get(old_example_value, keypath) == pytree.key_get(example_value, keypath))
+ old_sym = pytree.key_get(old_example_value, keypath)
+ new_sym = pytree.key_get(example_value, keypath)
+ if (
+ isinstance(new_sym, SymTypes) and
+ isinstance(new_s := new_sym.node.expr, sympy.Symbol)
+ ):
+ if isinstance(old_sym, SymTypes) and (old_s := old_sym.node.expr) != new_s:
+ if isinstance(old_s, sympy.Symbol):
+ shape_env._rename_unbacked_to(new_s, old_s)
+ else:
+ shape_env._eliminate_unbacked(new_s, old_s)
+ elif not isinstance(old_sym, SymTypes):
+ shape_env._eliminate_unbacked(new_s, sympy.sympify(old_sym))
+
return symbol_to_path
def definitely_true(a):
@@ -1300,6 +1311,8 @@ def _sympy_cast_symbool_to_symint_guardless(x: sympy.Expr) -> sympy.Expr:
def cast_symbool_to_symint_guardless(symbool: torch.SymBool) -> torch.SymInt:
+ if isinstance(symbool, bool):
+ return 1 if symbool else 0
int_sym = _sympy_cast_symbool_to_symint_guardless(symbool.node.expr)
return symbool.node.shape_env.create_symintnode(int_sym, hint=int(symbool.node.require_hint()) if has_hint(symbool) else None)
@@ -2233,6 +2246,7 @@ class ShapeEnv:
self.log = log
self.log.debug("create_env")
self.frozen = False
+ self.runtime_asserts_frozen = False
self.dim_constraints: Optional[DimConstraints] = None
self.counter = collections.Counter()
# Mapping from sympy.Symbol to the number of guards which mention this
@@ -2275,6 +2289,33 @@ class ShapeEnv:
self.fx_node_cache: Dict[Tuple[Callable, Tuple[Any, ...]], torch.fx.Node] = {}
self.source_to_symbol: Dict[str, sympy.Symbol] = {}
+ # Suppose you want to replace an unbacked symbol with another
+ # unbacked symbol. This is error prone because you can cause
+ # references to unbacked symbols to time travel backwards. E.g.,
+ #
+ # u1 = x.item()
+ # ... use of u1 ...
+ # u2 = y.item()
+ # u3 = z.item()
+ # torch._check(u1 == u2 + u3)
+ #
+ # If you replace u1 with u2 + u3, then the use of u1 now
+ # references u2 and u3 prior to them actually being bound at
+ # runtime.
+ #
+ # To control for this, we track the order unbacked symbols
+ # were allocated, and only allow substitutions if they respect
+ # the dependency from this order; an unbacked symbol can only
+ # be substituted with unbacked symbols that come before it in the
+ # order.
+ #
+ # This also imposes an ordering on the unbacked symbol binding
+ # sites themselves: you are not allowed to reorder unbacked symbol
+ # bindings. At the moment, this is not tracked, but we potentially
+ # could track this at the IR level using a higher order operator
+ # with something like effect token tracking.
+ self.unbacked_alloc_order: Dict[sympy.Symbol, int] = {}
+
from torch.fx.experimental.validator import translation_validation_enabled
self._translation_validation_enabled = translation_validation_enabled()
@@ -2406,6 +2447,10 @@ class ShapeEnv:
finally:
self.is_recording = False
+ @record_shapeenv_event()
+ def _eliminate_unbacked(self, orig_s: sympy.Symbol, new_s: sympy.Expr):
+ self._set_replacement(orig_s, new_s, "eliminate_unbacked")
+
# Unlike set_replacement, this records a shapeenv event
@record_shapeenv_event()
def _rename_unbacked_to(self, orig_s: sympy.Symbol, new_s: sympy.Symbol):
@@ -2526,6 +2571,17 @@ class ShapeEnv:
"""
self.frozen = True
+ @record_shapeenv_event()
+ def freeze_runtime_asserts(self):
+ """Freeze this ShapeEnv to stop adding deferred runtime asserts.
+
+ We will error if you try to install a new runtime assert when it is
+ frozen. This would indicate a lowering violation, or perhaps something
+ we know statically is already True but we are checking it again in a way
+ that is not clearly dischargeable.
+ """
+ self.runtime_asserts_frozen = True
+
def _create_symbol_for_source(self, source: Source) -> Optional[sympy.Symbol]:
if not self._translation_validation_enabled:
return None
@@ -4763,6 +4819,10 @@ class ShapeEnv:
# NB: Don't use new_expr as expr; it could contain gunk like shape0
# which we don't want to guard on
+ # If you're here because of this assert, read Note [Backwards runtime asserts]
+ # in torch/_inductor/graph.py
+ assert not self.runtime_asserts_frozen, expr
+
# OK, we're definitely doing a runtime assert now
if (
self._translation_validation_enabled
diff --git a/torch/fx/passes/runtime_assert.py b/torch/fx/passes/runtime_assert.py
index 0de728dd00..bb90b9c568 100644
--- a/torch/fx/passes/runtime_assert.py
+++ b/torch/fx/passes/runtime_assert.py
@@ -9,6 +9,7 @@ else:
ShapeEnv = Any
import torch
+import torch.utils._pytree as pytree
from torch import fx
from torch.fx._utils import get_node_context, lazy_format_graph_code
from torch.fx.experimental.sym_node import SymNode
@@ -48,7 +49,13 @@ def insert_deferred_runtime_asserts(
# Import sympy locally
import sympy
- from torch.fx.experimental.symbolic_shapes import free_symbols
+ from torch.fx.experimental.symbolic_shapes import (
+ CallMethodKey,
+ cast_symbool_to_symint_guardless,
+ ConvertIntKey,
+ DivideByKey,
+ free_symbols,
+ )
from torch.utils._sympy.interp import sympy_interp
from torch.utils._sympy.reference import PythonReferenceAnalysis
@@ -118,51 +125,96 @@ def insert_deferred_runtime_asserts(
with graph.inserting_before(
node.next if node not in placeholders else last_placeholder.next
):
- example_value = get_example_value(node)
- if example_value is None:
- continue
+ # Unfortunately, this logic still must remain because manual
+ # make_fx calls may not explicitly bind all symbolic ints as
+ # arguments to the function, so we must infer it from the other
+ # arguments
+ if (
+ node in placeholders
+ and (example_value := get_example_value(node)) is not None
+ ):
+
+ def match_symbol(symint, cb):
+ if (
+ isinstance(symint, torch.SymInt)
+ and isinstance(symint.node, SymNode)
+ and isinstance(s := symint.node.expr, sympy.Symbol)
+ and s not in symbol_to_proxy
+ and s in needed_symbols
+ ):
+ symbol_to_proxy[s] = fx.Proxy(cb())
+ log.debug("symbol_to_proxy[%s] = %s", s, symbol_to_proxy[s])
+
+ match_symbol(example_value, lambda: node)
+ if isinstance(t := example_value, torch.Tensor):
+ for i, s in enumerate(t.size()):
+ match_symbol(s, lambda: graph.call_method("size", (node, i)))
+ for i, s in enumerate(t.stride()):
+ match_symbol(s, lambda: graph.call_method("stride", (node, i)))
+ match_symbol(
+ t.storage_offset(),
+ lambda: graph.call_method("storage_offset", (node,)),
+ )
+ # Handle asserts that aren't associated with any symbol. This
+ # doesn't really have to be in the loop as it will only run once,
+ # it just needs to happen right after the placeholders.
if node not in placeholders:
add_runtime_asserts(ras_by_symbol.pop(None, [])) # type: ignore[call-overload]
defs = []
- # For every new unbacked symbol, we need an fx.Node representing
- # precisely this value. There are a few places where the unbacked
- # symbol could have come from, and we will check them to setup
- # these nodes.
- #
- # For a case like item(), this is trivial (no new node is added.)
- #
- # For nonzero(), we need to add something like i0 = out.size(0)
- #
- # We could end up with duplicate nodes this way but it is not a
- # big deal.
- #
- # We also do this to setup backed SymInts, but those are all going
- # to be matched from placeholders
- def match_symbol(symint, cb):
- if (
- isinstance(symint, torch.SymInt)
- and isinstance(symint.node, SymNode)
- and isinstance(s := symint.node.expr, sympy.Symbol)
- and s not in symbol_to_proxy
- and s in needed_symbols
- ):
- symbol_to_proxy[s] = fx.Proxy(cb())
- log.debug("symbol_to_proxy[%s] = %s", s, symbol_to_proxy[s])
+ if unbacked_bindings := node.meta.get("unbacked_bindings"):
+ for s, keypath in unbacked_bindings.items():
defs.append(s)
- match_symbol(example_value, lambda: node)
- if isinstance(t := example_value, torch.Tensor):
- for i, s in enumerate(t.size()):
- match_symbol(s, lambda: graph.call_method("size", (node, i)))
- for i, s in enumerate(t.stride()):
- match_symbol(s, lambda: graph.call_method("stride", (node, i)))
- match_symbol(
- t.storage_offset(),
- lambda: graph.call_method("storage_offset", (node,)),
- )
+ # TODO: some CSE when generating these nodes can probably
+                        # help reduce graph size and improve compile time
+ def go(node, keypath):
+ if keypath == ():
+ return node
+ if (
+ len(keypath) >= 2
+ and isinstance(keypath[0], CallMethodKey)
+ and isinstance(keypath[1], pytree.SequenceKey)
+ ):
+ return go(
+ graph.call_method(
+ keypath[0].name, (node, keypath[1].idx)
+ ),
+ keypath[2:],
+ )
+ elif isinstance(keypath[0], CallMethodKey):
+ return go(
+ graph.call_method(keypath[0].name, (node,)), keypath[1:]
+ )
+ elif isinstance(keypath[0], pytree.SequenceKey):
+ return go(
+ graph.call_function(
+ operator.getitem, (node, keypath[0].idx)
+ ),
+ keypath[1:],
+ )
+ elif isinstance(keypath[0], ConvertIntKey):
+ return go(
+ graph.call_function(
+ cast_symbool_to_symint_guardless, (node,)
+ ),
+ keypath[1:],
+ )
+ elif isinstance(keypath[0], DivideByKey):
+ # TODO: need to assert divisibility
+ return go(
+ graph.call_function(
+ operator.floordiv, (node, keypath[0].divisor)
+ ),
+ keypath[1:],
+ )
+ else:
+ raise AssertionError(f"unrecognized keypath {keypath}")
+
+ symbol_to_proxy[s] = fx.Proxy(go(node, keypath))
+ log.debug("symbol_to_proxy[%s] = %s", s, symbol_to_proxy[s])
for i0 in defs:
ras = ras_by_symbol.pop(i0, [])
|
2.41.0
|
a44d2f7fb805fe83803b79ccbd783494ec31d6b
|
Mon, 29 Apr 2024 14:13:44 +0000
|
[PATCH 0792/1000] split out flop counting its own method (#125061)
|
Summary: Modularize the code for reuse by splitting `__torch_dispatch__` so that the flop counting itself lives in its own `_count_flops` method. Test Plan: unit tests Differential Revision: D56644523 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125061 Approved by: https://github.com/842974287
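
The public behavior is unchanged; a rough usage sketch (assuming the usual `FlopCounterMode` constructor and `get_total_flops()` accessor) in which `__torch_dispatch__` now simply forwards to the new `_count_flops` helper:

```python
import torch
from torch.utils.flop_counter import FlopCounterMode

model = torch.nn.Linear(64, 32)
x = torch.randn(16, 64)

# Ops dispatched inside the context hit __torch_dispatch__, which delegates
# the flop_registry lookup and accounting to _count_flops.
with FlopCounterMode(display=False) as flop_counter:
    model(x)

print(flop_counter.get_total_flops())
```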
|
diff --git a/torch/utils/flop_counter.py b/torch/utils/flop_counter.py
index fcad5d1fd3..42868dc359 100644
--- a/torch/utils/flop_counter.py
+++ b/torch/utils/flop_counter.py
@@ -539,7 +539,9 @@ class FlopCounterMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs if kwargs else {}
out = func(*args, **kwargs)
- func_packet = func._overloadpacket
+ return self._count_flops(func._overloadpacket, out, args, kwargs)
+
+ def _count_flops(self, func_packet, out, args, kwargs):
if func_packet in self.flop_registry:
flop_count_func = self.flop_registry[func_packet]
flop_count = flop_count_func(*args, **kwargs, out_val=out) # type: ignore[operator]
|
2.41.0
|
35a946241ce8f15686a0ad73a1b4c2b0f65f3f1
|
Thu, 25 Apr 2024 17:18:24 -0700
|
[PATCH 0793/1000] [RFC][FSDP2] Renamed `FSDP` to `FSDPModule` (#124955)
|
This PR renames the `FSDP` class to `FSDPModule`. This is a BC-breaking change. The rationale is that `FSDPModule` is more descriptive: since `fully_shard` is a module-level API (applied to a `module` arg), the `FSDP` class will always correspond to a module. Also, users commonly import `FullyShardedDataParallel` as `FSDP`, so this helps avoid name conflicts in some cases. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124955 Approved by: https://github.com/wanchaol, https://github.com/wconstab ghstack dependencies: #124651, #124741, #124767, #124768, #124780, #124787
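
A small sketch of what call sites look like after the rename (the helper function here is illustrative): the composable class is imported as `FSDPModule`, leaving the conventional `FSDP` alias free for `FullyShardedDataParallel`.

```python
import torch.nn as nn
from torch.distributed._composable.fsdp import FSDPModule, fully_shard
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP  # no name clash now


def is_composable_fsdp(module: nn.Module) -> bool:
    # After fully_shard(module), the module's class is dynamically rebuilt as
    # type(f"FSDP{cls.__name__}", (FSDPModule, cls), ...), so this check holds.
    return isinstance(module, FSDPModule)
```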
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_comm.py b/test/distributed/_composable/fsdp/test_fully_shard_comm.py
index 5ef56f0ed6..0bb4e54b32 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_comm.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_comm.py
@@ -12,7 +12,7 @@ import torch.nn.functional as F
from torch.distributed._composable import checkpoint, replicate
from torch.distributed._composable.fsdp import (
- FSDP,
+ FSDPModule,
fully_shard,
MixedPrecisionPolicy,
OffloadPolicy,
@@ -630,13 +630,13 @@ class TestFullyShardUnshardMultiProcess(FSDPTest):
def forward(self, x: torch.Tensor):
y1, work1 = self.reduce_module1(x)
- if isinstance(self.mlps.mlp1, FSDP):
+ if isinstance(self.mlps.mlp1, FSDPModule):
self.mlps.mlp1.unshard(async_op=True)
y2, work2 = self.reduce_module2(x)
- if isinstance(self.mlps.mlp2, FSDP):
+ if isinstance(self.mlps.mlp2, FSDPModule):
self.mlps.mlp2.unshard(async_op=True)
y3, work3 = self.reduce_module3(x)
- if isinstance(self.mlps.mlp3, FSDP):
+ if isinstance(self.mlps.mlp3, FSDPModule):
self.mlps.mlp3.unshard(async_op=True)
return self.mlps([y1, y2, y3], [work1, work2, work3])
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_state.py b/test/distributed/_composable/fsdp/test_fully_shard_state.py
index 467d75b112..7b45f7d4d9 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_state.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_state.py
@@ -4,7 +4,7 @@ import copy
import unittest
import torch.nn as nn
-from torch.distributed._composable.fsdp import FSDP, fully_shard
+from torch.distributed._composable.fsdp import FSDPModule, fully_shard
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_fsdp import FSDPTestMultiThread, MLP
from torch.testing._internal.common_utils import run_tests
@@ -47,22 +47,22 @@ class TestFullyShardState(FSDPTestMultiThread):
model = MLP(8)
fully_shard(model)
self.assertTrue(isinstance(model, MLP))
- self.assertTrue(isinstance(model, FSDP))
+ self.assertTrue(isinstance(model, FSDPModule))
self.assertEqual(model.__class__.__name__, "FSDPMLP")
for module in model.modules():
if module is model:
continue
- self.assertFalse(isinstance(module, FSDP))
+ self.assertFalse(isinstance(module, FSDPModule))
# Check that slicing into a `Sequential` does not preserve FSDP
model = nn.Sequential(*[MLP(8) for _ in range(3)])
fully_shard(model)
self.assertTrue(isinstance(model, nn.Sequential))
- self.assertTrue(isinstance(model, FSDP))
+ self.assertTrue(isinstance(model, FSDPModule))
self.assertEqual(model.__class__.__name__, "FSDPSequential")
sliced_model = model[:2]
self.assertTrue(isinstance(sliced_model, nn.Sequential))
- self.assertFalse(isinstance(sliced_model, FSDP))
+ self.assertFalse(isinstance(sliced_model, FSDPModule))
@unittest.skipIf(not TEST_CUDA, "no cuda")
def test_fully_shard_unsupported_module_cls(self):
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_training.py b/test/distributed/_composable/fsdp/test_fully_shard_training.py
index 7b8ab154cb..bf90bd165c 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_training.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_training.py
@@ -13,7 +13,7 @@ import torch.nn as nn
from torch.distributed._composable import checkpoint, replicate
from torch.distributed._composable.fsdp import (
CPUOffloadPolicy,
- FSDP,
+ FSDPModule,
fully_shard,
OffloadPolicy,
)
@@ -144,7 +144,7 @@ class TestFullyShardRegisteredParams(FSDPTestMultiThread):
self._assert_tensor_params(root_params)
self._assert_same_params(model.parameters(), ref_model.parameters())
for module in model.modules():
- if isinstance(module, FSDP):
+ if isinstance(module, FSDPModule):
module.reshard() # however, we can manually reshard
self._assert_dtensor_params(model.parameters())
self._assert_same_params(model.parameters(), ref_model.parameters())
@@ -854,7 +854,7 @@ class TestFullyShardGradientAccumulation(FSDPTest):
# memory usage since we do not reshard after forward
if use_explicit_unshard:
for module in model.modules():
- if isinstance(module, FSDP):
+ if isinstance(module, FSDPModule):
module.unshard(async_op=True)
# Emulate the 1f1b pipeline schedule and only reduce gradients on the
diff --git a/torch/distributed/_composable/fsdp/__init__.py b/torch/distributed/_composable/fsdp/__init__.py
index b57ed17ed6..4a0523f0a0 100644
--- a/torch/distributed/_composable/fsdp/__init__.py
+++ b/torch/distributed/_composable/fsdp/__init__.py
@@ -1,2 +1,2 @@
from ._fsdp_api import CPUOffloadPolicy, MixedPrecisionPolicy, OffloadPolicy
-from .fully_shard import FSDP, fully_shard
+from .fully_shard import FSDPModule, fully_shard
diff --git a/torch/distributed/_composable/fsdp/fully_shard.py b/torch/distributed/_composable/fsdp/fully_shard.py
index a46a103b3d..e1538b00f6 100644
--- a/torch/distributed/_composable/fsdp/fully_shard.py
+++ b/torch/distributed/_composable/fsdp/fully_shard.py
@@ -136,7 +136,7 @@ def fully_shard(
# Place FSDP leftmost for highest priority in the method resolution order
cls = module.__class__
dct = {"__deepcopy__": unimplemented_deepcopy}
- new_cls = type(f"FSDP{cls.__name__}", (FSDP, cls), dct)
+ new_cls = type(f"FSDP{cls.__name__}", (FSDPModule, cls), dct)
module.__class__ = new_cls
return module
@@ -147,14 +147,14 @@ def unimplemented_deepcopy(*args: Any, **kwargs: Any) -> typing_extensions.Never
)
-class FSDP:
+class FSDPModule:
def __new__(cls, *args, **kwargs):
"""
Override ``__new__`` to remove the FSDP class and directly construct
the original class for cases like indexing into a container module.
"""
# Use index 2 since 0 is the dynamically constructed `FSDP<...>` class
- # and index 1 is the `FSDP` class itself
+ # and index 1 is the `FSDPModule` class itself
orig_cls = cls.__mro__[2]
self = orig_cls.__new__(orig_cls, *args, **kwargs)
self.__init__(*args, **kwargs)
@@ -223,7 +223,7 @@ class FSDP:
self_module = cast(nn.Module, self)
modules = list(self_module.modules()) if recurse else [self_module]
for module in modules:
- if isinstance(module, FSDP):
+ if isinstance(module, FSDPModule):
state = module._get_fsdp_state()
if fsdp_param_group := state._fsdp_param_group:
fsdp_param_group.reduce_grads = requires_gradient_sync
@@ -243,7 +243,7 @@ class FSDP:
self_module = cast(nn.Module, self)
modules = list(self_module.modules()) if recurse else [self_module]
for module in modules:
- if isinstance(module, FSDP):
+ if isinstance(module, FSDPModule):
state = module._get_fsdp_state()
if fsdp_param_group := state._fsdp_param_group:
fsdp_param_group.all_reduce_grads = requires_all_reduce
@@ -265,7 +265,7 @@ class FSDP:
self_module = cast(nn.Module, self)
modules = list(self_module.modules()) if recurse else [self_module]
for module in modules:
- if isinstance(module, FSDP):
+ if isinstance(module, FSDPModule):
state = module._get_fsdp_state()
if fsdp_param_group := state._fsdp_param_group:
fsdp_param_group.reshard_after_backward = reshard_after_backward
|
2.41.0
|
d46ab4104a881d1a01196944492e4e8c710ba1c
|
Sun, 28 Apr 2024 16:38:55 -0700
|
[PATCH 0794/1000] [dtensor] move pad/unpad_tensor to separate utils (#124871)
|
As titled: 1. pad/unpad is a general utility, not specific to the Shard placement; 2. for the purpose of the next PR, move these two out of the Shard placement itself and give them an additional pad_dim argument. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124871 Approved by: https://github.com/awgu, https://github.com/wz337, https://github.com/XilunWu
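A small sketch of the relocated helpers (shapes chosen for illustration; the functions are the ones added to `_collective_utils.py` in this diff):

```python
# pad_tensor/unpad_tensor are now free functions that take an explicit pad_dim,
# instead of Shard placement methods bound to self.dim.
import torch
from torch.distributed._tensor._collective_utils import pad_tensor, unpad_tensor

t = torch.randn(5, 8)
padded = pad_tensor(t, 0, 3)           # pad dim 0 by 3 -> shape (8, 8)
restored = unpad_tensor(padded, 0, 3)  # narrow dim 0 back -> shape (5, 8)
assert restored.shape == t.shape
```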
|
diff --git a/test/distributed/_tensor/test_dtensor.py b/test/distributed/_tensor/test_dtensor.py
index 653dfcbb58..224ca8c673 100644
--- a/test/distributed/_tensor/test_dtensor.py
+++ b/test/distributed/_tensor/test_dtensor.py
@@ -809,8 +809,10 @@ class TestDTensorPlacementTypes(DTensorTestBase):
]
assert_array_equal(expected_pad_sizes, pad_sizes)
+ from torch.distributed._tensor._collective_utils import unpad_tensor
+
unpadded_list = [
- shard_placement._unpad_tensor(tensor, pad_sizes[i])
+ unpad_tensor(tensor, shard_placement.dim, pad_sizes[i])
if pad_sizes[i] > 0
else tensor
for i, tensor in enumerate(splitted_tensor_list)
diff --git a/test/distributed/test_device_mesh.py b/test/distributed/test_device_mesh.py
index 534185f67a..89f98ce0b6 100644
--- a/test/distributed/test_device_mesh.py
+++ b/test/distributed/test_device_mesh.py
@@ -9,6 +9,7 @@ from torch.distributed._tensor._collective_utils import (
mesh_all_to_all,
mesh_broadcast,
mesh_scatter,
+ unpad_tensor,
)
from torch.distributed._tensor.placement_types import _Partial, Shard
from torch.distributed.device_mesh import _mesh_resources, DeviceMesh, init_device_mesh
@@ -490,8 +491,8 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
mesh_scatter(scattered_tensor, padded_tensor_list, device_mesh, mesh_dim=0)
if pad_sizes[my_rank] != 0:
- scattered_tensor = shard_placement._unpad_tensor(
- scattered_tensor, pad_sizes[my_rank]
+ scattered_tensor = unpad_tensor(
+ scattered_tensor, shard_dim, pad_sizes[my_rank]
)
if scattered_tensor.numel() == 0:
@@ -533,7 +534,7 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
)
unpadded_list = [
(
- shard_placement._unpad_tensor(big_tensor_chunks[i], pad_sizes[i])
+ unpad_tensor(big_tensor_chunks[i], shard_dim, pad_sizes[i])
if pad_sizes[i] > 0
else big_tensor_chunks[i]
)
@@ -629,8 +630,8 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
# unpad scattered_tensor
if pad_sizes[my_rank] > 0:
- scattered_tensor = shard_placement._unpad_tensor(
- scattered_tensor, pad_sizes[my_rank]
+ scattered_tensor = unpad_tensor(
+ scattered_tensor, shard_dim, pad_sizes[my_rank]
)
if scattered_tensor.numel() == 0:
diff --git a/torch/distributed/_tensor/_collective_utils.py b/torch/distributed/_tensor/_collective_utils.py
index 9cf8376bd2..603ac09f4a 100644
--- a/torch/distributed/_tensor/_collective_utils.py
+++ b/torch/distributed/_tensor/_collective_utils.py
@@ -164,6 +164,24 @@ def mesh_all_to_all(
return work
+def pad_tensor(tensor: torch.Tensor, pad_dim: int, pad_size: int) -> torch.Tensor:
+ if pad_size == 0:
+ return tensor
+ pad = [0, 0] * (tensor.ndim - pad_dim)
+ pad[-1] = pad_size
+ return torch.nn.functional.pad(tensor, pad)
+
+
+def unpad_tensor(tensor: torch.Tensor, pad_dim: int, pad_size: int) -> torch.Tensor:
+ if pad_size == 0:
+ return tensor
+ return tensor.narrow(
+ pad_dim,
+ start=0,
+ length=tensor.size(pad_dim) - pad_size,
+ )
+
+
def spec_to_bytes(spec: "placement_types.DTensorSpec") -> int:
assert spec.tensor_meta is not None, "spec should have tensor meta defined!"
return spec.tensor_meta.dtype.itemsize * math.prod(spec.shape)
diff --git a/torch/distributed/_tensor/placement_types.py b/torch/distributed/_tensor/placement_types.py
index 8d88d064e8..d06c317c16 100644
--- a/torch/distributed/_tensor/placement_types.py
+++ b/torch/distributed/_tensor/placement_types.py
@@ -7,7 +7,12 @@ import torch
import torch.distributed._functional_collectives as funcol
import torch.distributed.distributed_c10d as c10d
-from torch.distributed._tensor._collective_utils import mesh_broadcast, mesh_scatter
+from torch.distributed._tensor._collective_utils import (
+ mesh_broadcast,
+ mesh_scatter,
+ pad_tensor,
+ unpad_tensor,
+)
from torch.distributed.device_mesh import DeviceMesh
@@ -83,37 +88,13 @@ class Shard(Placement):
for shard, pad_size in zip(tensor_list, pad_sizes):
# Fill the empty tensor with zeroes with padding.
if with_padding and pad_size > 0:
- shard = self._pad_tensor(shard, pad_size)
+ shard = pad_tensor(shard, self.dim, pad_size)
shard = shard.contiguous() if contiguous else shard
shard_list.append(shard)
return shard_list, pad_sizes
else:
return tensor_list, pad_sizes
- def _pad_tensor(
- self,
- tensor: torch.Tensor,
- pad_size: int,
- ) -> torch.Tensor:
- if pad_size == 0:
- return tensor
- pad = [0, 0] * (tensor.ndim - self.dim)
- pad[-1] = pad_size
- return torch.nn.functional.pad(tensor, pad)
-
- def _unpad_tensor(
- self,
- tensor: torch.Tensor,
- pad_size: int,
- ) -> torch.Tensor:
- if pad_size == 0:
- return tensor
- return tensor.narrow(
- self.dim,
- start=0,
- length=tensor.size(self.dim) - pad_size,
- )
-
@staticmethod
def _local_shard_size_on_dim(
size_on_dim: int,
@@ -166,7 +147,7 @@ class Shard(Placement):
# Only unpad if the local_tensor was padded on the dimension.
pad_size = pad_sizes[my_coordinate[mesh_dim]]
if pad_size > 0:
- output = self._unpad_tensor(output, pad_size)
+ output = unpad_tensor(output, self.dim, pad_size)
return output
def _reduce_shard_tensor(
@@ -201,7 +182,7 @@ class Shard(Placement):
)
if is_padded:
- output = self._unpad_tensor(output, pad_sizes[my_coordinate[mesh_dim]]) # type: ignore[possibly-undefined]
+ output = unpad_tensor(output, self.dim, pad_sizes[my_coordinate[mesh_dim]]) # type: ignore[possibly-undefined]
return output
def _to_replicate_tensor(
@@ -225,7 +206,7 @@ class Shard(Placement):
if is_padded:
full_chunk_size = (logical_dim_size + num_chunks - 1) // num_chunks
pad_size = full_chunk_size - local_shape[self.dim]
- local_tensor = self._pad_tensor(local_tensor, pad_size)
+ local_tensor = pad_tensor(local_tensor, self.dim, pad_size)
if not local_tensor.is_contiguous():
local_tensor = local_tensor.contiguous()
@@ -237,7 +218,7 @@ class Shard(Placement):
)
if is_padded:
unpad_size = full_chunk_size * num_chunks - logical_dim_size # type: ignore[possibly-undefined]
- result = self._unpad_tensor(result, unpad_size)
+ result = unpad_tensor(result, self.dim, unpad_size)
return result
def _replicate_to_shard(
|
2.41.0
|
b79469d2437531fa506b48d42488be512a87f4d
|
Sun, 28 Apr 2024 16:38:55 -0700
|
[PATCH 0795/1000] [dtensor] implement shard dim change with alltoall (#124872)
|
As titled: we implement a dedicated communication op so that sharding-dimension changes can be done efficiently with alltoall, replacing the previous allgather + local chunk approach. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124872 Approved by: https://github.com/XilunWu, https://github.com/yifuwang ghstack dependencies: #124871
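A hedged sketch of the user-visible effect, assuming an initialized process group with a CUDA/NCCL backend (on CPU/Gloo the diff keeps the allgather + chunk fallback):

```python
# A Shard(0) -> Shard(1) redistribute now lowers to a single
# _dtensor.shard_dim_alltoall instead of allgather + local chunk.
import torch
import torch.distributed as dist
from torch.distributed._tensor import DeviceMesh, Shard, distribute_tensor

mesh = DeviceMesh("cuda", torch.arange(dist.get_world_size()))
dt = distribute_tensor(torch.randn(8, 8), mesh, [Shard(0)])
dt = dt.redistribute(mesh, [Shard(1)])  # shard-dim change via alltoall
```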
|
diff --git a/test/distributed/_tensor/test_redistribute.py b/test/distributed/_tensor/test_redistribute.py
index b073783759..9192f815b4 100644
--- a/test/distributed/_tensor/test_redistribute.py
+++ b/test/distributed/_tensor/test_redistribute.py
@@ -334,6 +334,89 @@ class RedistributeTest(DTensorTestBase):
dt_full_tensor = dt.full_tensor()
self.assertEqual(dt_full_tensor, input_tensor)
+ @with_comms
+ def test_redistribute_shard_dim_change(self):
+ # test 1d device mesh
+ mesh_1d = DeviceMesh(self.device_type, torch.arange(self.world_size))
+ data_to_test = [
+ # evenly sharded case
+ torch.randn((8, 8), device=self.device_type),
+ # 3d or more dims
+ torch.randn((8, 8, 8), device=self.device_type),
+ # uneven case 1
+ torch.randn((8, 5), device=self.device_type),
+ # uneven case 2
+ torch.randn((5, 8), device=self.device_type),
+ # uneven case 3
+ torch.randn((5, 5), device=self.device_type),
+ ]
+
+ sharding_src_dst_pairs = [([Shard(0)], [Shard(1)]), ([Shard(1)], [Shard(0)])]
+
+ comm_mode = CommDebugMode()
+
+ for input_data in data_to_test:
+ for src, dst in sharding_src_dst_pairs:
+ expected_dt = distribute_tensor(input_data.clone(), mesh_1d, dst)
+ sharded_dt = distribute_tensor(input_data, mesh_1d, src)
+ with comm_mode:
+ out_dt = sharded_dt.redistribute(mesh_1d, dst)
+ self.assertEqual(out_dt.placements, expected_dt.placements)
+ local_out_dt = out_dt.to_local()
+ local_expected_dt = expected_dt.to_local()
+ self.assertEqual(out_dt.to_local(), expected_dt.to_local())
+ if self.device_type == "cuda":
+ self.assertEqual(
+ comm_mode.get_comm_counts()[
+ torch.ops._dtensor.shard_dim_alltoall
+ ],
+ 1,
+ )
+ else:
+ self.assertEqual(
+ comm_mode.get_comm_counts()[funcol.all_gather_into_tensor],
+ 1,
+ )
+
+ # test 2d device mesh
+ mesh_2d = DeviceMesh(
+ self.device_type, torch.arange(self.world_size).reshape(2, 2)
+ )
+ data_to_test_2d = [
+ # evenly sharded case
+ torch.randn((8, 8), device=self.device_type),
+ # 3d or more dims
+ torch.randn((8, 8, 8), device=self.device_type),
+ # uneven case 1
+ torch.randn((8, 5), device=self.device_type),
+ # uneven case 2
+ torch.randn((5, 8), device=self.device_type),
+ # uneven case 3
+ torch.randn((5, 5), device=self.device_type),
+ ]
+ sharding_src_dst_pairs_2d = [
+ ([Shard(0), Shard(1)], [Shard(0), Shard(0)]),
+ ([Shard(0), Shard(1)], [Shard(1), Shard(0)]),
+ ([Shard(0), Shard(0)], [Shard(1), Shard(1)]),
+ ]
+
+ for input_data in data_to_test_2d:
+ if input_data.ndim > 2:
+ sharding_spec_combs = sharding_src_dst_pairs_2d + [
+ ([Shard(0), Shard(2)], [Shard(1), Shard(0)])
+ ]
+ else:
+ sharding_spec_combs = sharding_src_dst_pairs_2d
+ for src, dst in sharding_spec_combs:
+ expected_dt = distribute_tensor(input_data.clone(), mesh_2d, dst)
+ sharded_dt = distribute_tensor(input_data, mesh_2d, src)
+ out_dt = sharded_dt.redistribute(mesh_2d, dst)
+
+ self.assertEqual(out_dt.placements, expected_dt.placements)
+ local_out_dt = out_dt.to_local()
+ local_expected_dt = expected_dt.to_local()
+ self.assertEqual(out_dt.to_local(), expected_dt.to_local())
+
class MultiDimRedistributeTest(DTensorTestBase):
@property
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index a1e5de75f9..4b219fc517 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -6002,6 +6002,18 @@ try:
ir._WaitKernel.create_wait(_c10d_functional.wait_tensor.default, inp)
return inp
+ @register_lowering(torch.ops._dtensor.shard_dim_alltoall)
+ def _shard_dim_alltoall(inp, gather_dim, shard_dim, group_name):
+ return ir.TensorBox.create(
+ ir._CollectiveKernel.create_out_of_place(
+ torch.ops._dtensor.shard_dim_alltoall.default,
+ inp,
+ gather_dim,
+ shard_dim,
+ group_name,
+ )
+ )
+
except ImportError:
log.info(
"Inductor support for distributed collectives depends on building torch.distributed"
diff --git a/torch/csrc/distributed/c10d/Functional.cpp b/torch/csrc/distributed/c10d/Functional.cpp
index d633429bb3..d392c0213b 100644
--- a/torch/csrc/distributed/c10d/Functional.cpp
+++ b/torch/csrc/distributed/c10d/Functional.cpp
@@ -579,3 +579,50 @@ TORCH_LIBRARY(_c10d_functional_autograd, m) {
c10::DispatchKey::Autograd, ::all_gather_into_tensor_autograd),
{at::Tag::pt2_compliant_tag});
}
+
+namespace {
+// DTensor related comm operations, sharing code with functional collective for
+// now
+at::Tensor shard_dim_alltoall(
+ const at::Tensor& input,
+ int64_t gather_dim,
+ int64_t shard_dim,
+ std::string group_name) {
+ auto group = c10d::resolve_process_group(group_name);
+ auto group_size = group->getSize();
+ std::vector<int64_t> output_sizes = input.sizes().vec();
+ if (output_sizes[shard_dim] % group_size != 0) {
+ LOG(WARNING) << "The first dimension of the shard_dim_alltoall input ("
+ << output_sizes[shard_dim]
+ << ") is not divisible by the group size (" << group_size
+ << ").";
+ }
+ output_sizes[shard_dim] = output_sizes[shard_dim] / group_size;
+ std::vector<at::Tensor> inputs;
+ auto length = output_sizes[shard_dim];
+ for (int i = 0; i < group_size; i++) {
+ inputs.push_back(input.narrow(shard_dim, i * length, length).contiguous());
+ }
+ // allocate outputs
+ std::vector<at::Tensor> outputs;
+ for (int i = 0; i < group_size; i++) {
+ outputs.push_back(input.new_empty(output_sizes).contiguous());
+ }
+ auto work = group->alltoall(outputs, inputs);
+
+ work->wait();
+ // TODO: it's very tricky to get the current async behavior work for shard dim
+ // alltoall so for now we just keep this comm op to be synchronous. We can
+ // revisit later how to support the async case with the Work registry.
+ return at::cat(outputs, gather_dim);
+}
+} // namespace
+
+// DTensor comm op registry
+TORCH_LIBRARY(_dtensor, m) {
+ m.def(
+ "shard_dim_alltoall(Tensor input, int gather_dim, int shard_dim, str group_name) -> Tensor",
+ torch::dispatch(
+ c10::DispatchKey::CompositeExplicitAutograd, ::shard_dim_alltoall),
+ {at::Tag::pt2_compliant_tag});
+}
diff --git a/torch/distributed/_tensor/_collective_utils.py b/torch/distributed/_tensor/_collective_utils.py
index 603ac09f4a..51c1379625 100644
--- a/torch/distributed/_tensor/_collective_utils.py
+++ b/torch/distributed/_tensor/_collective_utils.py
@@ -6,9 +6,11 @@ from functools import lru_cache
from typing import List, Optional
import torch
+import torch.distributed._functional_collectives as funcol
import torch.distributed._tensor.placement_types as placement_types
from torch.distributed.device_mesh import _mesh_resources, DeviceMesh
from torch.distributed.distributed_c10d import (
+ _get_group_size_by_name,
all_to_all,
broadcast,
get_global_rank,
@@ -23,7 +25,33 @@ from torch.distributed.distributed_c10d import (
logger = logging.getLogger(__name__)
-# TODO: we need to migrate these APIs to be functional collectives
+@torch.library.register_fake("_dtensor::shard_dim_alltoall")
+def _shard_dim_alltoall_meta(input, gather_dim, shard_dim, group_name):
+ group_size = _get_group_size_by_name(group_name)
+ stacked_list = [torch.empty_like(input) for _ in range(group_size)]
+ return torch.cat(stacked_list, dim=gather_dim).chunk(group_size, dim=shard_dim)
+
+
+def shard_dim_alltoall(input, gather_dim, shard_dim, mesh, mesh_dim):
+ if mesh.device_type == "cpu":
+ # Gloo does not support alltoall, so falling back to allgather + chunk
+ logger.warning(
+ "CPU process group does not support alltoall yet, falling back with allgather + chunk!"
+ )
+ out = funcol.all_gather_tensor(input, gather_dim, (mesh, mesh_dim))
+ if isinstance(out, funcol.AsyncCollectiveTensor):
+ # stick to the same behavior for the alltoall case, remove this once we enable alltoall async
+ out = out.wait()
+ out = torch.chunk(out, mesh.size(mesh_dim), dim=shard_dim)[
+ mesh.get_local_rank(mesh_dim)
+ ]
+ return out.contiguous() if not out.is_contiguous() else out
+
+ group_name = funcol._resolve_group_name((mesh, mesh_dim))
+ # TODO: enable async op for shard_dim_alltoall
+ return torch.ops._dtensor.shard_dim_alltoall(
+ input, gather_dim, shard_dim, group_name
+ )
def mesh_scatter(
diff --git a/torch/distributed/_tensor/debug/comm_mode.py b/torch/distributed/_tensor/debug/comm_mode.py
index b195b30154..604380a37f 100644
--- a/torch/distributed/_tensor/debug/comm_mode.py
+++ b/torch/distributed/_tensor/debug/comm_mode.py
@@ -50,6 +50,8 @@ class CommDebugMode(TorchDispatchMode):
self.comm_registry.add(native_op)
self.comm_registry.add(py_op)
+ self.comm_registry.add(torch.ops._dtensor.shard_dim_alltoall)
+
def get_total_counts(self) -> int:
return sum(self.comm_counts.values())
diff --git a/torch/distributed/_tensor/placement_types.py b/torch/distributed/_tensor/placement_types.py
index d06c317c16..042dc0e109 100644
--- a/torch/distributed/_tensor/placement_types.py
+++ b/torch/distributed/_tensor/placement_types.py
@@ -11,6 +11,7 @@ from torch.distributed._tensor._collective_utils import (
mesh_broadcast,
mesh_scatter,
pad_tensor,
+ shard_dim_alltoall,
unpad_tensor,
)
from torch.distributed.device_mesh import DeviceMesh
@@ -241,6 +242,67 @@ class Shard(Placement):
)
return shards[shard_index].clone()
+ def _to_new_shard_dim(
+ self,
+ local_tensor: torch.Tensor,
+ mesh: DeviceMesh,
+ mesh_dim: int,
+ current_logical_shape: List[int],
+ new_shard_dim: int,
+ ) -> torch.Tensor:
+ """
+ transform from existing sharded tensor to a new sharded tensor on
+ that shard on a new dimension, which performs an alltoall
+ """
+ my_coordinate = mesh.get_coordinate()
+ if my_coordinate is None:
+ # if rank is not part of mesh, we simply return local_tensor,
+ # which should be an empty tensor
+ return local_tensor
+
+ num_chunks = mesh.size(mesh_dim=mesh_dim)
+
+ old_dim_logical_size = current_logical_shape[self.dim]
+ new_dim_logical_size = current_logical_shape[new_shard_dim]
+ old_dim_padding = old_dim_logical_size % num_chunks != 0
+ new_dim_padding = new_dim_logical_size % num_chunks != 0
+ if old_dim_padding:
+ old_dim_full_chunk_size = (
+ old_dim_logical_size + num_chunks - 1
+ ) // num_chunks
+ old_dim_pad_size = old_dim_full_chunk_size - local_tensor.size(self.dim)
+ local_tensor = pad_tensor(local_tensor, self.dim, old_dim_pad_size)
+ if new_dim_padding:
+ new_dim_full_chunk_size = (
+ new_dim_logical_size + num_chunks - 1
+ ) // num_chunks
+ new_dim_pad_size = new_dim_full_chunk_size * num_chunks - local_tensor.size(
+ new_shard_dim
+ )
+ local_tensor = pad_tensor(local_tensor, new_shard_dim, new_dim_pad_size)
+
+ if not local_tensor.is_contiguous():
+ local_tensor = local_tensor.contiguous()
+
+ new_tensor = shard_dim_alltoall(
+ local_tensor, self.dim, new_shard_dim, mesh, mesh_dim
+ )
+
+ if old_dim_padding:
+ old_dim_unpad_size = (
+ old_dim_full_chunk_size * num_chunks - current_logical_shape[self.dim] # type: ignore[possibly-undefined]
+ )
+ new_tensor = unpad_tensor(new_tensor, self.dim, old_dim_unpad_size) # type: ignore[possibly-undefined]
+
+ if new_dim_padding:
+ local_shard_size_on_new_dim = self._local_shard_size_on_dim(
+ new_dim_logical_size, num_chunks, my_coordinate[mesh_dim]
+ )[0]
+ new_dim_unpad_size = new_dim_full_chunk_size - local_shard_size_on_new_dim # type: ignore[possibly-undefined]
+ new_tensor = unpad_tensor(new_tensor, new_shard_dim, new_dim_unpad_size) # type: ignore[possibly-undefined]
+
+ return new_tensor
+
def __eq__(self, other: object) -> bool:
if not isinstance(other, Shard):
return False
diff --git a/torch/distributed/_tensor/redistribute.py b/torch/distributed/_tensor/redistribute.py
index 1e4249f3fe..5cef7dbb04 100644
--- a/torch/distributed/_tensor/redistribute.py
+++ b/torch/distributed/_tensor/redistribute.py
@@ -49,17 +49,15 @@ def _gen_transform_infos(
dst_spec: DTensorSpec,
) -> List[_TransformInfo]:
"""
- Generate the transform infos from the source placements to the target placements, to
- transform from source to target placement it might have multipl steps, i.e. it might
- decompose Si -> Sj into Si -> R -> Sj.
+ Generate the transform infos from the source placements to the target placements.
+
+ To transform from source to target placement it might have multiple steps, i.e. it
+ might decompose Si -> Sj into Si -> R -> Sj.
This would detects if there're mis-aligned shardings between src/dst placements.
i.e. (Shard(0), Shard(0)) -> (Replicate(), Shard(0)), in this case Shard(0) -> Shard(0)
for mesh dimension 1 actually needs reshard, because in the first case it's a sub-sharding
of an already tensor dimension 0, and in the second case, it's the first sharding on tensor
dimension 0.
-
- Note that we also currently handles sharding on different tensor dimensions, e.g.
- Shard(0) -> Shard(1) in this pass
"""
src_dim_counts: Dict[int, int] = {}
dst_dim_counts: Dict[int, int] = {}
@@ -103,10 +101,10 @@ def _gen_transform_infos(
if (
isinstance(src, Shard)
and isinstance(dst, Shard)
- and (
- src.dim != dst.dim or src_dim_counts[src.dim] != dst_dim_counts[dst.dim]
- )
+ and (mesh_ndim > 1 or src_dim_counts[src.dim] != dst_dim_counts[dst.dim])
):
+ # for the case when mesh ndim > 1 or shard dim counts are different
+ # TODO: see if we can optimize the mesh_ndim > 1 case
# decompose Shard(i) -> Shard(j) into Shard(i) -> Replicate() -> Shard(j)
transform_infos.append(
_TransformInfo(
@@ -207,24 +205,18 @@ def redistribute_local_tensor(
local_tensor, device_mesh, i, my_coordinate[i]
)
else:
- # NOTE: we don't support this case efficiently yet, the fallback path we are going here is
- # to decompose Shard(0) -> Shard(1) into Shard(0) -> Replicate -> Shard(1)
- # TODO: enable this with all_to_all
assert (
current.is_shard()
), f"Current placement should be shard but found {current}"
shard_spec = cast(Shard, current)
if shard_spec.dim != target_placement.dim:
- new_local_tensor = shard_spec._to_replicate_tensor(
- local_tensor, device_mesh, i, transform_info.logical_shape
- )
- shards, _ = target_placement._split_tensor(
- new_local_tensor,
- num_chunks,
- with_padding=False,
- contiguous=False,
+ new_local_tensor = shard_spec._to_new_shard_dim(
+ local_tensor,
+ device_mesh,
+ i,
+ transform_info.logical_shape,
+ target_placement.dim,
)
- new_local_tensor = shards[my_coordinate[i]]
elif target.is_partial():
if current.is_replicate():
partial_spec = cast(_Partial, target)
|
2.41.0
|
6cc73dc13141351c4b0c6c9328aabdf7d4b4927
|
Mon, 29 Apr 2024 18:11:28 +0000
|
[PATCH 0797/1000] [oss][torch.package] fix multiple error messages within PackageExporter (#124943)
|
Summary: fixes two issues: (1) when exporting with debug=True, the list of error-causing modules and a dependency path to each is not printed correctly; a missing newline after the path puts the module name for the next error on the wrong line, which makes the output confusing to read. (2) when a pickled object directly references more than one mocked module, the error message incorrectly repeats the same information, claiming the referenced attribute is present in several different libraries, because the if condition references the last module name seen while walking the pickle ops rather than the module name from the enclosing `for module_name in all_dependencies:` block. This is confusing because one error prints as O(all_dependencies) errors, all with different module names but the same attribute name. Differential Revision: D56578035 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124943 Approved by: https://github.com/JonAmazon, https://github.com/houseroad
|
diff --git a/torch/package/package_exporter.py b/torch/package/package_exporter.py
index e069a80c5f..493c017ccf 100644
--- a/torch/package/package_exporter.py
+++ b/torch/package/package_exporter.py
@@ -166,7 +166,7 @@ class PackagingError(Exception):
if debug:
module_path = dependency_graph.first_path(module_name)
message.write(
- f" A path to {module_name}: {' -> '.join(module_path)}"
+ f" A path to {module_name}: {' -> '.join(module_path)}\n"
)
if not debug:
message.write("\n")
@@ -705,9 +705,9 @@ class PackageExporter:
""" If an object happens to come from a mocked module, then we collect these errors and spit them
out with the other errors found by package exporter.
"""
- if module in mocked_modules:
- assert isinstance(module, str)
- fields = mocked_modules[module]
+ if module_name in mocked_modules:
+ assert isinstance(module_name, str)
+ fields = mocked_modules[module_name]
self.dependency_graph.add_node(
module_name,
action=_ModuleProviderAction.MOCK,
|
2.41.0
|
e13c7e593c1032fa3f96f4aa1b3379547a3a9f3
|
Mon, 29 Apr 2024 18:19:42 +0000
|
[PATCH 0798/1000] Revert "[Meta Tensor] fix meta inplace set storage (#123880)"
|
This reverts commit cccae9355191a807040fb40a65178c4d7fe3f084. Reverted https://github.com/pytorch/pytorch/pull/123880 on behalf of https://github.com/izaitsevfb due to breaks cpu_inductor_torchbench (detectron2_fasterrcnn) ([comment](https://github.com/pytorch/pytorch/pull/123880#issuecomment-2083366385))
|
diff --git a/aten/src/ATen/native/TensorShape.cpp b/aten/src/ATen/native/TensorShape.cpp
index f5479925fc..b7d8eeb00f 100644
--- a/aten/src/ATen/native/TensorShape.cpp
+++ b/aten/src/ATen/native/TensorShape.cpp
@@ -421,19 +421,9 @@ Tensor& set_storage_meta__symint(Tensor& result, Storage storage, c10::SymInt st
// it. TODO: Actually this might not quite be correct if we use special
// pointers to track whether or not fake cuda tensors are pinned or not
const auto itemsize = result.dtype().itemsize();
- c10::SymInt new_size_bytes = at::detail::computeStorageNbytes(
+ c10::SymInt size_bytes = at::detail::computeStorageNbytes(
size, stride, itemsize, std::move(storage_offset));
- // TODO: When there are unbacked SymInts, we unconditionally skip the
- // setter. This is technically wrong, but we cannot conveniently test
- // the real condition in many cases, because a lot of people are using
- // set_ just to swizzle metadata on a tensor, they didn't actually want
- // to see if they need to resize the storage.
- //
- // The old behavior was to unconditionally set_nbytes, but I think not
- // setting it is more safe.
- if (new_size_bytes.has_hint() && storage.sym_nbytes().has_hint() && TORCH_GUARD_SIZE_OBLIVIOUS(new_size_bytes.sym_gt(storage.sym_nbytes()))) {
- storage.set_nbytes(std::move(new_size_bytes));
- }
+ storage.set_nbytes(std::move(size_bytes));
}
return result;
}
diff --git a/test/dynamo/test_subclasses.py b/test/dynamo/test_subclasses.py
index 8adcd04843..8005d6e3a2 100644
--- a/test/dynamo/test_subclasses.py
+++ b/test/dynamo/test_subclasses.py
@@ -3,8 +3,6 @@ import functools
import itertools
import unittest
-from functools import partial
-
import torch
import torch._dynamo.test_case
@@ -39,105 +37,6 @@ def traceable_subclass(c):
return torch._dynamo.config.patch("traceable_tensor_subclasses", {c})
-def get_jagged_tensor(nested_size, offsets, requires_grad=True):
- # Makes a jagged tensor with N constituent tensors with size
- # as specified ((S0, S1, S2), D)
- D = nested_size[1]
- out = []
- for s in nested_size[0]:
- out.append(torch.randn(s, D, requires_grad=requires_grad, dtype=torch.float64))
- return jagged_from_list(out, offsets)
-
-
-def get_view_test_cases():
- # Test all cases with both an NT base and a dense base
- # Subclass -> Subclass
- # Dense -> Subclass
-
- # NB: Don't close over loop variables, they will not get copied into the
- # closure
- #
- # NB: These return functions so we don't generate tensors during test
- # collection time
-
- def mk_basic(base_is_nt):
- # There are three cases to consider here based on the logic in
- # meta_utils.py
- #
- # (1) basic case:
- # view is not a leaf and has the same requires grad as its basic case
- x, _ = get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=True)
- x = x.clone() if base_is_nt else x
- assert not x.is_leaf
- return x.unsqueeze(-1)
-
- def mk_leaf(base_is_nt, requires_grad_1, requires_grad_2):
- x, _ = get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=requires_grad_1)
- x = x.clone() if base_is_nt else x
- with torch.no_grad():
- x_view = x.unsqueeze(-1)
- # The issue is this doesn't quite work
- x_view.requires_grad_(requires_grad_2)
-
- return x_view
-
- def mk_obscure(base_is_nt):
- x, _ = get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=False)
- x = x.clone() if base_is_nt else x
- # intermediate leaf view
- with torch.no_grad():
- x_view = x.unsqueeze(-1)
- x_view.requires_grad_(True)
- x_view_view = x_view.unsqueeze(-1)
- return x_view_view
-
- for base_is_nt in [False, True]:
- prefix = f"base_is_nt_{base_is_nt}"
-
- yield partial(mk_basic, base_is_nt), f"{prefix}_basic"
-
- # (2) leaf view case:
- # the view has to be a leaf (w/ requires_grad True or requires_grad False)
- # base w/ requires_grad True or requires_grad False
- for requires_grad_1, requires_grad_2 in itertools.product(
- [True, False], repeat=2
- ):
- yield partial(
- mk_leaf, base_is_nt, requires_grad_1, requires_grad_2
- ), f"{prefix}_leaf_{requires_grad_1}_{requires_grad_2}"
-
- # (3) obscure case:
- # view is not a leaf (implies requires_grad True)
- # base w/ requires_grad False)
- yield partial(mk_obscure, base_is_nt), f"{prefix}_obscure"
-
- # Subclass -> Dense
- yield lambda: get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=True)[
- 0
- ].clone(), "subclass_dense"
-
- # Dense -> Subclass -> Dense -> Subclass
- def mk_dense_subclass_dense_subclass():
- values = torch.randn(10, 5)
- offsets = torch.tensor([0, 3, 6, 10])
- offsets2 = offsets.clone().detach()
- return nested_view_from_values_offsets(
- nested_view_from_values_offsets(values, offsets).values(), offsets
- )
-
- yield mk_dense_subclass_dense_subclass, "dense_subclass_dense_subclass"
-
- def mk_subclass_dense_subclass_dense():
- x = get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=True)[0].clone()
- offsets2 = x.offsets().clone().detach()
- nt_view = nested_view_from_values_offsets(x.values(), offsets2).values()
-
- yield mk_subclass_dense_subclass_dense, "subclass_dense_subclass_dense"
-
-
-VIEW_TEST_CASES = {k: v for v, k in get_view_test_cases()}
-
-
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
compile_full_eager = torch.compile(backend="eager", fullgraph=True)
@@ -1308,7 +1207,15 @@ instantiate_parametrized_tests(SubclassTests)
class TestNestedTensor(torch._dynamo.test_case.TestCase):
def _get_jagged_tensor(self, nested_size, offsets, requires_grad=True):
- return get_jagged_tensor(nested_size, offsets, requires_grad)
+ # Makes a jagged tensor with N constituent tensors with size
+ # as specified ((S0, S1, S2), D)
+ D = nested_size[1]
+ out = []
+ for s in nested_size[0]:
+ out.append(
+ torch.randn(s, D, requires_grad=requires_grad, dtype=torch.float64)
+ )
+ return jagged_from_list(out, offsets)
def _get_nc_jagged_tensor(self, inner_dim, starts, lengths, requires_grad=True):
# Makes a jagged tensor with N constituent tensors with size
@@ -1462,9 +1369,62 @@ class TestNestedTensor(torch._dynamo.test_case.TestCase):
torch.compile(fn, fullgraph=True, backend="aot_eager")(nt)
- def _input_view_test(self, nt_view_name):
- nt_view = VIEW_TEST_CASES[nt_view_name]()
+ def _get_views(self):
+ # Test all cases with both an NT base and a dense base
+ # Subclass -> Subclass
+ # Dense -> Subclass
+ for base_is_nt in [False, True]:
+ # There are three cases to consider here based on the logic in
+ # meta_utils.py
+ #
+ # (1) basic case:
+ # view is not a leaf and has the same requires grad as its basic case
+ x, _ = self._get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=True)
+ x = x.clone() if base_is_nt else x
+ self.assertEqual(x.is_leaf, False)
+ yield x.unsqueeze(-1)
+
+ # (2) leaf view case:
+ # the view has to be a leaf (w/ requires_grad True or requires_grad False)
+ # base w/ requires_grad True or requires_grad False
+ for requires_grad_1, requires_grad_2 in itertools.product(
+ [True, False], repeat=2
+ ):
+ x, _ = self._get_jagged_tensor(
+ ((2, 3, 4), 3), None, requires_grad=requires_grad_1
+ )
+ x = x.clone() if base_is_nt else x
+ with torch.no_grad():
+ x_view = x.unsqueeze(-1)
+ # The issue is this doesn't quite work
+ x_view.requires_grad_(requires_grad_2)
+ yield x_view
+
+ # (3) obscure case:
+ # view is not a leaf (implies requires_grad True)
+ # base w/ requires_grad False)
+ x, _ = self._get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=False)
+ x = x.clone() if base_is_nt else x
+ # intermediate leaf view
+ with torch.no_grad():
+ x_view = x.unsqueeze(-1)
+ x_view.requires_grad_(True)
+ x_view_view = x_view.unsqueeze(-1)
+ yield x_view_view
+
+ # Subclass -> Dense
+ x = self._get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=True)[0].clone()
+ yield x.values()
+
+ # Dense -> Subclass -> Dense -> Subclass
+ values = torch.randn(10, 5)
+ offsets = torch.tensor([0, 3, 6, 10])
+ offsets2 = offsets.clone().detach()
+ yield nested_view_from_values_offsets(
+ nested_view_from_values_offsets(values, offsets).values(), offsets
+ )
+ def _input_view_test(self, nt_view):
def fn(x):
return x.sin()
@@ -1490,15 +1450,8 @@ class TestNestedTensor(torch._dynamo.test_case.TestCase):
# varies based on the type of view
guard_str = "\n".join(guards)
- if (
- isinstance(nt_view._base, NestedTensor)
- or nt_view_name == "subclass_dense"
- ):
+ if isinstance(nt_view._base, NestedTensor):
self.assertExpectedInline(guard_str, """Eq(s3 - 1, s0)""")
- elif nt_view_name.startswith("base_is_nt_False_"):
- # TODO: this is a "do I need to resize storage" guard,
- # probably don't actually want to see this
- self.assertExpectedInline(guard_str, """8*s1*s3 <= 8*s0*s1""")
else:
self.assertExpectedInline(guard_str, """""")
return gm
@@ -1507,12 +1460,9 @@ class TestNestedTensor(torch._dynamo.test_case.TestCase):
compile_fn = torch.compile(fn, fullgraph=True, backend=backend, dynamic=True)
out = compile_fn(nt_view)
- @parametrize(
- "nt_view_name",
- [k for k in VIEW_TEST_CASES.keys() if k != "subclass_dense_subclass_dense"],
- )
- def test_inputs_to_compiled_fn_are_views(self, nt_view_name):
- self._input_view_test(nt_view_name)
+ def test_inputs_to_compiled_fn_are_views(self):
+ for nt_view in self._get_views():
+ self._input_view_test(nt_view)
def test_subclass_gives_static_shapes_when_dynamic_false(self):
def check_graph(gm, *args):
@@ -1540,10 +1490,10 @@ class TestNestedTensor(torch._dynamo.test_case.TestCase):
# are cached onto fake offsets to solve this problem.
@unittest.expectedFailure
def test_subclass_dense_subclass_dense_view(self):
- self._input_view_test("subclass_dense_subclass_dense")
-
-
-instantiate_parametrized_tests(TestNestedTensor)
+ x = self._get_jagged_tensor(((2, 3, 4), 3), None, requires_grad=True)[0].clone()
+ offsets2 = x.offsets().clone().detach()
+ nt_view = nested_view_from_values_offsets(x.values(), offsets2).values()
+ self._input_view_test(nt_view)
if __name__ == "__main__":
diff --git a/test/test_meta.py b/test/test_meta.py
index 93d7bb8c14..af1a5fb6ad 100644
--- a/test/test_meta.py
+++ b/test/test_meta.py
@@ -286,14 +286,6 @@ class TestMetaConverter(TestCase):
m = MetaConverter()(y)
self.assertMetadataMatches(m, y)
- def test_inplace_set_storage(self):
- x = torch.tensor([0, 1], dtype=torch.int64)
- storage = x.untyped_storage()
- ssize = storage.size()
- meta = torch.empty((), dtype=torch.int64)
- meta.set_(storage, 0, (), ())
- self.assertEqual(storage.size(), ssize)
-
@skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/1991")
def test_weakref(self):
x = torch.randn(4, 4, 4)
diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py
index fdea8a3441..d291605d58 100644
--- a/torch/_subclasses/fake_tensor.py
+++ b/torch/_subclasses/fake_tensor.py
@@ -8,7 +8,6 @@ from collections import defaultdict
from dataclasses import dataclass
from typing import (
Any,
- Callable,
cast,
Dict,
List,
@@ -1214,20 +1213,16 @@ class FakeTensorMode(TorchDispatchMode):
if metadata.is_neg:
torch._C._set_neg(empty, True)
- maybe_suppress: Callable[[], Any] = contextlib.nullcontext
- if self.shape_env is not None:
- maybe_suppress = self.shape_env.suppress_guards
-
if func.is_view:
# For view ops, the storage should be the same as the tensor input.
storage = args[cast(int, entry.view_idx)].untyped_storage()
- with in_kernel_invocation_manager(self), maybe_suppress():
+ with in_kernel_invocation_manager(self):
empty.set_(
storage, metadata.storage_offset, metadata.shape, metadata.stride
)
elif metadata.storage_offset != 0:
storage = empty.untyped_storage()
- with in_kernel_invocation_manager(self), maybe_suppress():
+ with in_kernel_invocation_manager(self):
empty.set_(
storage, metadata.storage_offset, metadata.shape, metadata.stride
)
|
2.41.0
|
2cf04cb7f7aa14aff4d1cf40517d5de797550e7
|
Mon, 29 Apr 2024 18:25:37 +0000
|
[PATCH 0799/1000] CI: add aarch64 linux workflow (#121284)
|
The aarch64 Linux workflow is triggered by ciflow/linux-aarch64 tags. Pull Request resolved: https://github.com/pytorch/pytorch/pull/121284 Approved by: https://github.com/atalman, https://github.com/malfet
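For reference, a sketch of the test-selection behavior this PR adds to `test/run_test.py` (the reduced list is taken verbatim from the diff below; the wrapper function name is illustrative):

```python
# On aarch64 Linux CI, only a reduced test list runs for now.
from torch.testing._internal.common_utils import IS_ARM64, IS_LINUX

ARM64_LINUX_TEST_LIST = ["test_modules"]

def select_tests(requested):
    if IS_ARM64 and IS_LINUX:
        return ARM64_LINUX_TEST_LIST
    return requested
```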
|
diff --git a/.ci/docker/build.sh b/.ci/docker/build.sh
index 2344862643..1b8ed8df93 100755
--- a/.ci/docker/build.sh
+++ b/.ci/docker/build.sh
@@ -306,6 +306,12 @@ case "$image" in
DB=yes
VISION=yes
CONDA_CMAKE=yes
+ # snadampal: skipping sccache due to the following issue
+ # https://github.com/pytorch/pytorch/issues/121559
+ SKIP_SCCACHE_INSTALL=yes
+ # snadampal: skipping llvm src build install because the current version
+ # from pytorch/llvm:9.0.1 is x86 specific
+ SKIP_LLVM_SRC_BUILD_INSTALL=yes
;;
*)
# Catch-all for builds that are not hardcoded.
@@ -399,6 +405,8 @@ DOCKER_BUILDKIT=1 docker build \
--build-arg "EXECUTORCH=${EXECUTORCH}" \
--build-arg "BASEKIT_VERSION=${BASEKIT_VERSION}" \
--build-arg "ACL=${ACL:-}" \
+ --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
+ --build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \
-f $(dirname ${DOCKERFILE})/Dockerfile \
-t "$tmp_tag" \
"$@" \
diff --git a/.ci/docker/requirements-ci.txt b/.ci/docker/requirements-ci.txt
index e62ddfdd5f..8551246c7e 100644
--- a/.ci/docker/requirements-ci.txt
+++ b/.ci/docker/requirements-ci.txt
@@ -263,10 +263,11 @@ unittest-xml-reporting<=3.2.0,>=2.0.0
#Pinned versions:
#test that import:
-#wheel not found on aarch64, and source build requires rust
lintrunner==0.10.7 ; platform_machine == "x86_64"
+#lintrunner is supported on aarch64-linux only from 0.12.4 version
+lintrunner==0.12.5 ; platform_machine == "aarch64"
#Description: all about linters!
-#Pinned versions: 0.10.7
+#Pinned versions: 0.10.7 on x86 and 0.12.5 on aarch64
#test that import:
rockset==1.0.3
diff --git a/.ci/docker/ubuntu/Dockerfile b/.ci/docker/ubuntu/Dockerfile
index bea3d3ec45..b471ce3b89 100644
--- a/.ci/docker/ubuntu/Dockerfile
+++ b/.ci/docker/ubuntu/Dockerfile
@@ -169,9 +169,11 @@ RUN rm install_acl.sh
ENV INSTALLED_ACL ${ACL}
# Install ccache/sccache (do this last, so we get priority in PATH)
+ARG SKIP_SCCACHE_INSTALL
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
-RUN bash ./install_cache.sh && rm install_cache.sh
+RUN if [ -z "${SKIP_SCCACHE_INSTALL}" ]; then bash ./install_cache.sh; fi
+RUN rm install_cache.sh
# Add jni.h for java host build
COPY ./common/install_jni.sh install_jni.sh
@@ -188,7 +190,9 @@ ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
# Install LLVM dev version (Defined in the pytorch/builder github repository)
+ARG SKIP_LLVM_SRC_BUILD_INSTALL
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
+RUN if [ -n "${SKIP_LLVM_SRC_BUILD_INSTALL}" ]; then set -eu; rm -rf /opt/llvm; fi
# AWS specific CUDA build guidance
ENV TORCH_CUDA_ARCH_LIST Maxwell
diff --git a/.ci/pytorch/build.sh b/.ci/pytorch/build.sh
index 13069482ae..f7eee9fe9a 100755
--- a/.ci/pytorch/build.sh
+++ b/.ci/pytorch/build.sh
@@ -376,4 +376,8 @@ if [[ "$BUILD_ENVIRONMENT" != *libtorch* && "$BUILD_ENVIRONMENT" != *bazel* ]];
python tools/stats/export_test_times.py
fi
-print_sccache_stats
+# snadampal: skipping it till sccache support added for aarch64
+# https://github.com/pytorch/pytorch/issues/121559
+if [[ "$BUILD_ENVIRONMENT" != *aarch64* ]]; then
+ print_sccache_stats
+fi
diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh
index b13e41681a..c259a58615 100755
--- a/.ci/pytorch/test.sh
+++ b/.ci/pytorch/test.sh
@@ -181,6 +181,11 @@ if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]] ; then
export PATH="$HOME/.local/bin:$PATH"
fi
+if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
+ # TODO: revisit this once the CI is stabilized on aarch64 linux
+ export VALGRIND=OFF
+fi
+
install_tlparse
# DANGER WILL ROBINSON. The LD_PRELOAD here could cause you problems
diff --git a/.github/pytorch-probot.yml b/.github/pytorch-probot.yml
index c7b554ce44..fafa314652 100644
--- a/.github/pytorch-probot.yml
+++ b/.github/pytorch-probot.yml
@@ -8,6 +8,7 @@ ciflow_push_tags:
- ciflow/binaries_wheel
- ciflow/inductor
- ciflow/inductor-perf-compare
+- ciflow/linux-aarch64
- ciflow/mps
- ciflow/nightly
- ciflow/periodic
diff --git a/.github/workflows/linux-aarch64.yml b/.github/workflows/linux-aarch64.yml
new file mode 100644
index 0000000000..96394c0d89
--- /dev/null
+++ b/.github/workflows/linux-aarch64.yml
@@ -0,0 +1,38 @@
+name: linux-aarch64
+
+on:
+ # For testing purposes, removeme later
+ pull_request:
+ push:
+ tags:
+ - ciflow/linux-aarch64/*
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
+ cancel-in-progress: true
+
+jobs:
+ linux-jammy-aarch64-py3_10-build:
+ name: linux-jammy-aarch64-py3.10
+ uses: ./.github/workflows/_linux-build.yml
+ with:
+ build-environment: linux-jammy-aarch64-py3.10
+ docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11
+ runner: linux.arm64.2xlarge
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 1, runner: "linux.arm64.2xlarge" },
+ ]}
+
+ linux-jammy-aarch64-py3_10-test:
+ name: linux-jammy-aarch64-py3.10
+ uses: ./.github/workflows/_linux-test.yml
+ needs: linux-jammy-aarch64-py3_10-build
+ permissions:
+ id-token: write
+ contents: read
+ with:
+ build-environment: linux-jammy-aarch64-py3.10
+ docker-image: ${{ needs.linux-jammy-aarch64-py3_10-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-jammy-aarch64-py3_10-build.outputs.test-matrix }}
diff --git a/test/run_test.py b/test/run_test.py
index 516dbc753f..5c1a7a8fbd 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -26,7 +26,9 @@ from torch.multiprocessing import current_process, get_context
from torch.testing._internal.common_utils import (
FILE_SCHEMA,
get_report_path,
+ IS_ARM64,
IS_CI,
+ IS_LINUX,
IS_MACOS,
parser as common_parser,
retry_shell,
@@ -265,6 +267,10 @@ CORE_TEST_LIST = [
"test_torch",
]
+# A subset of the TEST list for aarch64 linux platform
+ARM64_LINUX_TEST_LIST = [
+ "test_modules",
+]
# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST
SLOW_TEST_THRESHOLD = 300
@@ -1298,6 +1304,10 @@ def can_run_in_pytest(test):
def get_selected_tests(options) -> List[str]:
+ if IS_ARM64 and IS_LINUX:
+ selected_tests = ARM64_LINUX_TEST_LIST
+ return selected_tests
+
selected_tests = options.include
# filter if there's JIT only and distributed only test options
|
2.41.0
|
03cf9d4dc8ebe85552f450678988cac4e959da3
|
Sat, 27 Apr 2024 15:12:48 +0800
|
[PATCH 0800/1000] Fix & optimze open device registration test. (#124712)
|
Fixes #100152 1. Fix the wrong tests about lazy init for PrivateUse1 named foo 2. Fix the wrong backend meta registry mechanism when compiling with clang++ (compiling with g++ works well), introduced by a static variable in an inline function 3. Refactor the tests and make them more flexible 4. Disable two tests temporarily - test_open_device_storage_pin_memory - test_compile_autograd_function_aliasing Pull Request resolved: https://github.com/pytorch/pytorch/pull/124712 Approved by: https://github.com/albanD, https://github.com/malfet
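A hedged sketch of the one-time registration flow the refactored test now performs in `setUpClass` (the C++ extension build is omitted; the faked module attributes mirror `generate_faked_module()` in the diff below):

```python
import types
import torch

# Fake torch.foo dynamically, as the test does, then register the backend.
foo = types.ModuleType("foo")
foo.device_count = lambda: 1
foo.is_available = lambda: True
foo.current_device = lambda: 0
foo._lazy_init = lambda: None
foo.is_initialized = lambda: True

torch.utils.rename_privateuse1_backend("foo")
torch.utils.generate_methods_for_privateuse1_backend(for_storage=True)
torch._register_device_module("foo", foo)
assert hasattr(torch.Tensor, "is_foo")  # generated attribute, as the test checks
```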
|
diff --git a/test/test_cpp_extensions_open_device_registration.py b/test/test_cpp_extensions_open_device_registration.py
index 3511070ce3..d1134d8182 100644
--- a/test/test_cpp_extensions_open_device_registration.py
+++ b/test/test_cpp_extensions_open_device_registration.py
@@ -4,6 +4,7 @@ import os
import shutil
import sys
import tempfile
+import types
import unittest
from typing import Union
@@ -11,7 +12,7 @@ import torch
import torch.testing._internal.common_utils as common
import torch.utils.cpp_extension
-from torch.testing._internal.common_utils import IS_ARM64, TEST_CUDA
+from torch.testing._internal.common_utils import IS_ARM64, skipIfTorchDynamo, TEST_CUDA
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
@@ -28,33 +29,37 @@ def remove_build_path():
shutil.rmtree(default_build_root, ignore_errors=True)
-class DummyModule:
- @staticmethod
+def generate_faked_module():
def device_count() -> int:
return 1
- @staticmethod
def get_rng_state(device: Union[int, str, torch.device] = "foo") -> torch.Tensor:
# create a tensor using our custom device object.
return torch.empty(4, 4, device="foo")
- @staticmethod
def set_rng_state(
new_state: torch.Tensor, device: Union[int, str, torch.device] = "foo"
) -> None:
pass
- @staticmethod
def is_available():
return True
- @staticmethod
def current_device():
return 0
- @staticmethod
- def is_initialized():
- return True
+ # create a new module to fake torch.foo dynamicaly
+ foo = types.ModuleType("foo")
+
+ foo.device_count = device_count
+ foo.get_rng_state = get_rng_state
+ foo.set_rng_state = set_rng_state
+ foo.is_available = is_available
+ foo.current_device = current_device
+ foo._lazy_init = lambda: None
+ foo.is_initialized = lambda: True
+
+ return foo
@unittest.skipIf(IS_ARM64, "Does not work on arm")
@@ -66,20 +71,24 @@ class TestCppExtensionOpenRgistration(common.TestCase):
def setUp(self):
super().setUp()
+
# cpp extensions use relative paths. Those paths are relative to
# this file, so we'll change the working directory temporarily
self.old_working_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
assert self.module is not None
def tearDown(self):
super().tearDown()
+
# return the working directory (see setUp)
os.chdir(self.old_working_dir)
@classmethod
def setUpClass(cls):
remove_build_path()
+
cls.module = torch.utils.cpp_extension.load(
name="custom_device_extension",
sources=[
@@ -90,509 +99,475 @@ class TestCppExtensionOpenRgistration(common.TestCase):
verbose=True,
)
- @classmethod
- def tearDownClass(cls):
- remove_build_path()
+ # register torch.foo module and foo device to torch
+ torch.utils.rename_privateuse1_backend("foo")
+ torch.utils.generate_methods_for_privateuse1_backend(for_storage=True)
+ torch._register_device_module("foo", generate_faked_module())
+
+ def test_base_device_registration(self):
+ self.assertFalse(self.module.custom_add_called())
+ # create a tensor using our custom device object
+ device = self.module.custom_device()
+ x = torch.empty(4, 4, device=device)
+ y = torch.empty(4, 4, device=device)
+ # Check that our device is correct.
+ self.assertTrue(x.device == device)
+ self.assertFalse(x.is_cpu)
+ self.assertFalse(self.module.custom_add_called())
+ # calls out custom add kernel, registered to the dispatcher
+ z = x + y
+ # check that it was called
+ self.assertTrue(self.module.custom_add_called())
+ z_cpu = z.to(device="cpu")
+ # Check that our cross-device copy correctly copied the data to cpu
+ self.assertTrue(z_cpu.is_cpu)
+ self.assertFalse(z.is_cpu)
+ self.assertTrue(z.device == device)
+ self.assertEqual(z, z_cpu)
+
+ def test_common_registration(self):
+ # check unsupported device and duplicated registration
+ with self.assertRaisesRegex(RuntimeError, "Expected one of cpu"):
+ torch._register_device_module("dev", generate_faked_module())
+ with self.assertRaisesRegex(RuntimeError, "The runtime module of"):
+ torch._register_device_module("foo", generate_faked_module())
+
+ # backend name can be renamed to the same name multiple times
+ torch.utils.rename_privateuse1_backend("foo")
+
+ # backend name can't be renamed multiple times to different names.
+ with self.assertRaisesRegex(
+ RuntimeError, "torch.register_privateuse1_backend()"
+ ):
+ torch.utils.rename_privateuse1_backend("dev")
+
+ # generator tensor and module can be registered only once
+ with self.assertRaisesRegex(RuntimeError, "The custom device module of"):
+ torch.utils.generate_methods_for_privateuse1_backend()
+
+ # check whether torch.foo have been registered correctly
+ self.assertTrue(
+ torch.utils.backend_registration._get_custom_mod_func("device_count")() == 1
+ )
+ with self.assertRaisesRegex(RuntimeError, "Try to call torch.foo"):
+ torch.utils.backend_registration._get_custom_mod_func("func_name_")
+
+ # check attributes after registered
+ self.assertTrue(hasattr(torch.Tensor, "is_foo"))
+ self.assertTrue(hasattr(torch.Tensor, "foo"))
+ self.assertTrue(hasattr(torch.TypedStorage, "is_foo"))
+ self.assertTrue(hasattr(torch.TypedStorage, "foo"))
+ self.assertTrue(hasattr(torch.UntypedStorage, "is_foo"))
+ self.assertTrue(hasattr(torch.UntypedStorage, "foo"))
+ self.assertTrue(hasattr(torch.nn.Module, "foo"))
+ self.assertTrue(hasattr(torch.nn.utils.rnn.PackedSequence, "is_foo"))
+ self.assertTrue(hasattr(torch.nn.utils.rnn.PackedSequence, "foo"))
+
+ def test_open_device_generator_registration_and_hooks(self):
+ device = self.module.custom_device()
+ # None of our CPU operations should call the custom add function.
+ self.assertFalse(self.module.custom_add_called())
+
+ # check generator registered before using
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Please register a generator to the PrivateUse1 dispatch key",
+ ):
+ torch.Generator(device=device)
+
+ self.module.register_generator_first()
+ gen = torch.Generator(device=device)
+ self.assertTrue(gen.device == device)
+
+ # generator can be registered only once
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Only can register a generator to the PrivateUse1 dispatch key once",
+ ):
+ self.module.register_generator_second()
+
+ self.module.register_hook()
+ default_gen = self.module.default_generator(0)
+ self.assertTrue(
+ default_gen.device.type == torch._C._get_privateuse1_backend_name()
+ )
- def test_open_device_registration(self):
- def test_base_device_registration():
- torch.utils.rename_privateuse1_backend("foo")
- self.assertFalse(self.module.custom_add_called())
- # create a tensor using our custom device object
- device = self.module.custom_device()
- # register foo module, torch.foo. This is for lazy
- # init check.
- torch._register_device_module("foo", DummyModule)
- x = torch.empty(4, 4, device=device)
- y = torch.empty(4, 4, device=device)
- # Check that our device is correct.
- self.assertTrue(x.device == device)
- self.assertFalse(x.is_cpu)
- self.assertFalse(self.module.custom_add_called())
- # calls out custom add kernel, registered to the dispatcher
- z = x + y
- # check that it was called
- self.assertTrue(self.module.custom_add_called())
- z_cpu = z.to(device="cpu")
- # Check that our cross-device copy correctly copied the data to cpu
- self.assertTrue(z_cpu.is_cpu)
- self.assertFalse(z.is_cpu)
- self.assertTrue(z.device == device)
- self.assertEqual(z, z_cpu)
- z2 = z_cpu + z_cpu
- del torch.foo
-
- # check whether the error can be reported correctly
- def test_before_common_registration():
- # check that register module name should be the same as custom backend
- with self.assertRaisesRegex(RuntimeError, "Expected one of cpu"):
- torch._register_device_module("xxx", DummyModule)
- # check generator registered before using
- torch.utils.rename_privateuse1_backend("foo")
- with self.assertRaisesRegex(RuntimeError, "torch has no module of"):
- with torch.random.fork_rng(device_type="foo"):
- pass
- # check attributes before registered
- self.assertFalse(hasattr(torch.Tensor, "is_foo"))
- self.assertFalse(hasattr(torch.Tensor, "foo"))
- self.assertFalse(hasattr(torch.TypedStorage, "is_foo"))
- self.assertFalse(hasattr(torch.TypedStorage, "foo"))
- self.assertFalse(hasattr(torch.UntypedStorage, "is_foo"))
- self.assertFalse(hasattr(torch.UntypedStorage, "foo"))
- self.assertFalse(hasattr(torch.nn.Module, "foo"))
- self.assertFalse(hasattr(torch.nn.utils.rnn.PackedSequence, "is_foo"))
- self.assertFalse(hasattr(torch.nn.utils.rnn.PackedSequence, "foo"))
-
- def test_after_common_registration():
- # check attributes after registered
- self.assertTrue(hasattr(torch.Tensor, "is_foo"))
- self.assertTrue(hasattr(torch.Tensor, "foo"))
- self.assertTrue(hasattr(torch.TypedStorage, "is_foo"))
- self.assertTrue(hasattr(torch.TypedStorage, "foo"))
- self.assertTrue(hasattr(torch.UntypedStorage, "is_foo"))
- self.assertTrue(hasattr(torch.UntypedStorage, "foo"))
- self.assertTrue(hasattr(torch.nn.Module, "foo"))
- self.assertTrue(hasattr(torch.nn.utils.rnn.PackedSequence, "is_foo"))
- self.assertTrue(hasattr(torch.nn.utils.rnn.PackedSequence, "foo"))
-
- def test_common_registration():
- # first rename custom backend
- torch.utils.rename_privateuse1_backend("foo")
- # backend name can only rename once
- with self.assertRaisesRegex(
- RuntimeError, "torch.register_privateuse1_backend()"
- ):
- torch.utils.rename_privateuse1_backend("xxx")
- # register foo module, torch.foo
- torch._register_device_module("foo", DummyModule)
- self.assertTrue(
- torch.utils.backend_registration._get_custom_mod_func("device_count")()
- == 1
- )
- with self.assertRaisesRegex(RuntimeError, "Try to call torch.foo"):
- torch.utils.backend_registration._get_custom_mod_func("func_name_")
- # default set for_tensor and for_module are True, so only set for_storage is True
- torch.utils.generate_methods_for_privateuse1_backend(for_storage=True)
- # generator tensor and module can be registered only once
- with self.assertRaisesRegex(RuntimeError, "The custom device module of"):
- torch.utils.generate_methods_for_privateuse1_backend()
-
- def test_open_device_generator_registration_and_hooks():
- device = self.module.custom_device()
- # None of our CPU operations should call the custom add function.
- self.assertFalse(self.module.custom_add_called())
- # check generator registered before using
- with self.assertRaisesRegex(
- RuntimeError,
- "Please register a generator to the PrivateUse1 dispatch key",
- ):
- gen_ = torch.Generator(device=device)
- self.module.register_generator_first()
- gen = torch.Generator(device=device)
- self.assertTrue(gen.device == device)
- # generator can be registered only once
- with self.assertRaisesRegex(
- RuntimeError,
- "Only can register a generator to the PrivateUse1 dispatch key once",
- ):
- self.module.register_generator_second()
- self.module.register_hook()
- default_gen = self.module.default_generator(0)
- self.assertTrue(
- default_gen.device.type == torch._C._get_privateuse1_backend_name()
- )
-
- def test_open_device_dispatchstub():
- # test kernels could be reused by privateuse1 backend through dispatchstub
- torch.utils.rename_privateuse1_backend("foo")
- input_data = torch.randn(2, 2, 3, dtype=torch.float32, device="cpu")
- foo_input_data = input_data.to("foo")
- output_data = torch.abs(input_data)
- foo_output_data = torch.abs(foo_input_data)
- self.assertEqual(output_data, foo_output_data.cpu())
- output_data = torch.randn(2, 2, 6, dtype=torch.float32, device="cpu")
- # output operand will resize flag is True in TensorIterator.
- foo_input_data = input_data.to("foo")
- foo_output_data = output_data.to("foo")
- # output operand will resize flag is False in TensorIterator.
- torch.abs(input_data, out=output_data[:, :, 0:6:2])
- torch.abs(foo_input_data, out=foo_output_data[:, :, 0:6:2])
- self.assertEqual(output_data, foo_output_data.cpu())
- # output operand will resize flag is True in TensorIterator.
- # and convert output to contiguous tensor in TensorIterator.
- output_data = torch.randn(2, 2, 6, dtype=torch.float32, device="cpu")
- foo_input_data = input_data.to("foo")
- foo_output_data = output_data.to("foo")
- torch.abs(input_data, out=output_data[:, :, 0:6:3])
- torch.abs(foo_input_data, out=foo_output_data[:, :, 0:6:3])
- self.assertEqual(output_data, foo_output_data.cpu())
-
- def test_open_device_quantized():
- torch.utils.rename_privateuse1_backend("foo")
- input_data = torch.randn(3, 4, 5, dtype=torch.float32, device="cpu").to(
- "foo"
- )
- quantized_tensor = torch.quantize_per_tensor(
- input_data, 0.1, 10, torch.qint8
- )
- self.assertEqual(quantized_tensor.device, torch.device("foo:0"))
- self.assertEqual(quantized_tensor.dtype, torch.qint8)
-
- def test_open_device_random():
- with torch.random.fork_rng(device_type="foo"):
- pass
-
- def test_open_device_tensor():
- device = self.module.custom_device()
- # check whether print tensor.type() meets the expectation
- dtypes = {
- torch.bool: "torch.foo.BoolTensor",
- torch.double: "torch.foo.DoubleTensor",
- torch.float32: "torch.foo.FloatTensor",
- torch.half: "torch.foo.HalfTensor",
- torch.int32: "torch.foo.IntTensor",
- torch.int64: "torch.foo.LongTensor",
- torch.int8: "torch.foo.CharTensor",
- torch.short: "torch.foo.ShortTensor",
- torch.uint8: "torch.foo.ByteTensor",
- }
- for tt, dt in dtypes.items():
- test_tensor = torch.empty(4, 4, dtype=tt, device=device)
- self.assertTrue(test_tensor.type() == dt)
- # check whether the attributes and methods of the corresponding custom backend are generated correctly
- x = torch.empty(4, 4)
- self.assertFalse(x.is_foo)
- x = x.foo(torch.device("foo"))
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(x.is_foo)
- # test different device type input
- y = torch.empty(4, 4)
- self.assertFalse(y.is_foo)
- y = y.foo(torch.device("foo:0"))
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(y.is_foo)
- # test different device type input
- z = torch.empty(4, 4)
- self.assertFalse(z.is_foo)
- z = z.foo(0)
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(z.is_foo)
-
- def test_open_device_packed_sequence():
- device = self.module.custom_device()
- a = torch.rand(5, 3)
- b = torch.tensor([1, 1, 1, 1, 1])
- input = torch.nn.utils.rnn.PackedSequence(a, b)
- self.assertFalse(input.is_foo)
- input_foo = input.foo()
- self.assertTrue(input_foo.is_foo)
-
- def test_open_device_storage():
- # check whether the attributes and methods for storage of the corresponding custom backend are generated correctly
- x = torch.empty(4, 4)
- z1 = x.storage()
- self.assertFalse(z1.is_foo)
- z1 = z1.foo()
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(z1.is_foo)
- with self.assertRaisesRegex(RuntimeError, "Invalid device"):
- z1.foo(torch.device("cpu"))
- z1 = z1.cpu()
- self.assertFalse(self.module.custom_add_called())
- self.assertFalse(z1.is_foo)
- z1 = z1.foo(device="foo:0", non_blocking=False)
- self.assertFalse(self.module.custom_add_called())
+ def test_open_device_dispatchstub(self):
+        # test that kernels can be reused by the privateuse1 backend through DispatchStub
+ input_data = torch.randn(2, 2, 3, dtype=torch.float32, device="cpu")
+ foo_input_data = input_data.to("foo")
+ output_data = torch.abs(input_data)
+ foo_output_data = torch.abs(foo_input_data)
+ self.assertEqual(output_data, foo_output_data.cpu())
+
+ output_data = torch.randn(2, 2, 6, dtype=torch.float32, device="cpu")
+        # The output operand's will_resize flag is True in TensorIterator.
+ foo_input_data = input_data.to("foo")
+ foo_output_data = output_data.to("foo")
+        # The output operand's will_resize flag is False in TensorIterator.
+ torch.abs(input_data, out=output_data[:, :, 0:6:2])
+ torch.abs(foo_input_data, out=foo_output_data[:, :, 0:6:2])
+ self.assertEqual(output_data, foo_output_data.cpu())
+
+        # The output operand's will_resize flag is True in TensorIterator,
+        # and the output is converted to a contiguous tensor in TensorIterator.
+ output_data = torch.randn(2, 2, 6, dtype=torch.float32, device="cpu")
+ foo_input_data = input_data.to("foo")
+ foo_output_data = output_data.to("foo")
+ torch.abs(input_data, out=output_data[:, :, 0:6:3])
+ torch.abs(foo_input_data, out=foo_output_data[:, :, 0:6:3])
+ self.assertEqual(output_data, foo_output_data.cpu())
+
+ def test_open_device_quantized(self):
+ input_data = torch.randn(3, 4, 5, dtype=torch.float32, device="cpu").to("foo")
+ quantized_tensor = torch.quantize_per_tensor(input_data, 0.1, 10, torch.qint8)
+ self.assertEqual(quantized_tensor.device, torch.device("foo:0"))
+ self.assertEqual(quantized_tensor.dtype, torch.qint8)
+
+ def test_open_device_random(self):
+        # check if torch.foo has implemented get_rng_state
+ with torch.random.fork_rng(device_type="foo"):
+ pass
+
+ def test_open_device_tensor(self):
+ device = self.module.custom_device()
+
+ # check whether print tensor.type() meets the expectation
+ dtypes = {
+ torch.bool: "torch.foo.BoolTensor",
+ torch.double: "torch.foo.DoubleTensor",
+ torch.float32: "torch.foo.FloatTensor",
+ torch.half: "torch.foo.HalfTensor",
+ torch.int32: "torch.foo.IntTensor",
+ torch.int64: "torch.foo.LongTensor",
+ torch.int8: "torch.foo.CharTensor",
+ torch.short: "torch.foo.ShortTensor",
+ torch.uint8: "torch.foo.ByteTensor",
+ }
+ for tt, dt in dtypes.items():
+ test_tensor = torch.empty(4, 4, dtype=tt, device=device)
+ self.assertTrue(test_tensor.type() == dt)
+
+ # check whether the attributes and methods of the corresponding custom backend are generated correctly
+ x = torch.empty(4, 4)
+ self.assertFalse(x.is_foo)
+
+ x = x.foo(torch.device("foo"))
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(x.is_foo)
+
+ # test different device type input
+ y = torch.empty(4, 4)
+ self.assertFalse(y.is_foo)
+
+ y = y.foo(torch.device("foo:0"))
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(y.is_foo)
+
+ # test different device type input
+ z = torch.empty(4, 4)
+ self.assertFalse(z.is_foo)
+
+ z = z.foo(0)
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(z.is_foo)
+
+ def test_open_device_packed_sequence(self):
+ device = self.module.custom_device()
+ a = torch.rand(5, 3)
+ b = torch.tensor([1, 1, 1, 1, 1])
+ input = torch.nn.utils.rnn.PackedSequence(a, b)
+ self.assertFalse(input.is_foo)
+ input_foo = input.foo()
+ self.assertTrue(input_foo.is_foo)
+
+ def test_open_device_storage(self):
+ # check whether the attributes and methods for storage of the corresponding custom backend are generated correctly
+ x = torch.empty(4, 4)
+ z1 = x.storage()
+ self.assertFalse(z1.is_foo)
+
+ z1 = z1.foo()
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(z1.is_foo)
+
+ with self.assertRaisesRegex(RuntimeError, "Invalid device"):
+ z1.foo(torch.device("cpu"))
+
+ z1 = z1.cpu()
+ self.assertFalse(self.module.custom_add_called())
+ self.assertFalse(z1.is_foo)
+
+ z1 = z1.foo(device="foo:0", non_blocking=False)
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(z1.is_foo)
+
+ with self.assertRaisesRegex(RuntimeError, "Invalid device"):
+ z1.foo(device="cuda:0", non_blocking=False)
+
+ # check UntypedStorage
+ y = torch.empty(4, 4)
+ z2 = y.untyped_storage()
+ self.assertFalse(z2.is_foo)
+
+ z2 = z2.foo()
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(z2.is_foo)
+
+ # check custom StorageImpl create
+ self.module.custom_storage_registry()
+
+ z3 = y.untyped_storage()
+ self.assertFalse(self.module.custom_storageImpl_called())
+
+ z3 = z3.foo()
+ self.assertTrue(self.module.custom_storageImpl_called())
+ self.assertFalse(self.module.custom_storageImpl_called())
+
+ z3 = z3[0:3]
+ self.assertTrue(self.module.custom_storageImpl_called())
+
+ @skipIfTorchDynamo("unsupported aten.is_pinned.default")
+ def test_open_device_storage_pin_memory(self):
+ # Check if the pin_memory is functioning properly on custom device
+ cpu_tensor = torch.empty(3)
+ self.assertFalse(cpu_tensor.is_foo)
+ self.assertFalse(cpu_tensor.is_pinned("foo"))
+
+ cpu_tensor_pin = cpu_tensor.pin_memory("foo")
+ self.assertTrue(cpu_tensor_pin.is_pinned("foo"))
+
+ # Test storage pin_memory on custom device string
+ cpu_storage = cpu_tensor.storage()
+ foo_device = torch.device("foo")
+ self.assertFalse(cpu_storage.is_pinned("foo"))
+
+ cpu_storage_pin = cpu_storage.pin_memory("foo")
+ self.assertFalse(cpu_storage.is_pinned())
+ self.assertFalse(cpu_storage.is_pinned("foo"))
+ self.assertFalse(cpu_storage.is_pinned(foo_device))
+ self.assertFalse(cpu_storage_pin.is_pinned())
+ self.assertTrue(cpu_storage_pin.is_pinned("foo"))
+ self.assertTrue(cpu_storage_pin.is_pinned(foo_device))
+
+ cpu_storage_pin_already = cpu_storage_pin.pin_memory("foo")
+ self.assertTrue(cpu_storage_pin.is_pinned("foo"))
+ self.assertTrue(cpu_storage_pin.is_pinned(foo_device))
+ self.assertTrue(cpu_storage_pin_already.is_pinned("foo"))
+ self.assertTrue(cpu_storage_pin_already.is_pinned(foo_device))
+ self.assertFalse(cpu_storage.is_pinned("foo"))
+
+ cpu_storage_pinned = cpu_storage.pin_memory(foo_device)
+ self.assertFalse(cpu_storage.is_pinned())
+ self.assertFalse(cpu_storage.is_pinned("foo"))
+ self.assertFalse(cpu_storage.is_pinned(foo_device))
+ self.assertFalse(cpu_storage_pinned.is_pinned())
+ self.assertTrue(cpu_storage_pinned.is_pinned("foo"))
+ self.assertTrue(cpu_storage_pinned.is_pinned(foo_device))
+
+ # Test untyped storage pin_memory and is_pin
+ cpu_tensor = torch.randn([3, 2, 1, 4])
+ cpu_untyped_storage = cpu_tensor.untyped_storage()
+ self.assertFalse(cpu_untyped_storage.is_pinned())
+ self.assertFalse(cpu_untyped_storage.is_pinned("foo"))
+
+ cpu_untyped_storage_pinned = cpu_untyped_storage.pin_memory("foo")
+ self.assertFalse(cpu_untyped_storage_pinned.is_pinned())
+ self.assertTrue(cpu_untyped_storage_pinned.is_pinned("foo"))
+ self.assertTrue(cpu_untyped_storage_pinned.is_pinned(foo_device))
+
+ cpu_untyped_storage_pinned = cpu_untyped_storage.pin_memory(foo_device)
+ self.assertFalse(cpu_untyped_storage_pinned.is_pinned())
+ self.assertTrue(cpu_untyped_storage_pinned.is_pinned("foo"))
+ self.assertTrue(cpu_untyped_storage_pinned.is_pinned(foo_device))
+
+ with self.assertRaisesRegex(TypeError, "positional arguments but 3 were given"):
+ cpu_untyped_storage_pinned.is_pinned("foo1", "foo2")
+
+ # Test storage pin_memory on error device
+ self.assertFalse(cpu_storage_pinned.is_pinned("hpu"))
+ self.assertFalse(cpu_untyped_storage_pinned.is_pinned("hpu"))
+ invalid_device = torch.device("hpu")
+ self.assertFalse(cpu_untyped_storage_pinned.is_pinned(invalid_device))
+
+ with self.assertRaisesRegex(
+ NotImplementedError, "with arguments from the 'HPU' backend"
+ ):
+ cpu_storage.pin_memory("hpu")
+ with self.assertRaisesRegex(
+ NotImplementedError, "with arguments from the 'HPU' backend"
+ ):
+ cpu_untyped_storage.pin_memory("hpu")
+ with self.assertRaisesRegex(
+ NotImplementedError, "with arguments from the 'HPU' backend"
+ ):
+ cpu_untyped_storage.pin_memory(invalid_device)
+
+ def test_open_device_serialization(self):
+ self.module.set_custom_device_index(-1)
+ storage = torch.UntypedStorage(4, device=torch.device("foo"))
+ self.assertEqual(torch.serialization.location_tag(storage), "foo")
+
+ self.module.set_custom_device_index(0)
+ storage = torch.UntypedStorage(4, device=torch.device("foo"))
+ self.assertEqual(torch.serialization.location_tag(storage), "foo:0")
+
+ cpu_storage = torch.empty(4, 4).storage()
+ foo_storage = torch.serialization.default_restore_location(cpu_storage, "foo:0")
+ self.assertTrue(foo_storage.is_foo)
+
+ # test tensor MetaData serialization
+ x = torch.empty(4, 4).long()
+ y = x.foo()
+ self.assertFalse(self.module.check_backend_meta(y))
+ self.module.custom_set_backend_meta(y)
+ self.assertTrue(self.module.check_backend_meta(y))
+
+ self.module.custom_serialization_registry()
+ with tempfile.TemporaryDirectory() as tmpdir:
+ path = os.path.join(tmpdir, "data.pt")
+ torch.save(y, path)
+ z1 = torch.load(path)
+ # loads correctly onto the foo backend device
self.assertTrue(z1.is_foo)
- with self.assertRaisesRegex(RuntimeError, "Invalid device"):
- z1.foo(device="cuda:0", non_blocking=False)
- # check UntypedStorage
- y = torch.empty(4, 4)
- z2 = y.untyped_storage()
+ # loads BackendMeta data correctly
+ self.assertTrue(self.module.check_backend_meta(z1))
+
+ # cross-backend
+ z2 = torch.load(path, map_location="cpu")
+ # loads correctly onto the cpu backend device
self.assertFalse(z2.is_foo)
- z2 = z2.foo()
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(z2.is_foo)
- # check custom StorageImpl create
- self.module.custom_storage_registry()
- z3 = y.untyped_storage()
- self.assertFalse(self.module.custom_storageImpl_called())
- z3 = z3.foo()
- self.assertTrue(self.module.custom_storageImpl_called())
- self.assertFalse(self.module.custom_storageImpl_called())
- z3 = z3[0:3]
- self.assertTrue(self.module.custom_storageImpl_called())
-
- def test_open_device_storage_pin_memory():
- torch.utils.rename_privateuse1_backend("foo")
- with self.assertRaisesRegex(RuntimeError, "The custom device module of"):
- torch.utils.generate_methods_for_privateuse1_backend(
- for_tensor=False, for_module=False, for_storage=True
- )
- # Check if the pin_memory is functioning properly on custom device
- cpu_tensor = torch.empty(3)
- self.assertFalse(cpu_tensor.is_foo)
- self.assertFalse(cpu_tensor.is_pinned("foo"))
- cpu_tensor_pin = cpu_tensor.pin_memory("foo")
- self.assertTrue(cpu_tensor_pin.is_pinned("foo"))
- # Test storage pin_memory on custom device string
- cpu_storage = cpu_tensor.storage()
- foo_device = torch.device("foo")
- self.assertFalse(cpu_storage.is_pinned("foo"))
- cpu_storage_pin = cpu_storage.pin_memory("foo")
- self.assertFalse(cpu_storage.is_pinned())
- self.assertFalse(cpu_storage.is_pinned("foo"))
- self.assertFalse(cpu_storage.is_pinned(foo_device))
- self.assertFalse(cpu_storage_pin.is_pinned())
- self.assertTrue(cpu_storage_pin.is_pinned("foo"))
- self.assertTrue(cpu_storage_pin.is_pinned(foo_device))
- cpu_storage_pin_already = cpu_storage_pin.pin_memory("foo")
- self.assertTrue(cpu_storage_pin.is_pinned("foo"))
- self.assertTrue(cpu_storage_pin.is_pinned(foo_device))
- self.assertTrue(cpu_storage_pin_already.is_pinned("foo"))
- self.assertTrue(cpu_storage_pin_already.is_pinned(foo_device))
-
- # Test storage pin_memory on torch.device
- self.assertFalse(cpu_storage.is_pinned("foo"))
- cpu_storage_pinned = cpu_storage.pin_memory(foo_device)
- self.assertFalse(cpu_storage.is_pinned())
- self.assertFalse(cpu_storage.is_pinned("foo"))
- self.assertFalse(cpu_storage.is_pinned(foo_device))
- self.assertFalse(cpu_storage_pinned.is_pinned())
- self.assertTrue(cpu_storage_pinned.is_pinned("foo"))
- self.assertTrue(cpu_storage_pinned.is_pinned(foo_device))
-
- # Test untyped storage pin_memory and is_pin
- cpu_tensor = torch.randn([3, 2, 1, 4])
- cpu_untyped_storage = cpu_tensor.untyped_storage()
- self.assertFalse(cpu_untyped_storage.is_pinned())
- self.assertFalse(cpu_untyped_storage.is_pinned("foo"))
- cpu_untyped_storage_pinned = cpu_untyped_storage.pin_memory("foo")
- self.assertFalse(cpu_untyped_storage_pinned.is_pinned())
- self.assertTrue(cpu_untyped_storage_pinned.is_pinned("foo"))
- self.assertTrue(cpu_untyped_storage_pinned.is_pinned(foo_device))
- cpu_untyped_storage_pinned = cpu_untyped_storage.pin_memory(foo_device)
- self.assertFalse(cpu_untyped_storage_pinned.is_pinned())
- self.assertTrue(cpu_untyped_storage_pinned.is_pinned("foo"))
- self.assertTrue(cpu_untyped_storage_pinned.is_pinned(foo_device))
- with self.assertRaisesRegex(
- TypeError, "positional arguments but 3 were given"
- ):
- cpu_untyped_storage_pinned.is_pinned("foo1", "foo2")
-
- # Test storage pin_memory on error device
- self.assertFalse(cpu_storage_pinned.is_pinned("hpu"))
- with self.assertRaisesRegex(
- NotImplementedError, "with arguments from the 'HPU' backend"
- ):
- cpu_storage.pin_memory("hpu")
- self.assertFalse(cpu_untyped_storage_pinned.is_pinned("hpu"))
- with self.assertRaisesRegex(
- NotImplementedError, "with arguments from the 'HPU' backend"
- ):
- cpu_untyped_storage.pin_memory("hpu")
- invalid_device = torch.device("hpu")
- self.assertFalse(cpu_untyped_storage_pinned.is_pinned(invalid_device))
- with self.assertRaisesRegex(
- NotImplementedError, "with arguments from the 'HPU' backend"
- ):
- cpu_untyped_storage.pin_memory(invalid_device)
-
- def test_open_device_serialization():
- self.module.set_custom_device_index(-1)
- storage = torch.UntypedStorage(4, device=torch.device("foo"))
- self.assertEqual(torch.serialization.location_tag(storage), "foo")
-
- self.module.set_custom_device_index(0)
- storage = torch.UntypedStorage(4, device=torch.device("foo"))
- self.assertEqual(torch.serialization.location_tag(storage), "foo:0")
-
- cpu_storage = torch.empty(4, 4).storage()
- foo_storage = torch.serialization.default_restore_location(
- cpu_storage, "foo:0"
- )
- self.assertTrue(foo_storage.is_foo)
- # test tensor MetaData serialization
- x = torch.empty(4, 4).long()
- y = x.foo()
- self.assertFalse(self.module.check_backend_meta(y))
- self.module.custom_set_backend_meta(y)
- self.assertTrue(self.module.check_backend_meta(y))
-
- self.module.custom_serialization_registry()
- with tempfile.TemporaryDirectory() as tmpdir:
- path = os.path.join(tmpdir, "data.pt")
- torch.save(y, path)
- z1 = torch.load(path)
- # loads correctly onto the foo backend device
- self.assertTrue(z1.is_foo)
- # loads BackendMeta data correctly
- self.assertTrue(self.module.check_backend_meta(z1))
- # cross-backend
- z2 = torch.load(path, map_location="cpu")
- # loads correctly onto the cpu backend device
- self.assertFalse(z2.is_foo)
- # loads BackendMeta data correctly
- self.assertFalse(self.module.check_backend_meta(z2))
-
- def test_open_device_storage_resize():
- torch.utils.rename_privateuse1_backend("foo")
- cpu_tensor = torch.randn([8])
- foo_tensor = cpu_tensor.foo()
- foo_storage = foo_tensor.storage()
- self.assertTrue(foo_storage.size() == 8)
- # Only register tensor resize_ function.
- foo_tensor.resize_(8)
- self.assertTrue(foo_storage.size() == 8)
- with self.assertRaisesRegex(TypeError, "Overflow"):
- foo_tensor.resize_(8**29)
-
- def test_open_device_storage_type():
- torch.utils.rename_privateuse1_backend("foo")
- # test cpu float storage
- cpu_tensor = torch.randn([8]).float()
- cpu_storage = cpu_tensor.storage()
- self.assertEqual(cpu_storage.type(), "torch.FloatStorage")
-
- # test custom float storage before defining FloatStorage
- foo_tensor = cpu_tensor.foo()
- foo_storage = foo_tensor.storage()
- self.assertEqual(foo_storage.type(), "torch.storage.TypedStorage")
-
- class CustomFloatStorage:
- @property
- def __module__(self):
- return "torch." + torch._C._get_privateuse1_backend_name()
-
- @property
- def __name__(self):
- return "FloatStorage"
-
- # test custom float storage after defining FloatStorage
- try:
- torch.foo.FloatStorage = CustomFloatStorage()
- self.assertEqual(foo_storage.type(), "torch.foo.FloatStorage")
-
- # test custom int storage after defining FloatStorage
- foo_tensor2 = torch.randn([8]).int().foo()
- foo_storage2 = foo_tensor2.storage()
- self.assertEqual(foo_storage2.type(), "torch.storage.TypedStorage")
- finally:
- torch.foo.FloatStorage = None
-
- def test_open_device_faketensor():
- torch.utils.rename_privateuse1_backend("foo")
- with torch._subclasses.fake_tensor.FakeTensorMode.push():
- a = torch.empty(1, device="foo")
- b = torch.empty(1, device="foo:0")
- result = a + b
-
- def test_open_device_named_tensor():
- torch.utils.rename_privateuse1_backend("foo")
- a = torch.empty([2, 3, 4, 5], device="foo", names=["N", "C", "H", "W"])
-
- # Not an open registration test - this file is just very convenient
- # for testing torch.compile on custom C++ operators
- def test_compile_autograd_function_returns_self():
- x_ref = torch.randn(4, requires_grad=True)
- out_ref = self.module.custom_autograd_fn_returns_self(x_ref)
- out_ref.sum().backward()
-
- x_test = x_ref.clone().detach().requires_grad_(True)
- f_compiled = torch.compile(self.module.custom_autograd_fn_returns_self)
- out_test = f_compiled(x_test)
- out_test.sum().backward()
-
- self.assertEqual(out_ref, out_test)
- self.assertEqual(x_ref.grad, x_test.grad)
-
- # Not an open registration test - this file is just very convenient
- # for testing torch.compile on custom C++ operators
- def test_compile_autograd_function_aliasing():
- x_ref = torch.randn(4, requires_grad=True)
- out_ref = torch.ops._test_funcs.custom_autograd_fn_aliasing(x_ref)
- out_ref.sum().backward()
-
- x_test = x_ref.clone().detach().requires_grad_(True)
- f_compiled = torch.compile(
- torch.ops._test_funcs.custom_autograd_fn_aliasing
- )
- out_test = f_compiled(x_test)
- out_test.sum().backward()
-
- self.assertEqual(out_ref, out_test)
- self.assertEqual(x_ref.grad, x_test.grad)
-
- def test_open_device_scalar_type_fallback():
- torch.utils.rename_privateuse1_backend("foo")
- z_cpu = torch.Tensor([[0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]).to(
- torch.int64
- )
- z = torch.triu_indices(3, 3, device="foo")
- self.assertEqual(z_cpu, z)
-
- def test_open_device_tensor_type_fallback():
- torch.utils.rename_privateuse1_backend("foo")
- # create tensors located in custom device
- x = torch.Tensor([[1, 2, 3], [2, 3, 4]]).to("foo")
- y = torch.Tensor([1, 0, 2]).to("foo")
- # create result tensor located in cpu
- z_cpu = torch.Tensor([[0, 2, 1], [1, 3, 2]])
- # Check that our device is correct.
- device = self.module.custom_device()
- self.assertTrue(x.device == device)
- self.assertFalse(x.is_cpu)
- # call sub op, which will fallback to cpu
- z = torch.sub(x, y)
- self.assertEqual(z_cpu, z)
- # call index op, which will fallback to cpu
- z_cpu = torch.Tensor([3, 1])
- y = torch.Tensor([1, 0]).long().to("foo")
- z = x[y, y]
- self.assertEqual(z_cpu, z)
-
- def test_open_device_tensorlist_type_fallback():
- torch.utils.rename_privateuse1_backend("foo")
- # create tensors located in custom device
- v_foo = torch.Tensor([1, 2, 3]).to("foo")
- # create result tensor located in cpu
- z_cpu = torch.Tensor([2, 4, 6])
- # create tensorlist for foreach_add op
- x = (v_foo, v_foo)
- y = (v_foo, v_foo)
- # Check that our device is correct.
- device = self.module.custom_device()
- self.assertTrue(v_foo.device == device)
- self.assertFalse(v_foo.is_cpu)
- # call _foreach_add op, which will fallback to cpu
- z = torch._foreach_add(x, y)
-
- self.assertEqual(z_cpu, z[0])
- self.assertEqual(z_cpu, z[1])
-
- test_base_device_registration()
- test_before_common_registration()
- test_common_registration()
- test_after_common_registration()
- test_open_device_generator_registration_and_hooks()
- test_open_device_dispatchstub()
- test_open_device_random()
- test_open_device_tensor()
- test_open_device_packed_sequence()
- test_open_device_storage()
- test_open_device_storage_pin_memory()
- test_open_device_serialization()
- test_open_device_storage_resize()
- test_open_device_storage_type()
- test_open_device_faketensor()
- test_open_device_named_tensor()
- test_open_device_quantized()
-
- test_compile_autograd_function_returns_self()
- test_compile_autograd_function_aliasing()
-
- test_open_device_scalar_type_fallback()
- test_open_device_tensor_type_fallback()
- test_open_device_tensorlist_type_fallback()
+ # loads BackendMeta data correctly
+ self.assertFalse(self.module.check_backend_meta(z2))
+
+ def test_open_device_storage_resize(self):
+ cpu_tensor = torch.randn([8])
+ foo_tensor = cpu_tensor.foo()
+ foo_storage = foo_tensor.storage()
+ self.assertTrue(foo_storage.size() == 8)
+
+ # Only register tensor resize_ function.
+ foo_tensor.resize_(8)
+ self.assertTrue(foo_storage.size() == 8)
+
+ with self.assertRaisesRegex(TypeError, "Overflow"):
+ foo_tensor.resize_(8**29)
+
+ def test_open_device_storage_type(self):
+ # test cpu float storage
+ cpu_tensor = torch.randn([8]).float()
+ cpu_storage = cpu_tensor.storage()
+ self.assertEqual(cpu_storage.type(), "torch.FloatStorage")
+
+ # test custom float storage before defining FloatStorage
+ foo_tensor = cpu_tensor.foo()
+ foo_storage = foo_tensor.storage()
+ self.assertEqual(foo_storage.type(), "torch.storage.TypedStorage")
+
+ class CustomFloatStorage:
+ @property
+ def __module__(self):
+ return "torch." + torch._C._get_privateuse1_backend_name()
+
+ @property
+ def __name__(self):
+ return "FloatStorage"
+
+ # test custom float storage after defining FloatStorage
+ try:
+ torch.foo.FloatStorage = CustomFloatStorage()
+ self.assertEqual(foo_storage.type(), "torch.foo.FloatStorage")
+
+ # test custom int storage after defining FloatStorage
+ foo_tensor2 = torch.randn([8]).int().foo()
+ foo_storage2 = foo_tensor2.storage()
+ self.assertEqual(foo_storage2.type(), "torch.storage.TypedStorage")
+ finally:
+ torch.foo.FloatStorage = None
+
+ def test_open_device_faketensor(self):
+ with torch._subclasses.fake_tensor.FakeTensorMode.push():
+ a = torch.empty(1, device="foo")
+ b = torch.empty(1, device="foo:0")
+ result = a + b
+
+ def test_open_device_named_tensor(self):
+ torch.empty([2, 3, 4, 5], device="foo", names=["N", "C", "H", "W"])
+
+ # Not an open registration test - this file is just very convenient
+ # for testing torch.compile on custom C++ operators
+ def test_compile_autograd_function_returns_self(self):
+ x_ref = torch.randn(4, requires_grad=True)
+ out_ref = self.module.custom_autograd_fn_returns_self(x_ref)
+ out_ref.sum().backward()
+
+ x_test = x_ref.clone().detach().requires_grad_(True)
+ f_compiled = torch.compile(self.module.custom_autograd_fn_returns_self)
+ out_test = f_compiled(x_test)
+ out_test.sum().backward()
+
+ self.assertEqual(out_ref, out_test)
+ self.assertEqual(x_ref.grad, x_test.grad)
+
+ # Not an open registration test - this file is just very convenient
+ # for testing torch.compile on custom C++ operators
+ @skipIfTorchDynamo("Temporary disabled due to torch._ops.OpOverloadPacket")
+ def test_compile_autograd_function_aliasing(self):
+ x_ref = torch.randn(4, requires_grad=True)
+ out_ref = torch.ops._test_funcs.custom_autograd_fn_aliasing(x_ref)
+ out_ref.sum().backward()
+
+ x_test = x_ref.clone().detach().requires_grad_(True)
+ f_compiled = torch.compile(torch.ops._test_funcs.custom_autograd_fn_aliasing)
+ out_test = f_compiled(x_test)
+ out_test.sum().backward()
+
+ self.assertEqual(out_ref, out_test)
+ self.assertEqual(x_ref.grad, x_test.grad)
+
+ def test_open_device_scalar_type_fallback(self):
+ z_cpu = torch.Tensor([[0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]).to(torch.int64)
+ z = torch.triu_indices(3, 3, device="foo")
+ self.assertEqual(z_cpu, z)
+
+ def test_open_device_tensor_type_fallback(self):
+ # create tensors located in custom device
+ x = torch.Tensor([[1, 2, 3], [2, 3, 4]]).to("foo")
+ y = torch.Tensor([1, 0, 2]).to("foo")
+ # create result tensor located in cpu
+ z_cpu = torch.Tensor([[0, 2, 1], [1, 3, 2]])
+ # Check that our device is correct.
+ device = self.module.custom_device()
+ self.assertTrue(x.device == device)
+ self.assertFalse(x.is_cpu)
+
+ # call sub op, which will fallback to cpu
+ z = torch.sub(x, y)
+ self.assertEqual(z_cpu, z)
+
+ # call index op, which will fallback to cpu
+ z_cpu = torch.Tensor([3, 1])
+ y = torch.Tensor([1, 0]).long().to("foo")
+ z = x[y, y]
+ self.assertEqual(z_cpu, z)
+
+ def test_open_device_tensorlist_type_fallback(self):
+ # create tensors located in custom device
+ v_foo = torch.Tensor([1, 2, 3]).to("foo")
+ # create result tensor located in cpu
+ z_cpu = torch.Tensor([2, 4, 6])
+ # create tensorlist for foreach_add op
+ x = (v_foo, v_foo)
+ y = (v_foo, v_foo)
+ # Check that our device is correct.
+ device = self.module.custom_device()
+ self.assertTrue(v_foo.device == device)
+ self.assertFalse(v_foo.is_cpu)
+
+ # call _foreach_add op, which will fallback to cpu
+ z = torch._foreach_add(x, y)
+ self.assertEqual(z_cpu, z[0])
+ self.assertEqual(z_cpu, z[1])
if __name__ == "__main__":
diff --git a/torch/csrc/jit/serialization/pickler.cpp b/torch/csrc/jit/serialization/pickler.cpp
index 6e1b399e40..0300dd6169 100644
--- a/torch/csrc/jit/serialization/pickler.cpp
+++ b/torch/csrc/jit/serialization/pickler.cpp
@@ -803,4 +803,18 @@ bool checkHasValidSetGetState(const std::shared_ptr<c10::ClassType>& cls) {
return true;
}
+std::array<
+ c10::optional<std::pair<BackendMetaPtr, BackendMetaPtr>>,
+ at::COMPILE_TIME_MAX_DEVICE_TYPES>&
+GetBackendMetaSerialization() {
+ // The array to save function pointer for BackendMeta serialization.
+ // key is the DeviceType, value is std::pair obj.
+  // value.first represents the get function and value.second represents the set function
+ static std::array<
+ c10::optional<std::pair<BackendMetaPtr, BackendMetaPtr>>,
+ at::COMPILE_TIME_MAX_DEVICE_TYPES>
+ BackendMetaSerialization;
+ return BackendMetaSerialization;
+}
+
} // namespace torch::jit
diff --git a/torch/csrc/jit/serialization/pickler.h b/torch/csrc/jit/serialization/pickler.h
index 4f553b6f7c..fc0f0231dd 100644
--- a/torch/csrc/jit/serialization/pickler.h
+++ b/torch/csrc/jit/serialization/pickler.h
@@ -310,19 +310,10 @@ inline std::unordered_set<c10::DeviceType>& GetBackendMetaAllowlist() {
// Dynamically obtain serialization function pairs
// that require the corresponding backend.
-inline std::array<
+TORCH_API std::array<
c10::optional<std::pair<BackendMetaPtr, BackendMetaPtr>>,
at::COMPILE_TIME_MAX_DEVICE_TYPES>&
-GetBackendMetaSerialization() {
- // The array to save function pointer for BackendMeta serialization.
- // key is the DeviceType, value is std::pair obj.
- // value.first represent get function and value.seconde represent set function
- static std::array<
- c10::optional<std::pair<BackendMetaPtr, BackendMetaPtr>>,
- at::COMPILE_TIME_MAX_DEVICE_TYPES>
- BackendMetaSerialization;
- return BackendMetaSerialization;
-}
+GetBackendMetaSerialization();
// Register function pointer of Tensor BackendMetadata for serialization.
TORCH_API inline void TensorBackendMetaRegistry(
|
2.41.0
|
ab5bd535947b5cf140fd0892081df7283b18cb6
|
Mon, 29 Apr 2024 11:06:53 -0400
|
[PATCH 0801/1000] [checkpoint] Improve error message when use_reentrant=True is used with .grad() (#125155)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125155 Approved by: https://github.com/albanD
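For illustration only (not part of the patch), here is a minimal sketch of the situation the reworded error targets, mirroring the updated tests: reentrant checkpointing cannot be combined with `torch.autograd.grad()`, while the non-reentrant path can.
```python
import torch
from torch.utils.checkpoint import checkpoint

a = torch.randn(2, 2, requires_grad=True)

# use_reentrant=True: computing gradients via torch.autograd.grad() raises the
# RuntimeError whose message this patch rewords.
b = checkpoint(torch.exp, a, use_reentrant=True).sum()
try:
    torch.autograd.grad(b, (a,))
except RuntimeError as err:
    print(err)

# use_reentrant=False: the non-reentrant implementation supports .grad().
c = checkpoint(torch.exp, a, use_reentrant=False).sum()
(grad_a,) = torch.autograd.grad(c, (a,))
```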
|
diff --git a/test/test_autograd.py b/test/test_autograd.py
index 80af13df3d..79880a1d62 100644
--- a/test/test_autograd.py
+++ b/test/test_autograd.py
@@ -6916,7 +6916,7 @@ for shape in [(1,), ()]:
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(
- Exception, "Checkpointing is not compatible with .grad()"
+ Exception, "torch.utils.checkpoint is incompatible"
):
b = checkpoint(torch.exp, a, use_reentrant=True).sum()
torch.autograd.grad(b, (a,))
diff --git a/test/test_utils.py b/test/test_utils.py
index 5dd946faba..b151b5141a 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -147,7 +147,7 @@ class TestCheckpoint(TestCase):
chunks = 2
modules = list(model.children())
out = checkpoint_sequential(modules, chunks, input_var, use_reentrant=True)
- with self.assertRaisesRegex(RuntimeError, "Checkpointing is not compatible"):
+ with self.assertRaisesRegex(RuntimeError, "torch.utils.checkpoint is incompatible"):
torch.autograd.grad(
outputs=[out], grad_outputs=[torch.ones(1, 5)], inputs=[input_var], create_graph=True
)
diff --git a/torch/utils/checkpoint.py b/torch/utils/checkpoint.py
index 7c74b4c6be..5cdfc55caf 100644
--- a/torch/utils/checkpoint.py
+++ b/torch/utils/checkpoint.py
@@ -258,9 +258,10 @@ class CheckpointFunction(torch.autograd.Function):
def backward(ctx, *args):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
- "Checkpointing is not compatible with .grad() or when an `inputs` parameter"
- " is passed to .backward(). Please use .backward() and do not pass its `inputs`"
- " argument."
+ "When use_reentrant=True, torch.utils.checkpoint is incompatible"
+ " with .grad() or passing an `inputs` parameter to .backward()."
+ " To resolve this error, you can either set use_reentrant=False,"
+ " or call .backward() without passing the `inputs` argument."
)
# Copy the list to avoid modifying original list.
inputs = list(ctx.inputs)
|
2.41.0
|
434d1487bcb8a5c3895a30dd1680dec1dedc634
|
Mon, 29 Apr 2024 18:59:01 +0000
|
[PATCH 0802/1000] Fix EtcdServer leak in etcd_server_test.py file (#125121)
|
As stated in the title. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125121 Approved by: https://github.com/Skylion007
|
diff --git a/test/distributed/elastic/rendezvous/etcd_server_test.py b/test/distributed/elastic/rendezvous/etcd_server_test.py
index 88726dddae..62e9a98f83 100644
--- a/test/distributed/elastic/rendezvous/etcd_server_test.py
+++ b/test/distributed/elastic/rendezvous/etcd_server_test.py
@@ -41,19 +41,22 @@ class EtcdServerTest(unittest.TestCase):
server = EtcdServer()
server.start()
- client = etcd.Client(server.get_host(), server.get_port())
-
- rdzv = EtcdRendezvous(
- client=client,
- prefix="test",
- run_id=1,
- num_min_workers=1,
- num_max_workers=1,
- timeout=60,
- last_call_timeout=30,
- )
- rdzv_handler = EtcdRendezvousHandler(rdzv)
- store, rank, world_size = rdzv_handler.next_rendezvous()
- self.assertIsNotNone(store)
- self.assertEqual(0, rank)
- self.assertEqual(1, world_size)
+ try:
+ client = etcd.Client(server.get_host(), server.get_port())
+
+ rdzv = EtcdRendezvous(
+ client=client,
+ prefix="test",
+ run_id=1,
+ num_min_workers=1,
+ num_max_workers=1,
+ timeout=60,
+ last_call_timeout=30,
+ )
+ rdzv_handler = EtcdRendezvousHandler(rdzv)
+ store, rank, world_size = rdzv_handler.next_rendezvous()
+ self.assertIsNotNone(store)
+ self.assertEqual(0, rank)
+ self.assertEqual(1, world_size)
+ finally:
+ server.stop()
|
2.41.0
|
bd67dab324442a28243439461d375c74a466f27
|
Mon, 29 Apr 2024 20:26:15 +0000
|
[PATCH 0804/1000] Revert "[dtensor] delete the old unused mesh_alltoall (#124879)"
|
This reverts commit f7f018a0ed442f92eb5270150ced7b6117773368. Reverted https://github.com/pytorch/pytorch/pull/124879 on behalf of https://github.com/clee2000 because it broke distributed/tensor/parallel/test_tp_examples.py::DistTensorParallelExampleTest::test_transformer_training_is_seq_parallel_True https://github.com/pytorch/pytorch/actions/runs/8882762411/job/24389191482 https://hud.pytorch.org/pytorch/pytorch/commit/f7f018a0ed442f92eb5270150ced7b6117773368. Bad TD ([comment](https://github.com/pytorch/pytorch/pull/124872#issuecomment-2083599445))
|
diff --git a/test/distributed/test_device_mesh.py b/test/distributed/test_device_mesh.py
index 1d8909237d..89f98ce0b6 100644
--- a/test/distributed/test_device_mesh.py
+++ b/test/distributed/test_device_mesh.py
@@ -6,6 +6,7 @@ import torch
import torch.distributed._functional_collectives as funcol
from torch.distributed._tensor import DTensor
from torch.distributed._tensor._collective_utils import (
+ mesh_all_to_all,
mesh_broadcast,
mesh_scatter,
unpad_tensor,
@@ -688,6 +689,70 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
mesh_scatter(received_tensor, scattered_tensors, mesh, mesh_dim=dim)
self.assertEqual(received_tensor, torch.ones(3, 3) * self.rank)
+ @with_comms
+ def test_all_to_all_1d(self):
+ # transpose on a 2D tensor distributed over N nodes:
+ mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
+ tensor_shape = [3, 3]
+ input_tensor_list = [
+ torch.ones(*tensor_shape, device=self.device_type)
+ * (rank + self.rank * self.world_size)
+ for rank in range(self.world_size)
+ ]
+ expected_tensor_list = [
+ torch.ones(tensor_shape, device=self.device_type)
+ * (self.rank + rank * self.world_size) # i.e. transpose
+ for rank in range(self.world_size)
+ ]
+ for scatter_dim in range(len(tensor_shape)):
+ output_tensor_list = [
+ torch.empty_like(input_tensor_list[idx])
+ for idx in range(len(input_tensor_list))
+ ]
+ # scatter on dim > 0 would generate non-contiguous tensor, verify that works
+ mesh_all_to_all(output_tensor_list, input_tensor_list, mesh, mesh_dim=0)
+ output_tensor = torch.cat(output_tensor_list, dim=scatter_dim)
+ expected_tensor = torch.cat(expected_tensor_list, dim=scatter_dim)
+
+ self.assertEqual(output_tensor, expected_tensor)
+
+ @with_comms
+ def test_all_to_all_nd(self):
+ mesh_tensor = torch.arange(8).reshape(2, 2, 2)
+ mesh = DeviceMesh(self.device_type, mesh_tensor)
+ tensor_shape = [3, 3, 3]
+ # check all dim groups
+ dim_to_subgroups = mesh.get_group()
+ for dim, dim_group in enumerate(dim_to_subgroups):
+ my_coordinate = mesh.get_coordinate()[dim]
+ dim_group_size = get_world_size(dim_group)
+ global_ranks = [
+ get_global_rank(dim_group, i) for i in range(dim_group_size)
+ ]
+ input_tensor_list = [
+ torch.ones(*tensor_shape, device=self.device_type)
+ * (i + self.rank * dim_group_size)
+ for i in range(dim_group_size)
+ ]
+ expected_tensor_list = [
+ torch.ones(*tensor_shape, device=self.device_type)
+ * (my_coordinate + global_rank * dim_group_size) # i.e. transpose
+ for global_rank in global_ranks
+ ]
+ for scatter_dim in range(len(tensor_shape)):
+ # input_tensor = torch.cat(input_tensor_list, dim=scatter_dim)
+ output_tensor_list = [
+ torch.empty_like(input_tensor_list[idx])
+ for idx in range(len(input_tensor_list))
+ ]
+ # scatter on dim > 0 would generate non-contiguous tensor, verify that works
+ mesh_all_to_all(
+ output_tensor_list, input_tensor_list, mesh, mesh_dim=dim
+ )
+ output_tensor = torch.cat(output_tensor_list, dim=scatter_dim)
+ expected_tensor = torch.cat(expected_tensor_list, dim=scatter_dim)
+ self.assertEqual(output_tensor, expected_tensor)
+
if __name__ == "__main__":
run_tests()
diff --git a/torch/distributed/_tensor/_collective_utils.py b/torch/distributed/_tensor/_collective_utils.py
index ce4809d996..51c1379625 100644
--- a/torch/distributed/_tensor/_collective_utils.py
+++ b/torch/distributed/_tensor/_collective_utils.py
@@ -11,9 +11,11 @@ import torch.distributed._tensor.placement_types as placement_types
from torch.distributed.device_mesh import _mesh_resources, DeviceMesh
from torch.distributed.distributed_c10d import (
_get_group_size_by_name,
+ all_to_all,
broadcast,
get_global_rank,
get_rank,
+ get_world_size,
GroupMember,
ProcessGroup,
scatter,
@@ -148,6 +150,48 @@ def mesh_broadcast(
return broadcast(tensor, src=src_for_dim, group=dim_group, async_op=async_op)
+# TODO: test uneven split on GLOO and NCCL
+def mesh_all_to_all(
+ output_tensor_list: List[torch.Tensor],
+ input_tensor_list: List[torch.Tensor],
+ mesh: DeviceMesh,
+ mesh_dim: int = 0,
+ async_op: bool = False,
+) -> Optional[Work]:
+ dim_group = mesh.get_group(mesh_dim)
+ assert isinstance(dim_group, ProcessGroup)
+
+ work = None
+ # no direct dist.all_to_all support on 'gloo' so we manually do scatters
+ if mesh.device_type == "cpu":
+ logger.warning(
+ "ProcessGroupGloo does not support all_to_all, falling back with scatters!"
+ )
+ # TODO: pull the handle of uneven case in #492
+ dim_group_size = get_world_size(dim_group)
+ for i in range(dim_group_size):
+ # src need to be global rank
+ src_for_dim = i
+ if dim_group is not GroupMember.WORLD:
+ src_for_dim = get_global_rank(dim_group, i)
+
+ work = scatter(
+ output_tensor_list[i],
+ input_tensor_list if mesh.get_rank() == src_for_dim else [],
+ group=dim_group,
+ src=src_for_dim,
+ async_op=async_op,
+ )
+ else:
+ work = all_to_all(
+ output_tensor_list,
+ input_tensor_list,
+ dim_group,
+ async_op=async_op,
+ )
+ return work
+
+
def pad_tensor(tensor: torch.Tensor, pad_dim: int, pad_size: int) -> torch.Tensor:
if pad_size == 0:
return tensor
|
2.41.0
|
46e202c07a7c7acde4303b4aff8d0124c268828
|
Mon, 29 Apr 2024 20:58:17 +0000
|
[PATCH 0806/1000] [export] Restore user input names to unlifted graph modules (#124765)
|
Summary: Fixes https://github.com/pytorch/pytorch/issues/122842 Currently, calling ep.module() on an ExportedProgram leads to a GraphModule with a default forward signature (e.g. arg_0, arg_1, ...), so the original placeholder names disappear for retracing/re-exporting. This PR fixes the issue by adding a forward_arg_names field (renaming suggestions welcome) that stores the positional & keyword arg names that are used. These names aren't present in the currently stored call_spec, and adding them requires a major version bump for the ExportedProgram schema. Test Plan: Tests exist for export, but names are now changed from generic (e.g. arg_0, arg_1) to follow user inputs (e.g. x, y). Differential Revision: D56484994 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124765 Approved by: https://github.com/zhxchen17
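For illustration only (not from the patch), a hedged sketch of the behavior this change affects; the exact generated code depends on the export pipeline version:
```python
import torch

class M(torch.nn.Module):
    def forward(self, x, y):
        return x + y

ep = torch.export.export(M(), (torch.randn(2), torch.randn(2)))
unlifted = ep.module()
# With forward_arg_names recorded in the graph module's meta, the unlifted
# module's generated forward keeps the user-facing names (x, y) rather than
# falling back to arg_0, arg_1.
print(unlifted.code)
```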
|
diff --git a/test/export/test_export.py b/test/export/test_export.py
index bb96de9a9a..9c6ede9d90 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -2094,8 +2094,8 @@ class TestExport(TestCase):
self.assertExpectedInline(
str(gm.code).strip(),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
conv_weight = self.conv.weight
conv_bias = self.conv.bias
bn_weight = self.bn.weight
@@ -2113,8 +2113,8 @@ def forward(self, arg_0):
self.assertExpectedInline(
str(gm_train.code).strip(),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
conv_weight = self.conv.weight
conv_bias = self.conv.bias
bn_weight = self.bn.weight
@@ -4465,9 +4465,6 @@ def forward(self, x):
]
self.assertEqual(expected_getattr_names, real_getattr_names)
- # original input names aren't retraceable:
- # compilation will succeed, but names won't match forward() signature.
- @testing.expectedFailureRetraceability
def test_constant_input_naming(self):
class Foo(torch.nn.Module):
def forward(self, x, y, div="floor"):
diff --git a/test/export/test_passes.py b/test/export/test_passes.py
index c7240ec0ee..58619e4775 100644
--- a/test/export/test_passes.py
+++ b/test/export/test_passes.py
@@ -582,8 +582,8 @@ class TestPasses(TestCase):
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
add = torch.ops.aten.add.Tensor(x, 1); x = None
sin = torch.ops.aten.sin.default(add); add = None
sum_1 = torch.ops.aten.sum.default(sin); sin = None
@@ -597,8 +597,8 @@ def forward(self, arg_0):
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
add = torch.ops.aten.add.Tensor(x, 1); x = None
sin = torch.ops.aten.sin.default(add); add = None
sum_1 = torch.ops.aten.sum.default(sin); sin = None
@@ -613,8 +613,8 @@ def forward(self, arg_0):
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
add = torch.ops.aten.add.Tensor(x, 1); x = None
sin = torch.ops.aten.sin.default(add); add = None
sum_1 = torch.ops.aten.sum.default(sin); sin = None
@@ -628,8 +628,8 @@ def forward(self, arg_0):
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
add = torch.ops.aten.add.Tensor(x, 1); x = None
submod_5 = self.submod_1
sum_1 = torch._higher_order_ops.wrap.wrap_with_set_grad_enabled(True, submod_5, add); submod_5 = add = None
@@ -643,8 +643,8 @@ def forward(self, arg_0):
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
add = torch.ops.aten.add.Tensor(x, 1); x = None
sin = torch.ops.aten.sin.default(add)
sum_1 = torch.ops.aten.sum.default(sin); sin = None
@@ -663,8 +663,8 @@ def forward(self, arg_0):
self.assertExpectedInline(
mod.code.strip("\n"),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
add = torch.ops.aten.add.Tensor(x, 1); x = None
submod_5 = self.submod_1
wrap_with_set_grad_enabled = torch._higher_order_ops.wrap.wrap_with_set_grad_enabled(True, submod_5, add); submod_5 = add = None
@@ -698,8 +698,8 @@ def forward(self, arg_0):
self.assertExpectedInline(
new_gm.code.strip("\n"),
"""\
-def forward(self, arg_0, arg_1):
- x1, x2, = fx_pytree.tree_flatten_spec(([arg_0, arg_1], {}), self._in_spec)
+def forward(self, x1, x2):
+ x1, x2, = fx_pytree.tree_flatten_spec(([x1, x2], {}), self._in_spec)
submod_1 = self.submod_1(x1, x2); x1 = x2 = None
getitem = submod_1[0]
getitem_1 = submod_1[1]; submod_1 = None
diff --git a/test/export/test_torchbind.py b/test/export/test_torchbind.py
index 3ec3e7dfab..872c713571 100644
--- a/test/export/test_torchbind.py
+++ b/test/export/test_torchbind.py
@@ -165,8 +165,8 @@ class TestExportTorchbind(TestCase):
self.assertExpectedInline(
ep.module().code.strip(),
"""\
-def forward(self, arg_0, arg_1):
- x, n, = fx_pytree.tree_flatten_spec(([arg_0, arg_1], {}), self._in_spec)
+def forward(self, x, n):
+ x, n, = fx_pytree.tree_flatten_spec(([x, n], {}), self._in_spec)
attr = self.attr
call_torchbind = torch.ops.higher_order.call_torchbind(attr, 'add_tensor', x); attr = None
add = torch.ops.aten.add.Tensor(x, call_torchbind); x = call_torchbind = None
@@ -197,8 +197,8 @@ def forward(self, obj_attr, x, n):
self.assertExpectedInline(
ep.module().code.strip(),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
attr = self.attr
call_torchbind = torch.ops.higher_order.call_torchbind(attr, 'add_tensor', x); attr = None
add = torch.ops.aten.add.Tensor(x, call_torchbind); x = call_torchbind = None
@@ -229,8 +229,8 @@ def forward(self, obj_attr, x):
self.assertExpectedInline(
ep.module().code.strip(),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
attr = self.attr
takes_foo_default = torch.ops._TorchScriptTesting.takes_foo.default(attr, x); attr = None
add = torch.ops.aten.add.Tensor(x, takes_foo_default); x = takes_foo_default = None
@@ -263,8 +263,8 @@ def forward(self, token, obj_attr, x):
self.assertExpectedInline(
ep.module().code.strip(),
"""\
-def forward(self, arg_0, arg_1):
- x, cc, = fx_pytree.tree_flatten_spec(([arg_0, arg_1], {}), self._in_spec)
+def forward(self, x, cc):
+ x, cc, = fx_pytree.tree_flatten_spec(([x, cc], {}), self._in_spec)
call_torchbind = torch.ops.higher_order.call_torchbind(cc, 'add_tensor', x); cc = None
add = torch.ops.aten.add.Tensor(x, call_torchbind); x = call_torchbind = None
return pytree.tree_unflatten((add,), self._out_spec)""",
@@ -294,8 +294,8 @@ def forward(self, x, cc):
self.assertExpectedInline(
ep.module().code.strip(),
"""\
-def forward(self, arg_0, arg_1):
- x, cc, = fx_pytree.tree_flatten_spec(([arg_0, arg_1], {}), self._in_spec)
+def forward(self, x, cc):
+ x, cc, = fx_pytree.tree_flatten_spec(([x, cc], {}), self._in_spec)
takes_foo_default = torch.ops._TorchScriptTesting.takes_foo.default(cc, x); cc = None
add = torch.ops.aten.add.Tensor(x, takes_foo_default); x = takes_foo_default = None
return pytree.tree_unflatten((add,), self._out_spec)""",
@@ -330,8 +330,8 @@ def forward(self, token, x, cc):
self.assertExpectedInline(
ep.module().code.strip(),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
attr = self.attr
takes_foo_default_1 = torch.ops._TorchScriptTesting.takes_foo.default(attr, x)
takes_foo_default = torch.ops._TorchScriptTesting.takes_foo.default(attr, takes_foo_default_1); attr = takes_foo_default_1 = None
@@ -372,8 +372,8 @@ def forward(self, token, obj_attr, x):
self.assertExpectedInline(
ep.module().code.strip(),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
attr = self.attr
takes_foo_list_return_default = torch.ops._TorchScriptTesting.takes_foo_list_return.default(attr, x)
getitem_2 = takes_foo_list_return_default[0]
@@ -424,8 +424,8 @@ def forward(self, token, obj_attr, x):
self.assertExpectedInline(
ep.module().code.strip(),
"""\
-def forward(self, arg_0):
- x, = fx_pytree.tree_flatten_spec(([arg_0], {}), self._in_spec)
+def forward(self, x):
+ x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
attr = self.attr
takes_foo_tuple_return_default = torch.ops._TorchScriptTesting.takes_foo_tuple_return.default(attr, x)
getitem_1 = takes_foo_tuple_return_default[0]
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 0143d0ab15..b4a977e0a1 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -662,6 +662,37 @@ def _get_params_buffers(mod: torch.nn.Module) -> Dict[str, torch.Tensor]:
return params_buffers
+def _get_forward_arg_names(
+ mod: torch.nn.Module,
+ args: Tuple[Any, ...],
+ kwargs: Optional[Dict[str, Any]] = None,
+) -> List[str]:
+ """
+ Gets the argument names to forward that are used, for restoring the
+ original signature when unlifting the exported program module.
+ - Positional args: retain the original argument names, and enumerate
+ *args as args_0, args_1, ...
+ - Keyword args: retain the original kwarg names in the order specified
+ by the user. This order seems to matter for the current state of
+ export lifted modules.
+ """
+ sig = inspect.signature(mod.forward)
+ _args = sig.bind_partial(*args).arguments
+
+ names: List[str] = []
+ for name, value in _args.items():
+ # handle variable number of positional args
+ if sig.parameters[name].kind == inspect._ParameterKind.VAR_POSITIONAL:
+ names.extend([f"{name}_{i}" for i, _ in enumerate(value)])
+ else:
+ names.append(name)
+ # order of kwargs matters for input spec
+ if kwargs:
+ names.extend([kwarg for kwarg, _ in kwargs.items()])
+
+ return names
+
+
def _rewrite_dynamo_tensor_constants(
orig_mod_buffers: Set[torch.Tensor],
traced_mod_buffers: Dict[str, torch.Tensor],
@@ -915,6 +946,7 @@ def _export(
flat_args, orig_in_spec = pytree.tree_flatten((args, kwargs))
original_state_dict = mod.state_dict(keep_vars=True)
+ forward_arg_names = _get_forward_arg_names(mod, args, kwargs)
if not strict:
out_spec = None
@@ -1041,6 +1073,7 @@ def _export(
gm = ep_non_strict.gm
+ gm.meta["forward_arg_names"] = forward_arg_names
module_call_signatures = {
strip_root(fqn): ModuleCallSignature(inputs=[], outputs=[], **specs)
for fqn, specs in module_call_specs.items()
@@ -1227,6 +1260,7 @@ def _export(
for k, v in dynamo_fake_mode.shape_env.var_to_range.items()
if free_unbacked_symbols(k)
}
+ gm.meta["forward_arg_names"] = forward_arg_names
num_lifted = next(
(
diff --git a/torch/export/_unlift.py b/torch/export/_unlift.py
index bede2986d4..52ce64e4dc 100644
--- a/torch/export/_unlift.py
+++ b/torch/export/_unlift.py
@@ -114,22 +114,26 @@ def _insert_copy_for_mutations(
def _get_codegen(
in_spec: pytree.TreeSpec,
out_spec: Optional[pytree.TreeSpec],
+ forward_arg_names: Optional[List[str]] = None,
) -> _PyTreeCodeGen:
"""
Create the codegen for the graph module based on the in/out specs
"""
- if (
- in_spec.type == tuple
- and in_spec.num_children == 2
- and in_spec.children_specs[0].type == tuple
- and in_spec.children_specs[1].type == dict
- ):
- # if in_spec contains the args (tuple) and kwargs (dict)
- names = [f"arg_{i}" for i in range(in_spec.children_specs[0].num_children)]
- # add kwarg names
- names.extend(in_spec.children_specs[1].context)
+ if forward_arg_names:
+ names = forward_arg_names
else:
- names = [f"arg_{i}" for i in range(in_spec.num_children)]
+ if (
+ in_spec.type == tuple
+ and in_spec.num_children == 2
+ and in_spec.children_specs[0].type == tuple
+ and in_spec.children_specs[1].type == dict
+ ):
+ # if in_spec contains the args (tuple) and kwargs (dict)
+ names = [f"arg_{i}" for i in range(in_spec.children_specs[0].num_children)]
+ # add kwarg names
+ names.extend(in_spec.children_specs[1].context)
+ else:
+ names = [f"arg_{i}" for i in range(in_spec.num_children)]
return _PyTreeCodeGen(
_PyTreeInfo(
@@ -148,6 +152,7 @@ def _unlift(
out_spec: Optional[pytree.TreeSpec],
state_dict: Dict[str, Any],
constants: Dict[str, Any],
+ forward_arg_names: Optional[List[str]] = None,
):
"""
Args:
@@ -170,7 +175,7 @@ def _unlift(
_insert_copy_for_mutations(
gm, mutated_outputs, unlifted_name_to_node, input_name_to_node
)
- gm.graph._codegen = _get_codegen(in_spec, out_spec)
+ gm.graph._codegen = _get_codegen(in_spec, out_spec, forward_arg_names)
gm.graph.lint()
gm.graph.eliminate_dead_code()
gm.recompile()
@@ -277,24 +282,30 @@ def _unlift_exported_program_lifted_states(ep: ExportedProgram) -> torch.nn.Modu
ep = _remove_effect_tokens(ep)
new_gm = torch.fx.GraphModule(ep.graph_module, copy.deepcopy(ep.graph))
_register_attrs_to_new_gm(new_gm, ep.graph_signature, ep.state_dict, ep.constants)
+ forward_arg_names = ep.graph_module.meta.get("forward_arg_names")
lifted_inputs: List[Optional[str]] = [
- in_spec.target
- if in_spec.kind
- in (
- InputKind.BUFFER,
- InputKind.CONSTANT_TENSOR,
- InputKind.PARAMETER,
- InputKind.CUSTOM_OBJ,
+ (
+ in_spec.target
+ if in_spec.kind
+ in (
+ InputKind.BUFFER,
+ InputKind.CONSTANT_TENSOR,
+ InputKind.PARAMETER,
+ InputKind.CUSTOM_OBJ,
+ )
+ else None
)
- else None
for in_spec in ep.graph_signature.input_specs
]
mutated_outputs: List[Optional[str]] = [
- out_spec.target
- if out_spec.kind in (OutputKind.BUFFER_MUTATION, OutputKind.USER_INPUT_MUTATION)
- else None
+ (
+ out_spec.target
+ if out_spec.kind
+ in (OutputKind.BUFFER_MUTATION, OutputKind.USER_INPUT_MUTATION)
+ else None
+ )
for out_spec in ep.graph_signature.output_specs
]
@@ -306,6 +317,7 @@ def _unlift_exported_program_lifted_states(ep: ExportedProgram) -> torch.nn.Modu
ep.call_spec.out_spec,
ep.state_dict,
ep.constants,
+ forward_arg_names=forward_arg_names,
)
unlift_gm = _create_stateful_graph_module(
new_gm, ep.range_constraints, ep.graph_signature
|
2.41.0
|
aee0e5ee881f9b9d247cf5fcaa3d9d3248a4059
|
Mon, 29 Apr 2024 21:22:35 +0000
|
[PATCH 0807/1000] [ez][CI] Move test_linalg and test_sparse_csr off CI_SERIAL_LIST (#125068)
|
* See https://github.com/pytorch/pytorch/pull/124649 for context. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125068 Approved by: https://github.com/huydhn, https://github.com/ZainRizvi
|
diff --git a/test/run_test.py b/test/run_test.py
index 5c1a7a8fbd..dae0570304 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -219,11 +219,9 @@ CI_SERIAL_LIST = [
"test_cpp_api_parity",
"test_reductions",
"test_fx_backends",
- "test_linalg",
"test_cpp_extensions_jit",
"test_torch",
"test_tensor_creation_ops",
- "test_sparse_csr",
"test_dispatch",
"test_python_dispatch", # torch.library creation and deletion must be serialized
"test_spectral_ops", # Cause CUDA illegal memory access https://github.com/pytorch/pytorch/issues/88916
|
2.41.0
|
20e5f306dce7d1b1103ec4ed0de3b9d7bc6155c
|
Mon, 29 Apr 2024 21:37:18 +0000
|
[PATCH 0808/1000] Update CODEOWNERS - Dataloader (#125181)
|
Fixes #124473 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125181 Approved by: https://github.com/gokulavasan, https://github.com/albanD
|
diff --git a/CODEOWNERS b/CODEOWNERS
index 3d09c31c43..6999f8553b 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -116,7 +116,7 @@ torch/profiler/ @aaronenyeshi
test/functorch/test_aotdispatch.py @ezyang @Chillee
# Dataloader
-torch/utils/data/ @ejguan
+torch/utils/data/ @andrewkho @gokulavasan
# hipify
torch/utils/hipify/ @jeffdaily @jithunnair-amd
|
2.41.0
|
6f8d96cabe69f0a091214e52fda45464cb36009
|
Mon, 29 Apr 2024 22:47:43 +0000
|
[PATCH 0810/1000] Fix typo in `compile` docstring regarding default `cache_size_limit` (#125145)
|
Docstring of `torch.compile` specifies that default `torch._dynamo.config.cache_size_limit` equals to `64`, while the value is `8` in the corresponding py file. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125145 Approved by: https://github.com/kit1980
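For reference, the limit is an ordinary config attribute, so it can be inspected or raised per run if a workload legitimately needs more recompiles per code object. A minimal sketch (the value 8 reflects this patch and may differ in other releases):
```python
import torch._dynamo as dynamo

print(dynamo.config.cache_size_limit)  # 8 at the time of this patch
dynamo.config.cache_size_limit = 64    # opt back into a larger per-code-object cache
```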
|
diff --git a/torch/__init__.py b/torch/__init__.py
index eee311f604..7a3bdd9aec 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -1806,7 +1806,7 @@ def compile(model: Optional[Callable] = None, *,
results are not applicable for subsequent calls (this is called a "guard
failure), you can use TORCH_LOGS=guards to debug these situations.
Multiple compiled results can be associated with a frame up to
- ``torch._dynamo.config.cache_size_limit``, which defaults to 64; at which
+ ``torch._dynamo.config.cache_size_limit``, which defaults to 8; at which
point we will fall back to eager. Note that compile caches are per
*code object*, not frame; if you dynamically create multiple copies of a
function, they will all share the same code cache.
|
2.41.0
|
caf03fd890b8038c83c3faefd360ad2b12a1386
|
Mon, 29 Apr 2024 23:25:23 +0000
|
[PATCH 0811/1000] Fix: `nn.Parameter` return type identified as `Tensor` instead of `nn.Parameter` (#125106)
|
Fixes #125105 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125106 Approved by: https://github.com/ezyang, https://github.com/albanD
|
diff --git a/test/typing/pass/creation_ops.py b/test/typing/pass/creation_ops.py
index c524d56f19..f866d3a162 100644
--- a/test/typing/pass/creation_ops.py
+++ b/test/typing/pass/creation_ops.py
@@ -2,6 +2,10 @@
# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
+
+from typing_extensions import assert_type
+
+
if TEST_NUMPY:
import numpy as np
@@ -117,3 +121,7 @@ torch.polar(abs, angle)
inp = torch.tensor([-1.5, 0, 2.0])
values = torch.tensor([0.5])
torch.heaviside(inp, values)
+
+# Parameter
+p = torch.nn.Parameter(torch.empty(1))
+assert_type(p, torch.nn.Parameter)
diff --git a/tools/pyi/gen_pyi.py b/tools/pyi/gen_pyi.py
index f0b9044c6f..59498f41f3 100644
--- a/tools/pyi/gen_pyi.py
+++ b/tools/pyi/gen_pyi.py
@@ -1064,14 +1064,14 @@ def gen_pyi(
"new_tensor": [
f"def new_tensor(self, data: Any, {FACTORY_PARAMS}) -> Tensor: ..."
],
- "__new__": ["def __new__(self, *args, **kwargs) -> Tensor: ..."],
+ "__new__": ["def __new__(cls, *args, **kwargs) -> Self: ..."],
# new and __init__ have the same signatures differ only in return type
# Adapted from legacy_tensor_ctor and legacy_tensor_new
"new": [
- f"def new(self, *args: Any, {DEVICE_PARAM}) -> Tensor: ...",
- "def new(self, storage: Storage) -> Tensor: ...",
- "def new(self, other: Tensor) -> Tensor: ...",
- f"def new(self, size: _size, *, {DEVICE_PARAM}) -> Tensor: ...",
+ f"def new(cls, *args: Any, {DEVICE_PARAM}) -> Self: ...",
+ "def new(cls, storage: Storage) -> Self: ...",
+ "def new(cls, other: Tensor) -> Self: ...",
+ f"def new(cls, size: _size, *, {DEVICE_PARAM}) -> Self: ...",
],
"__init__": [
f"def __init__(self, *args: Any, {DEVICE_PARAM}) -> None: ...",
diff --git a/torch/_C/__init__.pyi.in b/torch/_C/__init__.pyi.in
index 34e49e15d8..5e20dd31cd 100644
--- a/torch/_C/__init__.pyi.in
+++ b/torch/_C/__init__.pyi.in
@@ -29,7 +29,7 @@ from typing import (
overload,
runtime_checkable,
)
-from typing_extensions import ParamSpec
+from typing_extensions import ParamSpec, Self
import numpy
|
2.41.0
|
1e6ef753b57b342be749f38db8f18623f0b89d4
|
Mon, 29 Apr 2024 11:13:48 -0700
|
[PATCH 0812/1000] [dtensor] use str for reduce_op (#125172)
|
This PR uses str for reduce_op directly instead of the c10d enum. Since our functional collectives already use str, there's no reason to keep the c10d enum, which requires a conversion. The str hash + eq performance is also significantly faster than the c10d type, so this somewhat reduces CPU overhead too. Some local CPU benchmark results for `1000000` hash operations: ``` Hash performance for string type: 0.039897 seconds Hash performance for integer type: 0.304665 seconds ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/125172 Approved by: https://github.com/awgu, https://github.com/XilunWu, https://github.com/tianyu-l
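A minimal sketch of the kind of micro-benchmark described above (not the exact harness used for the numbers quoted): it times `1000000` hash calls on a plain string versus an enum-like stand-in. The `FakeReduceOp` enum below is a hypothetical substitute for the c10d reduce-op type, so absolute numbers will differ.
```python
import timeit
from enum import Enum


class FakeReduceOp(Enum):  # hypothetical stand-in for the c10d reduce-op type
    SUM = 0


n = 1_000_000
str_time = timeit.timeit(lambda: hash("sum"), number=n)
enum_time = timeit.timeit(lambda: hash(FakeReduceOp.SUM), number=n)
print(f"str hash:  {str_time:.6f} s")
print(f"enum hash: {enum_time:.6f} s")
```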
|
diff --git a/test/distributed/_tensor/test_pointwise_ops.py b/test/distributed/_tensor/test_pointwise_ops.py
index bed30e2990..4b25efdc91 100644
--- a/test/distributed/_tensor/test_pointwise_ops.py
+++ b/test/distributed/_tensor/test_pointwise_ops.py
@@ -16,7 +16,6 @@ from torch.distributed._tensor.placement_types import (
Replicate,
Shard,
)
-from torch.distributed.distributed_c10d import ReduceOp
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.distributed._tensor.common_dtensor import (
DTensorOpTestBase,
@@ -257,7 +256,7 @@ class DistElementwiseOpsTest(DTensorOpTestBase):
with self.assertRaisesRegex(RuntimeError, "supported"):
self._run_sharded_elementwise_ops(
device_mesh=device_mesh,
- placements=[_Partial(ReduceOp.SUM)],
+ placements=[_Partial("sum")],
input_size=(8, 5),
op=torch.nn.functional.dropout,
)
diff --git a/torch/distributed/_spmd/data_parallel.py b/torch/distributed/_spmd/data_parallel.py
index 80ad107b79..c1bfac89f6 100644
--- a/torch/distributed/_spmd/data_parallel.py
+++ b/torch/distributed/_spmd/data_parallel.py
@@ -6,7 +6,6 @@ from typing import Any, cast, Dict, List, Optional, Tuple
import torch
-import torch.distributed.distributed_c10d as c10d
import torch.fx as fx
import torch.library
import torch.nn as nn
@@ -158,9 +157,8 @@ def _gen_partial_strategy(mesh: DeviceMesh) -> PlacementStrategy:
# TODO: Only NCCL supports AVG so using backend like Gloo would
# crash, we should figure out a way to support avg reduction
# for non-NCCL backend
- reduce_op = c10d.ReduceOp.AVG # type: ignore[attr-defined]
return PlacementStrategy(
- output_specs=DTensorSpec(mesh=mesh, placements=(_Partial(reduce_op),)),
+ output_specs=DTensorSpec(mesh=mesh, placements=(_Partial("avg"),)),
)
diff --git a/torch/distributed/_tensor/ops/embedding_ops.py b/torch/distributed/_tensor/ops/embedding_ops.py
index bf6ad68738..763f122549 100644
--- a/torch/distributed/_tensor/ops/embedding_ops.py
+++ b/torch/distributed/_tensor/ops/embedding_ops.py
@@ -114,7 +114,7 @@ class _MaskPartial(_Partial):
# perform sum reduction
return funcol.all_reduce(
- tensor, reduceOp=self.reduce_op.name, group=(mesh, mesh_dim)
+ tensor, reduceOp=self.reduce_op, group=(mesh, mesh_dim)
)
def _reduce_shard_value(
diff --git a/torch/distributed/_tensor/ops/math_ops.py b/torch/distributed/_tensor/ops/math_ops.py
index 0048bda12b..495a7ae83b 100644
--- a/torch/distributed/_tensor/ops/math_ops.py
+++ b/torch/distributed/_tensor/ops/math_ops.py
@@ -6,7 +6,6 @@ from typing import cast, List, Optional, Sequence, Tuple, Union
import torch
-import torch.distributed.distributed_c10d as c10d
from torch.distributed._tensor.op_schema import (
OpSchema,
OpStrategy,
@@ -47,7 +46,7 @@ class NormReduction:
norm_type: Union[int, float, str]
-ReductionOpType = Union[NormReduction, c10d.ReduceOp.RedOpType]
+ReductionOpType = Union[NormReduction, str]
@dataclass(frozen=True)
@@ -74,11 +73,11 @@ class _NormPartial(_Partial):
"""Set the appropriate reduce op based on the norm type."""
# Use `object.__setattr__` to bypass frozen checks
if self.norm_type in (float("inf"), "inf"):
- object.__setattr__(self, "reduce_op", c10d.ReduceOp.MAX)
+ object.__setattr__(self, "reduce_op", "max")
elif self.norm_type in (float("-inf"), "-inf"):
- object.__setattr__(self, "reduce_op", c10d.ReduceOp.MIN)
+ object.__setattr__(self, "reduce_op", "min")
elif isinstance(self.norm_type, (int, float)):
- object.__setattr__(self, "reduce_op", c10d.ReduceOp.SUM)
+ object.__setattr__(self, "reduce_op", "sum")
else:
raise NotImplementedError(f"Unsupported norm type: {self.norm_type}")
@@ -94,9 +93,9 @@ class _NormPartial(_Partial):
One such f(x) is f(x) = x / sqrt(4). This generalizes to d ranks and
p-norm as f(x) = x / d^(1/p).
"""
- if self.reduce_op in (c10d.ReduceOp.MAX, c10d.ReduceOp.MIN):
+ if self.reduce_op in ("max", "min"):
return tensor
- elif self.reduce_op == c10d.ReduceOp.SUM:
+ elif self.reduce_op == "sum":
if self.norm_type == 0:
raise NotImplementedError(f"Unsupported norm type:: {self.norm_type}")
elif self.norm_type == 1:
@@ -125,14 +124,14 @@ class _NormPartial(_Partial):
return self._post_reduce_transform(reduced_tensor)
def _pre_reduce_transform(self, tensor: torch.Tensor) -> torch.Tensor:
- if self.reduce_op == c10d.ReduceOp.SUM:
+ if self.reduce_op == "sum":
assert isinstance(self.norm_type, (int, float)), f"{self.norm_type}"
if self.norm_type != 0 and self.norm_type != 1:
return tensor**self.norm_type
return tensor
def _post_reduce_transform(self, tensor: torch.Tensor) -> torch.Tensor:
- if self.reduce_op == c10d.ReduceOp.SUM:
+ if self.reduce_op == "sum":
assert isinstance(self.norm_type, (int, float)), f"{self.norm_type}"
if self.norm_type != 0 and self.norm_type != 1:
return tensor ** (1.0 / self.norm_type)
@@ -230,7 +229,7 @@ def common_reduction_strategy(
reduce_dims: List[int],
keep_dim: bool = False,
reduction_linear: bool = True,
- reduction_op: ReductionOpType = c10d.ReduceOp.SUM,
+ reduction_op: ReductionOpType = "sum",
) -> OpStrategy:
"""
reduction_linear means that the reduction `f` follows this rule:
@@ -277,22 +276,22 @@ def common_reduction_strategy(
LINEAR_REDUCTION_OP_MAP = {
- aten.all.default: c10d.ReduceOp.SUM,
- aten.all.dim: c10d.ReduceOp.SUM,
- aten.sum.default: c10d.ReduceOp.SUM,
- aten.sum.dim_IntList: c10d.ReduceOp.SUM,
- aten.prod.default: c10d.ReduceOp.PRODUCT,
- aten.prod.dim_int: c10d.ReduceOp.PRODUCT,
- aten.prod.int_out: c10d.ReduceOp.PRODUCT,
- aten.mean.default: c10d.ReduceOp.AVG,
- aten.mean.dim: c10d.ReduceOp.AVG,
- aten.mean.out: c10d.ReduceOp.AVG,
- aten.max.default: c10d.ReduceOp.MAX,
- aten.max.dim: c10d.ReduceOp.MAX,
- aten.max.out: c10d.ReduceOp.MAX,
- aten.min.default: c10d.ReduceOp.MIN,
- aten.min.dim: c10d.ReduceOp.MIN,
- aten.min.out: c10d.ReduceOp.MIN,
+ aten.all.default: "sum",
+ aten.all.dim: "sum",
+ aten.sum.default: "sum",
+ aten.sum.dim_IntList: "sum",
+ aten.prod.default: "product",
+ aten.prod.dim_int: "product",
+ aten.prod.int_out: "product",
+ aten.mean.default: "avg",
+ aten.mean.dim: "avg",
+ aten.mean.out: "avg",
+ aten.max.default: "max",
+ aten.max.dim: "max",
+ aten.max.out: "max",
+ aten.min.default: "min",
+ aten.min.dim: "min",
+ aten.min.out: "min",
}
@@ -542,7 +541,7 @@ def nll_loss_forward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrate
)
else:
if reduction == Reduction.MEAN.value:
- reduction_op = c10d.ReduceOp.AVG
+ reduction_op = "avg"
if not is_tensor_evenly_shardable(
target_expected_spec.shape, target_expected_spec
):
@@ -551,7 +550,7 @@ def nll_loss_forward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrate
resulting in biased mean result."
)
else: # reduction == Reduction.SUM.value:
- reduction_op = c10d.ReduceOp.SUM
+ reduction_op = "sum"
reduce_dims = list(range(target_expected_spec.ndim))
reduce_dims_map = _infer_reduce_dims_map(
reduce_dims, target_expected_spec.ndim, keep_dim=False
@@ -572,7 +571,7 @@ def nll_loss_forward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrate
target_expected_spec.placements,
reduce_dims,
reduce_dims_map,
- c10d.ReduceOp.SUM,
+ "sum",
)
total_weight_expected_spec = DTensorSpec(
mesh=mesh,
@@ -899,7 +898,7 @@ def layer_norm_bwd_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy
outer_dims, input_src_spec.ndim, False
)
out_placements = map_placements_after_reduction(
- inp_placements, outer_dims, reduce_dims_map, c10d.ReduceOp.SUM
+ inp_placements, outer_dims, reduce_dims_map, "sum"
)
output_specs_list.append(
DTensorSpec(
@@ -932,7 +931,7 @@ def layer_norm_bwd_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy
outer_dims, grad_out_spec.ndim, False
)
out_placements = map_placements_after_reduction(
- inp_placements, outer_dims, reduce_dims_map, c10d.ReduceOp.SUM
+ inp_placements, outer_dims, reduce_dims_map, "sum"
)
output_specs_list.append(
DTensorSpec(
diff --git a/torch/distributed/_tensor/placement_types.py b/torch/distributed/_tensor/placement_types.py
index d06c317c16..b955208d81 100644
--- a/torch/distributed/_tensor/placement_types.py
+++ b/torch/distributed/_tensor/placement_types.py
@@ -5,7 +5,6 @@ from typing import Any, cast, List, NamedTuple, Optional, Tuple
import torch
import torch.distributed._functional_collectives as funcol
-import torch.distributed.distributed_c10d as c10d
from torch.distributed._tensor._collective_utils import (
mesh_broadcast,
@@ -154,7 +153,7 @@ class Shard(Placement):
self,
tensor: torch.Tensor,
mesh: DeviceMesh,
- reduce_op: c10d.ReduceOp.RedOpType,
+ reduce_op: str,
mesh_dim: int,
) -> torch.Tensor:
"""
@@ -178,7 +177,7 @@ class Shard(Placement):
tensor = tensor.contiguous()
output = funcol.reduce_scatter_tensor(
- tensor, reduce_op.name, scatter_dim=self.dim, group=(mesh, mesh_dim)
+ tensor, reduce_op, scatter_dim=self.dim, group=(mesh, mesh_dim)
)
if is_padded:
@@ -310,13 +309,13 @@ class _Partial(Placement):
# 3. _partition_value: partition the value of a replicated tensor on the mesh dimension
# We can implement custom reductions as needed by subclassing this
# class and override those contracts.
- reduce_op: c10d.ReduceOp.RedOpType = c10d.ReduceOp.SUM
+ reduce_op: str = "sum"
def _reduce_value(
self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int
) -> torch.Tensor:
return funcol.all_reduce(
- tensor, reduceOp=self.reduce_op.name, group=(mesh, mesh_dim)
+ tensor, reduceOp=self.reduce_op, group=(mesh, mesh_dim)
)
def _reduce_shard_value(
@@ -338,9 +337,7 @@ class _Partial(Placement):
# - the _reduce_value on a sum reduce op would just be a sum(allreduce) operation
# TODO: if the reduce_op is min/max, etc. the _partition_value should be a
# different operation
- assert (
- self.reduce_op == c10d.ReduceOp.SUM
- ), "only support replicate to PartialSUM for now!"
+ assert self.reduce_op == "sum", "only support replicate to PartialSUM for now!"
num_chunks = mesh.size(mesh_dim=mesh_dim)
return tensor / num_chunks
@@ -356,7 +353,7 @@ class _Partial(Placement):
"""
machine readable representation of the Partial placement
"""
- return f"_Partial(reduce_op={self.reduce_op})"
+ return f"_Partial({self.reduce_op})"
def __str__(self) -> str:
"""
|
2.41.0
|
ee6105f84cb26af664c0f9179e2ddb140f07aa6
|
Thu, 25 Apr 2024 17:16:28 -0700
|
[PATCH 0813/1000] Fix edge case in cudagraph pool detection (#124981)
|
When we do cudagraph warmup, we record which outputs are in the cudagraph pool, so that when we subsequently invoke a cudagraph and need to reclaim its memory we can free the prior run's outputs and make them error on access. In warmup, we detect this by ignoring outputs which are an alias of an input that is not a prior output. We did this by checking the data pointer. In very rare situations, the data pointer of a non-cudagraph input might get reallocated to a cudagraph pool, causing us to ignore it. This was happening with a gpt-fast error on gemma 2 when coordinate_descent_tuning was set to False. This update checks aliasing with non-cudagraph inputs by looking at the storage pointer instead. Unrelated: saw very weird behavior where an output had the same data pointer as a supposedly live input but not the same cdata 🤔 I would think that is not possible. ``` out[0]._cdata in [ref()._cdata for ref in non_cudagraph_inps_storage_refs] # False out[0].data_ptr() in [ref().data_ptr() for ref in non_cudagraph_inps_storage_refs] # True ``` Differential Revision: [D56607721](https://our.internmc.facebook.com/intern/diff/D56607721) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124981 Approved by: https://github.com/ezyang
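A minimal sketch of the two aliasing checks being contrasted, using CPU tensors for portability (the real bug involves the CUDA caching allocator recycling freed blocks into a cudagraph private pool; variable names here are illustrative):
```python
import weakref

import torch

inp = torch.randn(8)
inp_storage = inp.untyped_storage()
inp_storage_ref = weakref.ref(inp_storage)  # what the fix records per input
inp_data_ptr = inp_storage.data_ptr()       # what the old check recorded

out = torch.randn(8)
# Old check: "does the output live at an address we saw before?" An address can
# be recycled for a brand-new allocation, so this can misfire.
alias_by_ptr = out.untyped_storage().data_ptr() == inp_data_ptr
# New check: "is the output backed by the very same storage object?" Compared
# via the storage's _cdata, which address reuse cannot fake.
s = inp_storage_ref()
alias_by_storage = s is not None and out.untyped_storage()._cdata == s._cdata
print(alias_by_ptr, alias_by_storage)
```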
|
diff --git a/torch/_inductor/cudagraph_trees.py b/torch/_inductor/cudagraph_trees.py
index f1ca0950b9..032f6dc840 100644
--- a/torch/_inductor/cudagraph_trees.py
+++ b/torch/_inductor/cudagraph_trees.py
@@ -584,16 +584,16 @@ class CUDAWarmupNode:
}
def get_non_cudagraph_inps():
- non_cudagraph_inps = set()
+ non_cudagraph_inps = []
for t in itertools.chain(new_inputs, self.wrapped_function.constants):
if (
isinstance(t, torch.Tensor)
and t.untyped_storage().data_ptr() not in existing_path_data_ptrs
):
- non_cudagraph_inps.add(t.untyped_storage().data_ptr())
+ non_cudagraph_inps.append(weakref.ref(t.untyped_storage()))
return non_cudagraph_inps
- non_cudagraph_inps = get_non_cudagraph_inps()
+ non_cudagraph_inps_storages = get_non_cudagraph_inps()
if config.triton.slow_path_cudagraph_asserts and not self.already_warm:
refs = list(self.path_live_weakrefs())
@@ -606,15 +606,26 @@ class CUDAWarmupNode:
), get_history_recording():
out = self.wrapped_function.model(new_inputs)
+ # We need to know which outputs are allocated within the cudagraph pool
+ # so that we can deallocate them at the beginning of the next cudagraph step,
+ # and set their access to error.
+ # We use a weakref to the inputs storage, in case a block which was previously
+ # allocated to the general caching allocator pool gets reallocated to a private pool.
+
+ non_cudagraph_inps_storage_ptrs = set()
+ for storage in non_cudagraph_inps_storages:
+ s = storage()
+ if s is not None:
+ non_cudagraph_inps_storage_ptrs.add(s._cdata)
+
assert len(new_inputs) == 0
# sdpa returns cpu tensors when not recording cuda graph
def add_ref(o):
return (
- o is not None
- and isinstance(o, torch.Tensor)
+ isinstance(o, torch.Tensor)
and o.is_cuda
- and o.untyped_storage().data_ptr() not in non_cudagraph_inps
+ and o.untyped_storage()._cdata not in non_cudagraph_inps_storage_ptrs
and o.untyped_storage().data_ptr() != 0
)
@@ -626,11 +637,8 @@ class CUDAWarmupNode:
)
if config.triton.slow_path_cudagraph_asserts and not self.already_warm:
- out_refs = self.path_live_weakrefs()
- new_storages = [
- t for t in out_refs if t.data_ptr() not in non_cudagraph_inps
- ]
- check_memory_pool(self.device_index, self.cuda_graphs_pool, new_storages)
+ out_refs = list(self.path_live_weakrefs())
+ check_memory_pool(self.device_index, self.cuda_graphs_pool, out_refs)
return out
|
2.41.0
|
d717cd7c38ae6408dbe137aa2dbf2419c544373
|
Mon, 29 Apr 2024 23:39:50 +0000
|
[PATCH 0814/1000] [TD] Enable td on cpu windows (#125049)
|
yolo. Also: * Ensure that at least 1 test always gets run (`//` does truncation, which results in 0 if too few tests are discovered) * Don't run test removal on slow tests - I'm not touching that yet. I am avoiding everything other than pull + trunk workflows, so this is not done on Windows CUDA, which runs on periodic. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125049 Approved by: https://github.com/huydhn, https://github.com/ZainRizvi
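A small example of the truncation issue fixed in interface.py (values are illustrative):
```python
tests = ["test_a", "test_b", "test_c"]
n = 25                                 # take the "top 25%"
old_index = n * len(tests) // 100      # 75 // 100 == 0 -> nothing would be selected
new_index = n * len(tests) // 100 + 1  # the "+ 1" guarantees at least one test runs
print(tests[:old_index], tests[:new_index])  # [] ['test_a']
```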
|
diff --git a/test/run_test.py b/test/run_test.py
index dae0570304..cdf8ddb624 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -30,6 +30,7 @@ from torch.testing._internal.common_utils import (
IS_CI,
IS_LINUX,
IS_MACOS,
+ IS_WINDOWS,
parser as common_parser,
retry_shell,
set_cwd,
@@ -1192,9 +1193,12 @@ def parse_args():
and os.getenv("TEST_CONFIG") == "distributed"
and TEST_CUDA
)
+ or (IS_WINDOWS and not TEST_CUDA)
)
and os.getenv("BRANCH", "") != "main"
- and not strtobool(os.environ.get("NO_TD", "False")),
+ and not strtobool(os.environ.get("NO_TD", "False"))
+ and "slow" not in os.getenv("TEST_CONFIG", "")
+ and "slow" not in os.getenv("BUILD_ENVIRONMENT", ""),
)
parser.add_argument(
"additional_unittest_args",
diff --git a/tools/testing/target_determination/heuristics/interface.py b/tools/testing/target_determination/heuristics/interface.py
index 6cdd90e8a7..77052e6bba 100644
--- a/tools/testing/target_determination/heuristics/interface.py
+++ b/tools/testing/target_determination/heuristics/interface.py
@@ -116,7 +116,8 @@ class TestPrioritizations:
"""Divides list of tests into two based on the top n% of scores. The
first list is the top, and the second is the rest."""
tests = [x[1] for x in self._traverse_scores()]
- return tests[: n * len(tests) // 100], tests[n * len(tests) // 100 :]
+ index = n * len(tests) // 100 + 1
+ return tests[:index], tests[index:]
def get_info_str(self, verbose: bool = True) -> str:
info = ""
|
2.41.0
|
44f341aa4eaa4f2e7068e5f83fa6fccb0a02ccc
|
Mon, 29 Apr 2024 23:59:23 +0000
|
[PATCH 0815/1000] Fix ref leak in `dtype.to_complex()`/`to_real()` (#125154)
|
By using `Py_NewRef`. Also, wrap `THPDtype_to_real`/`THPDtype_to_complex` calls with `HANDLE_TH_ERRORS`. Add regression tests for the above issues: calling to_complex on integral dtypes, which raises an exception, and checking that the reference count of the result of the same to_complex/to_real call stays stable, to detect whether a leak is happening. Replace ```cpp auto dtype = (PyObject*)torch::getTHPDtype(current_dtype); Py_INCREF(dtype); return dtype; ``` with a more compact/streamlined equivalent ```cpp return Py_NewRef(torch::getTHPDtype(current_dtype)); ``` Fixes https://github.com/pytorch/pytorch/issues/124868 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125154 Approved by: https://github.com/Skylion007, https://github.com/albanD
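The same refcount-based check used by the regression test can be reproduced from Python; on a leaking build the set below would keep growing instead of settling on one or two values:
```python
import sys

import torch

counts = {sys.getrefcount(torch.float32.to_complex()) for _ in range(10)}
assert len(counts) < 3, f"possible reference leak: {sorted(counts)}"
assert torch.float32.to_complex() is torch.complex64  # dtypes are cached singletons
```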
|
diff --git a/test/test_type_info.py b/test/test_type_info.py
index 729de86d24..1288b40ff6 100644
--- a/test/test_type_info.py
+++ b/test/test_type_info.py
@@ -6,6 +6,7 @@ from torch.testing._internal.common_utils import TestCase, run_tests, TEST_NUMPY
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
+import sys
import torch
import unittest
@@ -23,6 +24,8 @@ class TestDTypeInfo(TestCase):
for dtype in [torch.int64, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool]:
with self.assertRaises(TypeError):
_ = torch.finfo(dtype)
+ with self.assertRaises(RuntimeError):
+ dtype.to_complex()
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_iinfo(self):
@@ -88,6 +91,26 @@ class TestDTypeInfo(TestCase):
self.assertEqual(xinfo.resolution, 1.0)
self.assertEqual(xinfo.dtype, "float8_e4m3fn")
+ def test_to_complex(self):
+ # Regression test for https://github.com/pytorch/pytorch/issues/124868
+ # If reference count is leaked this would be a set of 10 elements
+ ref_cnt = {sys.getrefcount(torch.float32.to_complex()) for _ in range(10)}
+ self.assertLess(len(ref_cnt), 3)
+
+ self.assertEqual(torch.float64.to_complex(), torch.complex128)
+ self.assertEqual(torch.float32.to_complex(), torch.complex64)
+ self.assertEqual(torch.float16.to_complex(), torch.complex32)
+
+ def test_to_real(self):
+ # Regression test for https://github.com/pytorch/pytorch/issues/124868
+ # If reference count is leaked this would be a set of 10 elements
+ ref_cnt = {sys.getrefcount(torch.cfloat.to_real()) for _ in range(10)}
+ self.assertLess(len(ref_cnt), 3)
+
+ self.assertEqual(torch.complex128.to_real(), torch.double)
+ self.assertEqual(torch.complex64.to_real(), torch.float32)
+ self.assertEqual(torch.complex32.to_real(), torch.float16)
+
if __name__ == '__main__':
TestCase._default_dtype_check_enabled = True
run_tests()
diff --git a/torch/csrc/Dtype.cpp b/torch/csrc/Dtype.cpp
index 8eee2a02fa..4b911322ff 100644
--- a/torch/csrc/Dtype.cpp
+++ b/torch/csrc/Dtype.cpp
@@ -7,6 +7,7 @@
#include <torch/csrc/utils/object_ptr.h>
#include <torch/csrc/utils/python_numbers.h>
#include <torch/csrc/utils/python_strings.h>
+#include <torch/csrc/utils/pythoncapi_compat.h>
#include <torch/csrc/utils/tensor_dtypes.h>
#include <torch/csrc/utils/tensor_types.h>
#include <cstring>
@@ -74,21 +75,25 @@ PyObject* THPDtype_reduce(PyObject* _self, PyObject* noargs) {
}
PyObject* THPDtype_to_real(PyObject* _self, PyObject* noargs) {
+ HANDLE_TH_ERRORS
auto* self = (THPDtype*)_self;
auto scalar_type = self->scalar_type;
if (!at::isFloatingType(self->scalar_type)) {
scalar_type = at::toRealValueType(self->scalar_type);
}
- return (PyObject*)torch::getTHPDtype(scalar_type);
+ return Py_NewRef(torch::getTHPDtype(scalar_type));
+ END_HANDLE_TH_ERRORS
}
PyObject* THPDtype_to_complex(PyObject* _self, PyObject* noargs) {
+ HANDLE_TH_ERRORS
auto* self = (THPDtype*)_self;
auto scalar_type = self->scalar_type;
if (!at::isComplexType(self->scalar_type)) {
scalar_type = at::toComplexType(self->scalar_type);
}
- return (PyObject*)torch::getTHPDtype(scalar_type);
+ return Py_NewRef(torch::getTHPDtype(scalar_type));
+ END_HANDLE_TH_ERRORS
}
typedef PyObject* (*getter)(PyObject*, void*);
diff --git a/torch/csrc/Module.cpp b/torch/csrc/Module.cpp
index b446d29395..22a257909b 100644
--- a/torch/csrc/Module.cpp
+++ b/torch/csrc/Module.cpp
@@ -1048,9 +1048,7 @@ PyObject* THPModule_setFlushDenormal(PyObject* _unused, PyObject* arg) {
PyObject* THPModule_getDefaultDtype(PyObject* _unused, PyObject* arg) {
HANDLE_TH_ERRORS
auto scalar_type = torch::tensors::get_default_scalar_type();
- auto dtype = (PyObject*)torch::getTHPDtype(scalar_type);
- Py_INCREF(dtype);
- return dtype;
+ return Py_NewRef(torch::getTHPDtype(scalar_type));
END_HANDLE_TH_ERRORS
}
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index 6c9870a5c4..7e1c1c69ec 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -536,9 +536,7 @@ static PyObject* get_autocast_dtype(
auto r = parser.parse(args, kwargs, parsed_args);
auto device_type = at::Device(r.string(0)).type();
at::ScalarType current_dtype = at::autocast::get_autocast_dtype(device_type);
- auto dtype = (PyObject*)torch::getTHPDtype(current_dtype);
- Py_INCREF(dtype);
- return dtype;
+ return Py_NewRef(torch::getTHPDtype(current_dtype));
END_HANDLE_TH_ERRORS
}
@@ -735,9 +733,7 @@ static PyObject* get_autocast_gpu_dtype(PyObject* _unused, PyObject* arg) {
TORCH_WARN_DEPRECATION(
"torch.get_autocast_gpu_dtype() is deprecated. Please use torch.get_autocast_dtype('cuda') instead.")
at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kCUDA);
- auto dtype = (PyObject*)torch::getTHPDtype(current_dtype);
- Py_INCREF(dtype);
- return dtype;
+ return Py_NewRef(torch::getTHPDtype(current_dtype));
END_HANDLE_TH_ERRORS
}
@@ -746,9 +742,7 @@ static PyObject* get_autocast_cpu_dtype(PyObject* _unused, PyObject* arg) {
TORCH_WARN_DEPRECATION(
"torch.get_autocast_cpu_dtype() is deprecated. Please use torch.get_autocast_dtype('cpu') instead.")
at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kCPU);
- auto dtype = (PyObject*)torch::getTHPDtype(current_dtype);
- Py_INCREF(dtype);
- return dtype;
+ return Py_NewRef(torch::getTHPDtype(current_dtype));
END_HANDLE_TH_ERRORS
}
@@ -757,9 +751,7 @@ static PyObject* get_autocast_ipu_dtype(PyObject* _unused, PyObject* arg) {
TORCH_WARN_DEPRECATION(
"torch.get_autocast_ipu_dtype() is deprecated. Please use torch.get_autocast_dtype('ipu') instead.")
at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kIPU);
- auto dtype = (PyObject*)torch::getTHPDtype(current_dtype);
- Py_INCREF(dtype);
- return dtype;
+ return Py_NewRef(torch::getTHPDtype(current_dtype));
END_HANDLE_TH_ERRORS
}
@@ -768,9 +760,7 @@ static PyObject* get_autocast_xla_dtype(PyObject* _unused, PyObject* arg) {
TORCH_WARN_DEPRECATION(
"torch.get_autocast_xla_dtype() is deprecated. Please use torch.get_autocast_dtype('xla') instead.")
at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kXLA);
- auto dtype = (PyObject*)torch::getTHPDtype(current_dtype);
- Py_INCREF(dtype);
- return dtype;
+ return Py_NewRef(torch::getTHPDtype(current_dtype));
END_HANDLE_TH_ERRORS
}
|
2.41.0
|
7631d6eae30ad69bd21ab4ac01fdc4268a0469e
|
Tue, 30 Apr 2024 00:24:11 +0000
|
[PATCH 0816/1000] Revert "CI: add aarch64 linux workflow (#121284)"
|
This reverts commit 32cf04cb7f7aa14aff4d1cf40517d5de797550e7. Reverted https://github.com/pytorch/pytorch/pull/121284 on behalf of https://github.com/malfet due to the test-only changes not having been reverted ([comment](https://github.com/pytorch/pytorch/pull/121284#issuecomment-2083925890))
|
diff --git a/.ci/docker/build.sh b/.ci/docker/build.sh
index 1b8ed8df93..2344862643 100755
--- a/.ci/docker/build.sh
+++ b/.ci/docker/build.sh
@@ -306,12 +306,6 @@ case "$image" in
DB=yes
VISION=yes
CONDA_CMAKE=yes
- # snadampal: skipping sccache due to the following issue
- # https://github.com/pytorch/pytorch/issues/121559
- SKIP_SCCACHE_INSTALL=yes
- # snadampal: skipping llvm src build install because the current version
- # from pytorch/llvm:9.0.1 is x86 specific
- SKIP_LLVM_SRC_BUILD_INSTALL=yes
;;
*)
# Catch-all for builds that are not hardcoded.
@@ -405,8 +399,6 @@ DOCKER_BUILDKIT=1 docker build \
--build-arg "EXECUTORCH=${EXECUTORCH}" \
--build-arg "BASEKIT_VERSION=${BASEKIT_VERSION}" \
--build-arg "ACL=${ACL:-}" \
- --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
- --build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \
-f $(dirname ${DOCKERFILE})/Dockerfile \
-t "$tmp_tag" \
"$@" \
diff --git a/.ci/docker/requirements-ci.txt b/.ci/docker/requirements-ci.txt
index 8551246c7e..e62ddfdd5f 100644
--- a/.ci/docker/requirements-ci.txt
+++ b/.ci/docker/requirements-ci.txt
@@ -263,11 +263,10 @@ unittest-xml-reporting<=3.2.0,>=2.0.0
#Pinned versions:
#test that import:
+#wheel not found on aarch64, and source build requires rust
lintrunner==0.10.7 ; platform_machine == "x86_64"
-#lintrunner is supported on aarch64-linux only from 0.12.4 version
-lintrunner==0.12.5 ; platform_machine == "aarch64"
#Description: all about linters!
-#Pinned versions: 0.10.7 on x86 and 0.12.5 on aarch64
+#Pinned versions: 0.10.7
#test that import:
rockset==1.0.3
diff --git a/.ci/docker/ubuntu/Dockerfile b/.ci/docker/ubuntu/Dockerfile
index b471ce3b89..bea3d3ec45 100644
--- a/.ci/docker/ubuntu/Dockerfile
+++ b/.ci/docker/ubuntu/Dockerfile
@@ -169,11 +169,9 @@ RUN rm install_acl.sh
ENV INSTALLED_ACL ${ACL}
# Install ccache/sccache (do this last, so we get priority in PATH)
-ARG SKIP_SCCACHE_INSTALL
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
-RUN if [ -z "${SKIP_SCCACHE_INSTALL}" ]; then bash ./install_cache.sh; fi
-RUN rm install_cache.sh
+RUN bash ./install_cache.sh && rm install_cache.sh
# Add jni.h for java host build
COPY ./common/install_jni.sh install_jni.sh
@@ -190,9 +188,7 @@ ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
# Install LLVM dev version (Defined in the pytorch/builder github repository)
-ARG SKIP_LLVM_SRC_BUILD_INSTALL
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
-RUN if [ -n "${SKIP_LLVM_SRC_BUILD_INSTALL}" ]; then set -eu; rm -rf /opt/llvm; fi
# AWS specific CUDA build guidance
ENV TORCH_CUDA_ARCH_LIST Maxwell
diff --git a/.ci/pytorch/build.sh b/.ci/pytorch/build.sh
index f7eee9fe9a..13069482ae 100755
--- a/.ci/pytorch/build.sh
+++ b/.ci/pytorch/build.sh
@@ -376,8 +376,4 @@ if [[ "$BUILD_ENVIRONMENT" != *libtorch* && "$BUILD_ENVIRONMENT" != *bazel* ]];
python tools/stats/export_test_times.py
fi
-# snadampal: skipping it till sccache support added for aarch64
-# https://github.com/pytorch/pytorch/issues/121559
-if [[ "$BUILD_ENVIRONMENT" != *aarch64* ]]; then
- print_sccache_stats
-fi
+print_sccache_stats
diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh
index c259a58615..b13e41681a 100755
--- a/.ci/pytorch/test.sh
+++ b/.ci/pytorch/test.sh
@@ -181,11 +181,6 @@ if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]] ; then
export PATH="$HOME/.local/bin:$PATH"
fi
-if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
- # TODO: revisit this once the CI is stabilized on aarch64 linux
- export VALGRIND=OFF
-fi
-
install_tlparse
# DANGER WILL ROBINSON. The LD_PRELOAD here could cause you problems
diff --git a/.github/pytorch-probot.yml b/.github/pytorch-probot.yml
index fafa314652..c7b554ce44 100644
--- a/.github/pytorch-probot.yml
+++ b/.github/pytorch-probot.yml
@@ -8,7 +8,6 @@ ciflow_push_tags:
- ciflow/binaries_wheel
- ciflow/inductor
- ciflow/inductor-perf-compare
|
- ciflow/mps
|
369ee49cc34b15d06b69f0c7d6e350ff6bad440
|
Tue, 30 Apr 2024 01:18:19 +0000
|
[PATCH 0818/1000] Update torch-xpu-ops pin (ATen XPU implementation) (#125011)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125011 Approved by: https://github.com/EikanWang
|
diff --git a/third_party/xpu.txt b/third_party/xpu.txt
index 12007db75f..8d6e43952c 100644
--- a/third_party/xpu.txt
+++ b/third_party/xpu.txt
@@ -1 +1 @@
-84db213ab7125fce94aa2f00d2c61811b9384f40
+5bf9e0cc768f7a3b13d829118683275f324399f1
|
2.41.0
|
969f01d739856c5e878ba9950be461e0fe03427
|
Mon, 29 Apr 2024 15:13:57 -0700
|
[PATCH 0820/1000] [FSDP2] Accumulated in `reduce_dtype` if not syncing grads (#125191)
|
For microbatching use cases (e.g. PP), we may use fp32 reduce-scatter (i.e. `MixedPrecisionPolicy(reduce_dtype=torch.float32)`), where we want to accumulate the unsharded gradients in fp32 across microbatches until reduce-scattering in fp32 upon the last microbatch. Note that the `unsharded_param` is in bf16, so we must save the fp32 accumulated gradient to an attribute different from `.grad`. Moreover, saving a new attribute on the `torch.Tensor` leads to some annoying type checking issues (where the attribute may not be defined), so this PR prefers to save the attribute on the `FSDPParam` class instead. One could argue that this behavior should be configurable, but since I think for large-scale training, everyone is leaning toward fp32 accumulation across microbatches, let us avoid adding another argument for now. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125191 Approved by: https://github.com/weifengpy ghstack dependencies: #125190
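A minimal sketch of the accumulation pattern described above, outside of FSDP and with illustrative names (not FSDP2 internals): gradients are computed in bf16 but accumulated across microbatches in fp32 so that rounding error does not build up before the final reduction.
```python
import torch

param = torch.nn.Parameter(torch.randn(4, 4))  # fp32 copy the optimizer would see
compute_param = param.detach().to(torch.bfloat16).requires_grad_(True)
fp32_accum = None

for microbatch in torch.randn(3, 8, 4).to(torch.bfloat16):
    loss = (microbatch @ compute_param).sum()
    loss.backward()
    grad = compute_param.grad
    compute_param.grad = None
    # upcast before accumulating so bf16 rounding error does not compound
    fp32_accum = grad.float() if fp32_accum is None else fp32_accum + grad.float()

param.grad = fp32_accum  # the fp32 reduce-scatter would happen on the last microbatch
```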
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py b/test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
index 93c14ccc0e..4c660392b9 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py
@@ -3,7 +3,7 @@
import copy
import functools
-from typing import Dict, Optional, Union
+from typing import Dict, List, Optional, Union
import torch
import torch.distributed as dist
@@ -211,6 +211,100 @@ class TestFullyShardMixedPrecisionTraining(FSDPTest):
self.assertEqual(fsdp_loss, ref_loss)
check_sharded_parity(self, ref_model, model)
+ @skip_if_lt_x_gpu(2)
+ def test_grad_acc_with_reduce_dtype(self):
+ """
+ Tests that gradient accumulation without reduce-scatter when using
+ bf16 compute and fp32 reduction accumulates the unsharded gradients in
+ fp32.
+ """
+ self.run_subtests(
+ {"reshard_after_forward": [True, False]},
+ self._test_grad_acc_with_reduce_dtype,
+ )
+
+ def _test_grad_acc_with_reduce_dtype(self, reshard_after_forward: bool):
+ torch.manual_seed(42)
+ param_dtype, reduce_dtype = (torch.bfloat16, torch.float32)
+ mp_policy = MixedPrecisionPolicy(
+ param_dtype=param_dtype, reduce_dtype=reduce_dtype
+ )
+ model = nn.Sequential(*[MLP(16, torch.device("cpu")) for _ in range(3)])
+ # To emulate the mixed precision implementation where forward/backward
+ # compute use bf16 and optimizer uses fp32, we maintain both an fp32
+ # and a bf16 copy of the reference model
+ ref_model = copy.deepcopy(model).cuda()
+ ref_model_compute = copy.deepcopy(ref_model).to(param_dtype)
+ ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
+ for mlp in model:
+ fully_shard(
+ mlp, reshard_after_forward=reshard_after_forward, mp_policy=mp_policy
+ )
+ fully_shard(
+ model, reshard_after_forward=reshard_after_forward, mp_policy=mp_policy
+ )
+ optim = torch.optim.Adam(model.parameters(), lr=1e-2)
+ orig_reduce_scatter = dist.reduce_scatter_tensor
+
+ def assert_fn(output: torch.Tensor):
+ self.assertEqual(output.dtype, reduce_dtype)
+
+ reduce_scatter = functools.partial(
+ reduce_scatter_with_assert, self, orig_reduce_scatter, assert_fn
+ )
+ torch.manual_seed(42 + self.rank + 1)
+ device = torch.device("cuda")
+ # Train on the same input to avoid loss explosion
+ num_microbatches = 4
+ inp = torch.randn((2 * num_microbatches, 16), device=device, dtype=param_dtype)
+ for iter_idx in range(10):
+ microbatch_inps = torch.chunk(inp, 4)
+ for microbatch_idx in range(num_microbatches):
+ is_last_microbatch = microbatch_idx == num_microbatches - 1
+ model.set_requires_gradient_sync(is_last_microbatch)
+ model.set_reshard_after_backward(
+ is_last_microbatch or reshard_after_forward
+ )
+ losses: List[torch.Tensor] = []
+ for _model in (ref_model_compute, model):
+ losses.append(
+ _model(microbatch_inps[microbatch_idx].detach()).sum()
+ )
+ self.assertEqual(losses[-1].dtype, param_dtype)
+ with patch_reduce_scatter(reduce_scatter):
+ losses[-1].backward()
+ self.assertEqual(losses[0], losses[1])
+ # Manually accumulate gradients into the base reference model
+ # from the compute reference model in fp32
+ for ref_param, ref_param_compute in zip(
+ ref_model.parameters(), ref_model_compute.parameters()
+ ):
+ self.assertTrue(ref_param_compute.grad is not None)
+ self.assertEqual(ref_param.dtype, torch.float32)
+ if ref_param.grad is not None:
+ ref_param.grad += ref_param_compute.grad
+ else:
+ ref_param.grad = ref_param_compute.grad.to(ref_param.dtype)
+ ref_param_compute.grad = None
+ # Manually reduce gradients for the reference model on the last
+ # microbatch to implement data parallelism
+ if is_last_microbatch:
+ for ref_param in ref_model.parameters():
+ self.assertTrue(ref_param.grad is not None)
+ dist.all_reduce(ref_param.grad)
+ ref_param.grad /= self.world_size
+ check_sharded_parity(self, ref_model, model)
+ ref_optim.step()
+ optim.step()
+ ref_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
+ optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
+ # Manually copy parameters from the base reference model to the
+ # compute reference model to run the optimizer step for the latter
+ for ref_param, ref_param_compute in zip(
+ ref_model.parameters(), ref_model_compute.parameters()
+ ):
+ ref_param_compute.detach().copy_(ref_param)
+
class TestFullyShardMixedPrecisionCasts(FSDPTestMultiThread):
@property
diff --git a/torch/distributed/_composable/fsdp/_fsdp_api.py b/torch/distributed/_composable/fsdp/_fsdp_api.py
index a317c6dddd..2bf0278ed4 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_api.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_api.py
@@ -28,8 +28,10 @@ class MixedPrecisionPolicy:
gradient reduction (i.e. reduce-scatter or all-reduce). If this is
``None`` but ``param_dtype`` is not ``None``, then the reduction
uses the compute dtype. This can be used to run gradient reduction
- in full precision while using low precision for compute. (Default:
- ``None``)
+ in full precision while using low precision for compute. If also
+ gradient reduction is disabled via :meth:`set_requires_gradient_sync`,
+ then FSDP will accumulate gradients using ``reduce_dtype``.
+ (Default: ``None``)
output_dtype (Optional[torch.dtype]): This specifies the dtype for
casting floating-point forward outputs. This can be used to
help implement cases where different modules have different mixed
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param.py b/torch/distributed/_composable/fsdp/_fsdp_param.py
index 19bba5a9ba..b05abb5f37 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param.py
@@ -126,6 +126,7 @@ class FSDPParam:
_sharded_post_forward_param_data: Optional[torch.Tensor] # 1D
_sharded_post_forward_param: Optional[nn.Parameter] # ND
_unsharded_param: nn.Parameter # ND
+ unsharded_accumulated_grad: Optional[torch.Tensor] # ND
_global_placements: Tuple[Placement, ...]
_global_size: torch.Size
_global_stride: Tuple[int, ...]
@@ -160,6 +161,7 @@ class FSDPParam:
self._init_sharded_post_forward_param_metadata(param)
self._init_extensions()
self.all_gather_outputs: List[torch.Tensor] = []
+ self.unsharded_accumulated_grad = None
self._param_fqn: Optional[str] = None # prefixed from root module
# TODO: Remove this padding logic once DTensor pads the local tensor:
# https://github.com/pytorch/pytorch/issues/113045
@@ -455,6 +457,27 @@ class FSDPParam:
self._global_stride,
)
+ def to_accumulated_grad_if_needed(self) -> None:
+ # Access `_unsharded_param` to bypass the sharded state check since we
+ # prefer to reshard before upcasting the gradient to save memory
+ if (
+ self.reduce_dtype is None
+ or self._unsharded_param.grad is None
+ or self._unsharded_param.grad.dtype == self.reduce_dtype
+ ):
+ return
+ unsharded_grad = self._unsharded_param.grad
+ self._unsharded_param.grad = None
+ self.unsharded_accumulated_grad = unsharded_grad.to(self.reduce_dtype)
+
+ def accumulate_unsharded_grad_if_needed(self) -> None:
+ if (
+ self.unsharded_accumulated_grad is not None
+ and self.unsharded_param.grad is not None
+ ):
+ self.unsharded_accumulated_grad += self.unsharded_param.grad
+ self.unsharded_param.grad = None
+
def alloc_all_gather_outputs(self) -> None:
for tensor in self.all_gather_outputs:
alloc_storage(tensor)
@@ -510,6 +533,12 @@ class FSDPParam:
assert grad is not None, "Expects unsharded_param.grad to not be None"
return self._get_grad_inner_tensor(grad)
+ @property
+ def unsharded_accumulated_grad_data(self) -> torch.Tensor:
+ grad = self.unsharded_accumulated_grad
+ assert grad is not None, "Expects unsharded_accumulated_grad to not be None"
+ return self._get_grad_inner_tensor(grad)
+
def _get_grad_inner_tensor(self, grad: torch.Tensor) -> torch.Tensor:
if self.is_dtensor:
if isinstance(grad, AsyncCollectiveTensor):
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
index 996bd5665f..9036862905 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param_group.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
@@ -316,17 +316,28 @@ class FSDPParamGroup:
def post_backward(self, *unused: Any):
self._training_state = TrainingState.POST_BACKWARD
+ with torch.profiler.record_function("FSDP::post_backward_accumulate"):
+ for fsdp_param in self.fsdp_params:
+ fsdp_param.accumulate_unsharded_grad_if_needed()
with torch.profiler.record_function("FSDP::post_backward_reshard"):
if not self.reduce_grads:
if self.reshard_after_backward:
self.reshard()
+ for fsdp_param in self.fsdp_params:
+ fsdp_param.to_accumulated_grad_if_needed()
return
# Save the autograd-computed gradients before resharding to only
# access the unsharded parameters when their data is present
fsdp_params_with_grad: List[FSDPParam] = []
unsharded_grads: List[torch.Tensor] = []
for fsdp_param in self.fsdp_params:
- if fsdp_param.unsharded_param.grad is not None:
+ # May have an accumulated gradient of the reduce dtype if the
+ # previous backward did not reduce-scatter
+ if fsdp_param.unsharded_accumulated_grad is not None:
+ fsdp_params_with_grad.append(fsdp_param)
+ unsharded_grads.append(fsdp_param.unsharded_accumulated_grad_data)
+ fsdp_param.unsharded_accumulated_grad = None
+ elif fsdp_param.unsharded_param.grad is not None:
fsdp_params_with_grad.append(fsdp_param)
unsharded_grads.append(fsdp_param.unsharded_grad_data)
fsdp_param.unsharded_param.grad = None
|
2.41.0
|
4b7c56517f97c5d813620da9a479417a564e8b4
|
Tue, 30 Apr 2024 02:38:31 +0000
|
[PATCH 0821/1000] [Autotune] Use half the number of warps for reduction tuning on AMD. (#125084)
|
For a reduction kernel with a given block size, I was seeing that the per-thread vectorization bandwidth (16 bytes) was not fully leveraged on AMD GPUs, while this was not a problem on NVIDIA GPUs. It appeared that each thread got less data to process because a whole row was processed by more threads, and the number of elements each thread got was not enough to saturate full vectorization. On AMD GPUs, a warp has 64 lanes compared to 32 on the NVIDIA side, so I'm tuning down the default number of warps (8 for NVIDIA) for AMD. I'm seeing a 10% speedup on an internal benchmark. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125084 Approved by: https://github.com/shunting314
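A worked example of the resulting heuristic, re-implemented here for illustration (the real helpers live in torch._inductor.runtime and read torch.version.hip; the `is_hip` flag below stands in for that check):
```python
def next_power_of_2(n: int) -> int:
    return 1 << (n - 1).bit_length()


def reduction_num_warps(x: int, r: int, is_hip: bool) -> int:
    num_warps = (x * r) // 128
    default_num_warps = 4 if is_hip else 8
    min_num_warps = 1 if is_hip else 2
    return next_power_of_2(min(max(num_warps, min_num_warps), default_num_warps))


# XBLOCK=64, RBLOCK=256 -> 128 candidate warps, clamped to 8 on NVIDIA but 4 on
# AMD, leaving each 64-lane AMD warp more elements per thread for 16-byte loads.
print(reduction_num_warps(64, 256, is_hip=False))  # 8
print(reduction_num_warps(64, 256, is_hip=True))   # 4
```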
|
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index 916ea8b930..c6b871db87 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -1272,7 +1272,11 @@ def triton_config_reduction(size_hints, x, r, num_stages=1, num_warps=None) -> C
cfg = {"XBLOCK": x, "RBLOCK": r}
if num_warps is None:
num_warps = conditional_product(x, r) // 128
- num_warps = next_power_of_2(min(max(num_warps, 2), 8))
+ # On AMD GPU each warp has 64 lanes which is double the size on NV GPU,
+ # therefore using half the number of warps here correspondingly.
+ default_num_warps = 4 if torch.version.hip else 8
+ min_num_warps = 1 if torch.version.hip else 2
+ num_warps = next_power_of_2(min(max(num_warps, min_num_warps), default_num_warps))
check_config(cfg, xnumel=size_hints[0])
assert r <= TRITON_MAX_BLOCK["R"], f"increase TRITON_MAX_BLOCK['r'] to {r}"
return Config(cfg, num_warps=num_warps, num_stages=num_stages)
|
2.41.0
|
1801918e90cb79051c55c83f26192a5479fd792
|
Tue, 30 Apr 2024 03:51:46 +0000
|
[PATCH 0823/1000] Add pooling support for 3d channels last (#116305)
|
Part of multi-PR work to improve #59168. Meant to complete: write native kernels for AvgPool3d, MaxPool3d, AdaptiveAvgPool3d, and AdaptiveMaxPool3d. Pull Request resolved: https://github.com/pytorch/pytorch/pull/116305 Approved by: https://github.com/ezyang
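A short usage sketch of what this enables on CPU: 5D NCDHW input in channels-last-3d layout is consumed directly, and for the global-pooling case the output is expected to keep that layout.
```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 8, 4, 16, 16).to(memory_format=torch.channels_last_3d)
out = F.adaptive_avg_pool3d(x, output_size=(1, 1, 1))
print(out.shape)  # torch.Size([2, 8, 1, 1, 1])
print(out.is_contiguous(memory_format=torch.channels_last_3d))  # expected True with this patch
```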
|
diff --git a/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp b/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp
index 4b818f3107..bbd4f68d40 100644
--- a/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp
+++ b/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp
@@ -317,6 +317,12 @@ Tensor adaptive_avg_pool3d_symint(Tensor const& input, SymIntArrayRef output_siz
// in this case, adaptive pooling is just computing mean over hw
// dimensions, which can be done more efficiently
Tensor out = input.mean({-1, -2, -3}, /* keepdim = */ true);
+ if (input.suggest_memory_format() == at::MemoryFormat::ChannelsLast3d) {
+ // assert ndim == 5, since ndim = 4 doesn't give channels_last
+ const auto n = input.sym_size(0);
+ const auto c = input.sym_size(1);
+ out.as_strided__symint({n, c, 1, 1, 1}, {c, 1, c, c, c});
+ }
return out;
} else {
return _adaptive_avg_pool3d_symint(input, output_size);
diff --git a/aten/src/ATen/native/AdaptivePooling.h b/aten/src/ATen/native/AdaptivePooling.h
index d342d218e4..bb2fda9906 100644
--- a/aten/src/ATen/native/AdaptivePooling.h
+++ b/aten/src/ATen/native/AdaptivePooling.h
@@ -8,15 +8,25 @@
namespace at::native {
-using adaptive_avg_pooling_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
-using adaptive_avg_pooling_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
-DECLARE_DISPATCH(adaptive_avg_pooling_fn, adaptive_avg_pool2d_kernel);
-DECLARE_DISPATCH(adaptive_avg_pooling_backward_fn, adaptive_avg_pool2d_backward_kernel);
-
-using adaptive_max_pooling_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
-using adaptive_max_pooling_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
-DECLARE_DISPATCH(adaptive_max_pooling_fn, adaptive_max_pool2d_kernel);
-DECLARE_DISPATCH(adaptive_max_pooling_backward_fn, adaptive_max_pool2d_backward_kernel);
+using adaptive_avg_pooling2d_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
+using adaptive_avg_pooling2d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
+DECLARE_DISPATCH(adaptive_avg_pooling2d_fn, adaptive_avg_pool2d_kernel);
+DECLARE_DISPATCH(adaptive_avg_pooling2d_backward_fn, adaptive_avg_pool2d_backward_kernel);
+
+using adaptive_max_pooling2d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
+using adaptive_max_pooling2d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
+DECLARE_DISPATCH(adaptive_max_pooling2d_fn, adaptive_max_pool2d_kernel);
+DECLARE_DISPATCH(adaptive_max_pooling2d_backward_fn, adaptive_max_pool2d_backward_kernel);
+
+using adaptive_avg_pooling3d_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
+using adaptive_avg_pooling3d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
+DECLARE_DISPATCH(adaptive_avg_pooling3d_fn, adaptive_avg_pool3d_kernel);
+DECLARE_DISPATCH(adaptive_avg_pooling3d_backward_fn, adaptive_avg_pool3d_backward_kernel);
+
+using adaptive_max_pooling3d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
+using adaptive_max_pooling3d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
+DECLARE_DISPATCH(adaptive_max_pooling3d_fn, adaptive_max_pool3d_kernel);
+DECLARE_DISPATCH(adaptive_max_pooling3d_backward_fn, adaptive_max_pool3d_backward_kernel);
static inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
return (a / b) * c + ((a % b) * c) / b;
diff --git a/aten/src/ATen/native/Pool.h b/aten/src/ATen/native/Pool.h
index 471c953280..07940729fd 100644
--- a/aten/src/ATen/native/Pool.h
+++ b/aten/src/ATen/native/Pool.h
@@ -26,6 +26,19 @@ using avg_pool2d_backward_fn = void(*)(const Tensor& output, const Tensor& input
DECLARE_DISPATCH(avg_pool2d_fn, avg_pool2d_kernel);
DECLARE_DISPATCH(avg_pool2d_backward_fn, avg_pool2d_backward_kernel);
+// average pooling has the same signature for forward and backward
+using avg_pool3d_fn = void(*)(const Tensor& output, const Tensor& input,
+ int64_t kW, int64_t kH, int64_t kD, int64_t dW, int64_t dH, int64_t dD,
+ int64_t padW, int64_t padH, int64_t padD, bool count_include_pad,
+ c10::optional<int64_t> divisor_override);
+using avg_pool3d_backward_fn = void(*)(const Tensor& output, const Tensor& input,
+ int kW, int kH, int kD, int dW, int dH, int dD,
+ int padW, int padH, int padD, bool count_include_pad,
+ c10::optional<int64_t> divisor_override);
+
+DECLARE_DISPATCH(avg_pool3d_fn, avg_pool3d_kernel);
+DECLARE_DISPATCH(avg_pool3d_backward_fn, avg_pool3d_backward_kernel);
+
using max_pool3d_fn = void(*)(Tensor& output, Tensor& indices, const Tensor& input,
int kW, int kH, int kD, int dW, int dH, int dD, int pW, int pH, int pD, int dilationW, int dilationH, int dilationD);
using max_pool3d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
diff --git a/aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp b/aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp
index b3b505ec32..6f96d495f8 100644
--- a/aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp
+++ b/aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp
@@ -15,7 +15,7 @@ namespace at::native {
namespace {
template <typename scalar_t, typename accscalar_t>
-void cpu_adaptive_avg_pool(
+void cpu_adaptive_avg_pool2d(
Tensor& output_,
const Tensor& input_,
IntArrayRef output_size) {
@@ -69,7 +69,7 @@ void cpu_adaptive_avg_pool(
template <typename scalar_t>
typename std::enable_if_t<std::is_same_v<scalar_t, at::opmath_type<scalar_t>>, void>
-cpu_adaptive_avg_pool_channels_last(
+cpu_adaptive_avg_pool2d_channels_last(
Tensor& output_,
const Tensor& input_,
IntArrayRef output_size) {
@@ -156,7 +156,7 @@ cpu_adaptive_avg_pool_channels_last(
template <typename scalar_t>
typename std::enable_if_t<!std::is_same_v<scalar_t, at::opmath_type<scalar_t>>, void>
-cpu_adaptive_avg_pool_channels_last(
+cpu_adaptive_avg_pool2d_channels_last(
Tensor& output_,
const Tensor& input_,
IntArrayRef output_size) {
@@ -255,7 +255,7 @@ cpu_adaptive_avg_pool_channels_last(
}
template <typename scalar_t>
-void cpu_adaptive_avg_pool_backward(
+void cpu_adaptive_avg_pool2d_backward(
Tensor& grad_input_,
const Tensor& grad_output_) {
auto grad_output = grad_output_.contiguous();
@@ -305,7 +305,7 @@ void cpu_adaptive_avg_pool_backward(
}
template <typename scalar_t>
-void cpu_adaptive_avg_pool_backward_channels_last(
+void cpu_adaptive_avg_pool2d_backward_channels_last(
Tensor& grad_input_,
const Tensor& grad_output_) {
auto memory_format = at::MemoryFormat::ChannelsLast;
@@ -373,13 +373,13 @@ void adaptive_avg_pool2d_kernel_impl(
case at::MemoryFormat::Contiguous: {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, input.scalar_type(), "adaptive_avg_pool2d", [&] {
using param_t = at::opmath_type<scalar_t>;
- cpu_adaptive_avg_pool<scalar_t, /*accscalar_t*/param_t>(output, input, output_size);
+ cpu_adaptive_avg_pool2d<scalar_t, /*accscalar_t*/param_t>(output, input, output_size);
});
break;
}
case at::MemoryFormat::ChannelsLast: {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, input.scalar_type(), "adaptive_avg_pool2d_channels_last", [&]{
- cpu_adaptive_avg_pool_channels_last<scalar_t>(output, input, output_size);
+ cpu_adaptive_avg_pool2d_channels_last<scalar_t>(output, input, output_size);
});
break;
}
@@ -394,13 +394,458 @@ void adapative_avg_pool2d_backward_kernel_impl(
switch (grad_output.suggest_memory_format()) {
case at::MemoryFormat::Contiguous: {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, grad_output.scalar_type(), "adaptive_avg_pool2d_backward", [&] {
- cpu_adaptive_avg_pool_backward<scalar_t>(grad_input, grad_output);
+ cpu_adaptive_avg_pool2d_backward<scalar_t>(grad_input, grad_output);
});
break;
}
case at::MemoryFormat::ChannelsLast: {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, grad_output.scalar_type(), "adaptive_avg_pool2d_backward_channels_last", [&]{
- cpu_adaptive_avg_pool_backward_channels_last<scalar_t>(grad_input, grad_output);
+ cpu_adaptive_avg_pool2d_backward_channels_last<scalar_t>(grad_input, grad_output);
+ });
+ break;
+ }
+ default:
+ TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
+ }
+}
+
+
+template <typename scalar_t, typename accscalar_t>
+void cpu_adaptive_avg_pool3d(
+ Tensor& output_,
+ const Tensor& input_,
+ IntArrayRef output_size) {
+ auto input = input_.contiguous();
+ auto output = output_.contiguous();
+
+ auto input_data = input.data_ptr<scalar_t>();
+ auto output_data = output.data_ptr<scalar_t>();
+
+ int64_t ndim = input.ndimension();
+ // treat batch size and channels as one dimension
+ int64_t channels = ndim == 4 ? input.size(0) : input.size(0) * input.size(1);
+ int64_t input_depth = input.size(-3);
+ int64_t input_height = input.size(-2);
+ int64_t input_width = input.size(-1);
+ int64_t output_depth = output_size[0];
+ int64_t output_height = output_size[1];
+ int64_t output_width = output_size[2];
+
+ // parallel on dim of N, C
+ at::parallel_for(0, channels, 0, [&](int64_t begin, int64_t end) {
+ for (const auto c : c10::irange(begin, end)) {
+ scalar_t* input_ptr = input_data + c * input_depth * input_height * input_width;
+ scalar_t* output_ptr = output_data + c * output_depth * output_height * output_width;
+
+ for (const auto od : c10::irange(output_depth)) {
+ int64_t id0 = start_index(od, output_depth, input_depth);
+ int64_t id1 = end_index(od, output_depth, input_depth);
+ int64_t kd = id1 - id0;
+
+ for (const auto oh : c10::irange(output_height)) {
+ int64_t ih0 = start_index(oh, output_height, input_height);
+ int64_t ih1 = end_index(oh, output_height, input_height);
+ int64_t kh = ih1 - ih0;
+
+ for (const auto ow : c10::irange(output_width)) {
+ int64_t iw0 = start_index(ow, output_width, input_width);
+ int64_t iw1 = end_index(ow, output_width, input_width);
+ int64_t kw = iw1 - iw0;
+
+ // compute local average
+ accscalar_t sum = 0;
+ for (const auto id : c10::irange(id0, id1)) {
+ for (const auto ih : c10::irange(ih0, ih1)) {
+ for (const auto iw : c10::irange(iw0, iw1)) {
+ sum += accscalar_t(input_ptr[id * input_height * input_width + ih * input_width + iw]);
+ }
+ }
+ }
+ output_ptr[od * output_height * output_width + oh * output_width + ow] = scalar_t(sum / kd / kh / kw);
+ }
+ }
+ }
+ }
+ });
+
+ if (!output_.is_contiguous()) {
+ output_.copy_(output);
+ }
+}
+
+
+template <typename scalar_t>
+typename std::enable_if_t<std::is_same_v<scalar_t, at::opmath_type<scalar_t>>, void>
+cpu_adaptive_avg_pool3d_channels_last(
+ Tensor& output_,
+ const Tensor& input_,
+ IntArrayRef output_size) {
+ auto memory_format = at::MemoryFormat::ChannelsLast3d;
+ auto input = input_.contiguous(memory_format);
+ auto output = output_.contiguous(memory_format);
+
+ auto input_data = input.data_ptr<scalar_t>();
+ auto output_data = output.data_ptr<scalar_t>();
+
+ int64_t nbatch = input.size(0);
+ int64_t channels = input.size(1);
+ int64_t input_depth = input.size(2);
+ int64_t input_height = input.size(3);
+ int64_t input_width = input.size(4);
+ int64_t output_depth = output_size[0];
+ int64_t output_height = output_size[1];
+ int64_t output_width = output_size[2];
+
+ using Vec = vec::Vectorized<scalar_t>;
+  // parallel on dim N, D, H, W
+ at::parallel_for(0, nbatch * output_depth * output_height * output_width, 0, [&](int64_t begin, int64_t end) {
+ int64_t n = 0;
+ int64_t od = 0;
+ int64_t oh = 0;
+ int64_t ow = 0;
+ data_index_init(begin, n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+
+ for (const auto i : c10::irange(begin, end)) {
+ int64_t id0 = start_index(od, output_depth, input_depth);
+ int64_t id1 = end_index(od, output_depth, input_depth);
+ int64_t kd = id1 - id0;
+
+ int64_t ih0 = start_index(oh, output_height, input_height);
+ int64_t ih1 = end_index(oh, output_height, input_height);
+ int64_t kh = ih1 - ih0;
+
+ int64_t iw0 = start_index(ow, output_width, input_width);
+ int64_t iw1 = end_index(ow, output_width, input_width);
+ int64_t kw = iw1 - iw0;
+
+ scalar_t* out = output_data + i * channels;
+ int64_t size = channels;
+
+      // Note: For ordinary usage scenarios, each out lane should
+      // fit in the L1 cache; otherwise consider blocking on dim C.
+ // Pass I: zero the out lane
+ int64_t d1 = 0;
+ for (; d1 < size - (size % Vec::size()); d1 += Vec::size()) {
+ Vec out_vec = Vec(scalar_t(0));
+ out_vec.store(out + d1);
+ }
+ for (; d1 < size; d1++) {
+ out[d1] = scalar_t(0);
+ }
+ // Pass II: compute local sum
+ for (const auto id : c10::irange(id0, id1)) {
+ for (const auto ih : c10::irange(ih0, ih1)) {
+ for (const auto iw : c10::irange(iw0, iw1)) {
+ scalar_t* in = input_data + n * input_depth * input_height * input_width * channels +
+ id * input_height * input_width * channels + ih * input_width * channels + iw * channels;
+
+ int64_t d2 = 0;
+ for (; d2 < size - (size % Vec::size()); d2 += Vec::size()) {
+ Vec out_vec = Vec::loadu(out + d2) + Vec::loadu(in + d2);
+ out_vec.store(out + d2);
+ }
+ for (; d2 < size; d2++) {
+ out[d2] += in[d2];
+ }
+ }
+ }
+ }
+ // Pass III: compute local average
+ int64_t d3 = 0;
+ for (; d3 < size - (size % Vec::size()); d3 += Vec::size()) {
+ Vec out_vec = Vec::loadu(out + d3) / Vec(scalar_t(kd * kh * kw));
+ out_vec.store(out + d3);
+ }
+ for (; d3 < size; d3++) {
+ out[d3] = out[d3] / kd / kh / kw;
+ }
+
+ // move on to next output index
+ data_index_step(n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+ }
+ });
+
+ if (!output_.is_contiguous(memory_format)) {
+ output_.copy_(output);
+ }
+}
+
+template <typename scalar_t>
+typename std::enable_if_t<!std::is_same_v<scalar_t, at::opmath_type<scalar_t>>, void>
+cpu_adaptive_avg_pool3d_channels_last(
+ Tensor& output_,
+ const Tensor& input_,
+ IntArrayRef output_size) {
+ auto memory_format = at::MemoryFormat::ChannelsLast3d;
+ auto input = input_.contiguous(memory_format);
+ auto output = output_.contiguous(memory_format);
+
+ auto input_data = input.data_ptr<scalar_t>();
+ auto output_data = output.data_ptr<scalar_t>();
+
+ int64_t nbatch = input.size(0);
+ int64_t channels = input.size(1);
+ int64_t input_depth = input.size(2);
+ int64_t input_height = input.size(3);
+ int64_t input_width = input.size(4);
+ int64_t output_depth = output_size[0];
+ int64_t output_height = output_size[1];
+ int64_t output_width = output_size[2];
+
+ using bVec = vec::Vectorized<scalar_t>;
+ using fVec = vec::Vectorized<float>;
+  // parallel on dim N, D, H, W
+ at::parallel_for(0, nbatch * output_depth * output_height * output_width, 0, [&](int64_t begin, int64_t end) {
+ int64_t n = 0;
+ int64_t oh = 0;
+ int64_t ow = 0;
+ int64_t od = 0;
+ data_index_init(begin, n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+
+ // temp buffer for sum, use float as accumulation type
+ // can't reuse output buffer to store sum since it is BFloat16/Half
+ auto sum_arr = std::make_unique<float []>(channels);
+ float* sum = sum_arr.get();
+
+ for (const auto i : c10::irange(begin, end)) {
+ int64_t id0 = start_index(od, output_depth, input_depth);
+ int64_t id1 = end_index(od, output_depth, input_depth);
+ int64_t kd = id1 - id0;
+
+ int64_t ih0 = start_index(oh, output_height, input_height);
+ int64_t ih1 = end_index(oh, output_height, input_height);
+ int64_t kh = ih1 - ih0;
+
+ int64_t iw0 = start_index(ow, output_width, input_width);
+ int64_t iw1 = end_index(ow, output_width, input_width);
+ int64_t kw = iw1 - iw0;
+
+ scalar_t* out = output_data + i * channels;
+ int64_t size = channels;
+
+ // Pass I: zero the out lane
+ int64_t d1 = 0;
+ for (; d1 < size - (size % fVec::size()); d1 += fVec::size()) {
+ fVec sum_fvec = fVec(float(0));
+ sum_fvec.store(sum + d1);
+ }
+ for (; d1 < size; d1++) {
+ sum[d1] = float(0);
+ }
+ // Pass II: compute local sum
+ for (const auto id : c10::irange(id0, id1)) {
+ for (const auto ih : c10::irange(ih0, ih1)) {
+ for (const auto iw : c10::irange(iw0, iw1)) {
+ scalar_t* in = input_data + n * input_depth * input_height * input_width * channels +
+ id * input_height * input_width * channels +
+ ih * input_width * channels + iw * channels;
+
+ int64_t d2 = 0;
+ for (; d2 < size - (size % bVec::size()); d2 += bVec::size()) {
+ bVec data_bvec = bVec::loadu(in + d2);
+ fVec data_fvec0, data_fvec1;
+ std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec);
+
+ fVec sum_fvec0 = fVec::loadu(sum + d2) + data_fvec0;
+ fVec sum_fvec1 = fVec::loadu(sum + d2 + fVec::size()) + data_fvec1;
+ sum_fvec0.store(sum + d2);
+ sum_fvec1.store(sum + d2 + fVec::size());
+ }
+ for (; d2 < size; d2++) {
+ sum[d2] += float(in[d2]);
+ }
+ }
+ }
+ }
+ // Pass III: compute local average
+ int64_t d3 = 0;
+ for (; d3 < size - (size % bVec::size()); d3 += bVec::size()) {
+ fVec out_fvec0 = fVec::loadu(sum + d3) / fVec(float(kd * kh * kw));
+ fVec out_fvec1 = fVec::loadu(sum + d3 + fVec::size()) / fVec(float(kd * kh * kw));
+
+ bVec out_bvec = convert_from_float<scalar_t>(out_fvec0, out_fvec1);
+ out_bvec.store(out + d3);
+ }
+ for (; d3 < size; d3++) {
+ out[d3] = scalar_t(sum[d3] / kd / kh / kw);
+ }
+
+ // move on to next output index
+ data_index_step(n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+ }
+ });
+
+ if (!output_.is_contiguous(memory_format)) {
+ output_.copy_(output);
+ }
+}
+
+template <typename scalar_t>
+void cpu_adaptive_avg_pool3d_backward(
+ Tensor& grad_input_,
+ const Tensor& grad_output_) {
+ auto grad_output = grad_output_.contiguous();
+ auto grad_input = grad_input_.contiguous();
+
+ auto grad_output_data = grad_output.data_ptr<scalar_t>();
+ auto grad_input_data = grad_input.mutable_data_ptr<scalar_t>();
+
+ int64_t ndim = grad_output.ndimension();
+ // treat batch size and channels as one dimension
+ int64_t channels = ndim == 4 ? grad_output.size(0) : grad_output.size(0) * grad_output.size(1);
+ int64_t input_depth = grad_input.size(-3);
+ int64_t input_height = grad_input.size(-2);
+ int64_t input_width = grad_input.size(-1);
+ int64_t output_depth = grad_output.size(-3);
+ int64_t output_height = grad_output.size(-2);
+ int64_t output_width = grad_output.size(-1);
+
+ // parallel on dim of N, C
+ at::parallel_for(0, channels, 0, [&](int64_t begin, int64_t end) {
+ for (const auto c : c10::irange(begin, end)) {
+ scalar_t* grad_input_ptr = grad_input_data + c * input_depth * input_height * input_width;
+ scalar_t* grad_output_ptr = grad_output_data + c * output_depth * output_height * output_width;
+
+ for (const auto od : c10::irange(output_depth)) {
+ int64_t id0 = start_index(od, output_depth, input_depth);
+ int64_t id1 = end_index(od, output_depth, input_depth);
+ int64_t kd = id1 - id0;
+ for (const auto oh : c10::irange(output_height)) {
+ int64_t ih0 = start_index(oh, output_height, input_height);
+ int64_t ih1 = end_index(oh, output_height, input_height);
+ int64_t kh = ih1 - ih0;
+
+ for (const auto ow : c10::irange(output_width)) {
+ int64_t iw0 = start_index(ow, output_width, input_width);
+ int64_t iw1 = end_index(ow, output_width, input_width);
+ int64_t kw = iw1 - iw0;
+
+ scalar_t grad_delta = grad_output_ptr[od * output_width * output_height + oh * output_width + ow] / kd / kh / kw;
+ for (const auto id : c10::irange(id0, id1)) {
+ for (const auto ih : c10::irange(ih0, ih1)) {
+ for (const auto iw : c10::irange(iw0, iw1)) {
+ grad_input_ptr[id * input_height * input_width + ih * input_width + iw] += grad_delta;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ });
+
+ if (!grad_input_.is_contiguous()) {
+ grad_input_.copy_(grad_input);
+ }
+}
+
+template <typename scalar_t>
+void cpu_adaptive_avg_pool3d_backward_channels_last(
+ Tensor& grad_input_,
+ const Tensor& grad_output_) {
+ auto memory_format = at::MemoryFormat::ChannelsLast3d;
+ auto grad_input = grad_input_.contiguous(memory_format);
+ auto grad_output = grad_output_.contiguous(memory_format);
+
+ auto grad_input_data = grad_input.mutable_data_ptr<scalar_t>();
+ auto grad_output_data = grad_output.data_ptr<scalar_t>();
+
+ int64_t nbatch = grad_input.size(0);
+ int64_t channels = grad_input.size(1);
+ int64_t input_depth = grad_input.size(2);
+ int64_t input_height = grad_input.size(3);
+ int64_t input_width = grad_input.size(4);
+ int64_t output_depth = grad_output.size(2);
+ int64_t output_height = grad_output.size(3);
+ int64_t output_width = grad_output.size(4);
+
+ using Vec = vec::Vectorized<scalar_t>;
+ // parallel on dim N
+ at::parallel_for(0, nbatch, 0, [&](int64_t begin, int64_t end) {
+ for (const auto n : c10::irange(begin, end)) {
+ scalar_t* grad_input_ptr = grad_input_data + n * input_depth * input_height * input_width * channels;
+ scalar_t* grad_output_ptr = grad_output_data + n * output_depth * output_height * output_width * channels;
+
+ for (const auto od : c10::irange(output_depth)) {
+ int64_t id0 = start_index(od, output_depth, input_depth);
+ int64_t id1 = end_index(od, output_depth, input_depth);
+ int64_t kd = id1 - id0;
+ for (const auto oh : c10::irange(output_height)) {
+ int64_t ih0 = start_index(oh, output_height, input_height);
+ int64_t ih1 = end_index(oh, output_height, input_height);
+ int64_t kh = ih1 - ih0;
+
+ for (const auto ow : c10::irange(output_width)) {
+ int64_t iw0 = start_index(ow, output_width, input_width);
+ int64_t iw1 = end_index(ow, output_width, input_width);
+ int64_t kw = iw1 - iw0;
+
+            scalar_t* gout = grad_output_ptr + od * output_height * output_width * channels + oh * output_width * channels + ow * channels;
+ int64_t size = channels;
+ for (const auto id : c10::irange(id0, id1)) {
+ for (const auto ih : c10::irange(ih0, ih1)) {
+ for (const auto iw : c10::irange(iw0, iw1)) {
+ scalar_t* gin = grad_input_ptr + id * input_width * input_height * channels + ih * input_width * channels + iw * channels;
+
+ int64_t d = 0;
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
+ Vec gin_vec = Vec::loadu(gin + d) + Vec::loadu(gout + d) / Vec(scalar_t(kd * kh * kw));
+ gin_vec.store(gin + d);
+ }
+ for (; d < size; d++) {
+ gin[d] += gout[d] / kd / kh / kw;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ });
+
+ if (!grad_input_.is_contiguous(memory_format)) {
+ grad_input_.copy_(grad_input);
+ }
+}
+
+
+void adaptive_avg_pool3d_kernel_impl(
+ Tensor& output,
+ const Tensor& input,
+ IntArrayRef output_size) {
+ switch (input.suggest_memory_format()) {
+ case at::MemoryFormat::Contiguous: {
+ AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, input.scalar_type(), "adaptive_avg_pool3d", [&] {
+ using param_t = at::opmath_type<scalar_t>;
+ cpu_adaptive_avg_pool3d<scalar_t, /*accscalar_t*/param_t>(output, input, output_size);
+ });
+ break;
+ }
+ case at::MemoryFormat::ChannelsLast3d: {
+ AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, input.scalar_type(), "adaptive_avg_pool3d_channels_last", [&]{
+ cpu_adaptive_avg_pool3d_channels_last<scalar_t>(output, input, output_size);
+ });
+ break;
+ }
+ default:
+      TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast3d, Contiguous");
+ }
+}
+
+void adapative_avg_pool3d_backward_kernel_impl(
+ Tensor& grad_input,
+ const Tensor& grad_output) {
+ switch (grad_output.suggest_memory_format()) {
+ case at::MemoryFormat::Contiguous: {
+ AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, grad_output.scalar_type(), "adaptive_avg_pool3d_backward", [&] {
+ cpu_adaptive_avg_pool3d_backward<scalar_t>(grad_input, grad_output);
+ });
+ break;
+ }
+ case at::MemoryFormat::ChannelsLast3d: {
+ AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, grad_output.scalar_type(), "adaptive_avg_pool3d_backward_channels_last", [&]{
+ cpu_adaptive_avg_pool3d_backward_channels_last<scalar_t>(grad_input, grad_output);
});
break;
}
@@ -413,5 +858,7 @@ void adapative_avg_pool2d_backward_kernel_impl(
REGISTER_DISPATCH(adaptive_avg_pool2d_kernel, &adaptive_avg_pool2d_kernel_impl);
REGISTER_DISPATCH(adaptive_avg_pool2d_backward_kernel, &adapative_avg_pool2d_backward_kernel_impl);
+REGISTER_DISPATCH(adaptive_avg_pool3d_kernel, &adaptive_avg_pool3d_kernel_impl);
+REGISTER_DISPATCH(adaptive_avg_pool3d_backward_kernel, &adapative_avg_pool3d_backward_kernel_impl);
} // at::native
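
For readers following the new 3d kernels above: the output-to-input window mapping relies on the usual adaptive-pooling floor/ceil index helpers, and each window is averaged by its actual volume kd * kh * kw. Below is a minimal standalone C++ sketch of that logic (no ATen; `start_index`, `end_index` and `adaptive_avg_pool3d_ref` are illustrative re-implementations under the assumption that the helpers compute floor(o * isize / osize) and ceil((o + 1) * isize / osize); they are not code from this patch):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Assumed window helpers: start = floor(o * isize / osize), end = ceil((o + 1) * isize / osize).
static int64_t start_index(int64_t o, int64_t osize, int64_t isize) {
  return (o * isize) / osize;
}
static int64_t end_index(int64_t o, int64_t osize, int64_t isize) {
  return ((o + 1) * isize + osize - 1) / osize;
}

// Scalar 3d adaptive average pooling over a single (D, H, W) channel plane.
std::vector<float> adaptive_avg_pool3d_ref(
    const std::vector<float>& in, int64_t D, int64_t H, int64_t W,
    int64_t OD, int64_t OH, int64_t OW) {
  std::vector<float> out(OD * OH * OW, 0.f);
  for (int64_t od = 0; od < OD; od++) {
    int64_t id0 = start_index(od, OD, D), id1 = end_index(od, OD, D);
    for (int64_t oh = 0; oh < OH; oh++) {
      int64_t ih0 = start_index(oh, OH, H), ih1 = end_index(oh, OH, H);
      for (int64_t ow = 0; ow < OW; ow++) {
        int64_t iw0 = start_index(ow, OW, W), iw1 = end_index(ow, OW, W);
        float sum = 0.f;
        for (int64_t id = id0; id < id1; id++)
          for (int64_t ih = ih0; ih < ih1; ih++)
            for (int64_t iw = iw0; iw < iw1; iw++)
              sum += in[id * H * W + ih * W + iw];
        // divide by the actual window volume, mirroring the kd/kh/kw division above
        out[od * OH * OW + oh * OW + ow] =
            sum / float((id1 - id0) * (ih1 - ih0) * (iw1 - iw0));
      }
    }
  }
  return out;
}

int main() {
  // A 4x4x4 ramp 0..63 pooled down to 2x2x2.
  std::vector<float> in(64);
  for (int i = 0; i < 64; i++) in[i] = float(i);
  auto out = adaptive_avg_pool3d_ref(in, 4, 4, 4, 2, 2, 2);
  for (float v : out) std::cout << v << " ";  // expected: 10.5 12.5 18.5 20.5 42.5 44.5 50.5 52.5
  std::cout << "\n";
}
```

Pooling the 4x4x4 ramp to 2x2x2 this way yields 10.5, 12.5, ..., 52.5, which doubles as a quick spot check for the contiguous kernel path.
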
diff --git a/aten/src/ATen/native/cpu/AdaptiveMaxPoolKernel.cpp b/aten/src/ATen/native/cpu/AdaptiveMaxPoolKernel.cpp
index 536bf3ece3..2306fd05d1 100644
--- a/aten/src/ATen/native/cpu/AdaptiveMaxPoolKernel.cpp
+++ b/aten/src/ATen/native/cpu/AdaptiveMaxPoolKernel.cpp
@@ -15,7 +15,7 @@ namespace at::native {
namespace {
template <typename scalar_t, typename accscalar_t>
-void cpu_adaptive_max_pool(
+void cpu_adaptive_max_pool2d(
const Tensor& output_,
const Tensor& indices_,
const Tensor& input_,
@@ -83,13 +83,13 @@ void cpu_adaptive_max_pool(
template <typename scalar_t>
typename std::enable_if_t<std::is_same_v<scalar_t, at::opmath_type<scalar_t>>, void>
-cpu_adaptive_max_pool_channels_last(
+cpu_adaptive_max_pool2d_channels_last(
const Tensor& output_,
const Tensor& indices_,
const Tensor& input_,
IntArrayRef output_size) {
TORCH_CHECK(input_.ndimension() == 4,
- "adaptive max pooling with channels last format supports tensors with 4 dims");
+ "2d adaptive max pooling with channels last format supports tensors with 4 dims");
auto memory_format = at::MemoryFormat::ChannelsLast;
auto input = input_.contiguous(memory_format);
auto output = output_.contiguous(memory_format);
@@ -200,13 +200,13 @@ cpu_adaptive_max_pool_channels_last(
template <typename scalar_t>
typename std::enable_if_t<!std::is_same_v<scalar_t, at::opmath_type<scalar_t>>, void>
-cpu_adaptive_max_pool_channels_last(
+cpu_adaptive_max_pool2d_channels_last(
const Tensor& output_,
const Tensor& indices_,
const Tensor& input_,
IntArrayRef output_size) {
TORCH_CHECK(input_.ndimension() == 4,
- "adaptive max pooling with channels last format supports tensors with 4 dims");
+ "2d adaptive max pooling with channels last format supports tensors with 4 dims");
auto memory_format = at::MemoryFormat::ChannelsLast;
auto input = input_.contiguous(memory_format);
auto output = output_.contiguous(memory_format);
@@ -340,7 +340,7 @@ cpu_adaptive_max_pool_channels_last(
}
template <typename scalar_t>
-void cpu_adaptive_max_pool_backward(
+void cpu_adaptive_max_pool2d_backward(
const Tensor& grad_input_,
const Tensor& grad_output_,
const Tensor& indices_) {
@@ -386,12 +386,12 @@ void cpu_adaptive_max_pool_backward(
}
template <typename scalar_t>
-void cpu_adaptive_max_pool_backward_channels_last(
+void cpu_adaptive_max_pool2d_backward_channels_last(
const Tensor& grad_input_,
const Tensor& grad_output_,
const Tensor& indices_) {
TORCH_CHECK(grad_output_.ndimension() == 4,
- "adaptive max pooling backward with channels last format supports tensors with 4 dims.");
+ "2d adaptive max pooling backward with channels last format supports tensors with 4 dims.");
auto memory_format = at::MemoryFormat::ChannelsLast;
auto grad_input = grad_input_.contiguous(memory_format);
auto grad_output = grad_output_.contiguous(memory_format);
@@ -443,13 +443,13 @@ void adaptive_max_pool2d_kernel_impl(
case at::MemoryFormat::Contiguous: {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, input.scalar_type(), "adaptive_max_pool2d", [&] {
using param_t = at::opmath_type<scalar_t>;
- cpu_adaptive_max_pool<scalar_t, /*accscalar_t*/param_t>(output, indices, input, output_size);
+ cpu_adaptive_max_pool2d<scalar_t, /*accscalar_t*/param_t>(output, indices, input, output_size);
});
break;
}
case at::MemoryFormat::ChannelsLast: {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, input.scalar_type(), "adaptive_max_pool2d_channels_last", [&]{
- cpu_adaptive_max_pool_channels_last<scalar_t>(output, indices, input, output_size);
+ cpu_adaptive_max_pool2d_channels_last<scalar_t>(output, indices, input, output_size);
});
break;
}
@@ -466,13 +466,512 @@ void adaptive_max_pool2d_backward_kernel_impl(
switch (grad_input.suggest_memory_format()) {
case at::MemoryFormat::Contiguous: {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, grad_output.scalar_type(), "adaptive_max_pool2d_backward", [&] {
- cpu_adaptive_max_pool_backward<scalar_t>(grad_input, grad_output, indices);
+ cpu_adaptive_max_pool2d_backward<scalar_t>(grad_input, grad_output, indices);
});
break;
}
case at::MemoryFormat::ChannelsLast: {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, grad_output.scalar_type(), "adaptive_max_pool2d_backward_channels_last", [&]{
- cpu_adaptive_max_pool_backward_channels_last<scalar_t>(grad_input, grad_output, indices);
+ cpu_adaptive_max_pool2d_backward_channels_last<scalar_t>(grad_input, grad_output, indices);
+ });
+ break;
+ }
+ default:
+ TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
+ }
+}
+
+template <typename scalar_t, typename accscalar_t>
+void cpu_adaptive_max_pool3d(
+ const Tensor& output_,
+ const Tensor& indices_,
+ const Tensor& input_,
+ IntArrayRef output_size) {
+ auto input = input_.contiguous();
+ auto output = output_.contiguous();
+ auto indices = indices_.contiguous();
+
+ auto input_data = input.data_ptr<scalar_t>();
+ auto output_data = output.data_ptr<scalar_t>();
+ auto indices_data = indices.data_ptr<int64_t>();
+
+ int64_t ndim = input.ndimension();
+ // treat batch size and channels as one dimension
+ int64_t channels = ndim == 4 ? input.size(0) : input.size(0) * input.size(1);
+ int64_t input_depth = input.size(-3);
+ int64_t input_height = input.size(-2);
+ int64_t input_width = input.size(-1);
+ int64_t output_depth = output_size[0];
+ int64_t output_height = output_size[1];
+ int64_t output_width = output_size[2];
+
+ // parallel on dim of N, C
+ at::parallel_for(0, channels, 0, [&](int64_t begin, int64_t end) {
+ for (const auto c : c10::irange(begin, end)) {
+ scalar_t* input_ptr = input_data + c * input_depth * input_height * input_width;
+ scalar_t* output_ptr = output_data + c * output_depth * output_height * output_width;
+ int64_t* indices_ptr = indices_data + c * output_depth * output_height * output_width;
+
+ for (const auto od : c10::irange(output_depth)) {
+ int64_t id0 = start_index(od, output_depth, input_depth);
+ int64_t id1 = end_index(od, output_depth, input_depth);
+ for (const auto oh : c10::irange(output_height)) {
+ int64_t ih0 = start_index(oh, output_height, input_height);
+ int64_t ih1 = end_index(oh, output_height, input_height);
+
+ for (const auto ow : c10::irange(output_width)) {
+ int64_t iw0 = start_index(ow, output_width, input_width);
+ int64_t iw1 = end_index(ow, output_width, input_width);
+
+ // compute local max
+ int64_t maxindex = id0 * input_height * input_width + ih0 * input_width + iw0;
+ accscalar_t maxval = -std::numeric_limits<accscalar_t>::infinity();
+ for (int64_t id = id0; id < id1; id ++) {
+ for (int64_t ih = ih0; ih < ih1; ih ++) {
+ for (int64_t iw = iw0; iw < iw1; iw ++) {
+ int64_t index = id * input_height * input_width + ih * input_width + iw;
+ scalar_t val = input_ptr[index];
+ if ((val > maxval) || std::isnan(val)) {
+ maxval = val;
+ maxindex = index;
+ }
+ }
+ }
+ }
+
+ // set output to local max and store location of max
+ output_ptr[od * output_height * output_width + oh * output_width + ow] = maxval;
+            indices_ptr[od * output_height * output_width + oh * output_width + ow] = maxindex;
+ }
+ }
+ }
+ }
+ });
+
+ if (!output_.is_contiguous()) {
+ output_.copy_(output);
+ }
+ if (!indices_.is_contiguous()) {
+ indices_.copy_(indices);
+ }
+}
+
+template <typename scalar_t>
+typename std::enable_if_t<std::is_same_v<scalar_t, at::opmath_type<scalar_t>>, void>
+cpu_adaptive_max_pool3d_channels_last(
+ const Tensor& output_,
+ const Tensor& indices_,
+ const Tensor& input_,
+ IntArrayRef output_size) {
+ TORCH_CHECK(input_.ndimension() == 5,
+ "3d adaptive max pooling with channels last format supports tensors with 5 dims");
+ auto memory_format = at::MemoryFormat::ChannelsLast3d;
+ auto input = input_.contiguous(memory_format);
+ auto output = output_.contiguous(memory_format);
+ auto indices = indices_.contiguous(memory_format);
+
+ auto input_data = input.data_ptr<scalar_t>();
+ auto output_data = output.data_ptr<scalar_t>();
+ auto indices_data = indices.data_ptr<int64_t>();
+
+ int64_t nbatch = input.size(0);
+ int64_t channels = input.size(1);
+ int64_t input_depth = input.size(2);
+ int64_t input_height = input.size(3);
+ int64_t input_width = input.size(4);
+ int64_t output_depth = output_size[0];
+ int64_t output_height = output_size[1];
+ int64_t output_width = output_size[2];
+
+ using Vec = vec::Vectorized<scalar_t>;
+ using integer_t = vec::int_same_size_t<scalar_t>;
+ using iVec = vec::Vectorized<integer_t>;
+  // for the convenience of vectorization, use an integer type of the same size as scalar_t,
+  // e.g. int32_t for float, int64_t for double
+  // need to make sure the flattened index doesn't overflow
+  TORCH_CHECK(input_depth * input_height * input_width <= std::numeric_limits<integer_t>::max());
+
+  // parallel on dim of N, D, H, W
+ at::parallel_for(0, nbatch * output_depth * output_height * output_width, 0, [&](int64_t begin, int64_t end) {
+ int64_t n = 0;
+ int64_t od = 0;
+ int64_t oh = 0;
+ int64_t ow = 0;
+ data_index_init(begin, n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+
+ int64_t size = channels;
+ int64_t len = size - (size % Vec::size());
+ // temp buffer holding index with integer_t
+ auto index_buffer = std::make_unique<integer_t []>(len);
+
+ for (const auto i : c10::irange(begin, end)) {
+ int64_t id0 = start_index(od, output_depth, input_depth);
+ int64_t id1 = end_index(od, output_depth, input_depth);
+
+ int64_t ih0 = start_index(oh, output_height, input_height);
+ int64_t ih1 = end_index(oh, output_height, input_height);
+
+ int64_t iw0 = start_index(ow, output_width, input_width);
+ int64_t iw1 = end_index(ow, output_width, input_width);
+
+ scalar_t* out = output_data + i * channels;
+ int64_t* ind = indices_data + i * channels;
+
+ // Pass I: init out lane
+ iVec index0_vec = iVec(id0 * input_height * input_width + ih0 * input_width + iw0);
+ Vec out_vec = Vec(-std::numeric_limits<scalar_t>::infinity());
+ int64_t d1 = 0;
+ for (; d1 < len; d1 += Vec::size()) {
+ index0_vec.store(index_buffer.get() + d1);
+ out_vec.store(out + d1);
+ }
+ for (; d1 < size; d1++) {
+ ind[d1] = id0 * input_height * input_width + ih0 * input_width + iw0;
+ out[d1] = -std::numeric_limits<scalar_t>::infinity();
+ }
+ // Pass II: compute local max
+ for (int64_t id = id0; id < id1; id ++) {
+ for (int64_t ih = ih0; ih < ih1; ih ++) {
+ for (int64_t iw = iw0; iw < iw1; iw ++) {
+ scalar_t* in = input_data + n * input_depth * input_height * input_width * channels +
+ id * input_height * input_width * channels + ih * input_width * channels + iw * channels;
+
+ int64_t d2 = 0;
+ for (; d2 < len; d2 += Vec::size()) {
+ iVec index_vec = iVec(id * input_height * input_width + ih * input_width + iw);
+ Vec val_vec = Vec::loadu(in + d2);
+ iVec maxindex_vec = iVec::loadu(index_buffer.get() + d2);
+ Vec maxval_vec = Vec::loadu(out + d2);
+
+ // true = all ones, false = all zeros
+ Vec mask = (val_vec > maxval_vec) | val_vec.isnan();
+ iVec imask = vec::cast<integer_t>(mask);
+ Vec out_vec = Vec::blendv(maxval_vec, val_vec, mask);
+ iVec ind_vec = iVec::blendv(maxindex_vec, index_vec, imask);
+
+ out_vec.store(out + d2);
+ ind_vec.store(index_buffer.get() + d2);
+ }
+ for (; d2 < size; d2++) {
+ int64_t index = id * input_height * input_width + ih * input_width + iw;
+ scalar_t val = in[d2];
+ int64_t maxindex = ind[d2];
+ scalar_t maxval = out[d2];
+
+ bool mask = (val > maxval) || std::isnan(val);
+ out[d2] = mask ? val : maxval;
+ ind[d2] = mask ? index : maxindex;
+ }
+ }
+ }
+ }
+      // convert index data type
+ vec::convert<integer_t, int64_t>(index_buffer.get(), ind, len);
+
+ // move on to next output index
+ data_index_step(n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+ }
+ });
+
+ if (!output_.is_contiguous(memory_format)) {
+ output_.copy_(output);
+ }
+ if (!indices_.is_contiguous(memory_format)) {
+ indices_.copy_(indices);
+ }
+}
+
+template <typename scalar_t>
+typename std::enable_if_t<!std::is_same_v<scalar_t, at::opmath_type<scalar_t>>, void>
+cpu_adaptive_max_pool3d_channels_last(
+ const Tensor& output_,
+ const Tensor& indices_,
+ const Tensor& input_,
+ IntArrayRef output_size) {
+ TORCH_CHECK(input_.ndimension() == 5,
+ "3d adaptive max pooling with channels last format supports tensors with 5 dims");
+ auto memory_format = at::MemoryFormat::ChannelsLast3d;
+ auto input = input_.contiguous(memory_format);
+ auto output = output_.contiguous(memory_format);
+ auto indices = indices_.contiguous(memory_format);
+
+ auto input_data = input.data_ptr<BFloat16>();
+ auto output_data = output.data_ptr<BFloat16>();
+ auto indices_data = indices.data_ptr<int64_t>();
+
+ int64_t nbatch = input.size(0);
+ int64_t channels = input.size(1);
+ int64_t input_depth = input.size(2);
+ int64_t input_height = input.size(3);
+ int64_t input_width = input.size(4);
+ int64_t output_depth = output_size[0];
+ int64_t output_height = output_size[1];
+ int64_t output_width = output_size[2];
+
+ using bVec = vec::Vectorized<BFloat16>;
+ using fVec = vec::Vectorized<float>;
+ using iVec = vec::Vectorized<int32_t>;
+  // need to make sure the flattened index doesn't overflow
+  TORCH_CHECK(input_depth * input_height * input_width <= std::numeric_limits<int32_t>::max());
+
+  // parallel on dim of N, D, H, W
+ at::parallel_for(0, nbatch * output_depth * output_height * output_width, 0, [&](int64_t begin, int64_t end) {
+ int64_t n = 0;
+ int64_t od = 0;
+ int64_t oh = 0;
+ int64_t ow = 0;
+ data_index_init(begin, n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+
+ int64_t size = channels;
+ int64_t len = size - (size % bVec::size());
+ // temp buffer holding index with integer_t
+ auto index_buffer = std::make_unique<int32_t []>(len);
+ // temp buffer holding max value with float
+ auto max_arr = std::make_unique<float []>(size);
+ float* max = max_arr.get();
+
+ for (const auto i : c10::irange(begin, end)) {
+ int64_t id0 = start_index(od, output_depth, input_depth);
+ int64_t id1 = end_index(od, output_depth, input_depth);
+
+ int64_t ih0 = start_index(oh, output_height, input_height);
+ int64_t ih1 = end_index(oh, output_height, input_height);
+
+ int64_t iw0 = start_index(ow, output_width, input_width);
+ int64_t iw1 = end_index(ow, output_width, input_width);
+
+ BFloat16* out = output_data + i * channels;
+ int64_t* ind = indices_data + i * channels;
+
+ // Pass I: init out lane
+ iVec index0_ivec = iVec(id0 * input_height * input_width + ih0 * input_width + iw0);
+ fVec max_fvec = fVec(-std::numeric_limits<float>::infinity());
+ int64_t d1 = 0;
+ for (; d1 < len; d1 += fVec::size()) {
+ index0_ivec.store(index_buffer.get() + d1);
+ max_fvec.store(max + d1);
+ }
+ for (; d1 < size; d1++) {
+ ind[d1] = id0 * input_height * input_width + ih0 * input_width + iw0;
+ max[d1] = -std::numeric_limits<float>::infinity();
+ }
+ // Pass II: compute local max
+ for (int64_t id = id0; id < id1; id ++) {
+ for (int64_t ih = ih0; ih < ih1; ih ++) {
+ for (int64_t iw = iw0; iw < iw1; iw ++) {
+ BFloat16* in = input_data + n * input_depth * input_height * input_width * channels +
+ id * input_height * input_width * channels + ih * input_width * channels + iw * channels;
+
+ int64_t d2 = 0;
+ for (; d2 < len; d2 += bVec::size()) {
+ iVec index_ivec = iVec(id * input_height * input_width + ih * input_width + iw);
+ bVec val_bvec = bVec::loadu(in + d2);
+ fVec val_fvec0, val_fvec1;
+ std::tie(val_fvec0, val_fvec1) = convert_bfloat16_float(val_bvec);
+
+ iVec maxindex_ivec0 = iVec::loadu(index_buffer.get() + d2);
+ iVec maxindex_ivec1 = iVec::loadu(index_buffer.get() + d2 + iVec::size());
+ fVec maxval_fvec0 = fVec::loadu(max + d2);
+ fVec maxval_fvec1 = fVec::loadu(max + d2 + fVec::size());
+
+ // true = all ones, false = all zeros
+ fVec mask0 = (val_fvec0 > maxval_fvec0) | val_fvec0.isnan();
+ fVec mask1 = (val_fvec1 > maxval_fvec1) | val_fvec1.isnan();
+ iVec imask0 = vec::cast<int32_t>(mask0);
+ iVec imask1 = vec::cast<int32_t>(mask1);
+
+ fVec max_fvec0 = fVec::blendv(maxval_fvec0, val_fvec0, mask0);
+ fVec max_fvec1 = fVec::blendv(maxval_fvec1, val_fvec1, mask1);
+ iVec ind_ivec0 = iVec::blendv(maxindex_ivec0, index_ivec, imask0);
+ iVec ind_ivec1 = iVec::blendv(maxindex_ivec1, index_ivec, imask1);
+
+ max_fvec0.store(max + d2);
+ max_fvec1.store(max + d2 + fVec::size());
+ ind_ivec0.store(index_buffer.get() + d2);
+ ind_ivec1.store(index_buffer.get() + d2 + iVec::size());
+ }
+ for (; d2 < size; d2++) {
+ int64_t index = id * input_height * input_width + ih * input_width + iw;
+ float val = float(in[d2]);
+ int64_t maxindex = ind[d2];
+ float maxval = max[d2];
+
+ bool mask = (val > maxval) || std::isnan(val);
+ max[d2] = mask ? val : maxval;
+ ind[d2] = mask ? index : maxindex;
+ }
+ }
+ }
+ }
+ // Pass III: convert max values from float to bfloat16
+ int64_t d3 = 0;
+ for (; d3 < len; d3 += bVec::size()) {
+ fVec max_fvec0 = fVec::loadu(max + d3);
+ fVec max_fvec1 = fVec::loadu(max + d3 + fVec::size());
+ bVec max_bvec = convert_float_bfloat16(max_fvec0, max_fvec1);
+ max_bvec.store(out + d3);
+ }
+ for (; d3 < size; d3++) {
+ out[d3] = BFloat16(max[d3]);
+ }
+      // convert index data type
+ vec::convert<int32_t, int64_t>(index_buffer.get(), ind, len);
+
+ // move on to next output index
+ data_index_step(n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+ }
+ });
+
+ if (!output_.is_contiguous(memory_format)) {
+ output_.copy_(output);
+ }
+ if (!indices_.is_contiguous(memory_format)) {
+ indices_.copy_(indices);
+ }
+}
+
+template <typename scalar_t>
+void cpu_adaptive_max_pool3d_backward(
+ const Tensor& grad_input_,
+ const Tensor& grad_output_,
+ const Tensor& indices_) {
+ auto grad_output = grad_output_.contiguous();
+ auto indices = indices_.contiguous();
+ auto grad_input = grad_input_.contiguous();
+
+ auto grad_output_data = grad_output.data_ptr<scalar_t>();
+ auto indices_data = indices.data_ptr<int64_t>();
+ auto grad_input_data = grad_input.mutable_data_ptr<scalar_t>();
+
+ int64_t ndim = grad_output.ndimension();
+ // treat batch size and channels as one dimension
+  int64_t channels = ndim == 4 ? grad_output.size(0) : grad_output.size(0) * grad_output.size(1);
+ int64_t input_depth = grad_input.size(-3);
+ int64_t input_height = grad_input.size(-2);
+ int64_t input_width = grad_input.size(-1);
+ int64_t output_depth = grad_output.size(-3);
+ int64_t output_height = grad_output.size(-2);
+ int64_t output_width = grad_output.size(-1);
+
+ // parallel on dim of N, C
+ at::parallel_for(0, channels, 0, [&](int64_t begin, int64_t end) {
+ for (const auto c : c10::irange(begin, end)) {
+ scalar_t* grad_input_ptr = grad_input_data + c * input_depth * input_height * input_width;
+ scalar_t* grad_output_ptr = grad_output_data + c * output_depth * output_height * output_width;
+ int64_t* indices_ptr = indices_data + c * output_depth * output_height * output_width;
+
+ for (const auto od : c10::irange(output_depth)) {
+ for (const auto oh : c10::irange(output_height)) {
+ for (const auto ow : c10::irange(output_width)) {
+ // retrieve position of max
+ int64_t index = od * output_height * output_width + oh * output_width + ow;
+ int64_t maxindex = indices_ptr[index];
+
+ // update gradient
+ grad_input_ptr[maxindex] += grad_output_ptr[index];
+ }
+ }
+ }
+ }
+ });
+
+ if (!grad_input_.is_contiguous()) {
+ grad_input_.copy_(grad_input);
+ }
+}
+
+template <typename scalar_t>
+void cpu_adaptive_max_pool3d_backward_channels_last(
+ const Tensor& grad_input_,
+ const Tensor& grad_output_,
+ const Tensor& indices_) {
+ TORCH_CHECK(grad_output_.ndimension() == 5,
+ "3d adaptive max pooling backward with channels last format supports tensors with 5 dims.");
+ auto memory_format = at::MemoryFormat::ChannelsLast3d;
+ auto grad_input = grad_input_.contiguous(memory_format);
+ auto grad_output = grad_output_.contiguous(memory_format);
+ auto indices = indices_.contiguous(memory_format);
+
+ auto grad_input_data = grad_input.mutable_data_ptr<scalar_t>();
+ auto grad_output_data = grad_output.data_ptr<scalar_t>();
+ auto indices_data = indices.data_ptr<int64_t>();
+
+ int64_t nbatch = grad_input.size(0);
+ int64_t channels = grad_input.size(1);
+ int64_t input_depth = grad_input.size(2);
+ int64_t input_height = grad_input.size(3);
+ int64_t input_width = grad_input.size(4);
+ int64_t output_depth = grad_output.size(2);
+ int64_t output_height = grad_output.size(3);
+ int64_t output_width = grad_output.size(4);
+
+ // parallel on dim N
+ at::parallel_for(0, nbatch, 0, [&](int64_t begin, int64_t end) {
+ for (const auto n : c10::irange(begin, end)) {
+ scalar_t* grad_input_ptr = grad_input_data + n * input_depth * input_height * input_width * channels;
+ scalar_t* grad_output_ptr = grad_output_data + n * output_depth * output_height * output_width * channels;
+ int64_t* indices_ptr = indices_data + n * output_depth * output_height * output_width * channels;
+
+ for (const auto od : c10::irange(output_depth)) {
+ for (const auto oh : c10::irange(output_height)) {
+ for (const auto ow : c10::irange(output_width)) {
+ scalar_t* gout = grad_output_ptr + od * output_height * output_width * channels + oh * output_width * channels + ow * channels;
+ int64_t* ind = indices_ptr + od * output_height * output_width * channels + oh * output_width * channels + ow * channels;
+ // TODO: gcc vectorization
+ for (const auto c : c10::irange(channels)) {
+ int64_t maxindex = ind[c];
+ grad_input_ptr[maxindex * channels + c] += gout[c];
+ }
+ }
+ }
+ }
+ }
+ });
+
+ if (!grad_input_.is_contiguous(memory_format)) {
+ grad_input_.copy_(grad_input);
+ }
+}
+
+void adaptive_max_pool3d_kernel_impl(
+ const Tensor& output,
+ const Tensor& indices,
+ const Tensor& input,
+ IntArrayRef output_size) {
+ switch (input.suggest_memory_format()) {
+ case at::MemoryFormat::Contiguous: {
+ AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, input.scalar_type(), "adaptive_max_pool3d", [&] {
+ using param_t = at::opmath_type<scalar_t>;
+ cpu_adaptive_max_pool3d<scalar_t, /*accscalar_t*/param_t>(output, indices, input, output_size);
+ });
+ break;
+ }
+ case at::MemoryFormat::ChannelsLast3d: {
+ AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, input.scalar_type(), "adaptive_max_pool3d_channels_last", [&]{
+ cpu_adaptive_max_pool3d_channels_last<scalar_t>(output, indices, input, output_size);
+ });
+ break;
+ }
+ default:
+      TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast3d, Contiguous");
+ }
+}
+
+void adaptive_max_pool3d_backward_kernel_impl(
+ const Tensor& grad_input,
+ const Tensor& grad_output,
+ const Tensor& indices) {
+  // can't use grad_output memory format to switch here since grad_output might be NC111
+ switch (grad_input.suggest_memory_format()) {
+ case at::MemoryFormat::Contiguous: {
+ AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, grad_output.scalar_type(), "adaptive_max_pool3d_backward", [&] {
+ cpu_adaptive_max_pool3d_backward<scalar_t>(grad_input, grad_output, indices);
+ });
+ break;
+ }
+ case at::MemoryFormat::ChannelsLast3d: {
+ AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::BFloat16, ScalarType::Half, grad_output.scalar_type(), "adaptive_max_pool3d_backward_channels_last", [&]{
+ cpu_adaptive_max_pool3d_backward_channels_last<scalar_t>(grad_input, grad_output, indices);
});
break;
}
@@ -485,5 +984,7 @@ void adaptive_max_pool2d_backward_kernel_impl(
REGISTER_DISPATCH(adaptive_max_pool2d_kernel, &adaptive_max_pool2d_kernel_impl);
REGISTER_DISPATCH(adaptive_max_pool2d_backward_kernel, &adaptive_max_pool2d_backward_kernel_impl);
+REGISTER_DISPATCH(adaptive_max_pool3d_kernel, &adaptive_max_pool3d_kernel_impl);
+REGISTER_DISPATCH(adaptive_max_pool3d_backward_kernel, &adaptive_max_pool3d_backward_kernel_impl);
} // at::native
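
For the channels-last max-pool kernels above, the two key pieces are (a) the flattened NDHWC offset the inner loops walk and (b) the NaN-propagating running max/argmax update that the vectorized blendv mask implements lane-wise. A small scalar C++ sketch of both follows (illustrative only; `ndhwc_offset` and `update_max` are assumed helper names, not identifiers from this patch):

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>

// Flattened offset for a contiguous ChannelsLast3d (NDHWC) tensor.
static int64_t ndhwc_offset(int64_t n, int64_t d, int64_t h, int64_t w, int64_t c,
                            int64_t D, int64_t H, int64_t W, int64_t C) {
  return (((n * D + d) * H + h) * W + w) * C + c;
}

// Running max + argmax update; the (val > maxval) || isnan(val) mask matches
// the blendv condition used lane-wise in the vectorized Pass II loops.
static void update_max(float val, int64_t index, float& maxval, int64_t& maxindex) {
  bool take = (val > maxval) || std::isnan(val);
  maxval = take ? val : maxval;
  maxindex = take ? index : maxindex;
}

int main() {
  // One lane: scan candidate (value, flattened d*H*W + h*W + w index) pairs.
  std::vector<std::pair<float, int64_t>> candidates = {
      {1.0f, 0}, {3.5f, 7}, {2.0f, 9}, {std::nanf(""), 11}};
  float maxval = -std::numeric_limits<float>::infinity();
  int64_t maxindex = 0;
  for (auto& [v, idx] : candidates) update_max(v, idx, maxval, maxindex);
  std::cout << "max=" << maxval << " index=" << maxindex << "\n";  // NaN wins: index 11

  // Offset example for N=1, C=8, D=2, H=3, W=4 in NDHWC order.
  std::cout << ndhwc_offset(0, 1, 2, 3, 5, 2, 3, 4, 8) << "\n";  // (((0*2+1)*3+2)*4+3)*8+5 = 189
}
```

The same mask convention is what keeps NaNs winning the comparison, so they propagate into both the output value and its stored index.
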
diff --git a/aten/src/ATen/native/cpu/AvgPoolKernel.cpp b/aten/src/ATen/native/cpu/AvgPoolKernel.cpp
index 160a38d24e..572d5af43f 100644
--- a/aten/src/ATen/native/cpu/AvgPoolKernel.cpp
+++ b/aten/src/ATen/native/cpu/AvgPoolKernel.cpp
@@ -14,7 +14,7 @@ namespace at::native {
namespace {
template <typename scalar_t>
-void cpu_avg_pool(
+void cpu_avg_pool2d(
const Tensor& output_,
const Tensor& input_,
int64_t kW, int64_t kH,
@@ -101,7 +101,7 @@ void cpu_avg_pool(
template <typename scalar_t,
typename std::enable_if<!is_reduced_floating_point<scalar_t>::value, int>::type = 0>
-void cpu_avg_pool_channels_last(
+void cpu_avg_pool2d_channels_last(
const Tensor& output_,
const Tensor& input_,
int64_t kW, int64_t kH,
@@ -110,7 +110,7 @@ void cpu_avg_pool_channels_last(
bool count_include_pad,
c10::optional<int64_t> divisor_override) {
TORCH_CHECK(input_.ndimension() == 4,
- "average pooling with channels last format supports tensors with 4 dims");
+ "2d average pooling with channels last format supports tensors with 4 dims");
auto memory_format = at::MemoryFormat::ChannelsLast;
auto input = input_.contiguous(memory_format);
auto output = output_.contiguous(memory_format);
@@ -215,7 +215,7 @@ void cpu_avg_pool_channels_last(
template <typename scalar_t,
typename std::enable_if<is_reduced_floating_point<scalar_t>::value, int>::type = 0>
-void cpu_avg_pool_channels_last(
+void cpu_avg_pool2d_channels_last(
const Tensor& output_,
const Tensor& input_,
int64_t kW, int64_t kH,
@@ -224,7 +224,7 @@ void cpu_avg_pool_channels_last(
bool count_include_pad,
c10::optional<int64_t> divisor_override) {
TORCH_CHECK(input_.ndimension() == 4,
- "average pooling with channels last format supports tensors with 4 dims");
+ "2d average pooling with channels last format supports tensors with 4 dims");
auto memory_format = at::MemoryFormat::ChannelsLast;
auto input = input_.contiguous(memory_format);
auto output = output_.contiguous(memory_format);
@@ -347,7 +347,7 @@ void cpu_avg_pool_channels_last(
}
template <typename scalar_t>
-void cpu_avg_pool_backward(
+void cpu_avg_pool2d_backward(
const Tensor& grad_input_,
const Tensor& grad_output_,
int kW, int kH,
@@ -415,7 +415,7 @@ void cpu_avg_pool_backward(
}
template <typename scalar_t>
-void cpu_avg_pool_backward_channels_last(
+void cpu_avg_pool2d_backward_channels_last(
const Tensor& grad_input_,
const Tensor& grad_output_,
int kW, int kH,
@@ -463,7 +463,7 @@ void cpu_avg_pool_backward_channels_last(
if(count_include_pad) {
divide_factor = pool_size;
} else {
- divide_factor = (ih1 - ih0) * (iw1 - iw0);
+ divide_factor = (ih1 - ih0) * (iw1 - iw0);
}
}
@@ -505,13 +505,13 @@ void avg_pool2d_kernel_impl(
switch (input.suggest_memory_format()) {
case at::MemoryFormat::Contiguous: {
AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, input.scalar_type(), "avg_pool2d", [&] {
- cpu_avg_pool<scalar_t>(output, input, kW, kH, dW, dH, padW, padH, count_include_pad, divisor_override);
+ cpu_avg_pool2d<scalar_t>(output, input, kW, kH, dW, dH, padW, padH, count_include_pad, divisor_override);
});
break;
}
case at::MemoryFormat::ChannelsLast: {
AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, input.scalar_type(), "avg_pool2d_channels_last", [&] {
- cpu_avg_pool_channels_last<scalar_t>(output, input, kW, kH, dW, dH, padW, padH, count_include_pad, divisor_override);
+ cpu_avg_pool2d_channels_last<scalar_t>(output, input, kW, kH, dW, dH, padW, padH, count_include_pad, divisor_override);
});
break;
}
@@ -531,13 +531,596 @@ void avg_pool2d_backward_kernel_impl(
switch (grad_output.suggest_memory_format()) {
case at::MemoryFormat::Contiguous: {
AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, grad_output.scalar_type(), "avg_pool2d_backward", [&] {
- cpu_avg_pool_backward<scalar_t>(grad_input, grad_output, kW, kH, dW, dH, padW, padH, count_include_pad, divisor_override);
+ cpu_avg_pool2d_backward<scalar_t>(grad_input, grad_output, kW, kH, dW, dH, padW, padH, count_include_pad, divisor_override);
});
break;
}
case at::MemoryFormat::ChannelsLast: {
AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, grad_output.scalar_type(), "avg_pool2d_backward_channels_last", [&] {
- cpu_avg_pool_backward_channels_last<scalar_t>(grad_input, grad_output, kW, kH, dW, dH, padW, padH, count_include_pad, divisor_override);
+ cpu_avg_pool2d_backward_channels_last<scalar_t>(grad_input, grad_output, kW, kH, dW, dH, padW, padH, count_include_pad, divisor_override);
+ });
+ break;
+ }
+ default:
+ TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
+ }
+}
+
+
+template <typename scalar_t>
+void cpu_avg_pool3d(
+ const Tensor& output_,
+ const Tensor& input_,
+ int64_t kW, int64_t kH, int64_t kD,
+ int64_t dW, int64_t dH, int64_t dD,
+ int64_t padW, int64_t padH, int64_t padD,
+ bool count_include_pad,
+ c10::optional<int64_t> divisor_override) {
+ using acc_t = at::opmath_type<scalar_t>;
+
+ auto input = input_.contiguous();
+ auto output = output_.contiguous();
+
+ auto input_data = input.data_ptr<scalar_t>();
+ auto output_data = output.data_ptr<scalar_t>();
+
+ int64_t numel = output.numel();
+ int64_t ndim = input.ndimension();
+ // treat batch size and channels as one dimension
+ int64_t channels = ndim == 4 ? input.size(0) : input.size(0) * input.size(1);
+ int64_t input_depth = input.size(-3);
+ int64_t input_height = input.size(-2);
+ int64_t input_width = input.size(-1);
+ int64_t output_depth = output.size(-3);
+ int64_t output_height = output.size(-2);
+ int64_t output_width = output.size(-1);
+
+ // parallel on dim N, C, D, H, W
+ at::parallel_for(0, numel, 0, [&](int64_t begin, int64_t end) {
+ int64_t c = 0;
+ int64_t od = 0;
+ int64_t oh = 0;
+ int64_t ow = 0;
+ data_index_init(begin, c, channels, od, output_depth, oh, output_height, ow, output_width);
+
+ for (const auto i : c10::irange(begin, end)) {
+ output_data[i] = static_cast<scalar_t>(0);
+
+ // local pointers
+ scalar_t* input_ptr = input_data + c * input_depth * input_height * input_width;
+
+ // compute the mean of the input image...
+ int64_t id0 = od * dD - padD;
+ int64_t ih0 = oh * dH - padH;
+ int64_t iw0 = ow * dW - padW;
+ int64_t id1 = std::min(id0 + kD, input_depth + padD);
+ int64_t ih1 = std::min(ih0 + kH, input_height + padH);
+ int64_t iw1 = std::min(iw0 + kW, input_width + padW);
+ int64_t pool_size = (id1 - id0) * (ih1 - ih0) * (iw1 - iw0);
+ id0 = std::max(id0, (int64_t) 0);
+ ih0 = std::max(ih0, (int64_t) 0);
+ iw0 = std::max(iw0, (int64_t) 0);
+ id1 = std::min(id1, input_depth);
+ ih1 = std::min(ih1, input_height);
+ iw1 = std::min(iw1, input_width);
+
+ if (id0 >= id1 || ih0 >= ih1 || iw0 >= iw1) {
+ // move on to next output index
+ data_index_step(c, channels, od, output_depth, oh, output_height, ow, output_width);
+ continue;
+ }
+
+ acc_t sum = 0;
+
+ int64_t divide_factor;
+ if (divisor_override.has_value()) {
+ divide_factor = divisor_override.value();
+ } else {
+ if(count_include_pad) {
+ divide_factor = pool_size;
+ } else {
+ divide_factor = (id1 - id0) * (ih1 - ih0) * (iw1 - iw0);
+ }
+ }
+
+ for (const auto id : c10::irange(id0, id1)) {
+ for (const auto ih : c10::irange(ih0, ih1)) {
+ for (const auto iw : c10::irange(iw0, iw1)) {
+ sum += input_ptr[id * input_height * input_width + ih * input_width + iw];
+ }
+ }
+ }
+ output_data[i] += scalar_t(sum / divide_factor);
+
+ // move on to next output index
+ data_index_step(c, channels, od, output_depth, oh, output_height, ow, output_width);
+ }
+ });
+
+ if (!output_.is_contiguous()) {
+ output_.copy_(output);
+ }
+}
+
+template <typename scalar_t,
+ typename std::enable_if<!is_reduced_floating_point<scalar_t>::value, int>::type = 0>
+void cpu_avg_pool3d_channels_last(
+ const Tensor& output_,
+ const Tensor& input_,
+ int64_t kW, int64_t kH, int64_t kD,
+ int64_t dW, int64_t dH, int64_t dD,
+ int64_t padW, int64_t padH, int64_t padD,
+ bool count_include_pad,
+ c10::optional<int64_t> divisor_override) {
+ TORCH_CHECK(input_.ndimension() == 5,
+ "3d average pooling with channels last format supports tensors with 5 dims");
+ auto memory_format = at::MemoryFormat::ChannelsLast3d;
+ auto input = input_.contiguous(memory_format);
+ auto output = output_.contiguous(memory_format);
+
+ auto input_data = input.data_ptr<scalar_t>();
+ auto output_data = output.data_ptr<scalar_t>();
+
+ int64_t nbatch = input.size(0);
+ int64_t channels = input.size(1);
+ int64_t input_depth = input.size(2);
+ int64_t input_height = input.size(3);
+ int64_t input_width = input.size(4);
+ int64_t output_depth = output.size(2);
+ int64_t output_height = output.size(3);
+ int64_t output_width = output.size(4);
+
+ using Vec = vec::Vectorized<scalar_t>;
+  // parallel on dim N, D, H, W
+ at::parallel_for(0, nbatch * output_depth * output_height * output_width, 0, [&](int64_t begin, int64_t end) {
+ int64_t n = 0;
+ int64_t od = 0;
+ int64_t oh = 0;
+ int64_t ow = 0;
+ data_index_init(begin, n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+
+ int64_t size = channels;
+ int64_t len = size - (size % Vec::size());
+ for (const auto i : c10::irange(begin, end)) {
+ // compute the mean of the input image...
+ int64_t id0 = od * dD - padD;
+ int64_t ih0 = oh * dH - padH;
+ int64_t iw0 = ow * dW - padW;
+ int64_t id1 = std::min(id0 + kD, input_depth + padD);
+ int64_t ih1 = std::min(ih0 + kH, input_height + padH);
+ int64_t iw1 = std::min(iw0 + kW, input_width + padW);
+ int64_t pool_size = (id1 - id0) * (ih1 - ih0) * (iw1 - iw0);
+ id0 = std::max(id0, (int64_t) 0);
+ ih0 = std::max(ih0, (int64_t) 0);
+ iw0 = std::max(iw0, (int64_t) 0);
+ id1 = std::min(id1, input_depth);
+ ih1 = std::min(ih1, input_height);
+ iw1 = std::min(iw1, input_width);
+
+ int64_t divide_factor;
+ if (divisor_override.has_value()) {
+ divide_factor = divisor_override.value();
+ } else {
+ if(count_include_pad) {
+ divide_factor = pool_size;
+ } else {
+ divide_factor = (id1 - id0) * (ih1 - ih0) * (iw1 - iw0);
+ }
+ }
+
+ scalar_t* out = output_data + i * channels;
+
+ // Pass I: zero the out lane
+ int64_t d1 = 0;
+ for (; d1 < len; d1 += Vec::size()) {
+ Vec out_vec = Vec(scalar_t(0));
+ out_vec.store(out + d1);
+ }
+ for (; d1 < size; d1++) {
+ out[d1] = scalar_t(0);
+ }
+
+ if (id0 >= id1 || ih0 >= ih1 || iw0 >= iw1) {
+ // move on to next output index
+ data_index_step(n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+ continue;
+ }
+
+ // Pass II: compute local sum
+ for (const auto id : c10::irange(id0, id1)) {
+ for (const auto ih : c10::irange(ih0, ih1)) {
+ for (const auto iw : c10::irange(iw0, iw1)) {
+ scalar_t* in = input_data + n * input_depth * input_height * input_width * channels +
+ id * input_height * input_width * channels + ih * input_width * channels + iw * channels;
+
+ int64_t d2 = 0;
+ for (; d2 < len; d2 += Vec::size()) {
+ Vec out_vec = Vec::loadu(out + d2) + Vec::loadu(in + d2);
+ out_vec.store(out + d2);
+ }
+ for (; d2 < size; d2++) {
+ out[d2] += in[d2];
+ }
+ }
+ }
+ }
+
+ // Pass III: compute local average
+ int64_t d3 = 0;
+ for (; d3 < len; d3 += Vec::size()) {
+ Vec out_vec = Vec::loadu(out + d3) / Vec(scalar_t(divide_factor));
+ out_vec.store(out + d3);
+ }
+ for (; d3 < size; d3++) {
+ out[d3] = out[d3] / divide_factor;
+ }
+
+ // move on to next output index
+ data_index_step(n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+ }
+ });
+
+ if (!output_.is_contiguous(memory_format)) {
+ output_.copy_(output);
+ }
+}
+
+template <typename scalar_t,
+ typename std::enable_if<is_reduced_floating_point<scalar_t>::value, int>::type = 0>
+void cpu_avg_pool3d_channels_last(
+ const Tensor& output_,
+ const Tensor& input_,
+ int64_t kW, int64_t kH, int64_t kD,
+ int64_t dW, int64_t dH, int64_t dD,
+ int64_t padW, int64_t padH, int64_t padD,
+ bool count_include_pad,
+ c10::optional<int64_t> divisor_override) {
+ TORCH_CHECK(input_.ndimension() == 5,
+ "3d average pooling with channels last format supports tensors with 5 dims");
+ auto memory_format = at::MemoryFormat::ChannelsLast3d;
+ auto input = input_.contiguous(memory_format);
+ auto output = output_.contiguous(memory_format);
+
+ auto input_data = input.data_ptr<BFloat16>();
+ auto output_data = output.data_ptr<BFloat16>();
+
+ int64_t nbatch = input.size(0);
+ int64_t channels = input.size(1);
+ int64_t input_depth = input.size(2);
+ int64_t input_height = input.size(3);
+ int64_t input_width = input.size(4);
+ int64_t output_depth = output.size(2);
+ int64_t output_height = output.size(3);
+ int64_t output_width = output.size(4);
+
+ using bVec = vec::Vectorized<BFloat16>;
+ using fVec = vec::Vectorized<float>;
+  // parallel on dim N, D, H, W
+ at::parallel_for(0, nbatch * output_depth * output_height * output_width, 0, [&](int64_t begin, int64_t end) {
+ int64_t n = 0;
+ int64_t od = 0;
+ int64_t oh = 0;
+ int64_t ow = 0;
+ data_index_init(begin, n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+
+ // temp buffer for sum, use float as accumulation type
+ // can't reuse output buffer to store sum since it is BFloat16
+ auto sum_arr = std::make_unique<float []>(channels);
+ float* sum = sum_arr.get();
+
+ int64_t size = channels;
+ for (const auto i : c10::irange(begin, end)) {
+ // compute the mean of the input image...
+ int64_t id0 = od * dD - padD;
+ int64_t ih0 = oh * dH - padH;
+ int64_t iw0 = ow * dW - padW;
+ int64_t id1 = std::min(id0 + kD, input_depth + padD);
+ int64_t ih1 = std::min(ih0 + kH, input_height + padH);
+ int64_t iw1 = std::min(iw0 + kW, input_width + padW);
+ int64_t pool_size = (id1 - id0) * (ih1 - ih0) * (iw1 - iw0);
+ id0 = std::max(id0, (int64_t) 0);
+ ih0 = std::max(ih0, (int64_t) 0);
+ iw0 = std::max(iw0, (int64_t) 0);
+ id1 = std::min(id1, input_depth);
+ ih1 = std::min(ih1, input_height);
+ iw1 = std::min(iw1, input_width);
+
+ int64_t divide_factor;
+ if (divisor_override.has_value()) {
+ divide_factor = divisor_override.value();
+ } else {
+ if(count_include_pad) {
+ divide_factor = pool_size;
+ } else {
+ divide_factor = (id1 - id0) * (ih1 - ih0) * (iw1 - iw0);
+ }
+ }
+
+ BFloat16* out = output_data + i * channels;
+
+ // Pass I: zero the out lane
+ int64_t d1 = 0;
+ for (; d1 < size - (size % fVec::size()); d1 += fVec::size()) {
+ fVec sum_fvec = fVec(float(0));
+ sum_fvec.store(sum + d1);
+ }
+ for (; d1 < size; d1++) {
+ sum[d1] = float(0);
+ }
+
+ if (id0 >= id1 || ih0 >= ih1 || iw0 >= iw1) {
+        // the output buffer is not used as the accumulation buffer here, so when
+        // the kernel window is entirely out of range, zero the output explicitly.
+ for (int64_t k = 0; k < size; k++) {
+ out[k] = 0;
+ }
+ // move on to next output index
+ data_index_step(n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+ continue;
+ }
+
+ // Pass II: compute local sum
+ for (const auto id : c10::irange(id0, id1)) {
+ for (const auto ih : c10::irange(ih0, ih1)) {
+ for (const auto iw : c10::irange(iw0, iw1)) {
+ BFloat16* in = input_data + n * input_depth * input_height * input_width * channels +
+ id * input_height * input_width * channels + ih * input_width * channels + iw * channels;
+
+ int64_t d2 = 0;
+ for (; d2 < size - (size % bVec::size()); d2 += bVec::size()) {
+ bVec data_bvec = bVec::loadu(in + d2);
+ fVec data_fvec0, data_fvec1;
+ std::tie(data_fvec0, data_fvec1) = convert_bfloat16_float(data_bvec);
+
+ fVec sum_fvec0 = fVec::loadu(sum + d2) + data_fvec0;
+ fVec sum_fvec1 = fVec::loadu(sum + d2 + fVec::size()) + data_fvec1;
+ sum_fvec0.store(sum + d2);
+ sum_fvec1.store(sum + d2 + fVec::size());
+ }
+ for (; d2 < size; d2++) {
+ sum[d2] += float(in[d2]);
+ }
+ }
+ }
+ }
+
+ // Pass III: compute local average
+ int64_t d3 = 0;
+ for (; d3 < size - (size % bVec::size()); d3 += bVec::size()) {
+ fVec out_fvec0 = fVec::loadu(sum + d3) / fVec(float(divide_factor));
+ fVec out_fvec1 = fVec::loadu(sum + d3 + fVec::size()) / fVec(float(divide_factor));
+
+ bVec out_bvec = convert_float_bfloat16(out_fvec0, out_fvec1);
+ out_bvec.store(out + d3);
+ }
+ for (; d3 < size; d3++) {
+ out[d3] = BFloat16(sum[d3] / divide_factor);
+ }
+
+ // move on to next output index
+ data_index_step(n, nbatch, od, output_depth, oh, output_height, ow, output_width);
+ }
+ });
+
+ if (!output_.is_contiguous(memory_format)) {
+ output_.copy_(output);
+ }
+}
+
+template <typename scalar_t>
+void cpu_avg_pool3d_backward(
+ const Tensor& grad_input_,
+ const Tensor& grad_output_,
+ int kW, int kH, int kD,
+ int dW, int dH, int dD,
+ int padW, int padH, int padD,
+ bool count_include_pad,
+ c10::optional<int64_t> divisor_override) {
+ auto grad_output = grad_output_.contiguous();
+ auto grad_input = grad_input_.contiguous();
+
+ auto grad_output_data = grad_output.data_ptr<scalar_t>();
+ auto grad_input_data = grad_input.mutable_data_ptr<scalar_t>();
+
+ int64_t ndim = grad_output.ndimension();
+ // treat batch size and channels as one dimension
+ int64_t channels = ndim == 4 ? grad_output.size(0) : grad_output.size(0) * grad_output.size(1);
+ int64_t input_depth = grad_input.size(-3);
+ int64_t input_height = grad_input.size(-2);
+ int64_t input_width = grad_input.size(-1);
+ int64_t output_depth = grad_output.size(-3);
+ int64_t output_height = grad_output.size(-2);
+ int64_t output_width = grad_output.size(-1);
+
+ // parallel on dim of N, C
+ at::parallel_for(0, channels, 0, [&](int64_t begin, int64_t end) {
+ for (const auto c : c10::irange(begin, end)) {
+ scalar_t* grad_input_ptr = grad_input_data + c * input_depth * input_height * input_width;
+ scalar_t* grad_output_ptr = grad_output_data + c * output_depth * output_height * output_width;
+
+ for (const auto od : c10::irange(output_depth)) {
+ for (const auto oh : c10::irange(output_height)) {
+ for (const auto ow : c10::irange(output_width)) {
+ int64_t id0 = od * dD - padD;
+ int64_t ih0 = oh * dH - padH;
+ int64_t iw0 = ow * dW - padW;
+ int64_t id1 = std::min(id0 + kD, input_depth + padD);
+ int64_t ih1 = std::min(ih0 + kH, input_height + padH);
+ int64_t iw1 = std::min(iw0 + kW, input_width + padW);
+ int64_t pool_size = (id1 - id0) * (ih1 - ih0) * (iw1 - iw0);
+            id0 = std::max(id0, (int64_t) 0);
+            ih0 = std::max(ih0, (int64_t) 0);
+            iw0 = std::max(iw0, (int64_t) 0);
+            id1 = std::min(id1, input_depth);
+            ih1 = std::min(ih1, input_height);
+            iw1 = std::min(iw1, input_width);
+
+ int64_t divide_factor;
+ if (divisor_override.has_value()) {
+ divide_factor = divisor_override.value();
+ } else {
+ if(count_include_pad) {
+ divide_factor = pool_size;
+ } else {
+ divide_factor = (id1 - id0) * (ih1 - ih0) * (iw1 - iw0);
+ }
+ }
+
+ scalar_t grad_delta = grad_output_ptr[od * output_height * output_width + oh * output_width + ow] / divide_factor;
+ for (const auto id : c10::irange(id0, id1)) {
+ for (const auto ih : c10::irange(ih0, ih1)) {
+ for (const auto iw : c10::irange(iw0, iw1)) {
+ grad_input_ptr[id * input_height * input_width + ih * input_width + iw] += grad_delta;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ });
+
+ if (!grad_input_.is_contiguous()) {
+ grad_input_.copy_(grad_input);
+ }
+}
+
+template <typename scalar_t>
+void cpu_avg_pool3d_backward_channels_last(
+ const Tensor& grad_input_,
+ const Tensor& grad_output_,
+ int kW, int kH, int kD,
+ int dW, int dH, int dD,
+ int padW, int padH, int padD,
+ bool count_include_pad,
+ c10::optional<int64_t> divisor_override) {
+ auto memory_format = at::MemoryFormat::ChannelsLast3d;
+ auto grad_input = grad_input_.contiguous(memory_format);
+ auto grad_output = grad_output_.contiguous(memory_format);
+
+ auto grad_input_data = grad_input.mutable_data_ptr<scalar_t>();
+ auto grad_output_data = grad_output.data_ptr<scalar_t>();
+
+ int64_t nbatch = grad_input.size(0);
+ int64_t channels = grad_input.size(1);
+ int64_t input_depth = grad_input.size(2);
+ int64_t input_height = grad_input.size(3);
+ int64_t input_width = grad_input.size(4);
+ int64_t output_depth = grad_output.size(2);
+ int64_t output_height = grad_output.size(3);
+ int64_t output_width = grad_output.size(4);
+
+ using Vec = vec::Vectorized<scalar_t>;
+ // parallel on dim N
+ at::parallel_for(0, nbatch, 0, [&](int64_t begin, int64_t end) {
+ for (const auto n : c10::irange(begin, end)) {
+ scalar_t* grad_input_ptr = grad_input_data + n * input_depth * input_height * input_width * channels;
+      scalar_t* grad_output_ptr = grad_output_data + n * output_depth * output_height * output_width * channels;
+
+ for (const auto od : c10::irange(output_depth)) {
+ for (const auto oh : c10::irange(output_height)) {
+ for (const auto ow : c10::irange(output_width)) {
+ int64_t id0 = od * dD - padD;
+ int64_t ih0 = oh * dH - padH;
+ int64_t iw0 = ow * dW - padW;
+ int64_t id1 = std::min(id0 + kD, input_depth + padD);
+ int64_t ih1 = std::min(ih0 + kH, input_height + padH);
+ int64_t iw1 = std::min(iw0 + kW, input_width + padW);
+ int64_t pool_size = (id1 - id0) * (ih1 - ih0) * (iw1 - iw0);
+ id0 = std::max(id0, (int64_t) 0);
+ ih0 = std::max(ih0, (int64_t) 0);
+ iw0 = std::max(iw0, (int64_t) 0);
+ id1 = std::min(id1, input_depth);
+ ih1 = std::min(ih1, input_height);
+ iw1 = std::min(iw1, input_width);
+
+ int64_t divide_factor;
+ if (divisor_override.has_value()) {
+ divide_factor = divisor_override.value();
+ } else {
+ if(count_include_pad) {
+ divide_factor = pool_size;
+ } else {
+ divide_factor = (id1 - id0) * (ih1 - ih0) * (iw1 - iw0);
+ }
+ }
+
+ scalar_t* gout = grad_output_ptr + od * output_height * output_width * channels + oh * output_width * channels + ow * channels;
+ int64_t size = channels;
+ int64_t len = size - (size % Vec::size());
+ for (const auto id : c10::irange(id0, id1)) {
+ for (const auto ih : c10::irange(ih0, ih1)) {
+ for (const auto iw : c10::irange(iw0, iw1)) {
+ scalar_t* gin = grad_input_ptr + id * input_height * input_width * channels + ih * input_width * channels + iw * channels;
+
+ int64_t d = 0;
+ for (; d < len; d += Vec::size()) {
+ Vec gin_vec = Vec::loadu(gin + d) + Vec::loadu(gout + d) / Vec(scalar_t(divide_factor));
+ gin_vec.store(gin + d);
+ }
+ for (; d < size; d++) {
+ gin[d] += gout[d] / divide_factor;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ });
+
+ if (!grad_input_.is_contiguous(memory_format)) {
+ grad_input_.copy_(grad_input);
+ }
+}
+
+
+
+void avg_pool3d_kernel_impl(
+ const Tensor& output,
+ const Tensor& input,
+ int64_t kW, int64_t kH, int64_t kD,
+ int64_t dW, int64_t dH, int64_t dD,
+ int64_t padW, int64_t padH, int64_t padD,
+ bool count_include_pad,
+ c10::optional<int64_t> divisor_override) {
+ switch (input.suggest_memory_format()) {
+ case at::MemoryFormat::Contiguous: {
+ AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, input.scalar_type(), "avg_pool3d", [&] {
+ cpu_avg_pool3d<scalar_t>(output, input, kW, kH, kD, dW, dH, dD, padW, padH, padD, count_include_pad, divisor_override);
+ });
+ break;
+ }
+ case at::MemoryFormat::ChannelsLast: {
+ AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, input.scalar_type(), "avg_pool3d_channels_last", [&] {
+ cpu_avg_pool3d_channels_last<scalar_t>(output, input, kW, kH, kD, dW, dH, dD, padW, padH, padD, count_include_pad, divisor_override);
+ });
+ break;
+ }
+ default:
+ TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
+ }
+}
+
+
+void avg_pool3d_backward_kernel_impl(
+ const Tensor& grad_input,
+ const Tensor& grad_output,
+ int kW, int kH, int kD,
+ int dW, int dH, int dD,
+ int padW, int padH, int padD,
+ bool count_include_pad,
+ c10::optional<int64_t> divisor_override) {
+ switch (grad_output.suggest_memory_format()) {
+ case at::MemoryFormat::Contiguous: {
+ AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, grad_output.scalar_type(), "avg_pool3d_backward", [&] {
+ cpu_avg_pool3d_backward<scalar_t>(grad_input, grad_output, kW, kH, kD, dW, dH, dD, padW, padH, padD, count_include_pad, divisor_override);
+ });
+ break;
+ }
+ case at::MemoryFormat::ChannelsLast3d: {
+ AT_DISPATCH_FLOATING_TYPES_AND3(kLong, kBFloat16, kHalf, grad_output.scalar_type(), "avg_pool3d_backward_channels_last", [&] {
+ cpu_avg_pool3d_backward_channels_last<scalar_t>(grad_input, grad_output, kW, kH, kD, dW, dH, dD, padW, padH, padD, count_include_pad, divisor_override);
});
break;
}
@@ -546,9 +1129,12 @@ void avg_pool2d_backward_kernel_impl(
}
}
+
} // anonymous namespace
REGISTER_DISPATCH(avg_pool2d_kernel, &avg_pool2d_kernel_impl);
REGISTER_DISPATCH(avg_pool2d_backward_kernel, &avg_pool2d_backward_kernel_impl);
+REGISTER_DISPATCH(avg_pool3d_kernel, &avg_pool3d_kernel_impl);
+REGISTER_DISPATCH(avg_pool3d_backward_kernel, &avg_pool3d_backward_kernel_impl);
} // at::native
|
2.41.0
|
e5f890273de39ca7ba1650ac80709f59b851df9
|
Mon, 29 Apr 2024 16:59:35 -0700
|
[PATCH 0826/1000] [dynamo][source] Remove inspect getattr_static from AttrSource (#125200)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125200 Approved by: https://github.com/jansel
|
diff --git a/torch/_dynamo/source.py b/torch/_dynamo/source.py
index b0a5e7cb56..0414fb7339 100644
--- a/torch/_dynamo/source.py
+++ b/torch/_dynamo/source.py
@@ -144,7 +144,6 @@ class GlobalWeakRefSource(Source):
@dataclasses.dataclass(frozen=True)
class AttrSource(ChainedSource):
member: str
- get_static: bool = False
def __post_init__(self):
assert self.base, "Can't construct an AttrSource without a valid base source"
@@ -163,9 +162,7 @@ class AttrSource(ChainedSource):
return self.base.guard_source()
def name(self):
- if self.get_static:
- return f"inspect.getattr_static({self.base.name()}, {self.member!r})"
- elif not self.member.isidentifier():
+ if not self.member.isidentifier():
return f"getattr({self.base.name()}, {self.member!r})"
return f"{self.base.name()}.{self.member}"
diff --git a/torch/_dynamo/variables/user_defined.py b/torch/_dynamo/variables/user_defined.py
index 544773f08a..3ba2f012f4 100644
--- a/torch/_dynamo/variables/user_defined.py
+++ b/torch/_dynamo/variables/user_defined.py
@@ -837,9 +837,10 @@ class UserDefinedObjectVariable(UserDefinedVariable):
unimplemented("UserDefined with non-function __getattr__")
if isinstance(subobj, property):
- # Rewrite the source being explicit about reading it statically.
if self.source:
- source = AttrSource(self.source, name, get_static=True)
+ # Read the class attribute to reach the property
+ source = AttrSource(AttrSource(self.source, "__class__"), name)
+ # Get the getter function
source = AttrSource(source, "fget")
return variables.UserMethodVariable(
subobj.fget, self, source=source
|
2.41.0
|
0d2c24de1e7726fbb161e7b5c91063bf97d6565
|
Tue, 30 Apr 2024 06:59:53 +0000
|
[PATCH 0827/1000] Fix device type issue in `_get_device_handle` (#124390)
|
Fixes #124327. `device_type`, the first arg of [init_device_mesh()](https://github.com/pytorch/pytorch/blob/a0466061e17358fb621cfde3f85e0bd6d13cfc55/torch/distributed/device_mesh.py#L503), does not support device types that carry an index, such as `cuda:0`. If `cuda:0` is passed, `_get_device_handle()` will not correctly return `torch.cuda`, so the exception should be thrown before the DeviceMesh object is created. > See https://github.com/pytorch/pytorch/issues/124327#issuecomment-2062551161, Pull Request resolved: https://github.com/pytorch/pytorch/pull/124390 Approved by: https://github.com/wz337, https://github.com/wanchaol
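For illustration, a minimal sketch of the behavior this check enforces (assuming a distributed run, e.g. `torchrun` with 4 ranks, so the valid mesh can actually be constructed):

```python
from torch.distributed.device_mesh import init_device_mesh

# Correct: pass the bare device type; DeviceMesh handles rank-to-device mapping.
mesh_2d = init_device_mesh("cuda", mesh_shape=(2, 2), mesh_dim_names=("dp", "tp"))

# Rejected after this change: an indexed device type now fails fast with a
# clear error instead of _get_device_handle() failing to resolve torch.cuda later.
try:
    init_device_mesh("cuda:0", mesh_shape=(2, 2), mesh_dim_names=("dp", "tp"))
except RuntimeError as err:
    print(err)  # Device type with GPU index is not supported but got cuda:0. ...
```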
|
diff --git a/test/distributed/test_device_mesh.py b/test/distributed/test_device_mesh.py
index 89f98ce0b6..9c54cfa312 100644
--- a/test/distributed/test_device_mesh.py
+++ b/test/distributed/test_device_mesh.py
@@ -181,6 +181,17 @@ class DeviceMeshTest(DTensorTestBase):
ref_global_mesh._coordinate_on_dim, global_mesh._coordinate_on_dim
)
+ def test_raises_invalid_device_type(self):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Device type with GPU index is not supported",
+ ):
+ # test init_device_mesh with an invalid device type that contains a GPU index
+ mesh_shape = (2, self.world_size // 2)
+ mesh_2d = init_device_mesh(
+ "cuda:0", mesh_shape=mesh_shape, mesh_dim_names=("dp", "tp")
+ )
+
class DeviceMeshTestNDim(DTensorTestBase):
@property
diff --git a/torch/distributed/device_mesh.py b/torch/distributed/device_mesh.py
index 03fbd8b146..b199e82a17 100644
--- a/torch/distributed/device_mesh.py
+++ b/torch/distributed/device_mesh.py
@@ -535,6 +535,7 @@ else:
Args:
device_type (str): The device type of the mesh. Currently supports: "cpu", "cuda/cuda-like".
+ Passing in a device type with a GPU index, such as "cuda:0", is not allowed.
mesh_shape (Tuple[int]): A tuple defining the dimensions of the multi-dimensional array
describing the layout of devices.
mesh_dim_names (Tuple[str], optional): A tuple of mesh dimension names to assign to each dimension
@@ -565,6 +566,13 @@ else:
f"Found len(mesh_dim_names): {len(mesh_dim_names)} and len(mesh_shape):{len(mesh_shape)}.",
)
+ # assume valid device types are all letters
+ if device_type and not device_type.isalpha():
+ raise RuntimeError(
+ f"Device type with GPU index is not supported but got {device_type}. ",
+ "If you maintained a 'torch.device' object, it's recommended to pass in 'device.type'.",
+ )
+
# Always initialize the mesh's tensor on CPU, regardless of what the
# external device type has been set to be (e.g. meta)
with torch.device("cpu"):
|
2.41.0
|
587a93f4cac864e6a1c9ede396ff1bcbca39224
|
Mon, 29 Apr 2024 16:20:37 -0700
|
[PATCH 0828/1000] [inductor][easy] add buffer layout to SchedulerNode.debug_str (#125090)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125090 Approved by: https://github.com/jansel
|
diff --git a/test/inductor/test_debug_trace.py b/test/inductor/test_debug_trace.py
index 00d65d260a..3154efffa4 100644
--- a/test/inductor/test_debug_trace.py
+++ b/test/inductor/test_debug_trace.py
@@ -59,6 +59,8 @@ buf0.users = [NodeUser(node=SchedulerNode(name='buf1'), can_inplace=True, is_wea
buf0.group.device = cpu
buf0.group.iteration = ((256,), ())
buf0.sizes = ([256], [])
+arg0_1_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
+buf0_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
class buf0_loop_body:
var_ranges = {z0: 256}
index0 = z0
@@ -80,6 +82,8 @@ buf1.users = [NodeUser(node=ExternKernelSchedulerNode(name='buf2'), can_inplace=
buf1.group.device = cpu
buf1.group.iteration = ((256,), ())
buf1.sizes = ([256], [])
+buf0_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
+buf1_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
class buf1_loop_body:
var_ranges = {z0: 256}
index0 = z0
@@ -117,6 +121,8 @@ buf0_buf1.users = []
buf0.group.device = cpu
buf0.group.iteration = ((256,), ())
buf0.sizes = ([256], [])
+ arg0_1_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
+ buf0_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
class buf0_loop_body:
var_ranges = {z0: 256}
index0 = z0
@@ -137,6 +143,8 @@ buf0_buf1.users = []
buf1.group.device = cpu
buf1.group.iteration = ((256,), ())
buf1.sizes = ([256], [])
+ buf0_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
+ buf1_layout = FixedLayout('cpu', torch.float32, size=[16, 16], stride=[16, 1])
class buf1_loop_body:
var_ranges = {z0: 256}
index0 = z0
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index d1548b73e5..41a988ec22 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -732,6 +732,10 @@ class SchedulerNode(BaseSchedulerNode):
f"{name}.group.iteration = {self.group[1]}",
f"{name}.sizes = {self._sizes}",
]
+ for dep in self.read_writes.reads_and_writes():
+ buf_name = dep.name
+ buf = V.graph.get_buffer(buf_name)
+ lines.append(f"{buf_name}_layout = {pformat(buf.layout)}")
if self.get_aliases():
lines.append(f"{name}.aliases = {pformat(self.get_aliases())}")
if self.get_mutations():
|
2.41.0
|
c514df2afad386739bf8471ab351a86d5c5ffc7
|
Mon, 29 Apr 2024 16:20:37 -0700
|
[PATCH 0829/1000] [inductor] add triton code to SchedulerNode.debug_str (#125091)
|
Here is an example print: https://gist.github.com/shunting314/75c161368a833a535bd0d240b8099d7e Pull Request resolved: https://github.com/pytorch/pytorch/pull/125091 Approved by: https://github.com/jansel ghstack dependencies: #125090
|
diff --git a/torch/_inductor/codegen/cuda_combined_scheduling.py b/torch/_inductor/codegen/cuda_combined_scheduling.py
index eceadeb4c7..3eac88881f 100644
--- a/torch/_inductor/codegen/cuda_combined_scheduling.py
+++ b/torch/_inductor/codegen/cuda_combined_scheduling.py
@@ -75,3 +75,8 @@ class CUDACombinedScheduling(BaseScheduling):
def benchmark_fused_nodes(self, nodes):
return self._triton_scheduling.benchmark_fused_nodes(nodes)
+
+ def generate_kernel_code_from_nodes(self, nodes, benchmark_kernel=False):
+ return self._triton_scheduling.generate_kernel_code_from_nodes(
+ nodes, benchmark_kernel
+ )
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index 152621453c..17bb6e1a89 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -3921,8 +3921,7 @@ class TritonScheduling(BaseScheduling):
def ready_to_flush(self) -> bool:
return False
- @preserve_rng_state()
- def benchmark_fused_nodes(self, nodes):
+ def generate_kernel_code_from_nodes(self, nodes, benchmark_kernel=False):
@dataclasses.dataclass
class LastUsageHolder:
n: Any
@@ -3954,18 +3953,25 @@ class TritonScheduling(BaseScheduling):
)
self.codegen_node_schedule_with_kernel(node_schedule, kernel)
- with config.patch("benchmark_kernel", True), V.set_kernel_handler(kernel):
+ with config.patch(
+ "benchmark_kernel", benchmark_kernel
+ ), V.set_kernel_handler(kernel):
src_code = kernel.codegen_kernel()
else:
template_node = nodes[0]
epilogue_nodes = nodes[1:]
- with config.patch("benchmark_kernel", True):
+ with config.patch("benchmark_kernel", benchmark_kernel):
src_code = self.codegen_template(
template_node, epilogue_nodes, only_gen_src_code=True
)
src_code = src_code.replace(str(Placeholder.KERNEL_NAME), "triton_")
+ return src_code
+
+ @preserve_rng_state()
+ def benchmark_fused_nodes(self, nodes):
+ src_code = self.generate_kernel_code_from_nodes(nodes, benchmark_kernel=True)
mod = PyCodeCache.load(src_code)
def cache_file_path():
diff --git a/torch/_inductor/scheduler.py b/torch/_inductor/scheduler.py
index 41a988ec22..b827d72530 100644
--- a/torch/_inductor/scheduler.py
+++ b/torch/_inductor/scheduler.py
@@ -743,6 +743,20 @@ class SchedulerNode(BaseSchedulerNode):
if isinstance(self._body, ir.LoopBody):
lines.append(f"class {name}_loop_body:")
lines.append(textwrap.indent(self._body.debug_str(), " "))
+
+ if ir.is_triton(self.node.get_device()):
+ backend = self.scheduler.get_backend(self.node.get_device())
+ V.graph.scheduler.current_device = self.node.get_device()
+
+ # Don't increment kernel count when generating debug string.
+ # This will confuse some unit tests that check the number of
+ # generated kernels.
+ old_generated_kernel_count = metrics.generated_kernel_count
+ triton_code = backend.generate_kernel_code_from_nodes((self,)).strip()
+ metrics.generated_kernel_count = old_generated_kernel_count
+
+ lines.append(f"{self.get_name()} Triton code:")
+ lines.append(textwrap.indent(triton_code, " "))
return "\n".join(lines)
def get_ranges(self):
@@ -900,6 +914,16 @@ class FusedSchedulerNode(BaseSchedulerNode):
f"{self.get_name()}.snodes[{i}] =\n{node.debug_str()}"
for i, node in enumerate(self.snodes)
]
+ device = self.snodes[0].node.get_device()
+ if ir.is_triton(device):
+ backend = self.scheduler.get_backend(device)
+ V.graph.scheduler.current_device = device
+ old_generated_kernel_count = metrics.generated_kernel_count
+ triton_code = backend.generate_kernel_code_from_nodes(self.snodes).strip()
+ metrics.generated_kernel_count = old_generated_kernel_count
+ lines.append(f"{self.get_name()} Triton code:")
+ lines.append(textwrap.indent(triton_code, " "))
+
return textwrap.indent("\n".join(lines).rstrip(), " ")
def set_last_usage(
@@ -1271,6 +1295,7 @@ class Scheduler:
@dynamo_timed
def __init__(self, nodes):
super().__init__()
+ V.graph.scheduler = self
self.backends = {}
self.fuse_cache = {}
self.post_grad_graph_id = next(_post_grad_graph_counter)
@@ -1734,7 +1759,6 @@ class Scheduler:
"""
assert len(nodes) > 0
device = nodes[0].get_device()
- V.graph.scheduler = self
self.current_device = device
backend = self.get_backend(device)
return backend.benchmark_fused_nodes(nodes)
|
2.41.0
|
3db465029aa89f73ba1cb40174e57772070d270
|
Tue, 30 Apr 2024 12:53:40 +0000
|
[PATCH 0831/1000] Re-enable nightly testing for linux and macos binaries (#123390)
|
Related to: https://github.com/pytorch/pytorch/issues/123225 The skip tests logic lives here: https://github.com/pytorch/builder/blob/main/run_tests.sh#L19 Linux builds are using check_binary: https://github.com/pytorch/pytorch/actions/runs/8627625694/job/23649245546#step:16:339 Pull Request resolved: https://github.com/pytorch/pytorch/pull/123390 Approved by: https://github.com/ZainRizvi
|
diff --git a/.github/templates/linux_binary_build_workflow.yml.j2 b/.github/templates/linux_binary_build_workflow.yml.j2
index a7af0792fb..d44915f41d 100644
--- a/.github/templates/linux_binary_build_workflow.yml.j2
+++ b/.github/templates/linux_binary_build_workflow.yml.j2
@@ -46,7 +46,7 @@ env:
PYTORCH_FINAL_PACKAGE_DIR: /artifacts
PYTORCH_ROOT: /pytorch
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
!{{ common.concurrency(build_environment) }}
jobs:
diff --git a/.github/templates/macos_binary_build_workflow.yml.j2 b/.github/templates/macos_binary_build_workflow.yml.j2
index 505bde406d..591dc52ef9 100644
--- a/.github/templates/macos_binary_build_workflow.yml.j2
+++ b/.github/templates/macos_binary_build_workflow.yml.j2
@@ -48,7 +48,7 @@ env:
BUILD_ENVIRONMENT: !{{ build_environment }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
{%- if cross_compile_arm64 %}
CROSS_COMPILE_ARM64: 1
{% endif %}
diff --git a/.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml b/.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml
index cb20fd38f3..79a73abda9 100644
--- a/.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml
+++ b/.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml
@@ -31,7 +31,7 @@ env:
PYTORCH_FINAL_PACKAGE_DIR: /artifacts
PYTORCH_ROOT: /pytorch
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: linux-aarch64-binary-manywheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
diff --git a/.github/workflows/generated-linux-binary-conda-nightly.yml b/.github/workflows/generated-linux-binary-conda-nightly.yml
index a2e9b4cf87..9c221b25fc 100644
--- a/.github/workflows/generated-linux-binary-conda-nightly.yml
+++ b/.github/workflows/generated-linux-binary-conda-nightly.yml
@@ -31,7 +31,7 @@ env:
PYTORCH_FINAL_PACKAGE_DIR: /artifacts
PYTORCH_ROOT: /pytorch
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: linux-binary-conda-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
diff --git a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-main.yml b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-main.yml
index 2cf124a09a..5577a5e7d9 100644
--- a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-main.yml
+++ b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-main.yml
@@ -26,7 +26,7 @@ env:
PYTORCH_FINAL_PACKAGE_DIR: /artifacts
PYTORCH_ROOT: /pytorch
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: linux-binary-libtorch-cxx11-abi-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
diff --git a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml
index b31422818c..b28aa4a371 100644
--- a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml
+++ b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml
@@ -31,7 +31,7 @@ env:
PYTORCH_FINAL_PACKAGE_DIR: /artifacts
PYTORCH_ROOT: /pytorch
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: linux-binary-libtorch-cxx11-abi-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
diff --git a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-main.yml b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-main.yml
index 992395fa68..0158860d6f 100644
--- a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-main.yml
+++ b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-main.yml
@@ -26,7 +26,7 @@ env:
PYTORCH_FINAL_PACKAGE_DIR: /artifacts
PYTORCH_ROOT: /pytorch
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: linux-binary-libtorch-pre-cxx11-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
diff --git a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml
index 6d22d4c1f6..248b699850 100644
--- a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml
+++ b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml
@@ -31,7 +31,7 @@ env:
PYTORCH_FINAL_PACKAGE_DIR: /artifacts
PYTORCH_ROOT: /pytorch
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: linux-binary-libtorch-pre-cxx11-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
diff --git a/.github/workflows/generated-linux-binary-manywheel-main.yml b/.github/workflows/generated-linux-binary-manywheel-main.yml
index f1346b27ac..4764ede6bc 100644
--- a/.github/workflows/generated-linux-binary-manywheel-main.yml
+++ b/.github/workflows/generated-linux-binary-manywheel-main.yml
@@ -26,7 +26,7 @@ env:
PYTORCH_FINAL_PACKAGE_DIR: /artifacts
PYTORCH_ROOT: /pytorch
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: linux-binary-manywheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
diff --git a/.github/workflows/generated-linux-binary-manywheel-nightly.yml b/.github/workflows/generated-linux-binary-manywheel-nightly.yml
index 6908d913bc..9f3d7224c1 100644
--- a/.github/workflows/generated-linux-binary-manywheel-nightly.yml
+++ b/.github/workflows/generated-linux-binary-manywheel-nightly.yml
@@ -31,7 +31,7 @@ env:
PYTORCH_FINAL_PACKAGE_DIR: /artifacts
PYTORCH_ROOT: /pytorch
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: linux-binary-manywheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
diff --git a/.github/workflows/generated-macos-arm64-binary-conda-nightly.yml b/.github/workflows/generated-macos-arm64-binary-conda-nightly.yml
index 46f5a7621d..a8cbdb7cd6 100644
--- a/.github/workflows/generated-macos-arm64-binary-conda-nightly.yml
+++ b/.github/workflows/generated-macos-arm64-binary-conda-nightly.yml
@@ -26,7 +26,7 @@ env:
BUILD_ENVIRONMENT: macos-arm64-binary-conda
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: macos-arm64-binary-conda-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
diff --git a/.github/workflows/generated-macos-arm64-binary-libtorch-cxx11-abi-nightly.yml b/.github/workflows/generated-macos-arm64-binary-libtorch-cxx11-abi-nightly.yml
index dd9850e226..0ed7ba10a0 100644
--- a/.github/workflows/generated-macos-arm64-binary-libtorch-cxx11-abi-nightly.yml
+++ b/.github/workflows/generated-macos-arm64-binary-libtorch-cxx11-abi-nightly.yml
@@ -26,7 +26,7 @@ env:
BUILD_ENVIRONMENT: macos-arm64-binary-libtorch-cxx11-abi
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: macos-arm64-binary-libtorch-cxx11-abi-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
diff --git a/.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml b/.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml
index fbe443f2ff..167161de36 100644
--- a/.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml
+++ b/.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml
@@ -26,7 +26,7 @@ env:
BUILD_ENVIRONMENT: macos-arm64-binary-wheel
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
- SKIP_ALL_TESTS: 1
+ SKIP_ALL_TESTS: 0
concurrency:
group: macos-arm64-binary-wheel-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
cancel-in-progress: true
|
2.41.0
|
aed5dcfe685b65ef9c3cb51aafae91048aaf115
|
Tue, 30 Apr 2024 14:01:19 +0000
|
[PATCH 0832/1000] Clarify wording in docstring for `CosineAnnealingWarmRestarts` within `lr_scheduler.py` (#125161)
|
- Clarifies wording in the docstring for `CosineAnnealingWarmRestarts` within `lr_scheduler.py` Pull Request resolved: https://github.com/pytorch/pytorch/pull/125161 Approved by: https://github.com/janeyx99
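As background for the reworded parameters, a small illustrative sketch (assuming plain SGD and an arbitrary linear model) of how `T_0` and `T_mult` shape the restart schedule:

```python
import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# The first restart happens after T_0 = 10 epochs; each subsequent cycle is
# T_mult = 2x longer, so restarts land after epochs 10, 30, 70, ...
scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=10, T_mult=2, eta_min=1e-4)

for epoch in range(70):
    optimizer.step()
    scheduler.step()
```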
|
diff --git a/torch/optim/lr_scheduler.py b/torch/optim/lr_scheduler.py
index 8de3af4b87..12502f3ed9 100644
--- a/torch/optim/lr_scheduler.py
+++ b/torch/optim/lr_scheduler.py
@@ -1642,10 +1642,10 @@ class CosineAnnealingWarmRestarts(LRScheduler):
Args:
optimizer (Optimizer): Wrapped optimizer.
- T_0 (int): Number of iterations for the first restart.
- T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1.
+ T_0 (int): Number of iterations until the first restart.
+ T_mult (int, optional): A factor by which :math:`T_{i}` increases after a restart. Default: 1.
eta_min (float, optional): Minimum learning rate. Default: 0.
- last_epoch (int, optional): The index of last epoch. Default: -1.
+ last_epoch (int, optional): The index of the last epoch. Default: -1.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
|
2.41.0
|
480e8b8a17b8e28ba0a634ba3b7c79676588010
|
Mon, 29 Apr 2024 17:50:25 -0700
|
[PATCH 0834/1000] Add MAP_SHARED option for torch.load(mmap=True) (#124889)
|
Fixes #124528 Going over the options for our MapAllocator and what they do, I don't think any other of them need to be piped up to `torch.load` https://github.com/pytorch/pytorch/blob/4f29103749c5011529f1abb10b1508a682588909/aten/src/ATen/MapAllocator.h#L8-L16 ~However, I wonder if this `MmapVisibility(Enum)` is a good way to represent "or-ing" together of `mmap` flags if we want to extend it in the future. I looked over the flags for [`mmap(2)`](https://man7.org/linux/man-pages/man2/mmap.2.html), and could not immediately see how most of them would be useful for `torch.load` (would maybe `MAP_LOCKED` (like `mlock`) or `MAP_HUGE` ever be worthwhile?)~ Using the flags provided by the python `mmap` library so that we can extend the allowed flags and pipe them down to the cpp `mmap` call if there is a need for other flags in the future Pull Request resolved: https://github.com/pytorch/pytorch/pull/124889 Approved by: https://github.com/albanD
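For illustration, a minimal sketch of the new setting, mirroring the unit test added below (assuming a non-Windows platform, since changing the default raises on Windows):

```python
import mmap
import torch

sd = torch.nn.Linear(3, 5).state_dict()
torch.save(sd, "model.pt")

# Default behaviour is mmap.MAP_PRIVATE: the loaded tensors are copy-on-write,
# so writes to them never reach the file on disk.
sd_private = torch.load("model.pt", mmap=True)

# Opting in to mmap.MAP_SHARED makes writes to the loaded tensors visible
# through (and persisted in) the backing file.
torch.serialization.set_default_mmap_options(mmap.MAP_SHARED)
try:
    sd_shared = torch.load("model.pt", mmap=True)
finally:
    torch.serialization.set_default_mmap_options(mmap.MAP_PRIVATE)
```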
|
diff --git a/docs/source/notes/serialization.rst b/docs/source/notes/serialization.rst
index 1827ae84eb..09fd9e858b 100644
--- a/docs/source/notes/serialization.rst
+++ b/docs/source/notes/serialization.rst
@@ -392,3 +392,5 @@ The following utility functions are related to serialization:
.. autofunction:: register_package
.. autofunction:: get_default_load_endianness
.. autofunction:: set_default_load_endianness
+.. autofunction:: get_default_mmap_options
+.. autofunction:: set_default_mmap_options
diff --git a/test/test_serialization.py b/test/test_serialization.py
index 0779b744de..2f7e6babde 100644
--- a/test/test_serialization.py
+++ b/test/test_serialization.py
@@ -31,6 +31,11 @@ from torch.testing._internal.common_utils import (
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_dtype import all_types_and_complex_and
+if not IS_WINDOWS:
+ from mmap import MAP_SHARED, MAP_PRIVATE
+else:
+ MAP_SHARED, MAP_PRIVATE = None, None
+
# These tests were all copied from `test/test_torch.py` at some point, so see
# the actual blame, see this revision
# https://github.com/pytorch/pytorch/blame/9a2691f2fc948b9792686085b493c61793c2de30/test/test_torch.py
@@ -3954,6 +3959,32 @@ class TestSerialization(TestCase, SerializationMixin):
for v in result.values():
self.assertTrue(v.is_cuda)
+ def test_serialization_mmap_loading_options(self):
+ if IS_WINDOWS:
+ with self.assertRaisesRegex(RuntimeError, "Changing the default mmap options is currently not supported"):
+ torch.serialization.set_default_mmap_options(2)
+ return
+ m = torch.nn.Linear(3, 5)
+ sd = m.state_dict()
+ with tempfile.NamedTemporaryFile() as f:
+ torch.save(sd, f)
+ # with MmapVisibility.MAP_PRIVATE, should not be able to modify file
+ sd_loaded = torch.load(f.name, mmap=True)
+ sd_loaded['weight'][0][0] = 0
+ sd_loaded2 = torch.load(f.name, mmap=True)
+ self.assertEqual(sd_loaded2['weight'], sd['weight'])
+ # with MmapVisibility.MAP_SHARED, should be able to modify file
+ torch.serialization.set_default_mmap_options(MAP_SHARED)
+ try:
+ sd_loaded = torch.load(f.name, mmap=True)
+ sd_loaded['weight'][0][0] = 0
+ sd_loaded2 = torch.load(f.name, mmap=True)
+ self.assertNotEqual(sd_loaded2['weight'], sd['weight'])
+ self.assertEqual(sd_loaded2['weight'][0][0].item(), 0)
+ self.assertEqual(sd_loaded2['weight'], sd_loaded['weight'])
+ finally:
+ torch.serialization.set_default_mmap_options(MAP_PRIVATE)
+
@parametrize('dtype', (torch.float8_e5m2, torch.float8_e4m3fn, torch.complex32))
@parametrize('weights_only', (True, False))
def test_serialization_dtype(self, dtype, weights_only):
diff --git a/torch/serialization.py b/torch/serialization.py
index b4bcf2977d..df839408ee 100644
--- a/torch/serialization.py
+++ b/torch/serialization.py
@@ -34,6 +34,13 @@ FILE_LIKE: TypeAlias = Union[str, os.PathLike, BinaryIO, IO[bytes]]
MAP_LOCATION: TypeAlias = Optional[Union[Callable[[torch.Tensor, str], torch.Tensor], torch.device, str, Dict[str, str]]]
STORAGE: TypeAlias = Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage]
+IS_WINDOWS = sys.platform == "win32"
+
+if not IS_WINDOWS:
+ from mmap import MAP_SHARED, MAP_PRIVATE
+else:
+ MAP_SHARED, MAP_PRIVATE = None, None # type: ignore[assignment]
+
__all__ = [
'SourceChangeWarning',
'mkdtemp',
@@ -105,6 +112,41 @@ def set_default_load_endianness(endianness):
raise TypeError("Invalid argument type in function set_default_load_endianness")
_default_load_endian = endianness
+_default_mmap_options: int = MAP_PRIVATE
+
+def get_default_mmap_options() -> int:
+ '''
+ Get default mmap options for :func:`torch.load` with ``mmap=True``.
+
+ Defaults to ``mmap.MAP_PRIVATE``.
+
+
+ Returns:
+ default_mmap_options: int
+ '''
+ return _default_mmap_options
+
+def set_default_mmap_options(flags: int):
+ '''
+ Set default mmap options for :func:`torch.load` with ``mmap=True`` to flags.
+
+ For now, only either ``mmap.MAP_PRIVATE`` or ``mmap.MAP_SHARED`` are supported.
+ Please open an issue if you need any other option to be added here.
+
+ .. note::
+ This feature is currently not supported for Windows.
+
+ Args:
+ flags: ``mmap.MAP_PRIVATE`` or ``mmap.MAP_SHARED``
+ '''
+ global _default_mmap_options
+ if IS_WINDOWS:
+ raise RuntimeError("Changing the default mmap options is currently not supported for Windows")
+ if (flags != MAP_PRIVATE and flags != MAP_SHARED):
+ raise ValueError("Invalid argument in function set_default_mmap_options, "
+ f"expected mmap.MAP_PRIVATE or mmap.MAP_SHARED, but got {flags}")
+ _default_mmap_options = flags
+
def _is_zipfile(f) -> bool:
# This is a stricter implementation than zipfile.is_zipfile().
# zipfile.is_zipfile() is True if the magic number appears anywhere in the
@@ -1012,7 +1054,11 @@ def load(
if not _is_path(f):
raise ValueError("f must be a file path in order to use the mmap argument")
size = os.path.getsize(f)
- overall_storage = torch.UntypedStorage.from_file(os.fspath(f), False, size)
+ if not IS_WINDOWS:
+ shared = get_default_mmap_options() == MAP_SHARED
+ else:
+ shared = False
+ overall_storage = torch.UntypedStorage.from_file(os.fspath(f), shared, size)
if weights_only:
try:
return _load(opened_zipfile,
|
2.41.0
|
7d67e476d5984032a0839b7ecb796a39a66312c
|
Tue, 30 Apr 2024 15:05:01 +0000
|
[PATCH 0835/1000] upload pt2 cprofile stats to manifold (#125162)
|
Summary: https://fb.workplace.com/groups/257735836456307/permalink/657458576484029/ upload cprofile to manifold D56696397 has a script to convert profiler stats to dot graphs (see its test plan) Test Plan: non-MAST `TORCH_COMPILE_CPROFILE=1 buck2 run mode/opt mode/inplace //pytorch/benchmark:run -- ads_mc_igctr_mc3_v0 -d cuda -t train --torchdynamo inductor --profile --profile-export-chrome-trace` https://www.internalfb.com/manifold/explorer/pyper_traces/tree/compilation_cprofile/test/20240428_234002_7562397568 MAST `buck2 run mode/opt aps_models/ads/icvr:icvr_launcher -- mode=mast_ctr_cvr_cmf_rep launcher.fbl_entitlement=ai_infra_training_rnd_tc features=ctr_cvr_conso_cmf_pipeline_features_455876776_3teach model=ctr_cvr_cmf_when_rep_config_msmn_3teach model_name=ctr_cvr_when model.when_arch.use_extended_residual_contexts=True optimizers.dense_default.lr_schedule.0.max_iters=20000 training.planner.storage_reservation_policy=FixedPercentage training.planner.storage_reservation_percentage=0.72 data_loader.dataset.batch_size=2048 trainer.garbage_collection.garbage_collection_interval=100 model.when_arch.layer_norm_init_weight=0.3 optimizers.dense_default.lr_schedule.0.value=0.001 model.when_arch.customized_mlp_init_scale=0.3 launcher.num_workers=128 launcher.max_retries=10 launcher.data_project=oncall_ads_model_platform launcher.hardware=ZIONEX_80G data_loader.dataset.table_ds="[2024-01-01]" launcher.job_name=test_inductor_logging` https://www.internalfb.com/manifold/explorer/pyper_traces/tree/compilation_cprofile/aps-test_inductor_logging-745febb51a Generating dotty files from D56696397 ``` Generating dot file from cprofile stats /home/daohang/aps-test_inductor_logging-745febb51a/0/0/_compile1.profile ... P1225733598: https://www.internalfb.com/intern/paste/P1225733598/ Dotty: https://www.internalfb.com/intern/graphviz/?paste=1225733598 Generating dot file from cprofile stats /home/daohang/aps-test_inductor_logging-745febb51a/0/0/_compile10.profile ... P1225733629: https://www.internalfb.com/intern/paste/P1225733629/ Dotty: https://www.internalfb.com/intern/graphviz/?paste=1225733629 Generating dot file from cprofile stats /home/daohang/aps-test_inductor_logging-745febb51a/0/0/_compile0.profile ... P1225733649: https://www.internalfb.com/intern/paste/P1225733649/ Dotty: https://www.internalfb.com/intern/graphviz/?paste=1225733649 ``` Differential Revision: D56679561 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125162 Approved by: https://github.com/anijain2305
|
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index f4f9bc93bb..e27f20d69e 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -50,6 +50,8 @@ from typing import (
ValuesView,
)
+from torch._utils_internal import maybe_upload_prof_stats_to_manifold
+
from ..utils.hooks import RemovableHandle
try:
@@ -144,7 +146,7 @@ def cprofile_wrapper(func):
def profile_wrapper(*args, **kwargs):
global timer_counter
profile_cnt = next(timer_counter)
- profile_path = Path(func.__name__ + f"{profile_cnt}.profile")
+ profile_path = Path("/tmp/" + func.__name__ + f"{profile_cnt}.profile")
prof = cProfile.Profile()
prof.enable()
start_ts = time.time()
@@ -182,6 +184,9 @@ def cprofile_wrapper(func):
)
ps.sort_stats(pstats.SortKey.TIME).print_stats(20)
ps.sort_stats(pstats.SortKey.CUMULATIVE).print_stats(20)
+
+ maybe_upload_prof_stats_to_manifold(str(profile_path)) # fb-only
+
return retval
return profile_wrapper
diff --git a/torch/_utils_internal.py b/torch/_utils_internal.py
index fe7d30fc6b..3da8bc2186 100644
--- a/torch/_utils_internal.py
+++ b/torch/_utils_internal.py
@@ -189,3 +189,8 @@ USE_RTLD_GLOBAL_WITH_LIBTORCH = False
# m.set_python_module("mylib.ops") call from C++ that associates
# the C++ op with a python module.
REQUIRES_SET_PYTHON_MODULE = False
+
+
+def maybe_upload_prof_stats_to_manifold(profile_path: str) -> None:
+ print("Uploading profile stats (fb-only otherwise no-op)")
+ pass
|
2.41.0
|
b80a59677ed9d57c5f95175d9f517bc879a1d1c
|
Tue, 30 Apr 2024 15:10:56 +0000
|
[PATCH 0836/1000] CI: add opt-in aarch64 linux workflow (#121284)
|
Triggered by `ciflow/linux-aarch64` and runs only `test_modules`, `test_mkldnn`, `test_mkldnn_fusion` and `test_openmp` as tests for now. TODOs: - Enable sccache for fast CI - Extend to more reasonable test coverage Pull Request resolved: https://github.com/pytorch/pytorch/pull/121284 Approved by: https://github.com/atalman, https://github.com/malfet
|
diff --git a/.ci/docker/build.sh b/.ci/docker/build.sh
index 2344862643..1b8ed8df93 100755
--- a/.ci/docker/build.sh
+++ b/.ci/docker/build.sh
@@ -306,6 +306,12 @@ case "$image" in
DB=yes
VISION=yes
CONDA_CMAKE=yes
+ # snadampal: skipping sccache due to the following issue
+ # https://github.com/pytorch/pytorch/issues/121559
+ SKIP_SCCACHE_INSTALL=yes
+ # snadampal: skipping llvm src build install because the current version
+ # from pytorch/llvm:9.0.1 is x86 specific
+ SKIP_LLVM_SRC_BUILD_INSTALL=yes
;;
*)
# Catch-all for builds that are not hardcoded.
@@ -399,6 +405,8 @@ DOCKER_BUILDKIT=1 docker build \
--build-arg "EXECUTORCH=${EXECUTORCH}" \
--build-arg "BASEKIT_VERSION=${BASEKIT_VERSION}" \
--build-arg "ACL=${ACL:-}" \
+ --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
+ --build-arg "SKIP_LLVM_SRC_BUILD_INSTALL=${SKIP_LLVM_SRC_BUILD_INSTALL:-}" \
-f $(dirname ${DOCKERFILE})/Dockerfile \
-t "$tmp_tag" \
"$@" \
diff --git a/.ci/docker/requirements-ci.txt b/.ci/docker/requirements-ci.txt
index e62ddfdd5f..1dc64e1ae5 100644
--- a/.ci/docker/requirements-ci.txt
+++ b/.ci/docker/requirements-ci.txt
@@ -263,10 +263,10 @@ unittest-xml-reporting<=3.2.0,>=2.0.0
#Pinned versions:
#test that import:
-#wheel not found on aarch64, and source build requires rust
-lintrunner==0.10.7 ; platform_machine == "x86_64"
+#lintrunner is supported on aarch64-linux only from 0.12.4 version
+lintrunner==0.12.5
#Description: all about linters!
-#Pinned versions: 0.10.7
+#Pinned versions: 0.12.5
#test that import:
rockset==1.0.3
diff --git a/.ci/docker/ubuntu/Dockerfile b/.ci/docker/ubuntu/Dockerfile
index bea3d3ec45..b471ce3b89 100644
--- a/.ci/docker/ubuntu/Dockerfile
+++ b/.ci/docker/ubuntu/Dockerfile
@@ -169,9 +169,11 @@ RUN rm install_acl.sh
ENV INSTALLED_ACL ${ACL}
# Install ccache/sccache (do this last, so we get priority in PATH)
+ARG SKIP_SCCACHE_INSTALL
COPY ./common/install_cache.sh install_cache.sh
ENV PATH /opt/cache/bin:$PATH
-RUN bash ./install_cache.sh && rm install_cache.sh
+RUN if [ -z "${SKIP_SCCACHE_INSTALL}" ]; then bash ./install_cache.sh; fi
+RUN rm install_cache.sh
# Add jni.h for java host build
COPY ./common/install_jni.sh install_jni.sh
@@ -188,7 +190,9 @@ ARG BUILD_ENVIRONMENT
ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
# Install LLVM dev version (Defined in the pytorch/builder github repository)
+ARG SKIP_LLVM_SRC_BUILD_INSTALL
COPY --from=pytorch/llvm:9.0.1 /opt/llvm /opt/llvm
+RUN if [ -n "${SKIP_LLVM_SRC_BUILD_INSTALL}" ]; then set -eu; rm -rf /opt/llvm; fi
# AWS specific CUDA build guidance
ENV TORCH_CUDA_ARCH_LIST Maxwell
diff --git a/.ci/pytorch/build.sh b/.ci/pytorch/build.sh
index 13069482ae..f7eee9fe9a 100755
--- a/.ci/pytorch/build.sh
+++ b/.ci/pytorch/build.sh
@@ -376,4 +376,8 @@ if [[ "$BUILD_ENVIRONMENT" != *libtorch* && "$BUILD_ENVIRONMENT" != *bazel* ]];
python tools/stats/export_test_times.py
fi
-print_sccache_stats
+# snadampal: skipping it till sccache support added for aarch64
+# https://github.com/pytorch/pytorch/issues/121559
+if [[ "$BUILD_ENVIRONMENT" != *aarch64* ]]; then
+ print_sccache_stats
+fi
diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh
index b13e41681a..5a1e098636 100755
--- a/.ci/pytorch/test.sh
+++ b/.ci/pytorch/test.sh
@@ -181,6 +181,11 @@ if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]] ; then
export PATH="$HOME/.local/bin:$PATH"
fi
+if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
+ # TODO: revisit this once the CI is stabilized on aarch64 linux
+ export VALGRIND=OFF
+fi
+
install_tlparse
# DANGER WILL ROBINSON. The LD_PRELOAD here could cause you problems
@@ -1152,11 +1157,18 @@ test_executorch() {
assert_git_not_dirty
}
+test_linux_aarch64(){
+ # TODO: extend unit tests list
+ python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp --verbose
+}
+
if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-bazel-* ]]; then
(cd test && python -c "import torch; print(torch.__config__.show())")
(cd test && python -c "import torch; print(torch.__config__.parallel_info())")
fi
-if [[ "${TEST_CONFIG}" == *backward* ]]; then
+if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
+ test_linux_aarch64
+elif [[ "${TEST_CONFIG}" == *backward* ]]; then
test_forward_backward_compatibility
# Do NOT add tests after bc check tests, see its comment.
elif [[ "${TEST_CONFIG}" == *xla* ]]; then
diff --git a/.github/pytorch-probot.yml b/.github/pytorch-probot.yml
index c7b554ce44..fafa314652 100644
--- a/.github/pytorch-probot.yml
+++ b/.github/pytorch-probot.yml
@@ -8,6 +8,7 @@ ciflow_push_tags:
- ciflow/binaries_wheel
- ciflow/inductor
- ciflow/inductor-perf-compare
+- ciflow/linux-aarch64
- ciflow/mps
- ciflow/nightly
- ciflow/periodic
diff --git a/.github/workflows/linux-aarch64.yml b/.github/workflows/linux-aarch64.yml
new file mode 100644
index 0000000000..36461afb6a
--- /dev/null
+++ b/.github/workflows/linux-aarch64.yml
@@ -0,0 +1,36 @@
+name: linux-aarch64
+
+on:
+ push:
+ tags:
+ - ciflow/linux-aarch64/*
+ workflow_dispatch:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
+ cancel-in-progress: true
+
+jobs:
+ linux-jammy-aarch64-py3_10-build:
+ name: linux-jammy-aarch64-py3.10
+ uses: ./.github/workflows/_linux-build.yml
+ with:
+ build-environment: linux-jammy-aarch64-py3.10
+ docker-image-name: pytorch-linux-jammy-aarch64-py3.10-gcc11
+ runner: linux.arm64.2xlarge
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 1, runner: "linux.arm64.2xlarge" },
+ ]}
+
+ linux-jammy-aarch64-py3_10-test:
+ name: linux-jammy-aarch64-py3.10
+ uses: ./.github/workflows/_linux-test.yml
+ needs: linux-jammy-aarch64-py3_10-build
+ permissions:
+ id-token: write
+ contents: read
+ with:
+ build-environment: linux-jammy-aarch64-py3.10
+ docker-image: ${{ needs.linux-jammy-aarch64-py3_10-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-jammy-aarch64-py3_10-build.outputs.test-matrix }}
|
2.41.0
|
cc2e034f7e55bf9ff7f4e5df4e9086a5c92caaa
|
Mon, 29 Apr 2024 11:35:42 -0700
|
[PATCH 0837/1000] Fakify script object inputs and attributes for non-strict export (#124239)
|
This PR fakifies ScriptObject inputs and attributes in export non-strict mode by default. The basic idea is to `only fakify the script object during tracing (i.e. aot_export)`. After we get the traced graph module, eagerly executing, serializing, or running more passes will use the real script objects. This is essentially treating the script object as a constant tensor. Concretely, we 1. fakify all the script object inputs and module attributes (gathered by constant_attrs), 2. patch the module's attributes with the fakified script objects, 3. right after aot_export, remove the patching (to avoid changing the original module), then modify the exported graph module's attributes to the real script objects. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124239 Approved by: https://github.com/zou3519
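As a rough illustration of the patch-then-restore flow for module attributes described above — a simplified sketch, assuming `constant_attrs` is a plain dict mapping each script object to its fully qualified name and using a local `FakeScriptObject` stand-in rather than the PR's actual data structures:

```python
import contextlib
import functools


class FakeScriptObject:
    """Stand-in for this sketch only; not the real torch._library fake class."""
    def __init__(self, real_obj):
        self.wrapped_obj = real_obj


def _set_attr_by_fqn(mod, fqn, value):
    # Resolve "sub_mod.attr"-style names and overwrite the leaf attribute.
    *parents, leaf = fqn.split(".")
    owner = functools.reduce(getattr, parents, mod)
    setattr(owner, leaf, value)


@contextlib.contextmanager
def fakify_script_objects_sketch(mod, constant_attrs):
    fake_to_real = {}
    try:
        # Steps 1-2: swap every script-object attribute for a fake before tracing.
        for real_obj, fqn in constant_attrs.items():
            fake_obj = FakeScriptObject(real_obj)
            fake_to_real[fake_obj] = real_obj
            _set_attr_by_fqn(mod, fqn, fake_obj)
        yield mod, fake_to_real  # aot_export runs against the patched module here
    finally:
        # Step 3: restore the real objects so the original module is unchanged;
        # the exported graph module's attributes are then rebound to the real
        # script objects via fake_to_real.
        for fake_obj, real_obj in fake_to_real.items():
            _set_attr_by_fqn(mod, constant_attrs[real_obj], real_obj)
```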
|
diff --git a/test/export/test_passes.py b/test/export/test_passes.py
index 58619e4775..6057474ee1 100644
--- a/test/export/test_passes.py
+++ b/test/export/test_passes.py
@@ -13,6 +13,10 @@ from typing import List, Set
import torch
from functorch.experimental.control_flow import cond
from torch._dynamo.eval_frame import is_dynamo_supported
+from torch._export.non_strict_utils import (
+ _fakify_script_objects,
+ _gather_constant_attrs,
+)
from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse
from torch._export.passes.functionalize_side_effectful_ops_pass import (
_FunctionalizeSideEffectfulOpsPass,
@@ -34,26 +38,24 @@ from torch._export.utils import (
sequential_split,
)
from torch._higher_order_ops.auto_functionalize import auto_functionalized
-from torch._higher_order_ops.torchbind import enable_torchbind_tracing
+from torch._subclasses.fake_tensor import FakeTensorMode
from torch.export import export
from torch.export._remove_auto_functionalized_pass import (
unsafe_remove_auto_functionalized_pass,
)
from torch.export._remove_effect_tokens_pass import _remove_effect_tokens
+from torch.fx.experimental.symbolic_shapes import ShapeEnv
from torch.fx.passes.infra.partitioner import Partition
from torch.fx.passes.operator_support import OperatorSupport
from torch.library import _scoped_library, impl
from torch.testing import FileCheck
from torch.testing._internal.common_utils import (
- find_library_location,
- IS_FBCODE,
- IS_MACOS,
- IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TestCase,
)
+from torch.testing._internal.torchbind_impls import init_torchbind_implementations
from torch.utils import _pytree as pytree
@@ -87,6 +89,53 @@ def _get_output_names(gm: torch.fx.GraphModule) -> List[str]:
return [str(arg) for arg in args]
+class ModelsWithScriptObjectAttr:
+ class Simple(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
+
+ class SimpleWithAttrInContainer(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
+ self.pytree_attr2 = [
+ torch.classes._TorchScriptTesting._Foo(1, 2),
+ {
+ torch.classes._TorchScriptTesting._Foo(3, 4),
+ },
+ {"foo": torch.classes._TorchScriptTesting._Foo(5, 6)},
+ ]
+
+ class NestedWithAttrInContainer(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
+ self.pytree_attr2 = [
+ torch.classes._TorchScriptTesting._Foo(1, 2),
+ {
+ torch.classes._TorchScriptTesting._Foo(3, 4),
+ },
+ {"foo": torch.classes._TorchScriptTesting._Foo(5, 6)},
+ ]
+ self.sub_mod = ModelsWithScriptObjectAttr.Simple()
+ self.sub_mod2 = ModelsWithScriptObjectAttr.SimpleWithAttrInContainer()
+
+ class MoreNestedWithAttrInContainer(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
+ self.pytree_attr2 = [
+ torch.classes._TorchScriptTesting._Foo(1, 2),
+ {
+ torch.classes._TorchScriptTesting._Foo(3, 4),
+ },
+ {"foo": torch.classes._TorchScriptTesting._Foo(5, 6)},
+ ]
+ self.sub_mod = ModelsWithScriptObjectAttr.Simple()
+ self.sub_mod2 = ModelsWithScriptObjectAttr.NestedWithAttrInContainer()
+
+
def _set_grad_enabled_tests():
from torch.export._trace import _export
@@ -213,17 +262,7 @@ class TestPasses(TestCase):
self.SEQUENTIAL_SPLIT_INLINE_TESTS = _sequential_split_inline_tests()
self.SET_GRAD_ENABLED_TESTS = _set_grad_enabled_tests()
- if IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library(
- "//caffe2/test/cpp/jit:test_custom_class_registrations"
- )
- elif IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- if IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
+ init_torchbind_implementations()
def tearDown(self):
self.SEQUENTIAL_SPLIT_INLINE_TESTS.clear()
@@ -421,8 +460,7 @@ class TestPasses(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- with enable_torchbind_tracing():
- ep = torch.export.export(m, inputs, strict=False)
+ ep = torch.export.export(m, inputs, strict=False)
inp = torch.randn(2, 3)
orig_res = m(inp)
@@ -435,6 +473,48 @@ class TestPasses(TestCase):
self.assertTrue(torch.allclose(orig_res, ep_res))
self.assertTrue(torch.allclose(orig_res, without_token_res))
+ def test_fakify_script_objects(self):
+ for m in [
+ ModelsWithScriptObjectAttr.Simple(),
+ ModelsWithScriptObjectAttr.SimpleWithAttrInContainer(),
+ ModelsWithScriptObjectAttr.NestedWithAttrInContainer(),
+ ModelsWithScriptObjectAttr.MoreNestedWithAttrInContainer(),
+ ]:
+ constant_attrs = _gather_constant_attrs(m)
+ fake_mode = FakeTensorMode(
+ shape_env=ShapeEnv(tracked_fakes=[]),
+ allow_non_fake_inputs=True,
+ )
+ with _fakify_script_objects(m, tuple(), {}, fake_mode) as (
+ patched_mod,
+ _,
+ _,
+ fake_constant_attrs,
+ fake_to_real,
+ ):
+ self.assertEqual(len(fake_constant_attrs), len(constant_attrs))
+ for fake_obj, fqn in fake_constant_attrs.items():
+ self.assertEqual(constant_attrs[fake_to_real[fake_obj]], fqn)
+
+ # TODO: _gather_constants doesn't recursively look into the pytree containers.
+ @unittest.expectedFailure
+ def test_fakify_script_objects_properly_handle_containers(self):
+ m = ModelsWithScriptObjectAttr.SimpleWithAttrInContainer()
+ constant_attrs = _gather_constant_attrs(m)
+ fake_mode = FakeTensorMode(
+ shape_env=ShapeEnv(tracked_fakes=[]),
+ allow_non_fake_inputs=True,
+ )
+ with _fakify_script_objects(m, tuple(), {}, fake_mode) as (
+ patched_mod,
+ _,
+ _,
+ fake_constant_attrs,
+ fake_to_real,
+ ):
+ self.assertTrue("attr" in fake_constant_attrs.values())
+ self.assertTrue("pytree_attr2" in fake_constant_attrs.values())
+
def test_runtime_assert_inline_constraints_for_item(self) -> None:
class M(torch.nn.Module):
def __init__(self):
diff --git a/test/export/test_serialize.py b/test/export/test_serialize.py
index 186517d02d..2f8163333d 100644
--- a/test/export/test_serialize.py
+++ b/test/export/test_serialize.py
@@ -3,6 +3,7 @@ PYTEST_DONT_REWRITE (prevents pytest from rewriting assertions, which interferes
with test_sym_bool)
"""
+
# Owner(s): ["oncall: export"]
import copy
import io
@@ -30,11 +31,7 @@ from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch.export import Dim, export, load, save
from torch.fx.experimental.symbolic_shapes import is_concrete_int, ValueRanges
from torch.testing._internal.common_utils import (
- find_library_location,
instantiate_parametrized_tests,
- IS_FBCODE,
- IS_MACOS,
- IS_SANDCASTLE,
IS_WINDOWS,
parametrize,
run_tests,
@@ -42,6 +39,8 @@ from torch.testing._internal.common_utils import (
TestCase,
)
+from torch.testing._internal.torchbind_impls import init_torchbind_implementations
+
def get_filtered_export_db_tests():
return [
@@ -347,17 +346,8 @@ class TestSerialize(TestCase):
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo doesn't support")
class TestDeserialize(TestCase):
def setUp(self):
- if IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library(
- "//caffe2/test/cpp/jit:test_custom_class_registrations"
- )
- elif IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- if IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
+ super().setUp()
+ init_torchbind_implementations()
def _check_graph_nodes(self, gm1, gm2, _check_meta=True):
# TODO: The _check_meta flag bypasses checking for
@@ -837,8 +827,7 @@ class TestDeserialize(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- with enable_torchbind_tracing():
- self.check_graph(m, inputs, strict=False)
+ self.check_graph(m, inputs, strict=False)
def test_custom_obj(self):
class MyModule(torch.nn.Module):
@@ -853,8 +842,7 @@ class TestDeserialize(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- with enable_torchbind_tracing():
- self.check_graph(m, inputs, strict=False)
+ self.check_graph(m, inputs, strict=False)
def test_custom_obj_list_out(self):
class MyModule(torch.nn.Module):
@@ -870,8 +858,7 @@ class TestDeserialize(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- with enable_torchbind_tracing():
- self.check_graph(m, inputs, strict=False)
+ self.check_graph(m, inputs, strict=False)
instantiate_parametrized_tests(TestDeserialize)
@@ -1061,17 +1048,8 @@ class TestSaveLoad(TestCase):
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo doesn't support")
class TestSerializeCustomClass(TestCase):
def setUp(self):
- if IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library(
- "//caffe2/test/cpp/jit:test_custom_class_registrations"
- )
- elif IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- if IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
+ super().setUp()
+ init_torchbind_implementations()
def test_custom_class(self):
custom_obj = torch.classes._TorchScriptTesting._PickleTester([3, 4])
diff --git a/test/export/test_torchbind.py b/test/export/test_torchbind.py
index 872c713571..b60fa71459 100644
--- a/test/export/test_torchbind.py
+++ b/test/export/test_torchbind.py
@@ -1,6 +1,5 @@
# Owner(s): ["oncall: export"]
-import unittest
import torch
import torch.utils._pytree as pytree
@@ -11,38 +10,25 @@ from torch.export import export
from torch.export._trace import _export
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
- find_library_location,
instantiate_parametrized_tests,
- IS_FBCODE,
- IS_MACOS,
- IS_SANDCASTLE,
- IS_WINDOWS,
parametrize,
run_tests,
skipIfTorchDynamo,
TestCase,
)
-from torch.testing._internal.torchbind_impls import register_fake_operators
-
-
-def load_torchbind_test_lib():
- if IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library("//caffe2/test/cpp/jit:test_custom_class_registrations")
- elif IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- if IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
-
- register_fake_operators()
+from torch.testing._internal.torchbind_impls import init_torchbind_implementations
@skipIfTorchDynamo("torchbind not supported with dynamo yet")
class TestExportTorchbind(TestCase):
def setUp(self):
- load_torchbind_test_lib()
+ init_torchbind_implementations()
+
+ test = self
+ test.tq_push_counter = 0
+ test.tq_pop_counter = 0
+ test.tq_size_counter = 0
+ test.foo_add_tensor_counter = 0
@torch._library.register_fake_class("_TorchScriptTesting::_Foo")
class FakeFoo:
@@ -56,13 +42,9 @@ class TestExportTorchbind(TestCase):
return cls(x, y)
def add_tensor(self, z):
+ test.foo_add_tensor_counter += 1
return (self.x + self.y) * z
- test = self
- test.tq_push_counter = 0
- test.tq_pop_counter = 0
- test.tq_size_counter = 0
-
@torch._library.register_fake_class("_TorchScriptTesting::_TensorQueue")
class FakeTensorQueue:
def __init__(self, q):
@@ -277,6 +259,10 @@ def forward(self, x, cc):
add = torch.ops.aten.add.Tensor(x, call_torchbind); x = call_torchbind = None
return (add,)""",
)
+ # aot_export_function runs the program twice
+ # in run_functionalized_fw_and_collect_metadata and create_aot_dispatcher_function
+ # We also have a re-tracing test, which doubles the count.
+ self.assertEqual(self.foo_add_tensor_counter, 4)
@parametrize("pre_dispatch", [True, False])
def test_input_as_custom_op_argument(self, pre_dispatch):
@@ -288,9 +274,31 @@ def forward(self, x, cc):
return x + torch.ops._TorchScriptTesting.takes_foo(cc, x)
cc = torch.classes._TorchScriptTesting._Foo(10, 20)
+
+ del torch.ops._TorchScriptTesting.takes_foo.default.py_kernels[
+ torch._C.DispatchKey.Meta
+ ]
+ torch.ops._TorchScriptTesting.takes_foo.default._dispatch_cache.clear()
+ # Even though a C++ implementation for takes_foo.default is registered,
+ # we still need the python implementation for takes_foo.default to trace with FakeFoo.
+ with self.assertRaisesRegex(RuntimeError, "no python implementation is found"):
+ self._test_export_same_as_eager(
+ MyModule(),
+ (torch.ones(2, 3), cc),
+ strict=False,
+ pre_dispatch=pre_dispatch,
+ )
+
+ torch.ops._TorchScriptTesting.takes_foo.default.py_impl(
+ torch._C.DispatchKey.Meta
+ )(lambda cc, x: cc.add_tensor(x))
ep = self._test_export_same_as_eager(
- MyModule(), (torch.ones(2, 3), cc), strict=False, pre_dispatch=pre_dispatch
+ MyModule(),
+ (torch.ones(2, 3), cc),
+ strict=False,
+ pre_dispatch=pre_dispatch,
)
+
self.assertExpectedInline(
ep.module().code.strip(),
"""\
@@ -805,7 +813,7 @@ def forward(self, arg0_1, arg1_1, arg2_1):
@skipIfTorchDynamo("torchbind not supported with dynamo yet")
class TestRegisterFakeClass(TestCase):
def setUp(self):
- load_torchbind_test_lib()
+ init_torchbind_implementations()
def tearDown(self):
torch._library.fake_class_registry.global_fake_class_registry.clear()
diff --git a/test/export/test_unflatten.py b/test/export/test_unflatten.py
index c9c60d8cc8..d7d6f4bf21 100644
--- a/test/export/test_unflatten.py
+++ b/test/export/test_unflatten.py
@@ -40,6 +40,8 @@ from torch.testing._internal.common_utils import (
skipIfTorchDynamo,
TestCase,
)
+
+from torch.testing._internal.torchbind_impls import init_torchbind_implementations
from torch.utils._pytree import (
LeafSpec,
tree_flatten,
@@ -561,18 +563,21 @@ class TestUnflatten(TestCase):
@skipIfTorchDynamo("custom objects not supported in dynamo yet")
def test_unflatten_constant_obj(self):
- if IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- elif IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library(
- "//caffe2/test/cpp/jit:test_custom_class_registrations"
- )
- elif IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- torch.ops.load_library(str(lib_file_path))
+ init_torchbind_implementations()
+
+ @torch._library.register_fake_class("_TorchScriptTesting::_Foo")
+ class FakeFoo:
+ def __init__(self, x: int, y: int):
+ self.x = x
+ self.y = y
+
+ @classmethod
+ def from_real(cls, foo):
+ (x, y), _ = foo.__getstate__()
+ return cls(x, y)
+
+ def add_tensor(self, z):
+ return (self.x + self.y) * z
class SubMod(torch.nn.Module):
def __init__(self):
diff --git a/torch/_export/non_strict_utils.py b/torch/_export/non_strict_utils.py
index f102d1bfb0..cd20618e4b 100644
--- a/torch/_export/non_strict_utils.py
+++ b/torch/_export/non_strict_utils.py
@@ -1,8 +1,10 @@
+import contextlib
import inspect
from collections import defaultdict
from typing import Any, Callable, Dict, List, Tuple, Union
import torch
+import torch.utils._pytree as pytree
from torch._dynamo.source import (
AttrSource,
GetItemSource,
@@ -12,7 +14,9 @@ from torch._dynamo.source import (
)
from torch._dynamo.variables.builder import TrackedFake
from torch._export.passes.add_runtime_assertions_for_constraints_pass import InputDim
+from torch._export.passes.lift_constants_pass import ConstantAttrMap
from torch._guards import Source
+from torch._library.fake_class_registry import FakeScriptObject
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch.export import Constraint
from torch.export.dynamic_shapes import _Dim
@@ -67,6 +71,7 @@ def fakify(
source = key_path_to_source(kp)
if _is_constant_argument(t) or isinstance(t, torch.ScriptObject):
return t
+
if not isinstance(t, torch.Tensor):
raise ValueError(f"Unsupported input type {type(t)}")
n_dims = len(t.shape)
@@ -319,3 +324,115 @@ def make_constraints(
range_constraints[symbol] = shape_env.var_to_range[symbol]
return range_constraints
+
+
+def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap:
+ """Search the module hierarchy, gathering up all tensor and ScriptObject constants.
+
+ Returns a dictionary mapping hash(value) to the name of the constant. We
+ have to abuse `hash` here unfortunately, see: [ScriptObject hash].
+ """
+ constants = ConstantAttrMap()
+ buffers_parameters = set(m.buffers())
+ buffers_parameters.update(m.parameters())
+
+ def inner(m: torch.nn.Module, prefix_atoms: List[str], constants):
+ for k, v in m.__dict__.items():
+ if isinstance(
+ v,
+ (
+ torch.Tensor,
+ torch.ScriptObject,
+ FakeScriptObject,
+ ),
+ ):
+ if v in buffers_parameters:
+ # filter out buffers and parameters, leaving only constants
+ continue
+
+ fqn = ".".join(prefix_atoms + [k])
+ if v in constants:
+ raise ValueError(
+ f"Duplicate reference to constant attribute found: '{constants[v]}' and '{fqn}'."
+ )
+
+ constants[v] = fqn
+ for k, v in m.named_children():
+ inner(v, prefix_atoms + [k], constants)
+
+ inner(m, [], constants)
+ return constants
+
+
+@contextlib.contextmanager
+def _fakify_script_objects(
+ mod: torch.nn.Module,
+ args: Tuple[Any],
+ kwargs: Dict[Any, Any],
+ fake_mode: torch._subclasses.fake_tensor.FakeTensorMode,
+):
+ # This context manager is used to fakify script objects into FakeScriptObject.
+ # Inputs:
+    #   mod: the module to be exported; the script object attrs of mod (and its recursive submodules) haven't been fakified yet.
+    #   args, kwargs: the args and kwargs inputs for mod; script object inputs haven't been fakified yet.
+    #   fake_mode: the fake mode to be used for fakifying script objects. It's the same mode that fakifies the input tensors.
+ #
+ # Returns:
+ # mod: the patched module, its (and its recursive submodules) script object attrs have been fakified.
+ # fake_args, fake_kwargs: new fakified args and kwargs.
+ # Script object inputs have been fakified. Don't touch the tensors.
+ # fake_constant_attrs: a new map from FakeScriptObject to the fqn of the original script object.
+    #   fake_to_real: a mapping between FakeScriptObject and the original script object, used to undo the patching.
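+    #
+    # Illustrative usage (a sketch of the intended call pattern; it mirrors the
+    # call site in torch/export/_trace.py in this same patch):
+    #
+    #     with _fakify_script_objects(mod, fake_args, fake_kwargs, fake_mode) as (
+    #         patched_mod,
+    #         new_fake_args,
+    #         new_fake_kwargs,
+    #         new_fake_constant_attrs,
+    #         map_fake_to_real,
+    #     ):
+    #         ...  # trace patched_mod with the fakified inputs
+    #
+    # On exit, the original script object attributes are restored on mod.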
+
+ constant_attrs: ConstantAttrMap = _gather_constant_attrs(mod)
+ assert not any(
+ isinstance(obj, FakeScriptObject) for obj in constant_attrs.values()
+ ), "Mod shouldn't contain any FakeScriptObject."
+ assert not pytree.tree_any(
+ lambda obj: isinstance(obj, FakeScriptObject), (args, kwargs)
+ ), "args and kwargs shouldn't contain any FakeScriptObject."
+
+ patched_attr = {}
+ fake_constant_attrs = ConstantAttrMap()
+ fake_to_real = {}
+
+ def _fakify_obj(obj):
+ fake_obj = torch._library.fake_class_registry.to_fake_obj(fake_mode, obj)
+ fake_to_real[fake_obj] = obj
+ return fake_obj
+
+ def _leaf_mod_and_attr(
+ mod: torch.nn.Module, attr_fqn: str
+ ) -> Tuple[torch.nn.Module, str]:
+ *prefix_attr, last_attr = attr_fqn.split(".")
+ cur_mod = mod
+ for attr in prefix_attr:
+ cur_mod = getattr(cur_mod, attr)
+ return cur_mod, last_attr
+
+ try:
+ for obj, fqn in constant_attrs.items():
+ if isinstance(obj, torch.ScriptObject):
+ cur_mod, attr = _leaf_mod_and_attr(mod, fqn)
+ assert obj is getattr(cur_mod, attr)
+ fake_script_obj = _fakify_obj(obj)
+ setattr(cur_mod, attr, fake_script_obj)
+ fake_constant_attrs[fake_script_obj] = fqn
+ patched_attr[fqn] = obj
+ else:
+ fake_constant_attrs[obj] = fqn
+
+ fake_args, fake_kwargs = pytree.tree_map_only(
+ torch.ScriptObject, _fakify_obj, (args, kwargs)
+ )
+ assert not any(
+ isinstance(obj, torch.ScriptObject) for obj in fake_constant_attrs.values()
+ ), "Patched mod shouldn't contain any torch.ScriptObject."
+ assert not pytree.tree_any(
+ lambda obj: isinstance(obj, torch.ScriptObject), (fake_args, fake_kwargs)
+ ), "Fakfied args and kwargs shouldn't contain any torch.ScriptObject."
+ yield (mod, fake_args, fake_kwargs, fake_constant_attrs, fake_to_real)
+ finally:
+ for fqn, orig_obj in patched_attr.items():
+ cur_mod, attr = _leaf_mod_and_attr(mod, fqn)
+ setattr(cur_mod, attr, orig_obj)
diff --git a/torch/_export/passes/lift_constants_pass.py b/torch/_export/passes/lift_constants_pass.py
index fc13403a3f..e4bda7a194 100644
--- a/torch/_export/passes/lift_constants_pass.py
+++ b/torch/_export/passes/lift_constants_pass.py
@@ -4,6 +4,8 @@ from typing import Any, Dict, Union
import torch
from torch._export.verifier import SpecViolationError
from torch._guards import detect_fake_mode
+
+from torch._library.fake_class_registry import FakeScriptObject
from torch.export.exported_program import (
ArgumentSpec,
CustomObjArgument,
@@ -15,33 +17,35 @@ from torch.export.exported_program import (
class ConstantAttrMap(collections.abc.MutableMapping):
- """A mapping class that understands how to use module constants (tensors and
- ScriptObjects) as keys. We store tensors normally, but ScriptObjects are
- stored by hash, because different torch.ScriptObjects can point to the same
- underlying value (but we guarantee that they will `hash()` to the same value
+ """A mapping class that understands how to use module constants (tensors,
+ ScriptObjects, FakeScriptObjects) as keys. We store tensors and FakeScriptObjects normally,
+ but ScriptObjects are stored by hash, because different torch.ScriptObjects can point to
+ the same underlying value (but we guarantee that they will `hash()` to the same value
if that's the case).
"""
def __init__(self):
# Underlying dict that we use to implement this mapping.
- self._constant_attrs: Dict[Union[int, torch.Tensor], Any] = {}
+ self._constant_attrs: Dict[Union[int, torch.Tensor, FakeScriptObject], Any] = {}
# Map from the hash(ScriptObject) to the ScriptObject itself. Used for
# APIs like `__iter__` that should look like they're returning the
# original ScriptObjects.
self._script_object_map: Dict[int, torch.ScriptObject] = {}
- def __getitem__(self, key: Union[torch.Tensor, torch.ScriptObject]) -> Any:
+ def __getitem__(
+ self, key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]
+ ) -> Any:
real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
- assert isinstance(real_key, (int, torch.Tensor))
+ assert isinstance(real_key, (int, torch.Tensor, FakeScriptObject))
return self._constant_attrs[real_key]
def __setitem__(
- self, key: Union[torch.Tensor, torch.ScriptObject], value: Any
+ self, key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject], value: Any
) -> None:
if isinstance(key, torch.ScriptObject):
self._constant_attrs[hash(key)] = value
self._script_object_map[hash(key)] = key
- elif isinstance(key, torch.Tensor):
+ elif isinstance(key, (torch.Tensor, FakeScriptObject)):
self._constant_attrs[key] = value
else:
raise TypeError(
@@ -83,7 +87,7 @@ def lift_constants_pass(
gm: torch.fx.GraphModule,
graph_signature: ExportGraphSignature,
constant_attrs: ConstantAttrMap,
-) -> Dict[str, Union[torch.Tensor, torch._C.ScriptObject]]:
+) -> Dict[str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]]:
"""
Takes a graph module, graph signature, and modifies them implace to lift any
constants (tensors or custom classes) as inputs to the graph. Returns a
@@ -101,7 +105,9 @@ def lift_constants_pass(
Returns:
A dictionary of fqn => constant value.
"""
- all_constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {}
+ all_constants: Dict[
+ str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]
+ ] = {}
inputs = graph_signature.input_specs
num_custom_obj = sum(
@@ -135,7 +141,7 @@ def lift_constants_pass(
gm.graph.erase_node(node)
continue
- # For ScriptObject and Tensor constants:
+ # For ScriptObject, Tensor and FakeScriptObject constants:
# First check if the constant was an attribute on some module by
# consulting `constant_attrs` map. If it is, use the fqn that keeps
# its location consistent with the eager module.
@@ -144,7 +150,7 @@ def lift_constants_pass(
# constant (e.g. x + torch.tensor(0)), and thus did not have a
# specific location in the eager module. In that case, just generate
# some name and attach it to the module in which it was used.
- if isinstance(constant_val, torch.ScriptObject):
+ if isinstance(constant_val, (torch.ScriptObject, FakeScriptObject)):
constant_kind = InputKind.CUSTOM_OBJ
constant_fqn = constant_attrs.get(constant_val)
if constant_fqn is not None:
@@ -203,6 +209,14 @@ def lift_constants_pass(
input_spec_arg = CustomObjArgument(
name=const_placeholder_node.name, class_fqn=class_fqn
)
+ elif isinstance(constant_val, FakeScriptObject):
+ class_fqn = constant_val.script_class_name
+ const_placeholder_node.meta["val"] = CustomObjArgument(
+ constant_fqn, class_fqn
+ )
+ input_spec_arg = CustomObjArgument(
+ name=const_placeholder_node.name, class_fqn=class_fqn
+ )
else:
raise SpecViolationError(
f"tried to lift unsupported type {type(constant_val)} from node {node.format_node()}"
@@ -229,24 +243,35 @@ def lift_constants_pass(
def rewrite_script_object_meta(
gm: torch.fx.GraphModule,
-) -> Dict[str, Union[torch.Tensor, torch.ScriptObject]]:
- """When tracing, we produce a graph with an actual ScriptObject in the
- meta["val"]. Eventually we want to change this behavior, when FakeMode infra
- for ScriptObjects lands.
+) -> Dict[str, Union[torch.Tensor, FakeScriptObject],]:
+ """When tracing, we produce a graph with FakeScriptObject in the
+ meta["val"].
     For now, we rewrite meta["val"] to be a placeholder CustomObjArgument
"""
- constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {}
+ constants: Dict[
+ str,
+ Union[
+ torch.Tensor,
+ FakeScriptObject,
+ ],
+ ] = {}
for node in gm.graph.nodes:
- if "val" not in node.meta or not isinstance(
- node.meta["val"], torch.ScriptObject
- ):
+ if "val" not in node.meta:
continue
- old_meta = node.meta["val"]
- class_fqn = old_meta._type().qualified_name() # type: ignore[attr-defined]
- new_meta = CustomObjArgument(node.name, class_fqn)
- constants[node.name] = old_meta
- node.meta["val"] = new_meta
+ assert not isinstance(
+ node.meta["val"], torch.ScriptObject
+ ), "ScriptObject should already be fakified in to FakeScriptObject."
+
+ if isinstance(
+ node.meta["val"],
+ FakeScriptObject,
+ ):
+ old_meta = node.meta["val"]
+ class_fqn = old_meta.script_class_name # type: ignore[attr-defined]
+ new_meta = CustomObjArgument(node.name, class_fqn)
+ constants[node.name] = old_meta
+ node.meta["val"] = new_meta
return constants
diff --git a/torch/_library/fake_class_registry.py b/torch/_library/fake_class_registry.py
index 7eff756284..47b157b884 100644
--- a/torch/_library/fake_class_registry.py
+++ b/torch/_library/fake_class_registry.py
@@ -10,9 +10,12 @@ log = logging.getLogger(__name__)
class FakeScriptObject:
- def __init__(self, wrapped_obj):
+ def __init__(self, wrapped_obj: Any, script_class_name: str):
self.wrapped_obj = wrapped_obj
+        # The fully qualified name of the class of the original script object
+ self.script_class_name = script_class_name
+
class HasStaticMethodFromReal(Protocol):
@classmethod
@@ -71,12 +74,13 @@ def to_fake_obj(fake_mode, x: torch.ScriptObject) -> FakeScriptObject:
return wrapped
- fake_x_wrapped = FakeScriptObject(fake_x)
+ fake_x_wrapped = FakeScriptObject(fake_x, x._type().qualified_name()) # type: ignore[attr-defined]
for name in x._method_names(): # type: ignore[attr-defined]
attr = getattr(fake_x, name, None)
if attr:
if not callable(attr):
raise RuntimeError(f"Expect {name} to be a callable but got {attr}.")
+
setattr(
fake_x_wrapped,
name,
diff --git a/torch/_ops.py b/torch/_ops.py
index 6e2119f16a..f5d7313591 100644
--- a/torch/_ops.py
+++ b/torch/_ops.py
@@ -807,6 +807,7 @@ class TorchBindOpOverload(OpOverload):
DispatchKey.AutogradCPU,
DispatchKey.AutogradCUDA,
DispatchKey.ADInplaceOrView,
+ DispatchKey.BackendSelect,
DispatchKey.PythonTLSSnapshot,
DispatchKey.PythonDispatcher,
]
@@ -889,8 +890,13 @@ class TorchBindOpOverload(OpOverload):
)
raise RuntimeError(
- f"Cannot handle FakeScriptObject with python dispatcher with dispatch key {handler}."
- f"Please implement it by annotating a python callable with py_impl({handler})."
+ f"Torchbind op {self} received a FakeScriptObject input when dispatching {handler}."
+ f" but no python implementation is found."
+ f" Please file an issue on this when you encounter this error."
+ f" This error can happen when you export or compile the model."
+ f" It can still happpen even if a C++ implementation for {dispatch_key}. "
+ f" has been registered. That's because FakeScriptObject purely lives in python and cannot work "
+ f" with a C++ implementation."
)
assert isinstance(handler, Callable) # type: ignore[arg-type]
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 979aa53593..906e34e31a 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -15,6 +15,8 @@ import torch.fx
import torch.utils._pytree as pytree
from torch._dynamo.exc import UserError, UserErrorType
from torch._export.non_strict_utils import (
+ _fakify_script_objects,
+ _gather_constant_attrs,
make_constraints,
make_fake_inputs,
make_fake_params_buffers,
@@ -34,6 +36,8 @@ from torch._export.verifier import SpecViolationError
from torch._export.wrappers import _wrap_submodules
from torch._functorch.aot_autograd import aot_export_module
from torch._guards import detect_fake_mode
+
+from torch._library.fake_class_registry import FakeScriptObject
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._utils_internal import log_export_usage
from torch.export.exported_program import OutputKind
@@ -69,7 +73,6 @@ from .graph_signature import (
TokenArgument,
)
-
log = logging.getLogger(__name__)
@@ -453,37 +456,6 @@ def _export_to_torch_ir(
return gm_torch_level
-def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap:
- """Search the module hierarchy, gathering up all tensor and ScriptObject constants.
-
- Returns a dictionary mapping hash(value) to the name of the constant. We
- have to abuse `hash` here unfortunately, see: [ScriptObject hash].
- """
- constants = ConstantAttrMap()
- buffers_parameters = set(m.buffers())
- buffers_parameters.update(m.parameters())
-
- def inner(m: torch.nn.Module, prefix_atoms: List[str], constants):
- for k, v in m.__dict__.items():
- if isinstance(v, (torch.Tensor, torch.ScriptObject)):
- if v in buffers_parameters:
- # filter out buffers and parameters, leaving only constants
- continue
-
- fqn = ".".join(prefix_atoms + [k])
- if v in constants:
- raise ValueError(
- f"Duplicate reference to constant attribute found: '{constants[v]}' and '{fqn}'."
- )
-
- constants[v] = fqn
- for k, v in m.named_children():
- inner(v, prefix_atoms + [k], constants)
-
- inner(m, [], constants)
- return constants
-
-
def _export_non_strict(
mod: torch.nn.Module,
fake_args,
@@ -494,6 +466,9 @@ def _export_non_strict(
transform=lambda x: x, # TODO(zhxchen17) Revisit if this is needed later.
pre_dispatch=False,
):
+ assert not any(
+ isinstance(obj, torch.ScriptObject) for obj in constant_attrs
+ ), "We expect all script objects have been replaced by FakeScriptObjects."
# [NOTE] If the user is exporting under training mode, we want to detect if there is any
# state change in the autograd global state and error. If the user is exporting under inference
# mode, we don't care. At predispatch level, we don't care about the state change.
@@ -585,10 +560,8 @@ def _export_non_strict(
return TensorArgument(name=node.name)
elif isinstance(val, torch.SymInt):
return SymIntArgument(name=node.name)
- elif isinstance(val, torch.ScriptObject):
- return CustomObjArgument(
- name=node.name, class_fqn=val._type().qualified_name() # type: ignore[attr-defined]
- )
+ elif isinstance(val, FakeScriptObject):
+ return CustomObjArgument(name=node.name, class_fqn=val.script_class_name)
elif isinstance(val, (int, bool, str, float, type(None))):
return ConstantArgument(name=node.name, value=val)
else:
@@ -626,7 +599,14 @@ def _export_non_strict(
)
constants = rewrite_script_object_meta(gm)
- constants.update(lift_constants_pass(gm, export_graph_signature, constant_attrs))
+ attr_constants = lift_constants_pass(gm, export_graph_signature, constant_attrs)
+ assert not any(
+ isinstance(obj, torch.ScriptObject) for obj in attr_constants.values()
+ ), "We expect all script objects have been replaced by FakeScriptObjects."
+ constants.update(attr_constants) # type: ignore[arg-type]
+ assert not any(
+ isinstance(obj, torch.ScriptObject) for obj in constants.values()
+ ), "We expect all script objects have been replaced by FakeScriptObjects."
# prettify names for placeholder nodes
placeholder_naming_pass(
@@ -643,7 +623,13 @@ def _export_non_strict(
class _ExportedProgramNonStrict:
gm: torch.fx.GraphModule
sig: ExportGraphSignature
- constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]]
+ constants: Dict[
+ str,
+ Union[
+ torch.Tensor,
+ FakeScriptObject,
+ ],
+ ]
return _ExportedProgramNonStrict(
gm,
@@ -941,8 +927,6 @@ def _export(
if isinstance(dynamic_shapes, torch.export.ShapesCollection):
dynamic_shapes = dynamic_shapes.dynamic_shapes(mod, args, kwargs)
- constant_attrs = _gather_constant_attrs(mod)
-
flat_args, orig_in_spec = pytree.tree_flatten((args, kwargs))
original_state_dict = mod.state_dict(keep_vars=True)
forward_arg_names = _get_forward_arg_names(mod, args, kwargs)
@@ -1029,16 +1013,32 @@ def _export(
fake_params_buffers = make_fake_params_buffers(
fake_mode, _get_params_buffers(mod)
)
+
with fake_mode:
- ep_non_strict = _export_non_strict(
- mod,
- fake_args,
- fake_kwargs,
- fake_params_buffers,
- constant_attrs,
- pre_dispatch=pre_dispatch,
- transform=_tuplify_outputs,
- )
+ with _fakify_script_objects(mod, fake_args, fake_kwargs, fake_mode) as (
+ patched_mod,
+ new_fake_args,
+ new_fake_kwargs,
+ new_fake_constant_attrs,
+ map_fake_to_real,
+ ):
+ ep_non_strict = _export_non_strict(
+ patched_mod,
+ new_fake_args,
+ new_fake_kwargs,
+ fake_params_buffers,
+ new_fake_constant_attrs,
+ pre_dispatch=pre_dispatch,
+ transform=_tuplify_outputs,
+ )
+ # ep_non_strict.constants contains only fake script objects, we need to map them back
+ ep_non_strict.constants = {
+ fqn: map_fake_to_real[obj]
+ if isinstance(obj, FakeScriptObject)
+ else obj
+ for fqn, obj in ep_non_strict.constants.items()
+ }
+
ep_non_strict.gm.meta["inline_constraints"] = {
k: v
for k, v in fake_mode.shape_env.var_to_range.items()
@@ -1217,6 +1217,7 @@ def _export(
_normalize_nn_module_stack(gm_torch_level, type(mod))
# NOTE: graph module expects only positional args
+ constant_attrs = _gather_constant_attrs(mod)
ep_non_strict = _export_non_strict(
gm_torch_level,
_convert_to_positional_args(orig_arg_names, fake_args, fake_kwargs),
diff --git a/torch/export/unflatten.py b/torch/export/unflatten.py
index ee3376204f..9c9f57728e 100644
--- a/torch/export/unflatten.py
+++ b/torch/export/unflatten.py
@@ -9,6 +9,7 @@ from typing import Any, cast, Dict, List, Optional, Union
import torch
import torch.fx._pytree as fx_pytree
import torch.utils._pytree as pytree
+from torch._library.fake_class_registry import FakeScriptObject
from torch.export._tree_utils import reorder_kwargs
from torch.export.exported_program import (
ConstantArgument,
@@ -54,7 +55,16 @@ def _assign_attr(
assert isinstance(from_obj, torch.Tensor)
to_module.register_buffer(field, from_obj, persistent=persistent)
elif attr_kind == _AttrKind.CONSTANT:
- assert isinstance(from_obj, (torch.Tensor, torch.ScriptObject))
+ assert not isinstance(
+ from_obj, FakeScriptObject
+ ), "FakeScriptObject should only exist during tracing."
+ assert isinstance(
+ from_obj,
+ (
+ torch.Tensor,
+ torch.ScriptObject,
+ ),
+ )
setattr(to_module, field, from_obj)
diff --git a/torch/fx/_symbolic_trace.py b/torch/fx/_symbolic_trace.py
index b3524dbde4..24b1428b83 100644
--- a/torch/fx/_symbolic_trace.py
+++ b/torch/fx/_symbolic_trace.py
@@ -24,6 +24,7 @@ from typing import (
import torch
import torch.utils._pytree as pytree
from torch._C import ScriptObject # type: ignore[attr-defined]
+from torch._library.fake_class_registry import FakeScriptObject
from ._compatibility import compatibility
from .graph import _PyTreeCodeGen, _PyTreeInfo, Graph
@@ -366,7 +367,7 @@ class Tracer(TracerBase):
# a get_attr to retrieve that tensor. Otherwise, we'll store away the
# tensor value into a special attribute on the Module s.t. we can
# retrieve it with a get_attr.
- if isinstance(a, (torch.Tensor, ScriptObject)):
+ if isinstance(a, (torch.Tensor, ScriptObject, FakeScriptObject)):
qualname: Optional[str] = self.tensor_attrs.get(a)
# Tensor was not found in the Module hierarchy, stow it away in a
@@ -729,11 +730,17 @@ class Tracer(TracerBase):
# is some other attribute on the model. Construct a dict mapping Tensor
# values to the qualified name here for efficiency. This is used downstream
# in create_arg
- self.tensor_attrs: Dict[Union[torch.Tensor, ScriptObject], str] = {}
+ self.tensor_attrs: Dict[
+ Union[
+ torch.Tensor,
+ ScriptObject,
+ FakeScriptObject
+ ], str
+ ] = {}
def collect_tensor_attrs(m: torch.nn.Module, prefix_atoms: List[str]):
for k, v in m.__dict__.items():
- if isinstance(v, (torch.Tensor, ScriptObject)):
+ if isinstance(v, (torch.Tensor, ScriptObject, FakeScriptObject)):
self.tensor_attrs[v] = ".".join(prefix_atoms + [k])
for k, v in m.named_children():
collect_tensor_attrs(v, prefix_atoms + [k])
diff --git a/torch/testing/_internal/torchbind_impls.py b/torch/testing/_internal/torchbind_impls.py
index f66388d2ed..7babba0530 100644
--- a/torch/testing/_internal/torchbind_impls.py
+++ b/torch/testing/_internal/torchbind_impls.py
@@ -1,32 +1,120 @@
+import contextlib
+
import torch
-def register_if_not(qualname):
- entry = torch._library.simple_registry.singleton.find(qualname)
- if entry.abstract_impl.kernel is None:
- return torch.library.impl_abstract(qualname)
- else:
+_TORCHBIND_IMPLS_INITIALIZED = False
+
- def dummy_wrapper(fn):
- return fn
+def init_torchbind_implementations():
+ global _TORCHBIND_IMPLS_INITIALIZED
+ if _TORCHBIND_IMPLS_INITIALIZED:
+ return
- return dummy_wrapper
+ load_torchbind_test_lib()
+ register_fake_operators()
+ register_fake_classes()
+ _TORCHBIND_IMPLS_INITIALIZED = True
# put these under a function because the corresponding library might not be loaded yet.
def register_fake_operators():
- @register_if_not("_TorchScriptTesting::takes_foo_python_meta")
+ @torch.library.register_fake("_TorchScriptTesting::takes_foo_python_meta")
def fake_takes_foo(foo, z):
return foo.add_tensor(z)
- @register_if_not("_TorchScriptTesting::queue_pop")
+ @torch.library.register_fake("_TorchScriptTesting::queue_pop")
def fake_queue_pop(tq):
return tq.pop()
- @register_if_not("_TorchScriptTesting::queue_push")
+ @torch.library.register_fake("_TorchScriptTesting::queue_push")
def fake_queue_push(tq, x):
return tq.push(x)
- @register_if_not("_TorchScriptTesting::queue_size")
+ @torch.library.register_fake("_TorchScriptTesting::queue_size")
def fake_queue_size(tq):
return tq.size()
+
+ def meta_takes_foo_list_return(foo, x):
+ a = foo.add_tensor(x)
+ b = foo.add_tensor(a)
+ c = foo.add_tensor(b)
+ return [a, b, c]
+
+ def meta_takes_foo_tuple_return(foo, x):
+ a = foo.add_tensor(x)
+ b = foo.add_tensor(a)
+ return (a, b)
+
+ torch.ops._TorchScriptTesting.takes_foo_list_return.default.py_impl(
+ torch._C.DispatchKey.Meta
+ )(meta_takes_foo_list_return)
+
+ torch.ops._TorchScriptTesting.takes_foo_tuple_return.default.py_impl(
+ torch._C.DispatchKey.Meta
+ )(meta_takes_foo_tuple_return)
+
+ torch.ops._TorchScriptTesting.takes_foo.default.py_impl(torch._C.DispatchKey.Meta)(
+ lambda cc, x: cc.add_tensor(x)
+ )
+
+
+def register_fake_classes():
+ @torch._library.register_fake_class("_TorchScriptTesting::_Foo")
+ class FakeFoo:
+ def __init__(self, x: int, y: int):
+ self.x = x
+ self.y = y
+
+ @classmethod
+ def from_real(cls, foo):
+ (x, y), _ = foo.__getstate__()
+ return cls(x, y)
+
+ def add_tensor(self, z):
+ return (self.x + self.y) * z
+
+ @torch._library.register_fake_class("_TorchScriptTesting::_ContainsTensor")
+ class FakeContainsTensor:
+ def __init__(self, x: torch.Tensor):
+ self.x = x
+
+ @classmethod
+ def from_real(cls, foo):
+ ctx = torch.library.get_ctx()
+ return cls(ctx.to_fake_tensor(foo.get()))
+
+ def get(self):
+ return self.x
+
+
+def load_torchbind_test_lib():
+ import unittest
+
+ from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
+ find_library_location,
+ IS_FBCODE,
+ IS_MACOS,
+ IS_SANDCASTLE,
+ IS_WINDOWS,
+ )
+
+ if IS_SANDCASTLE or IS_FBCODE:
+ torch.ops.load_library("//caffe2/test/cpp/jit:test_custom_class_registrations")
+ elif IS_MACOS:
+ raise unittest.SkipTest("non-portable load_library call used in test")
+ else:
+ lib_file_path = find_library_location("libtorchbind_test.so")
+ if IS_WINDOWS:
+ lib_file_path = find_library_location("torchbind_test.dll")
+ torch.ops.load_library(str(lib_file_path))
+
+
+@contextlib.contextmanager
+def _register_py_impl_temporarily(op_overload, key, fn):
+ try:
+ op_overload.py_impl(key)(fn)
+ yield
+ finally:
+ del op_overload.py_kernels[key]
+ op_overload._dispatch_cache.clear()
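+
+
+# Illustrative usage (a sketch only; the op and kernel below mirror the Meta
+# registrations in register_fake_operators above and are not registered here):
+#
+#     with _register_py_impl_temporarily(
+#         torch.ops._TorchScriptTesting.takes_foo.default,
+#         torch._C.DispatchKey.Meta,
+#         lambda cc, x: cc.add_tensor(x),
+#     ):
+#         ...  # the temporary python kernel is removed again on exit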
|
2.41.0
|
466335ae4cb049efd3f4c2b32b2115ba00694f3
|
Tue, 30 Apr 2024 16:19:39 +0000
|
[PATCH 0838/1000] Convert `ForeachFuncInfo` to `dataclass` (#125001)
|
- `ForeachFuncInfo` to `dataclass` for smaller diff from `OpInfo` - `skips` to `decorators` and `skip` to `xfail` Pull Request resolved: https://github.com/pytorch/pytorch/pull/125001 Approved by: https://github.com/janeyx99
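A minimal sketch of the conversion pattern, using simplified stand-in classes (these are not the actual `OpInfo`/`ForeachFuncInfo` definitions): constructor keyword arguments become dataclass fields with defaults, and derived attributes move into `__post_init__`.
```python
from dataclasses import dataclass


@dataclass
class OpInfoBase:
    # stand-in for OpInfo: a dataclass with a required name and defaulted flags
    name: str
    supports_out: bool = True


@dataclass
class ForeachLikeInfo(OpInfoBase):
    # stand-in for ForeachFuncInfo: extra fields only need defaults, so the
    # subclass no longer has to re-declare every base argument in an __init__
    supports_alpha_param: bool = False
    backward_requires_result: bool = False

    def __post_init__(self):
        # derived attributes that used to be set in __init__ move here
        self.name = f"_foreach_{self.name}"


info = ForeachLikeInfo(name="add", supports_alpha_param=True)
assert info.name == "_foreach_add" and info.supports_out
```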
|
diff --git a/test/test_foreach.py b/test/test_foreach.py
index 792d757a88..e9fc4ffc77 100644
--- a/test/test_foreach.py
+++ b/test/test_foreach.py
@@ -164,20 +164,22 @@ class TestForeach(TestCase):
wrapped_op, _, inplace_op, _ = self._get_funcs(op)
for sample in op.sample_zero_size_inputs(device, dtype):
- if op.supports_out:
+ if op.method_variant is not None:
wrapped_op(
(sample.input, *sample.args),
is_cuda=self.is_cuda,
expect_fastpath=True,
zero_size=True,
)
- with InplaceForeachVersionBumpCheck(self, sample.input):
- inplace_op(
- (sample.input, *sample.args),
- is_cuda=self.is_cuda,
- expect_fastpath=True,
- zero_size=True,
- )
+
+ if op.inplace_variant is not None:
+ with InplaceForeachVersionBumpCheck(self, sample.input):
+ inplace_op(
+ (sample.input, *sample.args),
+ is_cuda=self.is_cuda,
+ expect_fastpath=True,
+ zero_size=True,
+ )
@skipIfRocmVersionLessThan((6, 0))
@ops(
@@ -1225,12 +1227,16 @@ class TestForeach(TestCase):
"inplace", (False, True), name_fn=lambda x: "inplace" if x else "outplace"
)
def test_autodiff(self, device, dtype, op, inplace):
- if not (op.supports_autograd or op.supports_forward_ad):
- self.skipTest("neither reverse mode nor forward mode supported")
if (not inplace) and not op.supports_out:
self.skipTest("out-of-place not implemented")
if inplace and op.has_no_in_place:
self.skipTest("in-place not implemented")
+ if not (
+ op.supports_autograd
+ or op.supports_inplace_autograd
+ or op.supports_forward_ad
+ ):
+ self.skipTest("neither reverse mode nor forward mode supported")
# note(crcrpar): without this, some unary functions fail, unlike inplace and/or complex.
if (
diff --git a/torch/testing/_internal/common_device_type.py b/torch/testing/_internal/common_device_type.py
index 048ae83f72..d5285a6d0d 100644
--- a/torch/testing/_internal/common_device_type.py
+++ b/torch/testing/_internal/common_device_type.py
@@ -921,7 +921,7 @@ class ops(_TestParametrizer):
elif self.opinfo_dtypes == OpDTypes.unsupported:
dtypes = set(get_all_dtypes()).difference(op.supported_dtypes(device_cls.device_type))
elif self.opinfo_dtypes == OpDTypes.supported:
- dtypes = op.supported_dtypes(device_cls.device_type)
+ dtypes = set(op.supported_dtypes(device_cls.device_type))
elif self.opinfo_dtypes == OpDTypes.any_one:
# Tries to pick a dtype that supports both forward or backward
supported = op.supported_dtypes(device_cls.device_type)
@@ -936,7 +936,7 @@ class ops(_TestParametrizer):
dtypes = {}
elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one:
# Tries to pick a dtype that supports both CPU and CUDA
- supported = op.dtypes.intersection(op.dtypesIfCUDA)
+ supported = set(op.dtypes).intersection(op.dtypesIfCUDA)
if supported:
dtypes = {next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported)}
else:
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index ab73b5baf3..ff519abbec 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -9296,7 +9296,7 @@ class foreach_inputs_sample_func:
opinfo, ForeachRightmostArgType.TensorList, device, dtype, NUM_SIZE0_TENSORS,
**zero_size_foreach_inputs_kwargs)[0])
kwargs = self._sample_kwargs(
- opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype, zero_size=True)
+ opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype)
else:
args = []
kwargs = {}
@@ -9475,55 +9475,95 @@ class foreach_pointwise_sample_func(foreach_inputs_sample_func):
foreach_unary_op_db: List[OpInfo] = [
ForeachFuncInfo(
'exp',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
backward_requires_result=True,
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'acos',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'asin',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'atan',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'cos',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'cosh',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'log',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'log10',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'log2',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'tan',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
backward_requires_result=True,
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
decorators=(
# due to https://github.com/pytorch/pytorch/pull/102427 enabling jiterator for complex
DecorateInfo(
@@ -9540,9 +9580,13 @@ foreach_unary_op_db: List[OpInfo] = [
),
ForeachFuncInfo(
'tanh',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
backward_requires_result=True,
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
decorators=(
DecorateInfo(
toleranceOverride(
@@ -9556,129 +9600,186 @@ foreach_unary_op_db: List[OpInfo] = [
),
ForeachFuncInfo(
'sin',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'sinh',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'neg',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex(),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'sqrt',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
backward_requires_result=True,
),
ForeachFuncInfo(
'ceil',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'erf',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'erfc',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'expm1',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
backward_requires_result=True,
),
ForeachFuncInfo(
'floor',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'log1p',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'round',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'frac',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'reciprocal',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
backward_requires_result=True,
),
ForeachFuncInfo(
'sigmoid',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
backward_requires_result=True,
),
ForeachFuncInfo(
'trunc',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'abs',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
- skips=(
- DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta",
- "test_dispatch_symbolic_meta_inplace", dtypes=complex_types()),
- DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta",
- "test_dispatch_meta_inplace", dtypes=complex_types()),
- DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta",
- "test_meta_inplace", dtypes=complex_types()),
+ decorators=(
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types()),
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types()),
+ DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types()),
),
),
ForeachFuncInfo(
'zero',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
+ dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
supports_out=False,
),
ForeachFuncInfo(
'sign',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
),
ForeachFuncInfo(
'lgamma',
- foreach_inputs_sample_func(1, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
- skips=(
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta",
"test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool)),
DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta",
@@ -9692,11 +9793,14 @@ foreach_unary_op_db: List[OpInfo] = [
foreach_binary_op_db: List[OpInfo] = [
ForeachFuncInfo(
"add",
- foreach_inputs_sample_func(2, True, True, True),
+ sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
- skips=(
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
# These tests fail with aten._local_scalar_dense not being implemented.
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9711,11 +9815,14 @@ foreach_binary_op_db: List[OpInfo] = [
),
ForeachFuncInfo(
"sub",
- foreach_inputs_sample_func(2, True, True),
+ sample_inputs_func=foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
- skips=(
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9731,7 +9838,10 @@ foreach_binary_op_db: List[OpInfo] = [
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
- skips=(
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
# Samples have complex types and inplace only works if the dtype is complex.
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)),
@@ -9748,7 +9858,10 @@ foreach_binary_op_db: List[OpInfo] = [
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
- skips=(
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
# Samples have complex types and inplace only works if the dtype is complex.
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)),
@@ -9771,10 +9884,13 @@ foreach_binary_op_db: List[OpInfo] = [
),
ForeachFuncInfo(
"clamp_min",
- foreach_inputs_sample_func(2, True, True),
+ sample_inputs_func=foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- skips=(
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9787,10 +9903,13 @@ foreach_binary_op_db: List[OpInfo] = [
),
ForeachFuncInfo(
"clamp_max",
- foreach_inputs_sample_func(2, True, True),
+ sample_inputs_func=foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- skips=(
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9804,12 +9923,13 @@ foreach_binary_op_db: List[OpInfo] = [
# note(crcrpar): forward ad not implemented.
ForeachFuncInfo(
"minimum",
- foreach_inputs_sample_func(2, True, True),
+ sample_inputs_func=foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- supports_forward_ad=False,
+ supports_autograd=True,
supports_inplace_autograd=False,
- skips=(
+ supports_forward_ad=False,
+ decorators=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9823,12 +9943,13 @@ foreach_binary_op_db: List[OpInfo] = [
# note(crcrpar): forward ad not implemented.
ForeachFuncInfo(
"maximum",
- foreach_inputs_sample_func(2, True, True),
+ sample_inputs_func=foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
+ supports_autograd=True,
supports_forward_ad=False,
supports_inplace_autograd=False,
- skips=(
+ decorators=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9847,7 +9968,9 @@ foreach_binary_op_db: List[OpInfo] = [
supports_scalar_self_arg=True,
sample_inputs_func=foreach_inputs_sample_func(2, True, True),
supports_autograd=True,
- skips=(
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9857,27 +9980,30 @@ foreach_binary_op_db: List[OpInfo] = [
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"),
),
- supports_forward_ad=True,
backward_requires_result=True,
),
ForeachFuncInfo(
"copy",
- foreach_inputs_sample_func(2, False, False),
+ sample_inputs_func=foreach_inputs_sample_func(2, False, False),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=False,
supports_autograd=False,
+ supports_inplace_autograd=False,
)
]
foreach_pointwise_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"addcmul",
- foreach_pointwise_sample_func(4, True, True),
+ sample_inputs_func=foreach_pointwise_sample_func(4, True, True),
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
- skips=(
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"),
@@ -9898,7 +10024,10 @@ foreach_pointwise_op_db: List[ForeachFuncInfo] = [
sample_inputs_func=foreach_pointwise_sample_func(4, True, True),
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
- skips=(
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
# Samples have complex types and inplace only works if the dtype is complex.
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)),
@@ -9920,10 +10049,13 @@ foreach_pointwise_op_db: List[ForeachFuncInfo] = [
foreach_reduce_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"norm",
- foreach_norm_sample_func(1, False, False),
+ sample_inputs_func=foreach_norm_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
- skips=(
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
+ decorators=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9935,7 +10067,10 @@ foreach_reduce_op_db: List[ForeachFuncInfo] = [
foreach_other_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"lerp",
- foreach_lerp_sample_func(3, True, False),
+ sample_inputs_func=foreach_lerp_sample_func(3, True, False),
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_forward_ad=True,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
diff --git a/torch/testing/_internal/opinfo/core.py b/torch/testing/_internal/opinfo/core.py
index 87a57f7678..2237da1c19 100644
--- a/torch/testing/_internal/opinfo/core.py
+++ b/torch/testing/_internal/opinfo/core.py
@@ -2705,33 +2705,22 @@ def get_foreach_method_names(name):
return op, inplace_op, ref, ref_inplace
+@dataclass
class ForeachFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for foreach functions"""
- def __init__(
- self,
- name,
- sample_inputs_func,
- *,
- dtypes=floating_and_complex_types(),
- dtypesIfCUDA=None,
- dtypesIfROCM=None,
- supports_alpha_param=False,
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_scalar_self_arg=False,
- supports_forward_ad=True,
- backward_requires_result=False,
- supports_out=True,
- **kwargs,
- ):
+ supports_alpha_param: bool = False
+ supports_scalar_self_arg: bool = False
+ backward_requires_result: bool = False
+
+ def __post_init__(self):
(
foreach_method,
foreach_method_inplace,
torch_ref_method,
torch_ref_inplace,
- ) = get_foreach_method_names(name)
- if not supports_out:
+ ) = get_foreach_method_names(self.name)
+ if not self.supports_out:
# note(crcrpar): `foreach_method` for `"zero"` is `None` but `None` would call
# `_getattr_qual` in `OpInfo.__post_init__` which should fail since `_foreach_zero`
# is not defined at the moment. Thus to skip the qualification, set a similar torch
@@ -2740,29 +2729,16 @@ class ForeachFuncInfo(OpInfo):
assert torch_ref_method is None
foreach_method = foreach_method_inplace
torch_ref_method = torch_ref_inplace
- super().__init__(
- name="_foreach_" + name,
- op=foreach_method,
- ref=torch_ref_method,
- method_variant=foreach_method,
- inplace_variant=foreach_method_inplace,
- dtypes=dtypes,
- dtypesIfCUDA=dtypesIfCUDA,
- dtypesIfROCM=dtypesIfROCM,
- sample_inputs_func=sample_inputs_func,
- supports_autograd=supports_autograd,
- supports_forward_ad=supports_forward_ad,
- supports_out=supports_out,
- **kwargs,
- )
- self.supports_scalar_self_arg = supports_scalar_self_arg
+ self.op = foreach_method
+ self.method_variant = foreach_method
+ self.ref = torch_ref_method
+ self.inplace_variant = foreach_method_inplace
self.ref_inplace = torch_ref_inplace
- self.supports_alpha_param = supports_alpha_param
- self.backward_requires_result = backward_requires_result
self.has_no_in_place = self.inplace_variant is None
- self.supports_inplace_autograd = supports_inplace_autograd
+ name = self.name
+ self.name = f"_foreach_{name}"
if name == "norm":
self.ref = torch.linalg.vector_norm
elif name == "minimum":
|
2.41.0
|
f4c6d9b4942da5503c09ec4cd97fc264abac329
|
Tue, 30 Apr 2024 16:30:19 +0000
|
[PATCH 0839/1000] Upgrade nightly wheels to rocm6.1 (#124811)
|
Follow-up to https://github.com/pytorch/builder/pull/1789 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124811 Approved by: https://github.com/malfet
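The bulk of this patch is regenerated workflow YAML; the hand-maintained source of truth is the `ROCM_ARCHES` list in `.github/scripts/generate_binary_build_matrix.py`. Below is a minimal sketch (not the real generator script; `rocm_job_names` and `PYTHON_VERSIONS` are illustrative assumptions) of the naming convention the regenerated jobs follow, where dots in the version become underscores in job names such as `manywheel-py3_12-rocm6_1`:

```python
# Hypothetical sketch only: shows how a version list like ROCM_ARCHES expands
# into the job names visible in this diff ("6.1" -> "rocm6_1"). The helper
# names below are assumptions, not the real generate_binary_build_matrix.py API.
ROCM_ARCHES = ["6.0", "6.1"]
PYTHON_VERSIONS = ["3.8", "3.9", "3.10", "3.11", "3.12"]

def rocm_job_names(package_type="manywheel"):
    names = []
    for py in PYTHON_VERSIONS:
        for arch in ROCM_ARCHES:
            # Dots become underscores in the generated job/build names.
            names.append(
                f"{package_type}-py{py.replace('.', '_')}-rocm{arch.replace('.', '_')}"
            )
    return names

print(rocm_job_names()[:2])  # ['manywheel-py3_8-rocm6_0', 'manywheel-py3_8-rocm6_1']
```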
|
diff --git a/.github/scripts/generate_binary_build_matrix.py b/.github/scripts/generate_binary_build_matrix.py
index 52152926b6..81572ca1a9 100644
--- a/.github/scripts/generate_binary_build_matrix.py
+++ b/.github/scripts/generate_binary_build_matrix.py
@@ -22,7 +22,7 @@ CUDA_ARCHES_FULL_VERSION = {"11.8": "11.8.0", "12.1": "12.1.1"}
CUDA_ARCHES_CUDNN_VERSION = {"11.8": "8", "12.1": "8"}
-ROCM_ARCHES = ["5.7", "6.0"]
+ROCM_ARCHES = ["6.0", "6.1"]
CPU_CXX11_ABI_ARCH = ["cpu-cxx11-abi"]
diff --git a/.github/workflows/build-triton-wheel.yml b/.github/workflows/build-triton-wheel.yml
index 6513bbe287..ddba8ff890 100644
--- a/.github/workflows/build-triton-wheel.yml
+++ b/.github/workflows/build-triton-wheel.yml
@@ -37,7 +37,7 @@ jobs:
device: ["cuda", "rocm"]
include:
- device: "rocm"
- rocm_version: "6.0"
+ rocm_version: "6.1"
- device: "cuda"
rocm_version: ""
timeout-minutes: 40
diff --git a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml
index b28aa4a371..c632d5fbd7 100644
--- a/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml
+++ b/.github/workflows/generated-linux-binary-libtorch-cxx11-abi-nightly.yml
@@ -229,7 +229,7 @@ jobs:
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- libtorch-rocm5_7-shared-with-deps-cxx11-abi-build:
+ libtorch-rocm6_0-shared-with-deps-cxx11-abi-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -238,19 +238,19 @@ jobs:
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.0-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-rocm5_7-shared-with-deps-cxx11-abi
+ build_name: libtorch-rocm6_0-shared-with-deps-cxx11-abi
build_environment: linux-binary-libtorch-cxx11-abi
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-rocm5_7-shared-with-deps-cxx11-abi-test: # Testing
+ libtorch-rocm6_0-shared-with-deps-cxx11-abi-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-rocm5_7-shared-with-deps-cxx11-abi-build
+ needs: libtorch-rocm6_0-shared-with-deps-cxx11-abi-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -259,11 +259,11 @@ jobs:
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.0-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: cxx11-abi
steps:
@@ -272,7 +272,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: libtorch-rocm5_7-shared-with-deps-cxx11-abi
+ name: libtorch-rocm6_0-shared-with-deps-cxx11-abi
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -305,37 +305,37 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/libtorch-cxx11-builder:rocm5.7-main
+ docker-image: pytorch/libtorch-cxx11-builder:rocm6.0-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- libtorch-rocm5_7-shared-with-deps-cxx11-abi-upload: # Uploading
+ libtorch-rocm6_0-shared-with-deps-cxx11-abi-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: libtorch-rocm5_7-shared-with-deps-cxx11-abi-test
+ needs: libtorch-rocm6_0-shared-with-deps-cxx11-abi-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.0-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-rocm5_7-shared-with-deps-cxx11-abi
+ build_name: libtorch-rocm6_0-shared-with-deps-cxx11-abi
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- libtorch-rocm6_0-shared-with-deps-cxx11-abi-build:
+ libtorch-rocm6_1-shared-with-deps-cxx11-abi-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -344,19 +344,19 @@ jobs:
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-rocm6_0-shared-with-deps-cxx11-abi
+ build_name: libtorch-rocm6_1-shared-with-deps-cxx11-abi
build_environment: linux-binary-libtorch-cxx11-abi
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-rocm6_0-shared-with-deps-cxx11-abi-test: # Testing
+ libtorch-rocm6_1-shared-with-deps-cxx11-abi-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-rocm6_0-shared-with-deps-cxx11-abi-build
+ needs: libtorch-rocm6_1-shared-with-deps-cxx11-abi-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -365,11 +365,11 @@ jobs:
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: cxx11-abi
steps:
@@ -378,7 +378,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: libtorch-rocm6_0-shared-with-deps-cxx11-abi
+ name: libtorch-rocm6_1-shared-with-deps-cxx11-abi
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -411,30 +411,30 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/libtorch-cxx11-builder:rocm6.0-main
+ docker-image: pytorch/libtorch-cxx11-builder:rocm6.1-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- libtorch-rocm6_0-shared-with-deps-cxx11-abi-upload: # Uploading
+ libtorch-rocm6_1-shared-with-deps-cxx11-abi-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: libtorch-rocm6_0-shared-with-deps-cxx11-abi-test
+ needs: libtorch-rocm6_1-shared-with-deps-cxx11-abi-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/libtorch-cxx11-builder:rocm6.1-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: cxx11-abi
- build_name: libtorch-rocm6_0-shared-with-deps-cxx11-abi
+ build_name: libtorch-rocm6_1-shared-with-deps-cxx11-abi
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
diff --git a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml
index 248b699850..d8d4650a52 100644
--- a/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml
+++ b/.github/workflows/generated-linux-binary-libtorch-pre-cxx11-nightly.yml
@@ -229,7 +229,7 @@ jobs:
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- libtorch-rocm5_7-shared-with-deps-pre-cxx11-build:
+ libtorch-rocm6_0-shared-with-deps-pre-cxx11-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -238,19 +238,19 @@ jobs:
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-rocm5_7-shared-with-deps-pre-cxx11
+ build_name: libtorch-rocm6_0-shared-with-deps-pre-cxx11
build_environment: linux-binary-libtorch-pre-cxx11
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-rocm5_7-shared-with-deps-pre-cxx11-test: # Testing
+ libtorch-rocm6_0-shared-with-deps-pre-cxx11-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-rocm5_7-shared-with-deps-pre-cxx11-build
+ needs: libtorch-rocm6_0-shared-with-deps-pre-cxx11-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -259,11 +259,11 @@ jobs:
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: pre-cxx11
steps:
@@ -272,7 +272,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: libtorch-rocm5_7-shared-with-deps-pre-cxx11
+ name: libtorch-rocm6_0-shared-with-deps-pre-cxx11
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -305,37 +305,37 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm5.7-main
+ docker-image: pytorch/manylinux-builder:rocm6.0-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- libtorch-rocm5_7-shared-with-deps-pre-cxx11-upload: # Uploading
+ libtorch-rocm6_0-shared-with-deps-pre-cxx11-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: libtorch-rocm5_7-shared-with-deps-pre-cxx11-test
+ needs: libtorch-rocm6_0-shared-with-deps-pre-cxx11-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-rocm5_7-shared-with-deps-pre-cxx11
+ build_name: libtorch-rocm6_0-shared-with-deps-pre-cxx11
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- libtorch-rocm6_0-shared-with-deps-pre-cxx11-build:
+ libtorch-rocm6_1-shared-with-deps-pre-cxx11-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -344,19 +344,19 @@ jobs:
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-rocm6_0-shared-with-deps-pre-cxx11
+ build_name: libtorch-rocm6_1-shared-with-deps-pre-cxx11
build_environment: linux-binary-libtorch-pre-cxx11
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- libtorch-rocm6_0-shared-with-deps-pre-cxx11-test: # Testing
+ libtorch-rocm6_1-shared-with-deps-pre-cxx11-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: libtorch-rocm6_0-shared-with-deps-pre-cxx11-build
+ needs: libtorch-rocm6_1-shared-with-deps-pre-cxx11-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -365,11 +365,11 @@ jobs:
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: pre-cxx11
steps:
@@ -378,7 +378,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: libtorch-rocm6_0-shared-with-deps-pre-cxx11
+ name: libtorch-rocm6_1-shared-with-deps-pre-cxx11
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -411,30 +411,30 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
+ docker-image: pytorch/manylinux-builder:rocm6.1-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- libtorch-rocm6_0-shared-with-deps-pre-cxx11-upload: # Uploading
+ libtorch-rocm6_1-shared-with-deps-pre-cxx11-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: libtorch-rocm6_0-shared-with-deps-pre-cxx11-test
+ needs: libtorch-rocm6_1-shared-with-deps-pre-cxx11-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: libtorch
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
LIBTORCH_VARIANT: shared-with-deps
DESIRED_DEVTOOLSET: pre-cxx11
- build_name: libtorch-rocm6_0-shared-with-deps-pre-cxx11
+ build_name: libtorch-rocm6_1-shared-with-deps-pre-cxx11
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
diff --git a/.github/workflows/generated-linux-binary-manywheel-nightly.yml b/.github/workflows/generated-linux-binary-manywheel-nightly.yml
index 9f3d7224c1..cfdd248733 100644
--- a/.github/workflows/generated-linux-binary-manywheel-nightly.yml
+++ b/.github/workflows/generated-linux-binary-manywheel-nightly.yml
@@ -284,7 +284,7 @@ jobs:
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- manywheel-py3_8-rocm5_7-build:
+ manywheel-py3_8-rocm6_0-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -293,18 +293,18 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-rocm5_7
+ build_name: manywheel-py3_8-rocm6_0
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-rocm5_7-test: # Testing
+ manywheel-py3_8-rocm6_0-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-rocm5_7-build
+ needs: manywheel-py3_8-rocm6_0-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -313,11 +313,11 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.8"
steps:
- name: Setup ROCm
@@ -325,7 +325,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: manywheel-py3_8-rocm5_7
+ name: manywheel-py3_8-rocm6_0
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -358,36 +358,36 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm5.7-main
+ docker-image: pytorch/manylinux-builder:rocm6.0-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- manywheel-py3_8-rocm5_7-upload: # Uploading
+ manywheel-py3_8-rocm6_0-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: manywheel-py3_8-rocm5_7-test
+ needs: manywheel-py3_8-rocm6_0-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-rocm5_7
+ build_name: manywheel-py3_8-rocm6_0
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- manywheel-py3_8-rocm6_0-build:
+ manywheel-py3_8-rocm6_1-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -396,18 +396,18 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-rocm6_0
+ build_name: manywheel-py3_8-rocm6_1
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_8-rocm6_0-test: # Testing
+ manywheel-py3_8-rocm6_1-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_8-rocm6_0-build
+ needs: manywheel-py3_8-rocm6_1-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -416,11 +416,11 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.8"
steps:
- name: Setup ROCm
@@ -428,7 +428,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: manywheel-py3_8-rocm6_0
+ name: manywheel-py3_8-rocm6_1
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -461,29 +461,29 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
+ docker-image: pytorch/manylinux-builder:rocm6.1-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- manywheel-py3_8-rocm6_0-upload: # Uploading
+ manywheel-py3_8-rocm6_1-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: manywheel-py3_8-rocm6_0-test
+ needs: manywheel-py3_8-rocm6_1-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.8"
- build_name: manywheel-py3_8-rocm6_0
+ build_name: manywheel-py3_8-rocm6_1
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
@@ -737,7 +737,7 @@ jobs:
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- manywheel-py3_9-rocm5_7-build:
+ manywheel-py3_9-rocm6_0-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -746,18 +746,18 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-rocm5_7
+ build_name: manywheel-py3_9-rocm6_0
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-rocm5_7-test: # Testing
+ manywheel-py3_9-rocm6_0-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-rocm5_7-build
+ needs: manywheel-py3_9-rocm6_0-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -766,11 +766,11 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.9"
steps:
- name: Setup ROCm
@@ -778,7 +778,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: manywheel-py3_9-rocm5_7
+ name: manywheel-py3_9-rocm6_0
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -811,36 +811,36 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm5.7-main
+ docker-image: pytorch/manylinux-builder:rocm6.0-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- manywheel-py3_9-rocm5_7-upload: # Uploading
+ manywheel-py3_9-rocm6_0-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: manywheel-py3_9-rocm5_7-test
+ needs: manywheel-py3_9-rocm6_0-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-rocm5_7
+ build_name: manywheel-py3_9-rocm6_0
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- manywheel-py3_9-rocm6_0-build:
+ manywheel-py3_9-rocm6_1-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -849,18 +849,18 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-rocm6_0
+ build_name: manywheel-py3_9-rocm6_1
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_9-rocm6_0-test: # Testing
+ manywheel-py3_9-rocm6_1-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_9-rocm6_0-build
+ needs: manywheel-py3_9-rocm6_1-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -869,11 +869,11 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.9"
steps:
- name: Setup ROCm
@@ -881,7 +881,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: manywheel-py3_9-rocm6_0
+ name: manywheel-py3_9-rocm6_1
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -914,29 +914,29 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
+ docker-image: pytorch/manylinux-builder:rocm6.1-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- manywheel-py3_9-rocm6_0-upload: # Uploading
+ manywheel-py3_9-rocm6_1-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: manywheel-py3_9-rocm6_0-test
+ needs: manywheel-py3_9-rocm6_1-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.9"
- build_name: manywheel-py3_9-rocm6_0
+ build_name: manywheel-py3_9-rocm6_1
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
@@ -1190,7 +1190,7 @@ jobs:
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- manywheel-py3_10-rocm5_7-build:
+ manywheel-py3_10-rocm6_0-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -1199,18 +1199,18 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-rocm5_7
+ build_name: manywheel-py3_10-rocm6_0
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-rocm5_7-test: # Testing
+ manywheel-py3_10-rocm6_0-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-rocm5_7-build
+ needs: manywheel-py3_10-rocm6_0-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -1219,11 +1219,11 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.10"
steps:
- name: Setup ROCm
@@ -1231,7 +1231,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: manywheel-py3_10-rocm5_7
+ name: manywheel-py3_10-rocm6_0
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -1264,36 +1264,36 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm5.7-main
+ docker-image: pytorch/manylinux-builder:rocm6.0-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- manywheel-py3_10-rocm5_7-upload: # Uploading
+ manywheel-py3_10-rocm6_0-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: manywheel-py3_10-rocm5_7-test
+ needs: manywheel-py3_10-rocm6_0-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-rocm5_7
+ build_name: manywheel-py3_10-rocm6_0
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- manywheel-py3_10-rocm6_0-build:
+ manywheel-py3_10-rocm6_1-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -1302,18 +1302,18 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-rocm6_0
+ build_name: manywheel-py3_10-rocm6_1
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_10-rocm6_0-test: # Testing
+ manywheel-py3_10-rocm6_1-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_10-rocm6_0-build
+ needs: manywheel-py3_10-rocm6_1-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -1322,11 +1322,11 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.10"
steps:
- name: Setup ROCm
@@ -1334,7 +1334,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: manywheel-py3_10-rocm6_0
+ name: manywheel-py3_10-rocm6_1
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -1367,29 +1367,29 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
+ docker-image: pytorch/manylinux-builder:rocm6.1-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- manywheel-py3_10-rocm6_0-upload: # Uploading
+ manywheel-py3_10-rocm6_1-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: manywheel-py3_10-rocm6_0-test
+ needs: manywheel-py3_10-rocm6_1-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.10"
- build_name: manywheel-py3_10-rocm6_0
+ build_name: manywheel-py3_10-rocm6_1
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
@@ -1643,7 +1643,7 @@ jobs:
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- manywheel-py3_11-rocm5_7-build:
+ manywheel-py3_11-rocm6_0-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -1652,18 +1652,18 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-rocm5_7
+ build_name: manywheel-py3_11-rocm6_0
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-rocm5_7-test: # Testing
+ manywheel-py3_11-rocm6_0-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-rocm5_7-build
+ needs: manywheel-py3_11-rocm6_0-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -1672,11 +1672,11 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.11"
steps:
- name: Setup ROCm
@@ -1684,7 +1684,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: manywheel-py3_11-rocm5_7
+ name: manywheel-py3_11-rocm6_0
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -1717,36 +1717,36 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm5.7-main
+ docker-image: pytorch/manylinux-builder:rocm6.0-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- manywheel-py3_11-rocm5_7-upload: # Uploading
+ manywheel-py3_11-rocm6_0-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: manywheel-py3_11-rocm5_7-test
+ needs: manywheel-py3_11-rocm6_0-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-rocm5_7
+ build_name: manywheel-py3_11-rocm6_0
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- manywheel-py3_11-rocm6_0-build:
+ manywheel-py3_11-rocm6_1-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -1755,18 +1755,18 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-rocm6_0
+ build_name: manywheel-py3_11-rocm6_1
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_11-rocm6_0-test: # Testing
+ manywheel-py3_11-rocm6_1-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_11-rocm6_0-build
+ needs: manywheel-py3_11-rocm6_1-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -1775,11 +1775,11 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.11"
steps:
- name: Setup ROCm
@@ -1787,7 +1787,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: manywheel-py3_11-rocm6_0
+ name: manywheel-py3_11-rocm6_1
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -1820,29 +1820,29 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
+ docker-image: pytorch/manylinux-builder:rocm6.1-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- manywheel-py3_11-rocm6_0-upload: # Uploading
+ manywheel-py3_11-rocm6_1-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: manywheel-py3_11-rocm6_0-test
+ needs: manywheel-py3_11-rocm6_1-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.11"
- build_name: manywheel-py3_11-rocm6_0
+ build_name: manywheel-py3_11-rocm6_1
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
@@ -2096,7 +2096,7 @@ jobs:
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- manywheel-py3_12-rocm5_7-build:
+ manywheel-py3_12-rocm6_0-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -2105,18 +2105,18 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-rocm5_7
+ build_name: manywheel-py3_12-rocm6_0
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-rocm5_7-test: # Testing
+ manywheel-py3_12-rocm6_0-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-rocm5_7-build
+ needs: manywheel-py3_12-rocm6_0-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -2125,11 +2125,11 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.12"
steps:
- name: Setup ROCm
@@ -2137,7 +2137,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: manywheel-py3_12-rocm5_7
+ name: manywheel-py3_12-rocm6_0
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -2170,36 +2170,36 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm5.7-main
+ docker-image: pytorch/manylinux-builder:rocm6.0-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- manywheel-py3_12-rocm5_7-upload: # Uploading
+ manywheel-py3_12-rocm6_0-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: manywheel-py3_12-rocm5_7-test
+ needs: manywheel-py3_12-rocm6_0-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm5.7
- GPU_ARCH_VERSION: 5.7
+ DESIRED_CUDA: rocm6.0
+ GPU_ARCH_VERSION: 6.0
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm5.7-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-rocm5_7
+ build_name: manywheel-py3_12-rocm6_0
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
conda-pytorchbot-token-test: ${{ secrets.CONDA_PYTORCHBOT_TOKEN_TEST }}
uses: ./.github/workflows/_binary-upload.yml
- manywheel-py3_12-rocm6_0-build:
+ manywheel-py3_12-rocm6_1-build:
if: ${{ github.repository_owner == 'pytorch' }}
uses: ./.github/workflows/_binary-build-linux.yml
with:
@@ -2208,18 +2208,18 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-rocm6_0
+ build_name: manywheel-py3_12-rocm6_1
build_environment: linux-binary-manywheel
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
- manywheel-py3_12-rocm6_0-test: # Testing
+ manywheel-py3_12-rocm6_1-test: # Testing
if: ${{ github.repository_owner == 'pytorch' }}
- needs: manywheel-py3_12-rocm6_0-build
+ needs: manywheel-py3_12-rocm6_1-build
runs-on: linux.rocm.gpu
timeout-minutes: 240
env:
@@ -2228,11 +2228,11 @@ jobs:
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
SKIP_ALL_TESTS: 1
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.12"
steps:
- name: Setup ROCm
@@ -2240,7 +2240,7 @@ jobs:
- uses: actions/download-artifact@v3
name: Download Build Artifacts
with:
- name: manywheel-py3_12-rocm6_0
+ name: manywheel-py3_12-rocm6_1
path: "${{ runner.temp }}/artifacts/"
- name: Checkout PyTorch
uses: malfet/checkout@silent-checkout
@@ -2273,29 +2273,29 @@ jobs:
- name: Pull Docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
- docker-image: pytorch/manylinux-builder:rocm6.0-main
+ docker-image: pytorch/manylinux-builder:rocm6.1-main
- name: Test Pytorch binary
uses: ./pytorch/.github/actions/test-pytorch-binary
- name: Teardown ROCm
uses: ./.github/actions/teardown-rocm
- manywheel-py3_12-rocm6_0-upload: # Uploading
+ manywheel-py3_12-rocm6_1-upload: # Uploading
if: ${{ github.repository_owner == 'pytorch' }}
permissions:
id-token: write
contents: read
- needs: manywheel-py3_12-rocm6_0-test
+ needs: manywheel-py3_12-rocm6_1-test
with:
PYTORCH_ROOT: /pytorch
BUILDER_ROOT: /builder
PACKAGE_TYPE: manywheel
# TODO: This is a legacy variable that we eventually want to get rid of in
# favor of GPU_ARCH_VERSION
- DESIRED_CUDA: rocm6.0
- GPU_ARCH_VERSION: 6.0
+ DESIRED_CUDA: rocm6.1
+ GPU_ARCH_VERSION: 6.1
GPU_ARCH_TYPE: rocm
- DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.0-main
+ DOCKER_IMAGE: pytorch/manylinux-builder:rocm6.1-main
DESIRED_PYTHON: "3.12"
- build_name: manywheel-py3_12-rocm6_0
+ build_name: manywheel-py3_12-rocm6_1
secrets:
github-token: ${{ secrets.GITHUB_TOKEN }}
conda-pytorchbot-token: ${{ secrets.CONDA_PYTORCHBOT_TOKEN }}
|
2.41.0
|
54128c16e2a33899729846699d83fc0523dc5a6
|
Mon, 29 Apr 2024 16:49:31 -0700
|
[PATCH 0840/1000] [inductor] Remove usage of device_interface from _inductor.runtime (#124592)
|
Differential Revision: [D56723770](https://our.internmc.facebook.com/intern/diff/D56723770) Co-authored-by: Sam Larsen <slarsen@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124592 Approved by: https://github.com/masnesral
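The mechanical core of the change is in `torch/_inductor/runtime/hints.py` below: device metadata is captured eagerly into a `DeviceProperties` NamedTuple when `triton_meta` is built at codegen time, so the runtime side (`CachingAutotuner`, the compile worker) no longer resolves a `DeviceInterface` lazily. A hedged usage sketch follows (the `DeviceProperties` class and its `create` method come from this patch; the surrounding driver code is illustrative only, not Inductor's actual call site):

```python
# Sketch of the pattern introduced here: snapshot device properties into a plain
# NamedTuple so runtime code does not import torch._dynamo.device_interface.
import torch
from torch._inductor.runtime.hints import DeviceProperties  # added by this patch

if torch.cuda.is_available():
    props = DeviceProperties.create(torch.device("cuda"))
    triton_meta = {
        "signature": {0: "*fp32", 1: "*fp32", 2: "i32"},
        "device": props,  # previously an integer index plus a separate "device_type"
        "constants": {},
    }
    # The runtime can recover the legacy fields without a device_interface lookup:
    print(props.type, props.index, props.cc)
```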
|
diff --git a/test/inductor/test_cuda_repro.py b/test/inductor/test_cuda_repro.py
index 684f3cef8f..db02d19310 100644
--- a/test/inductor/test_cuda_repro.py
+++ b/test/inductor/test_cuda_repro.py
@@ -14,6 +14,7 @@ from torch._dynamo.testing import rand_strided
from torch._dynamo.utils import same
from torch._inductor import config
from torch._inductor.compile_fx import compile_fx_inner
+from torch._inductor.runtime.hints import DeviceProperties
from torch._inductor.utils import run_and_get_code
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing import FileCheck
@@ -405,7 +406,7 @@ class CudaReproTests(TestCase):
],
meta={
"signature": {0: "*fp32", 1: "*fp32", 2: "i32"},
- "device": 0,
+ "device": DeviceProperties.create(torch.device("cuda")),
"configs": [instance_descriptor(divisible_by_16=(0, 1), equal_to_1=())],
"constants": {},
},
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 1d148f9d99..562a86709f 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -45,16 +45,12 @@ from typing import (
Optional,
Set,
Tuple,
- Type,
TYPE_CHECKING,
Union,
)
import torch
-from torch._dynamo.device_interface import (
- get_interface_for_device,
- get_registered_device_interfaces,
-)
+from torch._dynamo.device_interface import get_registered_device_interfaces
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
@@ -70,7 +66,6 @@ from torch._subclasses.fake_tensor import (
from torch.fx.experimental.symbolic_shapes import has_hint, hint_int, ShapeEnv
if TYPE_CHECKING:
- from torch._dynamo.device_interface import DeviceInterface
from torch._inductor.graph import GraphLowering
from torch._inductor.ir import ChoiceCaller
@@ -2823,14 +2818,9 @@ def _set_triton_ptxas_path() -> None:
def _worker_compile_triton(
load_kernel: Callable[[], Any],
- cc: int,
- device: torch.device,
- device_interface: Type[DeviceInterface],
):
_set_triton_ptxas_path()
- device_interface.Worker.set_device(device.index)
- kernel = load_kernel()
- kernel.precompile(warm_cache_only_with_cc=cc)
+ load_kernel().precompile(warm_cache_only=True)
class CodeCacheFuture:
@@ -2993,17 +2983,13 @@ class AsyncCompile:
kernel = TritonCodeCache.load(kernel_name, source_code)
if config.compile_threads > 1:
- device_interface = get_interface_for_device(device_str)
- device = torch.device(device_str, device_interface.current_device())
- cc = device_interface.get_compute_capability(device)
- future = self.process_pool().submit(
- _worker_compile_triton,
- kernel._reload_in_subproc,
- cc,
- device,
- device_interface,
+ return TritonFuture(
+ kernel,
+ self.process_pool().submit(
+ _worker_compile_triton,
+ kernel._reload_in_subproc,
+ ),
)
- return TritonFuture(kernel, future)
else:
kernel.precompile()
return kernel
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index 17bb6e1a89..5e1d938fe2 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -34,7 +34,7 @@ import torch.utils._pytree as pytree
from torch._dynamo.utils import preserve_rng_state
from torch._inductor.metrics import is_metric_table_enabled, log_kernel_metadata
-from torch._inductor.runtime.hints import AutotuneHint
+from torch._inductor.runtime.hints import AutotuneHint, DeviceProperties
from torch._prims_common import is_integer_dtype
from torch.utils._sympy.functions import FloorDiv, ModularIndexing
from torch.utils._sympy.value_ranges import ValueRanges
@@ -125,7 +125,7 @@ def gen_common_triton_imports():
"""
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
- from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor
+ from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
"""
)
return imports.getvalue()
@@ -2833,8 +2833,7 @@ class TritonKernel(Kernel):
)
triton_meta = {
"signature": triton_meta_signature,
- "device": V.graph.scheduler.current_device.index,
- "device_type": V.graph.scheduler.current_device.type,
+ "device": DeviceProperties.create(V.graph.scheduler.current_device),
"constants": {},
}
diff --git a/torch/_inductor/codegen/triton_foreach.py b/torch/_inductor/codegen/triton_foreach.py
index a0acdcdae0..210ab6b50a 100644
--- a/torch/_inductor/codegen/triton_foreach.py
+++ b/torch/_inductor/codegen/triton_foreach.py
@@ -6,6 +6,7 @@ from typing import Dict, List, Tuple
from sympy import Integer
from .. import metrics
+from ..runtime.hints import DeviceProperties
from ..scheduler import SchedulerNode
from ..utils import ceildiv, Placeholder
from ..virtualized import V
@@ -157,8 +158,7 @@ class ForeachKernel(Kernel):
_, _, signature = self.args.python_argdefs()
triton_meta = {
"signature": signature_to_meta(signature, size_dtype=size_dtype),
- "device": V.graph.scheduler.current_device.index,
- "device_type": V.graph.scheduler.current_device.type,
+ "device": DeviceProperties.create(V.graph.scheduler.current_device),
"constants": {},
}
triton_meta["configs"] = [config_of(signature)]
diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index cc282d1005..2b4459509d 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -40,6 +40,7 @@ from torch.utils._sympy.singleton_int import SingletonInt
from .. import codecache, config, ir
from ..ir import ReinterpretView
from ..runtime import triton_heuristics
+from ..runtime.hints import DeviceProperties
from ..utils import (
cache_on_self,
get_benchmark_name,
@@ -1130,8 +1131,7 @@ class WrapperCodeGen(CodeGen):
size_dtype=index_dtype,
indices=non_constant_indices,
),
- "device": V.graph.scheduler.current_device.index,
- "device_type": V.graph.scheduler.current_device.type,
+ "device": DeviceProperties.create(V.graph.scheduler.current_device),
# Triton compiler includes equal_to_1 args into constants even
# when they are not constexpr. otherwise there may be a segfault
# during launching the Inductor-compiled Triton kernel.
diff --git a/torch/_inductor/runtime/hints.py b/torch/_inductor/runtime/hints.py
index 5b2b53ebff..325f37ae25 100644
--- a/torch/_inductor/runtime/hints.py
+++ b/torch/_inductor/runtime/hints.py
@@ -1,6 +1,8 @@
import collections
+import typing
from dataclasses import fields
from enum import auto, Enum
+from typing import Optional
# NOTE: if these fail asserts submit a PR to increase them
@@ -89,3 +91,39 @@ class AutotuneHint(Enum):
# which isn't valid python.
# Enum.__str__ will just return "AutotuneHint.ELEMENTS_PER_WARP_32".
__repr__ = Enum.__str__
+
+
+class DeviceProperties(typing.NamedTuple):
+ """Copy device properties into a data structure not requiring torch to be imported"""
+
+ type: str # type: ignore[assignment]
+ index: int # type: ignore[assignment]
+ cc: int
+ major: Optional[int] = None
+ regs_per_multiprocessor: Optional[int] = None
+ max_threads_per_multi_processor: Optional[int] = None
+ multi_processor_count: Optional[int] = None
+
+ @classmethod
+ def create(cls, device):
+ import torch
+ from torch._dynamo.device_interface import get_interface_for_device
+
+ device_type = device.type if torch.version.hip is None else "hip"
+ device_interface = get_interface_for_device(device)
+ if device_type == "cuda":
+ props = device_interface.get_device_properties(device)
+ return cls(
+ type=device_type,
+ index=device.index,
+ cc=device_interface.get_compute_capability(device),
+ major=props.major,
+ regs_per_multiprocessor=props.regs_per_multiprocessor,
+ max_threads_per_multi_processor=props.max_threads_per_multi_processor,
+ multi_processor_count=props.multi_processor_count,
+ )
+ return cls(
+ type=device_type,
+ index=device.index,
+ cc=device_interface.get_compute_capability(device),
+ )
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index c6b871db87..c85ea37133 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -16,12 +16,12 @@ from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import torch
-from torch._dynamo.device_interface import DeviceGuard, get_interface_for_device
from .coordinate_descent_tuner import CoordescTuner
from .hints import (
_NUM_THREADS_PER_WARP,
AutotuneHint,
+ DeviceProperties,
HeuristicType,
ReductionHint,
TileHint,
@@ -144,7 +144,12 @@ class CachingAutotuner(KernelInterface):
assert len(configs) > 0, "Non-empty TritonConfig list required for compiling"
self.fn = fn
- self.triton_meta = triton_meta
+ self.device_props: DeviceProperties = triton_meta["device"]
+ self.triton_meta = {
+ **triton_meta,
+ "device": self.device_props.index,
+ "device_type": self.device_props.type,
+ }
self.inductor_meta = {} if inductor_meta is None else inductor_meta
self.save_cache_hook = save_cache_hook
self.mutated_arg_names = mutated_arg_names
@@ -152,13 +157,6 @@ class CachingAutotuner(KernelInterface):
self.heuristic_type = heuristic_type
self.custom_kernel = custom_kernel
self.cuda_kernel_saved = False
-
- # Align the default design that default as cuda
- self.device_type = (
- triton_meta["device_type"] if "device_type" in triton_meta else "cuda"
- )
- self.device_interface = get_interface_for_device(self.device_type)
-
if log.isEnabledFor(logging.DEBUG):
log.debug(
"CachingAutotuner gets %d configs for %s",
@@ -186,7 +184,7 @@ class CachingAutotuner(KernelInterface):
)
self.filename = filename
- def precompile(self, warm_cache_only_with_cc=None):
+ def precompile(self, warm_cache_only=False):
with self.lock:
if self.launchers:
return
@@ -197,7 +195,7 @@ class CachingAutotuner(KernelInterface):
for c in self.configs:
try:
compiled_binary, launcher = self._precompile_config(
- c, warm_cache_only_with_cc
+ c, warm_cache_only
)
except OutOfResources as e:
if len(self.configs) == 1:
@@ -215,19 +213,19 @@ class CachingAutotuner(KernelInterface):
seen_configs = set(self.configs)
- device_prop = self.device_interface.Worker.get_device_properties(
- self.triton_meta["device"]
- )
+ device_prop = self.device_props
if (
self.inductor_meta.get("dynamic_scale_rblock", True)
and self.heuristic_type == HeuristicType.REDUCTION
and self.size_hints is not None
- # Disable for AMDGPU as Triton is not ready to return n_regs for a compiled_binary.
- and not self.inductor_meta.get("is_hip")
- # Disable for Intel GPU as Triton is not ready to return n_regs for a compiled_binary.
- and self.device_type != "xpu"
+ # Disable for AMDGPU/Intel as Triton is not ready to return n_regs for a compiled_binary.
+ and device_prop.type == "cuda"
+ and device_prop.major
and device_prop.major >= 8
):
+ assert device_prop.regs_per_multiprocessor
+ assert device_prop.max_threads_per_multi_processor
+ assert device_prop.multi_processor_count
for triton_config, compiled_binary in zip(
self.configs, compiled_binaries
):
@@ -288,15 +286,21 @@ class CachingAutotuner(KernelInterface):
continue
seen_configs.add(new_config)
self.launchers.append(
- self._precompile_config(new_config, warm_cache_only_with_cc)[1]
+ self._precompile_config(new_config, warm_cache_only)[1]
)
self.configs = None
- def _precompile_config(self, cfg: Config, warm_cache_only_with_cc: Optional[int]):
+ def get_device_interface(self):
+ # this code cannot run in compile workers, because it imports from torch
+ from torch._dynamo.device_interface import get_interface_for_device
+
+ return get_interface_for_device(self.device_props.type.replace("hip", "cuda"))
+
+ def _precompile_config(self, cfg: Config, warm_cache_only: bool):
"""Ahead of time compile a given autotuner config."""
compile_meta = copy.deepcopy(self.triton_meta)
for k, v in cfg.kwargs.items():
- if torch.version.hip is not None:
+ if self.device_props.type != "hip":
if k == "matrix_instr_nonkdim":
compile_meta["matrix_instr_nonkdim"] = v
continue
@@ -310,22 +314,9 @@ class CachingAutotuner(KernelInterface):
"assert_indirect_indexing", True
) and not self.inductor_meta.get("is_hip", False)
- # Setting device_type="hip" required on ROCm to pass down to triton
- compile_meta["device_type"] = (
- self.device_type if torch.version.hip is None else "hip"
- )
-
- if warm_cache_only_with_cc:
- cc = warm_cache_only_with_cc
- else:
- # Use device_type 'cuda' for both cuda and hip devices to retrieve
- # the compute capability.
- device_type = self.device_type if torch.version.hip is None else "cuda"
- device_id = compile_meta["device"]
- device = torch.device(device_type, device_id)
- cc = self.device_interface.get_compute_capability(device)
-
- compile_meta["cc"] = cc
+ # device type will be "hip" rather than "cuda" here
+ compile_meta["device_type"] = self.device_props.type
+ compile_meta["cc"] = self.device_props.cc
if ASTSource:
compile_args = (
@@ -354,7 +345,7 @@ class CachingAutotuner(KernelInterface):
"num_stages": compile_meta["num_stages"],
"debug": compile_meta["debug"],
}
- if torch.version.hip is not None:
+ if self.device_props.type != "hip":
if "waves_per_eu" in compile_meta:
options["waves_per_eu"] = compile_meta["waves_per_eu"]
if "matrix_instr_nonkdim" in compile_meta:
@@ -369,16 +360,21 @@ class CachingAutotuner(KernelInterface):
compile_args = (self.fn,)
compile_kwargs = compile_meta
- if warm_cache_only_with_cc:
+ if warm_cache_only:
return (
triton.compile(*compile_args, **compile_kwargs),
None,
)
+ # importing from torch is safe now that precompile has returned
+ from torch._dynamo.device_interface import DeviceGuard
+
+ device_interface = self.get_device_interface()
+
# load binary to the correct device
- with DeviceGuard(self.device_interface, compile_meta["device"]): # type: ignore[attr-defined]
+ with DeviceGuard(device_interface, compile_meta["device"]): # type: ignore[attr-defined]
# need to initialize context
- self.device_interface.synchronize(self.device_interface.current_device())
+ device_interface.synchronize(device_interface.current_device())
try:
binary = triton.compile(*compile_args, **compile_kwargs)
@@ -596,8 +592,9 @@ class CachingAutotuner(KernelInterface):
)
return float("inf")
- stream = self.device_interface.get_raw_stream( # type: ignore[call-arg]
- self.device_interface.current_device()
+ device_interface = self.get_device_interface()
+ stream = device_interface.get_raw_stream( # type: ignore[call-arg]
+ device_interface.current_device()
)
def kernel_call():
@@ -706,7 +703,7 @@ class CachingAutotuner(KernelInterface):
binary = (
launcher.bin.asm["cubin"]
- if torch.version.hip is None
+ if self.device_props.type != "hip"
else launcher.bin.asm["hsaco"]
)
CudaKernelParamCache.set(key, params, binary)
@@ -736,7 +733,7 @@ class CachingAutotuner(KernelInterface):
def benchmark_one_config(config):
with self.lock:
- _, launcher = self._precompile_config(config, None)
+ _, launcher = self._precompile_config(config, False)
config2launcher[config] = launcher
out = self.bench(launcher, *cloned_args, **kwargs)
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index c301c3394f..577a1c318b 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -35,6 +35,7 @@ from .codegen.triton import (
from .codegen.triton_utils import config_of, signature_to_meta
from .exc import CUDACompileError
from .ir import ChoiceCaller, PrimitiveInfoType
+from .runtime.hints import DeviceProperties
from .runtime.runtime_utils import do_bench
from .utils import (
get_dtype_size,
@@ -154,8 +155,7 @@ class TritonTemplateKernel(TritonKernel):
argdefs, _, signature = self.args.python_argdefs()
triton_meta = {
"signature": signature_to_meta(signature, size_dtype=self.index_dtype),
- "device": self.output_node.get_device().index,
- "device_type": self.output_node.get_device().type,
+ "device": DeviceProperties.create(self.output_node.get_device()),
"constants": {},
}
triton_meta["configs"] = [config_of(signature)]
|
2.41.0
|
699ade0cb1458d8c9f36aef39d526efa9a85a78
|
Mon, 29 Apr 2024 16:49:32 -0700
|
[PATCH 0841/1000] [dynamo] Refactor into torch/_inductor/runtime/compile_tasks.py (#124681)
|
Differential Revision: [D56723769](https://our.internmc.facebook.com/intern/diff/D56723769) Co-authored-by: Sam Larsen <slarsen@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/124681 Approved by: https://github.com/masnesral ghstack dependencies: #124592
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 562a86709f..3b5a9597d1 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -54,6 +54,13 @@ from torch._dynamo.device_interface import get_registered_device_interfaces
from torch._dynamo.utils import counters, dynamo_timed
from torch._inductor import config, exc, metrics
from torch._inductor.codegen.cuda import cuda_env
+from torch._inductor.runtime.compile_tasks import (
+ _module_to_triton_kernel,
+ _reload_python_module,
+ _reload_python_module_in_subproc,
+ _set_triton_ptxas_path,
+ _worker_compile_triton,
+)
from torch._inductor.runtime.runtime_utils import cache_dir
from torch._inductor.utils import clear_on_fresh_inductor_cache, is_linux
@@ -2402,10 +2409,6 @@ class CppWrapperCodeCache(CppPythonBindingsCodeCache):
)
-def _reload_python_module_in_subproc(key, path):
- return PyCodeCache.load_by_key_path(key, path)
-
-
@clear_on_fresh_inductor_cache
class PyCodeCache:
cache: Dict[str, ModuleType] = dict()
@@ -2438,31 +2441,21 @@ class PyCodeCache:
if linemap is None:
linemap = []
if key not in cls.cache:
- with open(path) as f:
- try:
- code = compile(f.read(), path, "exec")
- except Exception as e:
- raise RuntimeError(
- f"Failed to import {path}\n{type(e).__name__}: {e}"
- ) from None
- mod = ModuleType(f"{__name__}.{key}")
- mod.__file__ = path
- mod.key = key # type: ignore[attr-defined]
- exec(code, mod.__dict__, mod.__dict__)
- sys.modules[mod.__name__] = mod
- # another thread might set this first
- cls.cache.setdefault(key, mod)
- # unzip into separate lines/nodes lists
- cls.linemaps[path] = list(zip(*linemap))
-
- if attrs is not None:
- for k, v in attrs.items():
- setattr(mod, k, v)
-
- if not (linemap or attrs):
- mod._reload_in_subproc = functools.partial( # type: ignore[attr-defined]
- _reload_python_module_in_subproc, key, path
- )
+ mod = _reload_python_module(key, path)
+
+ # another thread might set this first
+ cls.cache.setdefault(key, mod)
+ # unzip into separate lines/nodes lists
+ cls.linemaps[path] = list(zip(*linemap))
+
+ if attrs is not None:
+ for k, v in attrs.items():
+ setattr(mod, k, v)
+
+ if not (linemap or attrs):
+ mod._reload_in_subproc = functools.partial( # type: ignore[attr-defined]
+ _reload_python_module_in_subproc, key, path
+ )
return cls.cache[key]
@@ -2495,25 +2488,10 @@ class PyCodeCache:
return parse_stack_trace(entry)
-def _reload_triton_kernel_in_subproc(reload_module, kernel_name):
- return TritonCodeCache._mod_to_kernel(reload_module(), kernel_name)
-
-
class TritonCodeCache:
@classmethod
def load(cls, kernel_name: str, source_code: str) -> ModuleType:
- mod = PyCodeCache.load(source_code)
- return cls._mod_to_kernel(mod, kernel_name)
-
- @classmethod
- def _mod_to_kernel(cls, mod, kernel_name):
- kernel = getattr(mod, kernel_name)
- kernel._reload_in_subproc = functools.partial(
- _reload_triton_kernel_in_subproc,
- mod._reload_in_subproc,
- kernel_name,
- )
- return kernel
+ return _module_to_triton_kernel(PyCodeCache.load(source_code), kernel_name)
def _cuda_compiler() -> Optional[str]:
@@ -2801,28 +2779,6 @@ def caching_device_properties():
device_interface.Worker.get_device_properties()
-@functools.lru_cache(None)
-def _set_triton_ptxas_path() -> None:
- if os.environ.get("TRITON_PTXAS_PATH") is not None:
- return
- ptxas_path = os.path.abspath(
- os.path.join(os.path.dirname(__file__), "..", "bin", "ptxas")
- )
- if not os.path.exists(ptxas_path):
- return
- if os.path.isfile(ptxas_path) and os.access(ptxas_path, os.X_OK):
- os.environ["TRITON_PTXAS_PATH"] = ptxas_path
- else:
- warnings.warn(f"{ptxas_path} exists but is not an executable")
-
-
-def _worker_compile_triton(
- load_kernel: Callable[[], Any],
-):
- _set_triton_ptxas_path()
- load_kernel().precompile(warm_cache_only=True)
-
-
class CodeCacheFuture:
def result(self):
raise NotImplementedError
diff --git a/torch/_inductor/runtime/compile_tasks.py b/torch/_inductor/runtime/compile_tasks.py
new file mode 100644
index 0000000000..66a36703da
--- /dev/null
+++ b/torch/_inductor/runtime/compile_tasks.py
@@ -0,0 +1,68 @@
+from __future__ import annotations
+
+import functools
+import os
+import sys
+import warnings
+from types import ModuleType
+from typing import Any, Callable
+
+
+def _reload_triton_kernel_in_subproc(reload_module, kernel_name):
+ return _module_to_triton_kernel(reload_module(), kernel_name)
+
+
+def _module_to_triton_kernel(mod, kernel_name):
+ kernel = getattr(mod, kernel_name)
+ kernel._reload_in_subproc = functools.partial(
+ _reload_triton_kernel_in_subproc,
+ mod._reload_in_subproc,
+ kernel_name,
+ )
+ return kernel
+
+
+def _reload_python_module_in_subproc(key, path):
+ codecache = sys.modules.get("torch._inductor.codecache")
+ if codecache:
+ return codecache.PyCodeCache.load_by_key_path(key, path)
+ else:
+ return _reload_python_module(key, path)
+
+
+def _reload_python_module(key, path):
+ with open(path) as f:
+ try:
+ code = compile(f.read(), path, "exec")
+ except Exception as e:
+ raise RuntimeError(
+ f"Failed to import {path}\n{type(e).__name__}: {e}"
+ ) from None
+ mod = ModuleType(f"{__name__}.{key}")
+ mod.__file__ = path
+ mod.key = key # type: ignore[attr-defined]
+ exec(code, mod.__dict__, mod.__dict__)
+ sys.modules[mod.__name__] = mod
+ return mod
+
+
+@functools.lru_cache(None)
+def _set_triton_ptxas_path() -> None:
+ if os.environ.get("TRITON_PTXAS_PATH") is not None:
+ return
+ ptxas_path = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), "..", "bin", "ptxas")
+ )
+ if not os.path.exists(ptxas_path):
+ return
+ if os.path.isfile(ptxas_path) and os.access(ptxas_path, os.X_OK):
+ os.environ["TRITON_PTXAS_PATH"] = ptxas_path
+ else:
+ warnings.warn(f"{ptxas_path} exists but is not an executable")
+
+
+def _worker_compile_triton(
+ load_kernel: Callable[[], Any],
+):
+ _set_triton_ptxas_path()
+ load_kernel().precompile(warm_cache_only=True)
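For illustration only (not part of the patch above): a hedged sketch of how these torch-free helpers could be driven from a compile worker. The ProcessPoolExecutor wiring and the helper name `warm_triton_kernel_in_subproc` are assumptions made for the example; the real worker pool lives elsewhere in _inductor. Only the call shape of `_worker_compile_triton` and the `_reload_in_subproc` attribute follow the code added here.

```python
# Hedged usage sketch (assumed pool wiring, hypothetical helper name -- not the
# actual _inductor worker code). The point of compile_tasks.py is that these helpers
# can be pickled into a child process without importing torch at module-import time.
from concurrent.futures import ProcessPoolExecutor

from torch._inductor.runtime.compile_tasks import _worker_compile_triton


def warm_triton_kernel_in_subproc(kernel) -> None:
    # kernel._reload_in_subproc is the picklable callable installed by
    # _module_to_triton_kernel above; in the child it reloads the generated
    # module and calls precompile(warm_cache_only=True).
    with ProcessPoolExecutor(max_workers=1) as pool:
        pool.submit(_worker_compile_triton, kernel._reload_in_subproc).result()
```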
|
2.41.0
|
03b9a218994eede2551598dea178c230d2f0d23
|
Tue, 30 Apr 2024 16:57:36 +0000
|
[PATCH 0842/1000] fix: typo (#125226)
|
Fixes a spelling error: "spacial" is an incorrect spelling of "spatial". Pull Request resolved: https://github.com/pytorch/pytorch/pull/125226 Approved by: https://github.com/Skylion007
|
diff --git a/torch/nn/functional.py b/torch/nn/functional.py
index 213a24e4f5..57dd5905e6 100644
--- a/torch/nn/functional.py
+++ b/torch/nn/functional.py
@@ -3210,7 +3210,7 @@ def binary_cross_entropy_with_logits(
operations. For a target of size [B, C, H, W] (where B is batch size) pos_weight of
size [B, C, H, W] will apply different pos_weights to each element of the batch or
[C, H, W] the same pos_weights across the batch. To apply the same positive weight
- along all spacial dimensions for a 2D multi-class target [C, H, W] use: [C, 1, 1].
+ along all spatial dimensions for a 2D multi-class target [C, H, W] use: [C, 1, 1].
Default: ``None``
Examples::
|
2.41.0
|
4c6424fbf1fbd8925a1f7ba4a1ae0246f0c7c5e
|
Tue, 30 Apr 2024 17:31:57 +0000
|
[PATCH 0843/1000] Remove caffe2 image and video (#125045)
|
This PR decomposes https://github.com/pytorch/pytorch/pull/122527 into a smaller change: the Caffe2 image and video folders are removed along with the related CMake code. Of note, this change was inspired by and co-developed with @r-barnes. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125045 Approved by: https://github.com/eqy, https://github.com/albanD
|
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7adeac323a..215ec7a81a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -228,7 +228,6 @@ option(USE_FBGEMM "Use FBGEMM (quantized 8-bit server operators)" ON)
option(USE_KINETO "Use Kineto profiling library" ON)
option(USE_CUPTI_SO "Use CUPTI as a shared library" ON)
option(USE_FAKELOWP "Use FakeLowp operators" OFF)
-option(USE_FFMPEG "Use ffmpeg" OFF)
option(USE_GFLAGS "Use GFLAGS" OFF)
option(USE_GLOG "Use GLOG" OFF)
option(USE_LEVELDB "Use LEVELDB" OFF)
@@ -264,7 +263,6 @@ cmake_dependent_option(
option(USE_NUMPY "Use NumPy" ON)
option(USE_OBSERVERS "Use observers module." OFF)
option(USE_OPENCL "Use OpenCL" OFF)
-option(USE_OPENCV "Use OpenCV" OFF)
option(USE_OPENMP "Use OpenMP for parallel code" ON)
option(USE_PRECOMPILED_HEADERS "Use pre-compiled headers to accelerate build." OFF)
diff --git a/caffe2/CMakeLists.txt b/caffe2/CMakeLists.txt
index d02557e2cf..bcd171f0f7 100644
--- a/caffe2/CMakeLists.txt
+++ b/caffe2/CMakeLists.txt
@@ -125,8 +125,6 @@ if(BUILD_CAFFE2 AND NOT INTERN_BUILD_MOBILE)
add_subdirectory(db)
add_subdirectory(distributed)
add_subdirectory(ideep)
- add_subdirectory(image)
- add_subdirectory(video)
add_subdirectory(mobile)
add_subdirectory(mpi)
add_subdirectory(observers)
diff --git a/caffe2/image/CMakeLists.txt b/caffe2/image/CMakeLists.txt
deleted file mode 100644
index 023df8ebd0..0000000000
--- a/caffe2/image/CMakeLists.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-if(USE_OPENCV AND OpenCV_FOUND)
- message(STATUS "Including image processing operators")
- # ---[ GPU files
- # ------[ general GPU
- file(GLOB tmp *_gpu.cc)
- set(Caffe2_GPU_SRCS ${Caffe2_GPU_SRCS} ${tmp})
- # ------[ CUDA sources
- file(GLOB tmp *.cu)
- set(Caffe2_GPU_SRCS ${Caffe2_GPU_SRCS} ${tmp})
- # exclude test files
- file(GLOB tmp *_test.cc)
- exclude(Caffe2_GPU_SRCS "${Caffe2_GPU_SRCS}" ${tmp})
-
- # ---[ HIP files
- # ------[ general HIP
- file(GLOB tmp hip/*.cc)
- set(Caffe2_HIP_SRCS ${Caffe2_HIP_SRCS} ${tmp})
- # ------[ HIP sources
- file(GLOB tmp hip/*.hip)
- set(Caffe2_HIP_SRCS ${Caffe2_HIP_SRCS} ${tmp})
- # exclude test files
- file(GLOB tmp hip/*_test.cc)
- exclude(Caffe2_HIP_SRCS "${Caffe2_HIP_SRCS}" ${tmp})
-
- # ---[ CPU files.
- file(GLOB tmp *.cc)
- set(Caffe2_CPU_SRCS ${Caffe2_CPU_SRCS} ${tmp})
- # exclude test files and gpu files
- file(GLOB tmp *_test.cc)
- exclude(Caffe2_CPU_SRCS "${Caffe2_CPU_SRCS}" ${tmp})
- exclude(Caffe2_CPU_SRCS "${Caffe2_CPU_SRCS}" ${Caffe2_GPU_SRCS})
- exclude(Caffe2_CPU_SRCS "${Caffe2_CPU_SRCS}" ${Caffe2_HIP_SRCS})
-
- # ---[ GPU test files
- file(GLOB tmp *_gpu_test.cc)
- set(Caffe2_GPU_TEST_SRCS ${Caffe2_GPU_TEST_SRCS} ${tmp})
-
- # ---[ HIP test files
- file(GLOB tmp hip/*_test.cc)
- set(Caffe2_HIP_TEST_SRCS ${Caffe2_HIP_TEST_SRCS} ${tmp})
-
- # ---[ CPU test files
- file(GLOB tmp *_test.cc)
- set(Caffe2_CPU_TEST_SRCS ${Caffe2_CPU_TEST_SRCS} ${tmp})
- exclude(Caffe2_CPU_TEST_SRCS "${Caffe2_CPU_TEST_SRCS}" ${Caffe2_GPU_TEST_SRCS})
- exclude(Caffe2_CPU_TEST_SRCS "${Caffe2_CPU_TEST_SRCS}" ${Caffe2_HIP_TEST_SRCS})
-
- # ---[ Send the lists to the parent scope.
- set(Caffe2_CPU_SRCS ${Caffe2_CPU_SRCS} PARENT_SCOPE)
- set(Caffe2_GPU_SRCS ${Caffe2_GPU_SRCS} PARENT_SCOPE)
- set(Caffe2_HIP_SRCS ${Caffe2_HIP_SRCS} PARENT_SCOPE)
- set(Caffe2_CPU_TEST_SRCS ${Caffe2_CPU_TEST_SRCS} PARENT_SCOPE)
- set(Caffe2_GPU_TEST_SRCS ${Caffe2_GPU_TEST_SRCS} PARENT_SCOPE)
- set(Caffe2_HIP_TEST_SRCS ${Caffe2_HIP_TEST_SRCS} PARENT_SCOPE)
-else()
- message(STATUS "Excluding image processing operators due to no opencv")
-endif()
diff --git a/caffe2/image/image_input_op.cc b/caffe2/image/image_input_op.cc
deleted file mode 100644
index ff868e1370..0000000000
--- a/caffe2/image/image_input_op.cc
+++ /dev/null
@@ -1,167 +0,0 @@
-#include "caffe2/image/image_input_op.h"
-
-#ifdef USE_MKLDNN
-#include <caffe2/ideep/operators/operator_fallback_ideep.h>
-#include <caffe2/ideep/utils/ideep_operator.h>
-#endif
-
-namespace caffe2 {
-
-template <>
-bool ImageInputOp<CPUContext>::ApplyTransformOnGPU(
- const std::vector<std::int64_t>&,
- const c10::Device&) {
- return false;
-}
-
-REGISTER_CPU_OPERATOR(ImageInput, ImageInputOp<CPUContext>);
-
-OPERATOR_SCHEMA(ImageInput)
- .NumInputs(0, 1)
- .NumOutputs(2, INT_MAX)
- .TensorInferenceFunction([](const OperatorDef& def,
- const vector<TensorShape>& /* unused */) {
- vector<TensorShape> out(2);
- ArgumentHelper helper(def);
- int batch_size = helper.GetSingleArgument<int>("batch_size", 0);
- int crop = helper.GetSingleArgument<int>("crop", -1);
- int color = helper.GetSingleArgument<int>("color", 1);
- TORCH_CHECK_GT(crop, 0);
- out[0] = CreateTensorShape(
- vector<int>{batch_size, crop, crop, color ? 3 : 1},
- TensorProto::FLOAT);
- out[1] =
- CreateTensorShape(vector<int>{1, batch_size}, TensorProto::INT32);
- return out;
- })
- .SetDoc(R"DOC(
-Imports and processes images from a database. For each run of the operator,
-batch_size images will be processed. GPUs can optionally be used for
-part of the processing.
-
-The following transformations are applied to the image
- - A bounding box is applied to the initial image (optional)
- - The image is rescaled either up or down (with the scale argument) or
- just up (with the minsize argument)
- - The image is randomly cropped (crop size is passed as an argument but
- the location of the crop is random except if is_test is passed in which case
- the image in cropped at the center)
- - The image is normalized. Each of its color channels can have separate
- normalization values
-
-The dimension of the output image will always be cropxcrop
-)DOC")
- .Arg(
- "batch_size",
- "Number of images to output for each run of the operator"
- ". Must be 1 or greater")
- .Arg("color", "Number of color channels (1 or 3). Defaults to 1")
- .Arg("color_jitter", "Whether or not to do color jitter. Defaults to 0")
- .Arg(
- "img_saturation",
- "Image saturation scale used in color jittering. "
- "Defaults to 0.4")
- .Arg(
- "img_brightness",
- "Image brightness scale used in color jittering. "
- "Defaults to 0.4")
- .Arg(
- "img_contrast",
- "Image contrast scale used in color jittering. "
- "Defaults to 0.4")
- .Arg(
- "color_lighting",
- "Whether or not to do color lighting."
- " Defaults to 0")
- .Arg(
- "color_lighting_std",
- "Std of normal distribution where color lighting"
- " scaling factor is sampled. Defaults to 0.1")
- .Arg(
- "scale_jitter_type",
- "Type 0: No scale jittering "
- "Type 1: Inception-style scale jittering")
- .Arg(
- "label_type",
- "Type 0: single integer label for multi-class "
- "classification. Type 1: sparse active label indices for multi-label "
- "classification. Type 2: dense label embedding vector for label "
- "embedding regression")
- .Arg(
- "scale",
- "Scale the size of the smallest dimension of the image to"
- " this. Scale and minsize are mutually exclusive."
- " Must be larger than crop")
- .Arg(
- "minsize",
- "Scale the size of the smallest dimension of the image to"
- " this only if the size is initially smaller. Scale and minsize are"
- " mutually exclusive. Must be larger than crop.")
- .Arg(
- "warp",
- "If 1, both dimensions of the image will be set to minsize or"
- " scale; otherwise, the other dimension is proportionally scaled."
- " Defaults to 0")
- .Arg("crop", "Size to crop the image to. Must be provided")
- .Arg("mirror", "Whether or not to mirror the image. Defaults to 0")
- .Arg(
- "mean",
- "Mean by which to normalize color channels."
- " Defaults to 0.")
- .Arg(
- "mean_per_channel",
- "Vector of means per color channel "
- " (1 or 3 elements). Defaults to mean argument. Channel order BGR")
- .Arg(
- "std",
- "Standard deviation by which to normalize color channels."
- " Defaults to 1.")
- .Arg(
- "std_per_channel",
- "Vector of standard dev. per color channel "
- " (1 or 3 elements). Defaults to std argument. Channel order is BGR")
- .Arg("bounding_ymin", "Bounding box coordinate. Defaults to -1 (none)")
- .Arg("bounding_xmin", "Bounding box coordinate. Defaults to -1 (none)")
- .Arg("bounding_height", "Bounding box coordinate. Defaults to -1 (none)")
- .Arg("bounding_width", "Bounding box coordinate. Defaults to -1 (none)")
- .ArgIsTest("Set to 1 to do deterministic cropping. Defaults to 0")
- .Arg("use_caffe_datum", "1 if the input is in Caffe format. Defaults to 0")
- .Arg(
- "use_gpu_transform",
- "1 if GPU acceleration should be used."
- " Defaults to 0. Can only be 1 in a CUDAContext")
- .Arg(
- "decode_threads",
- "Number of CPU decode/transform threads."
- " Defaults to 4")
- .Arg("output_type", "If gpu_transform, can set to FLOAT or FLOAT16.")
- .Arg("db", "Name of the database (if not passed as input)")
- .Arg(
- "db_type",
- "Type of database (if not passed as input)."
- " Defaults to leveldb")
- .Arg(
- "output_sizes",
- "The sizes of any outputs besides the data and label "
- "(should have a number of elements equal to the number of additional "
- "outputs)")
- .Arg(
- "random_scale",
- "[min, max] shortest-side desired for image resize. "
- "Defaults to [-1, -1] or no random resize desired.")
- .Input(0, "reader", "The input reader (a db::DBReader)")
- .Output(0, "data", "Tensor containing the images")
- .Output(1, "label", "Tensor containing the labels")
- .Output(
- 2,
- "additional outputs",
- "Any outputs after the first 2 will be "
- "Tensors read from the input TensorProtos");
-
-NO_GRADIENT(ImageInput);
-
-#ifdef USE_MKLDNN
-REGISTER_IDEEP_OPERATOR(ImageInput, IDEEPFallbackOp<ImageInputOp<CPUContext>>);
-#endif
-
-} // namespace caffe2
diff --git a/caffe2/image/image_input_op.h b/caffe2/image/image_input_op.h
deleted file mode 100644
index 51367788c0..0000000000
--- a/caffe2/image/image_input_op.h
+++ /dev/null
@@ -1,1347 +0,0 @@
-
-#ifndef CAFFE2_IMAGE_IMAGE_INPUT_OP_H_
-#define CAFFE2_IMAGE_IMAGE_INPUT_OP_H_
-
-#include <opencv2/opencv.hpp>
-
-#include <algorithm>
-#include <iostream>
-
-#include "c10/core/thread_pool.h"
-#include <c10/util/irange.h>
-#include "caffe2/core/common.h"
-#include "caffe2/core/db.h"
-#include "caffe2/image/transform_gpu.h"
-#include "caffe2/operators/prefetch_op.h"
-#include "caffe2/proto/caffe2_legacy.pb.h"
-#include "caffe2/utils/cast.h"
-#include "caffe2/utils/math.h"
-
-namespace caffe2 {
-
-class CUDAContext;
-
-template <class Context>
-class ImageInputOp final : public PrefetchOperator<Context> {
- // SINGLE_LABEL: single integer label for multi-class classification
- // MULTI_LABEL_SPARSE: sparse active label indices for multi-label
- // classification MULTI_LABEL_DENSE: dense label embedding vector for label
- // embedding regression MULTI_LABEL_WEIGHTED_SPARSE: sparse active label
- // indices with per-label weights for multi-label classification
- // SINGLE_LABEL_WEIGHTED: single integer label for multi-class classification
- // with weighted sampling EMBEDDING_LABEL: an array of floating numbers
- // representing dense embedding.
- // It is useful for model distillation
- enum LABEL_TYPE {
- SINGLE_LABEL = 0,
- MULTI_LABEL_SPARSE = 1,
- MULTI_LABEL_DENSE = 2,
- MULTI_LABEL_WEIGHTED_SPARSE = 3,
- SINGLE_LABEL_WEIGHTED = 4,
- EMBEDDING_LABEL = 5,
- };
-
- // INCEPTION_STYLE: Random crop with size 8% - 100% image area and aspect
- // ratio in [3/4, 4/3]. Reference: GoogleNet paper
- enum SCALE_JITTER_TYPE {
- NO_SCALE_JITTER = 0,
- INCEPTION_STYLE = 1
- // TODO(zyan3): ResNet-style random scale jitter
- };
-
- public:
- using OperatorBase::OutputSize;
- using PrefetchOperator<Context>::context_;
- using PrefetchOperator<Context>::prefetch_thread_;
- explicit ImageInputOp(const OperatorDef& operator_def, Workspace* ws);
- ~ImageInputOp() {
- PrefetchOperator<Context>::Finalize();
- }
-
- bool Prefetch() override;
- bool CopyPrefetched() override;
-
- private:
- struct BoundingBox {
- bool valid;
- int ymin;
- int xmin;
- int height;
- int width;
- };
-
- // Structure to store per-image information
- // This can be modified by the DecodeAnd* so needs
- // to be privatized per launch.
- struct PerImageArg { BoundingBox bounding_params; };
-
- bool GetImageAndLabelAndInfoFromDBValue(
- const string& value,
- cv::Mat* img,
- PerImageArg& info,
- int item_id,
- std::mt19937* randgen);
- void DecodeAndTransform(
- const std::string& value,
- float* image_data,
- int item_id,
- const int channels,
- std::size_t thread_index);
- void DecodeAndTransposeOnly(
- const std::string& value,
- uint8_t* image_data,
- int item_id,
- const int channels,
- std::size_t thread_index);
- bool ApplyTransformOnGPU(
- const std::vector<std::int64_t>& dims,
- const c10::Device& type);
-
- unique_ptr<db::DBReader> owned_reader_;
- const db::DBReader* reader_;
- Tensor prefetched_image_;
- Tensor prefetched_label_;
- vector<Tensor> prefetched_additional_outputs_;
- Tensor prefetched_image_on_device_;
- Tensor prefetched_label_on_device_;
- vector<Tensor> prefetched_additional_outputs_on_device_;
- // Default parameters for images
- PerImageArg default_arg_;
- int batch_size_;
- LABEL_TYPE label_type_;
- int num_labels_;
-
- bool color_;
- bool color_jitter_;
- float img_saturation_;
- float img_brightness_;
- float img_contrast_;
- bool color_lighting_;
- float color_lighting_std_;
- std::vector<std::vector<float>> color_lighting_eigvecs_;
- std::vector<float> color_lighting_eigvals_;
- SCALE_JITTER_TYPE scale_jitter_type_;
- int scale_;
- // Minsize is similar to scale except that it will only
- // force the image to scale up if it is too small. In other words,
- // it ensures that both dimensions of the image are at least minsize_
- int minsize_;
- bool warp_;
- int crop_;
- std::vector<float> mean_;
- std::vector<float> std_;
- Tensor mean_gpu_;
- Tensor std_gpu_;
- bool mirror_;
- bool is_test_;
- bool use_caffe_datum_;
- bool gpu_transform_;
- bool mean_std_copied_ = false;
-
- // thread pool for parse + decode
- int num_decode_threads_;
- int additional_inputs_offset_;
- int additional_inputs_count_;
- std::vector<int> additional_output_sizes_;
- std::shared_ptr<TaskThreadPool> thread_pool_;
-
- // Output type for GPU transform path
- TensorProto_DataType output_type_;
-
- // random minsize
- vector<int> random_scale_;
- bool random_scaling_;
-
- // Working variables
- std::vector<std::mt19937> randgen_per_thread_;
-
- // number of exceptions produced by opencv while reading image data
- std::atomic<long> num_decode_errors_in_batch_{0};
- // opencv exceptions tolerance
- float max_decode_error_ratio_;
-};
-
-template <class Context>
-ImageInputOp<Context>::ImageInputOp(
- const OperatorDef& operator_def,
- Workspace* ws)
- : PrefetchOperator<Context>(operator_def, ws),
- reader_(nullptr),
- batch_size_(
- OperatorBase::template GetSingleArgument<int>("batch_size", 0)),
- label_type_(static_cast<LABEL_TYPE>(
- OperatorBase::template GetSingleArgument<int>("label_type", 0))),
- num_labels_(
- OperatorBase::template GetSingleArgument<int>("num_labels", 0)),
- color_(OperatorBase::template GetSingleArgument<int>("color", 1)),
- color_jitter_(
- OperatorBase::template GetSingleArgument<int>("color_jitter", 0)),
- img_saturation_(OperatorBase::template GetSingleArgument<float>(
- "img_saturation",
- 0.4)),
- img_brightness_(OperatorBase::template GetSingleArgument<float>(
- "img_brightness",
- 0.4)),
- img_contrast_(
- OperatorBase::template GetSingleArgument<float>("img_contrast", 0.4)),
- color_lighting_(
- OperatorBase::template GetSingleArgument<int>("color_lighting", 0)),
- color_lighting_std_(OperatorBase::template GetSingleArgument<float>(
- "color_lighting_std",
- 0.1)),
- scale_jitter_type_(static_cast<SCALE_JITTER_TYPE>(
- OperatorBase::template GetSingleArgument<int>(
- "scale_jitter_type",
- 0))),
- scale_(OperatorBase::template GetSingleArgument<int>("scale", -1)),
- minsize_(OperatorBase::template GetSingleArgument<int>("minsize", -1)),
- warp_(OperatorBase::template GetSingleArgument<int>("warp", 0)),
- crop_(OperatorBase::template GetSingleArgument<int>("crop", -1)),
- mirror_(OperatorBase::template GetSingleArgument<int>("mirror", 0)),
- is_test_(OperatorBase::template GetSingleArgument<int>(
- OpSchema::Arg_IsTest,
- 0)),
- use_caffe_datum_(
- OperatorBase::template GetSingleArgument<int>("use_caffe_datum", 0)),
- gpu_transform_(OperatorBase::template GetSingleArgument<int>(
- "use_gpu_transform",
- 0)),
- num_decode_threads_(
- OperatorBase::template GetSingleArgument<int>("decode_threads", 4)),
- additional_output_sizes_(
- OperatorBase::template GetRepeatedArgument<int>("output_sizes", {})),
- thread_pool_(std::make_shared<TaskThreadPool>(num_decode_threads_)),
- // output type only supported with CUDA and use_gpu_transform for now
- output_type_(
- cast::GetCastDataType(ArgumentHelper(operator_def), "output_type")),
- random_scale_(OperatorBase::template GetRepeatedArgument<int>(
- "random_scale",
- {-1, -1})),
- max_decode_error_ratio_(OperatorBase::template GetSingleArgument<float>(
- "max_decode_error_ratio",
- 1.0)) {
- if ((random_scale_[0] == -1) || (random_scale_[1] == -1)) {
- random_scaling_ = false;
- } else {
- random_scaling_ = true;
- minsize_ = random_scale_[0];
- }
-
- mean_ = OperatorBase::template GetRepeatedArgument<float>(
- "mean_per_channel",
- {OperatorBase::template GetSingleArgument<float>("mean", 0.)});
-
- std_ = OperatorBase::template GetRepeatedArgument<float>(
- "std_per_channel",
- {OperatorBase::template GetSingleArgument<float>("std", 1.)});
-
- if (additional_output_sizes_.size() == 0) {
- additional_output_sizes_ = std::vector<int>(OutputSize() - 2, 1);
- } else {
- CAFFE_ENFORCE(
- additional_output_sizes_.size() == OutputSize() - 2,
- "If the output sizes are specified, they must be specified for all "
- "additional outputs");
- }
- additional_inputs_count_ = OutputSize() - 2;
-
- default_arg_.bounding_params = {
- false,
- OperatorBase::template GetSingleArgument<int>("bounding_ymin", -1),
- OperatorBase::template GetSingleArgument<int>("bounding_xmin", -1),
- OperatorBase::template GetSingleArgument<int>("bounding_height", -1),
- OperatorBase::template GetSingleArgument<int>("bounding_width", -1),
- };
-
- if (operator_def.input_size() == 0) {
- LOG(ERROR) << "You are using an old ImageInputOp format that creates "
- "a local db reader. Consider moving to the new style "
- "that takes in a DBReader blob instead.";
- string db_name = OperatorBase::template GetSingleArgument<string>("db", "");
- CAFFE_ENFORCE_GT(db_name.size(), 0, "Must specify a db name.");
- owned_reader_.reset(new db::DBReader(
- OperatorBase::template GetSingleArgument<string>("db_type", "leveldb"),
- db_name));
- reader_ = owned_reader_.get();
- }
-
- // hard-coded PCA eigenvectors and eigenvalues, based on RBG channel order
- color_lighting_eigvecs_.push_back(
- std::vector<float>{-144.7125f, 183.396f, 102.2295f});
- color_lighting_eigvecs_.push_back(
- std::vector<float>{-148.104f, -1.1475f, -207.57f});
- color_lighting_eigvecs_.push_back(
- std::vector<float>{-148.818f, -177.174f, 107.1765f});
-
- color_lighting_eigvals_ = std::vector<float>{0.2175f, 0.0188f, 0.0045f};
-
- CAFFE_ENFORCE_GT(batch_size_, 0, "Batch size should be nonnegative.");
- if (use_caffe_datum_) {
- CAFFE_ENFORCE(
- label_type_ == SINGLE_LABEL || label_type_ == SINGLE_LABEL_WEIGHTED,
- "Caffe datum only supports single integer label");
- }
- if (label_type_ != SINGLE_LABEL && label_type_ != SINGLE_LABEL_WEIGHTED) {
- CAFFE_ENFORCE_GT(
- num_labels_,
- 0,
- "Number of labels must be set for using either sparse label indices or dense label embedding.");
- }
- if (label_type_ == MULTI_LABEL_WEIGHTED_SPARSE ||
- label_type_ == SINGLE_LABEL_WEIGHTED) {
- additional_inputs_offset_ = 3;
- } else {
- additional_inputs_offset_ = 2;
- }
- CAFFE_ENFORCE(
- (scale_ > 0) != (minsize_ > 0),
- "Must provide one and only one of scaling or minsize");
- CAFFE_ENFORCE_GT(crop_, 0, "Must provide the cropping value.");
- CAFFE_ENFORCE_GE(
- scale_ > 0 ? scale_ : minsize_,
- crop_,
- "The scale/minsize value must be no smaller than the crop value.");
-
- CAFFE_ENFORCE_EQ(
- mean_.size(),
- std_.size(),
- "The mean and std. dev vectors must be of the same size.");
- CAFFE_ENFORCE(
- mean_.size() == 1 || mean_.size() == 3,
- "The mean and std. dev vectors must be of size 1 or 3");
- CAFFE_ENFORCE(
- !use_caffe_datum_ || OutputSize() == 2,
- "There can only be 2 outputs if the Caffe datum format is used");
-
- CAFFE_ENFORCE(
- random_scale_.size() == 2, "Must provide [scale_min, scale_max]");
- CAFFE_ENFORCE_GE(
- random_scale_[1],
- random_scale_[0],
- "random scale must provide a range [min, max]");
-
- if (default_arg_.bounding_params.ymin < 0 ||
- default_arg_.bounding_params.xmin < 0 ||
- default_arg_.bounding_params.height < 0 ||
- default_arg_.bounding_params.width < 0) {
- default_arg_.bounding_params.valid = false;
- } else {
- default_arg_.bounding_params.valid = true;
- }
-
- if (mean_.size() == 1) {
- // We are going to extend to 3 using the first value
- mean_.resize(3, mean_[0]);
- std_.resize(3, std_[0]);
- }
-
- LOG(INFO) << "Creating an image input op with the following setting: ";
- LOG(INFO) << " Using " << num_decode_threads_ << " CPU threads;";
- if (gpu_transform_) {
- LOG(INFO) << " Performing transformation on GPU";
- }
- LOG(INFO) << " Outputting in batches of " << batch_size_ << " images;";
- LOG(INFO) << " Treating input image as "
- << (color_ ? "color " : "grayscale ") << "image;";
- if (default_arg_.bounding_params.valid) {
- LOG(INFO) << " Applying a default bounding box of Y ["
- << default_arg_.bounding_params.ymin << "; "
- << default_arg_.bounding_params.ymin +
- default_arg_.bounding_params.height
- << ") x X [" << default_arg_.bounding_params.xmin << "; "
- << default_arg_.bounding_params.xmin +
- default_arg_.bounding_params.width
- << ")";
- }
- if (scale_ > 0 && !random_scaling_) {
- LOG(INFO) << " Scaling image to " << scale_
- << (warp_ ? " with " : " without ") << "warping;";
- } else {
- if (random_scaling_) {
- // randomly set min_size_ for each image
- LOG(INFO) << " Randomly scaling shortest side between "
- << random_scale_[0] << " and " << random_scale_[1];
- } else {
- // Here, minsize_ > 0
- LOG(INFO) << " Ensuring minimum image size of " << minsize_
- << (warp_ ? " with " : " without ") << "warping;";
- }
- }
- LOG(INFO) << " " << (is_test_ ? "Central" : "Random")
- << " cropping image to " << crop_
- << (mirror_ ? " with " : " without ") << "random mirroring;";
- LOG(INFO) << "Label Type: " << label_type_;
- LOG(INFO) << "Num Labels: " << num_labels_;
-
- auto mit = mean_.begin();
- auto sit = std_.begin();
-
- for (int i = 0; mit != mean_.end() && sit != std_.end(); ++mit, ++sit, ++i) {
- LOG(INFO) << " Default [Channel " << i << "] Subtract mean " << *mit
- << " and divide by std " << *sit << ".";
- // We actually will use the inverse of std, so inverse it here
- *sit = 1.f / *sit;
- }
- LOG(INFO) << " Outputting images as "
- << OperatorBase::template GetSingleArgument<string>(
- "output_type", "unknown")
- << ".";
-
- std::mt19937 meta_randgen(time(nullptr));
- for (const auto i : c10::irange(num_decode_threads_)) {
- randgen_per_thread_.emplace_back(meta_randgen());
- }
- ReinitializeTensor(
- &prefetched_image_,
- {int64_t(batch_size_),
- int64_t(crop_),
- int64_t(crop_),
- int64_t(color_ ? 3 : 1)},
- at::dtype<uint8_t>().device(CPU));
- std::vector<int64_t> sizes;
- if (label_type_ != SINGLE_LABEL && label_type_ != SINGLE_LABEL_WEIGHTED) {
- sizes = std::vector<int64_t>{int64_t(batch_size_), int64_t(num_labels_)};
- } else {
- sizes = std::vector<int64_t>{batch_size_};
- }
- // data type for prefetched_label_ is actually not known here..
- ReinitializeTensor(&prefetched_label_, sizes, at::dtype<int>().device(CPU));
-
- for (const auto i : c10::irange(additional_output_sizes_.size())) {
- prefetched_additional_outputs_on_device_.emplace_back();
- prefetched_additional_outputs_.emplace_back();
- }
-}
-
-// Inception-stype scale jittering
-template <class Context>
-bool RandomSizedCropping(cv::Mat* img, const int crop, std::mt19937* randgen) {
- cv::Mat scaled_img;
- bool inception_scale_jitter = false;
- int im_height = img->rows, im_width = img->cols;
- int area = im_height * im_width;
- std::uniform_real_distribution<> area_dis(0.08, 1.0);
- std::uniform_real_distribution<> aspect_ratio_dis(3.0 / 4.0, 4.0 / 3.0);
-
- cv::Mat cropping;
- for (const auto i : c10::irange(10)) {
- int target_area = int(ceil(area_dis(*randgen) * area));
- float aspect_ratio = aspect_ratio_dis(*randgen);
- int nh = floor(std::sqrt(((float)target_area / aspect_ratio)));
- int nw = floor(std::sqrt(((float)target_area * aspect_ratio)));
- if (nh >= 1 && nh <= im_height && nw >= 1 && nw <= im_width) {
- int height_offset =
- std::uniform_int_distribution<>(0, im_height - nh)(*randgen);
- int width_offset =
- std::uniform_int_distribution<>(0, im_width - nw)(*randgen);
- cv::Rect ROI(width_offset, height_offset, nw, nh);
- cropping = (*img)(ROI);
- cv::resize(
- cropping, scaled_img, cv::Size(crop, crop), 0, 0, cv::INTER_AREA);
- *img = scaled_img;
- inception_scale_jitter = true;
- break;
- }
- }
- return inception_scale_jitter;
-}
-
-template <class Context>
-bool ImageInputOp<Context>::GetImageAndLabelAndInfoFromDBValue(
- const string& value,
- cv::Mat* img,
- PerImageArg& info,
- int item_id,
- std::mt19937* randgen) {
- //
- // recommend using --caffe2_use_fatal_for_enforce=1 when using ImageInputOp
- // as this function runs on a worker thread and the exceptions from
- // CAFFE_ENFORCE are silently dropped by the thread worker functions
- //
- cv::Mat src;
-
- // Use the default information for images
- info = default_arg_;
- if (use_caffe_datum_) {
- // The input is a caffe datum format.
- CaffeDatum datum;
- CAFFE_ENFORCE(datum.ParseFromString(value));
-
- prefetched_label_.mutable_data<int>()[item_id] = datum.label();
- if (datum.encoded()) {
- // encoded image in datum.
- // count the number of exceptions from opencv imdecode
- try {
- src = cv::imdecode(
- cv::Mat(
- 1,
- datum.data().size(),
- CV_8UC1,
- const_cast<char*>(datum.data().data())),
- color_ ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
- if (src.rows == 0 || src.cols == 0) {
- num_decode_errors_in_batch_++;
- src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3);
- }
- } catch (cv::Exception& e) {
- num_decode_errors_in_batch_++;
- src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3);
- }
- } else {
- // Raw image in datum.
- CAFFE_ENFORCE(datum.channels() == 3 || datum.channels() == 1);
-
- int src_c = datum.channels();
- src.create(
- datum.height(), datum.width(), (src_c == 3) ? CV_8UC3 : CV_8UC1);
-
- if (src_c == 1) {
- memcpy(src.ptr<uchar>(0), datum.data().data(), datum.data().size());
- } else {
- // Datum stores things in CHW order, let's do HWC for images to make
- // things more consistent with conventional image storage.
- for (const auto c : c10::irange(3)) {
- const char* datum_buffer =
- datum.data().data() + datum.height() * datum.width() * c;
- uchar* ptr = src.ptr<uchar>(0) + c;
- for (const auto h : c10::irange(datum.height())) {
- for (const auto w : c10::irange(datum.width())) {
- *ptr = *(datum_buffer++);
- ptr += 3;
- }
- }
- }
- }
- }
- } else {
- // The input is a caffe2 format.
- TensorProtos protos;
- CAFFE_ENFORCE(protos.ParseFromString(value));
- const TensorProto& image_proto = protos.protos(0);
- const TensorProto& label_proto = protos.protos(1);
- // add handle protos
- vector<TensorProto> additional_output_protos;
- int start = additional_inputs_offset_;
- int end = start + additional_inputs_count_;
- for (const auto i : c10::irange(start, end)) {
- additional_output_protos.push_back(protos.protos(i));
- }
-
- if (protos.protos_size() == end + 1) {
- // We have bounding box information
- const TensorProto& bounding_proto = protos.protos(end);
- TORCH_DCHECK_EQ(bounding_proto.data_type(), TensorProto::INT32);
- TORCH_DCHECK_EQ(bounding_proto.int32_data_size(), 4);
- info.bounding_params.valid = true;
- info.bounding_params.ymin = bounding_proto.int32_data(0);
- info.bounding_params.xmin = bounding_proto.int32_data(1);
- info.bounding_params.height = bounding_proto.int32_data(2);
- info.bounding_params.width = bounding_proto.int32_data(3);
- }
-
- if (image_proto.data_type() == TensorProto::STRING) {
- // encoded image string.
- TORCH_DCHECK_EQ(image_proto.string_data_size(), 1);
- const string& encoded_image_str = image_proto.string_data(0);
- int encoded_size = encoded_image_str.size();
- // We use a cv::Mat to wrap the encoded str so we do not need a copy.
- // count the number of exceptions from opencv imdecode
- try {
- src = cv::imdecode(
- cv::Mat(
- 1,
- &encoded_size,
- CV_8UC1,
- const_cast<char*>(encoded_image_str.data())),
- color_ ? cv::IMREAD_COLOR : cv::IMREAD_GRAYSCALE);
- if (src.rows == 0 || src.cols == 0) {
- num_decode_errors_in_batch_++;
- src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3);
- }
- } catch (cv::Exception& e) {
- num_decode_errors_in_batch_++;
- src = cv::Mat::zeros(cv::Size(224, 224), CV_8UC3);
- }
- } else if (image_proto.data_type() == TensorProto::BYTE) {
- // raw image content.
- int src_c = (image_proto.dims_size() == 3) ? image_proto.dims(2) : 1;
- CAFFE_ENFORCE(src_c == 3 || src_c == 1);
-
- src.create(
- image_proto.dims(0),
- image_proto.dims(1),
- (src_c == 3) ? CV_8UC3 : CV_8UC1);
- memcpy(
- src.ptr<uchar>(0),
- image_proto.byte_data().data(),
- image_proto.byte_data().size());
- } else {
- LOG(FATAL) << "Unknown image data type.";
- }
-
- // TODO: if image decoding was unsuccessful, set label to 0
- if (label_proto.data_type() == TensorProto::FLOAT) {
- if (label_type_ == SINGLE_LABEL || label_type_ == SINGLE_LABEL_WEIGHTED) {
- TORCH_DCHECK_EQ(label_proto.float_data_size(), 1);
- prefetched_label_.mutable_data<float>()[item_id] =
- label_proto.float_data(0);
- } else if (label_type_ == MULTI_LABEL_SPARSE) {
- float* label_data =
- prefetched_label_.mutable_data<float>() + item_id * num_labels_;
- memset(label_data, 0, sizeof(float) * num_labels_);
- for (const auto i : c10::irange(label_proto.float_data_size())) {
- label_data[(int)label_proto.float_data(i)] = 1.0;
- }
- } else if (label_type_ == MULTI_LABEL_WEIGHTED_SPARSE) {
- const TensorProto& weight_proto = protos.protos(2);
- float* label_data =
- prefetched_label_.mutable_data<float>() + item_id * num_labels_;
- memset(label_data, 0, sizeof(float) * num_labels_);
- for (const auto i : c10::irange(label_proto.float_data_size())) {
- label_data[(int)label_proto.float_data(i)] =
- weight_proto.float_data(i);
- }
- } else if (
- label_type_ == MULTI_LABEL_DENSE || label_type_ == EMBEDDING_LABEL) {
- CAFFE_ENFORCE(label_proto.float_data_size() == num_labels_);
- float* label_data =
- prefetched_label_.mutable_data<float>() + item_id * num_labels_;
- for (const auto i : c10::irange(label_proto.float_data_size())) {
- label_data[i] = label_proto.float_data(i);
- }
- } else {
- LOG(ERROR) << "Unknown label type:" << label_type_;
- }
- } else if (label_proto.data_type() == TensorProto::INT32) {
- if (label_type_ == SINGLE_LABEL || label_type_ == SINGLE_LABEL_WEIGHTED) {
- TORCH_DCHECK_EQ(label_proto.int32_data_size(), 1);
- prefetched_label_.mutable_data<int>()[item_id] =
- label_proto.int32_data(0);
- } else if (label_type_ == MULTI_LABEL_SPARSE) {
- int* label_data =
- prefetched_label_.mutable_data<int>() + item_id * num_labels_;
- memset(label_data, 0, sizeof(int) * num_labels_);
- for (const auto i : c10::irange(label_proto.int32_data_size())) {
- label_data[label_proto.int32_data(i)] = 1;
- }
- } else if (label_type_ == MULTI_LABEL_WEIGHTED_SPARSE) {
- const TensorProto& weight_proto = protos.protos(2);
- float* label_data =
- prefetched_label_.mutable_data<float>() + item_id * num_labels_;
- memset(label_data, 0, sizeof(float) * num_labels_);
- for (const auto i : c10::irange(label_proto.int32_data_size())) {
- label_data[label_proto.int32_data(i)] = weight_proto.float_data(i);
- }
- } else if (
- label_type_ == MULTI_LABEL_DENSE || label_type_ == EMBEDDING_LABEL) {
- CAFFE_ENFORCE(label_proto.int32_data_size() == num_labels_);
- int* label_data =
- prefetched_label_.mutable_data<int>() + item_id * num_labels_;
- for (const auto i : c10::irange(label_proto.int32_data_size())) {
- label_data[i] = label_proto.int32_data(i);
- }
- } else {
- LOG(ERROR) << "Unknown label type:" << label_type_;
- }
- } else {
- LOG(FATAL) << "Unsupported label data type.";
- }
-
- for (const auto i : c10::irange(additional_output_protos.size())) {
- auto additional_output_proto = additional_output_protos[i];
- if (additional_output_proto.data_type() == TensorProto::FLOAT) {
- float* additional_output =
- prefetched_additional_outputs_[i].template mutable_data<float>() +
- item_id * additional_output_proto.float_data_size();
-
- for (const auto j : c10::irange(additional_output_proto.float_data_size())) {
- additional_output[j] = additional_output_proto.float_data(j);
- }
- } else if (additional_output_proto.data_type() == TensorProto::INT32) {
- int* additional_output =
- prefetched_additional_outputs_[i].template mutable_data<int>() +
- item_id * additional_output_proto.int32_data_size();
-
- for (const auto j : c10::irange(additional_output_proto.int32_data_size())) {
- additional_output[j] = additional_output_proto.int32_data(j);
- }
- } else if (additional_output_proto.data_type() == TensorProto::INT64) {
- int64_t* additional_output =
- prefetched_additional_outputs_[i].template mutable_data<int64_t>() +
- item_id * additional_output_proto.int64_data_size();
-
- for (const auto j : c10::irange(additional_output_proto.int64_data_size())) {
- additional_output[j] = additional_output_proto.int64_data(j);
- }
- } else if (additional_output_proto.data_type() == TensorProto::UINT8) {
- uint8_t* additional_output =
- prefetched_additional_outputs_[i].template mutable_data<uint8_t>() +
- item_id * additional_output_proto.int32_data_size();
-
- for (const auto j : c10::irange(additional_output_proto.int32_data_size())) {
- additional_output[j] =
- static_cast<uint8_t>(additional_output_proto.int32_data(j));
- }
- } else {
- LOG(FATAL) << "Unsupported output type.";
- }
- }
- }
-
- //
- // convert source to the color format requested from Op
- //
- int out_c = color_ ? 3 : 1;
- if (out_c == src.channels()) {
- *img = src;
- } else {
- cv::cvtColor(
- src, *img, (out_c == 1) ? cv::COLOR_BGR2GRAY : cv::COLOR_GRAY2BGR);
- }
-
- // Note(Yangqing): I believe that the mat should be created continuous.
- CAFFE_ENFORCE(img->isContinuous());
-
- // Sanity check now that we decoded everything
-
- // Ensure that the bounding box is legit
- if (info.bounding_params.valid &&
- (src.rows < info.bounding_params.ymin + info.bounding_params.height ||
- src.cols < info.bounding_params.xmin + info.bounding_params.width)) {
- info.bounding_params.valid = false;
- }
-
- // Apply the bounding box if requested
- if (info.bounding_params.valid) {
- // If we reach here, we know the parameters are sane
- cv::Rect bounding_box(
- info.bounding_params.xmin,
- info.bounding_params.ymin,
- info.bounding_params.width,
- info.bounding_params.height);
- *img = (*img)(bounding_box);
-
- /*
- LOG(INFO) << "Did bounding with ymin:"
- << info.bounding_params.ymin << " xmin:" <<
- info.bounding_params.xmin
- << " height:" << info.bounding_params.height
- << " width:" << info.bounding_params.width << "\n";
- LOG(INFO) << "Bounded matrix: " << img;
- */
- } else {
- // LOG(INFO) << "No bounding\n";
- }
-
- cv::Mat scaled_img;
- bool inception_scale_jitter = false;
- if (scale_jitter_type_ == INCEPTION_STYLE) {
- if (!is_test_) {
- // Inception-stype scale jittering is only used for training
- inception_scale_jitter =
- RandomSizedCropping<Context>(img, crop_, randgen);
- // if a random crop is still not found, do simple random cropping later
- }
- }
-
- if ((scale_jitter_type_ == NO_SCALE_JITTER) ||
- (scale_jitter_type_ == INCEPTION_STYLE && !inception_scale_jitter)) {
- int scaled_width, scaled_height;
- int scale_to_use = scale_ > 0 ? scale_ : minsize_;
-
- // set the random minsize
- if (random_scaling_) {
- scale_to_use = std::uniform_int_distribution<>(
- random_scale_[0], random_scale_[1])(*randgen);
- }
-
- if (warp_) {
- scaled_width = scale_to_use;
- scaled_height = scale_to_use;
- } else if (img->rows > img->cols) {
- scaled_width = scale_to_use;
- scaled_height = static_cast<float>(img->rows) * scale_to_use / img->cols;
- } else {
- scaled_height = scale_to_use;
- scaled_width = static_cast<float>(img->cols) * scale_to_use / img->rows;
- }
- if ((scale_ > 0 &&
- (scaled_height != img->rows || scaled_width != img->cols)) ||
- (scaled_height > img->rows || scaled_width > img->cols)) {
- // We rescale in all cases if we are using scale_
- // but only to make the image bigger if using minsize_
- /*
- LOG(INFO) << "Scaling to " << scaled_width << " x " << scaled_height
- << " From " << img->cols << " x " << img->rows;
- */
- cv::resize(
- *img,
- scaled_img,
- cv::Size(scaled_width, scaled_height),
- 0,
- 0,
- cv::INTER_AREA);
- *img = scaled_img;
- }
- }
-
- // TODO(Yangqing): return false if any error happens.
- return true;
-}
-
-// assume HWC order and color channels BGR
-template <class Context>
-void Saturation(
- float* img,
- const int img_size,
- const float alpha_rand,
- std::mt19937* randgen) {
- float alpha = 1.0f +
- std::uniform_real_distribution<float>(-alpha_rand, alpha_rand)(*randgen);
- // BGR to Gray scale image: R -> 0.299, G -> 0.587, B -> 0.114
- int p = 0;
- for (const auto h : c10::irange(img_size)) {
- for (const auto w : c10::irange(img_size)) {
- float gray_color = img[3 * p] * 0.114f + img[3 * p + 1] * 0.587f +
- img[3 * p + 2] * 0.299f;
- for (const auto c : c10::irange(3)) {
- img[3 * p + c] = img[3 * p + c] * alpha + gray_color * (1.0f - alpha);
- }
- p++;
- }
- }
-}
-
-// assume HWC order and color channels BGR
-template <class Context>
-void Brightness(
- float* img,
- const int img_size,
- const float alpha_rand,
- std::mt19937* randgen) {
- float alpha = 1.0f +
- std::uniform_real_distribution<float>(-alpha_rand, alpha_rand)(*randgen);
- int p = 0;
- for (const auto h : c10::irange(img_size)) {
- for (const auto w : c10::irange(img_size)) {
- for (const auto c : c10::irange(3)) {
- img[p++] *= alpha;
- }
- }
- }
-}
-
-// assume HWC order and color channels BGR
-template <class Context>
-void Contrast(
- float* img,
- const int img_size,
- const float alpha_rand,
- std::mt19937* randgen) {
- float gray_mean = 0;
- int p = 0;
- for (const auto h : c10::irange(img_size)) {
- for (const auto w : c10::irange(img_size)) {
- // BGR to Gray scale image: R -> 0.299, G -> 0.587, B -> 0.114
- gray_mean += img[3 * p] * 0.114f + img[3 * p + 1] * 0.587f +
- img[3 * p + 2] * 0.299f;
- p++;
- }
- }
- gray_mean /= (img_size * img_size);
-
- float alpha = 1.0f +
- std::uniform_real_distribution<float>(-alpha_rand, alpha_rand)(*randgen);
- p = 0;
- for (const auto h : c10::irange(img_size)) {
- for (const auto w : c10::irange(img_size)) {
- for (const auto c : c10::irange(3)) {
- img[p] = img[p] * alpha + gray_mean * (1.0f - alpha);
- p++;
- }
- }
- }
-}
-
-// assume HWC order and color channels BGR
-template <class Context>
-void ColorJitter(
- float* img,
- const int img_size,
- const float saturation,
- const float brightness,
- const float contrast,
- std::mt19937* randgen) {
- std::srand(unsigned(std::time(0)));
- std::vector<int> jitter_order{0, 1, 2};
- // obtain a time-based seed:
- unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
- std::shuffle(
- jitter_order.begin(),
- jitter_order.end(),
- std::default_random_engine(seed));
-
- for (const auto i : c10::irange(3)) {
- if (jitter_order[i] == 0) {
- Saturation<Context>(img, img_size, saturation, randgen);
- } else if (jitter_order[i] == 1) {
- Brightness<Context>(img, img_size, brightness, randgen);
- } else {
- Contrast<Context>(img, img_size, contrast, randgen);
- }
- }
-}
-
-// assume HWC order and color channels BGR
-template <class Context>
-void ColorLighting(
- float* img,
- const int img_size,
- const float alpha_std,
- const std::vector<std::vector<float>>& eigvecs,
- const std::vector<float>& eigvals,
- std::mt19937* randgen) {
- std::normal_distribution<float> d(0, alpha_std);
- std::vector<float> alphas(3);
- for (const auto i : c10::irange(3)) {
- alphas[i] = d(*randgen);
- }
-
- std::vector<float> delta_rgb(3, 0.0);
- for (const auto i : c10::irange(3)) {
- for (const auto j : c10::irange(3)) {
- delta_rgb[i] += eigvecs[i][j] * eigvals[j] * alphas[j];
- }
- }
-
- int p = 0;
- for (const auto h : c10::irange(img_size)) {
- for (const auto w : c10::irange(img_size)) {
- for (const auto c : c10::irange(3)) {
- img[p++] += delta_rgb[2 - c];
- }
- }
- }
-}
-
-// assume HWC order and color channels BGR
-// mean subtraction and scaling.
-template <class Context>
-void ColorNormalization(
- float* img,
- const int img_size,
- const int channels,
- const std::vector<float>& mean,
- const std::vector<float>& std) {
- int p = 0;
- for (const auto h : c10::irange(img_size)) {
- for (const auto w : c10::irange(img_size)) {
- for (const auto c : c10::irange(channels)) {
- img[p] = (img[p] - mean[c]) * std[c];
- p++;
- }
- }
- }
-}
-
-// Factored out image transformation
-template <class Context>
-void TransformImage(
- const cv::Mat& scaled_img,
- const int channels,
- float* image_data,
- const bool color_jitter,
- const float saturation,
- const float brightness,
- const float contrast,
- const bool color_lighting,
- const float color_lighting_std,
- const std::vector<std::vector<float>>& color_lighting_eigvecs,
- const std::vector<float>& color_lighting_eigvals,
- const int crop,
- const bool mirror,
- const std::vector<float>& mean,
- const std::vector<float>& std,
- std::mt19937* randgen,
- std::bernoulli_distribution* mirror_this_image,
- bool is_test = false) {
- CAFFE_ENFORCE_GE(
- scaled_img.rows, crop, "Image height must be bigger than crop.");
- CAFFE_ENFORCE_GE(
- scaled_img.cols, crop, "Image width must be bigger than crop.");
-
- // find the cropped region, and copy it to the destination matrix
- int width_offset, height_offset;
- if (is_test) {
- width_offset = (scaled_img.cols - crop) / 2;
- height_offset = (scaled_img.rows - crop) / 2;
- } else {
- width_offset =
- std::uniform_int_distribution<>(0, scaled_img.cols - crop)(*randgen);
- height_offset =
- std::uniform_int_distribution<>(0, scaled_img.rows - crop)(*randgen);
- }
-
- float* image_data_ptr = image_data;
- if (!is_test && mirror && (*mirror_this_image)(*randgen)) {
- // Copy mirrored image.
- for (int h = height_offset; h < height_offset + crop; ++h) {
- for (int w = width_offset + crop - 1; w >= width_offset; --w) {
- const uint8_t* cv_data = scaled_img.ptr(h) + w * channels;
- for (const auto c : c10::irange(channels)) {
- *(image_data_ptr++) = static_cast<float>(cv_data[c]);
- }
- }
- }
- } else {
- // Copy normally.
- for (int h = height_offset; h < height_offset + crop; ++h) {
- for (int w = width_offset; w < width_offset + crop; ++w) {
- const uint8_t* cv_data = scaled_img.ptr(h) + w * channels;
- for (const auto c : c10::irange(channels)) {
- *(image_data_ptr++) = static_cast<float>(cv_data[c]);
- }
- }
- }
- }
-
- if (color_jitter && channels == 3 && !is_test) {
- ColorJitter<Context>(
- image_data, crop, saturation, brightness, contrast, randgen);
- }
- if (color_lighting && channels == 3 && !is_test) {
- ColorLighting<Context>(
- image_data,
- crop,
- color_lighting_std,
- color_lighting_eigvecs,
- color_lighting_eigvals,
- randgen);
- }
-
- // Color normalization
- // Mean subtraction and scaling.
- ColorNormalization<Context>(image_data, crop, channels, mean, std);
-}
-
-// Only crop / transpose the image
-// leave in uint8_t dataType
-template <class Context>
-void CropTransposeImage(
- const cv::Mat& scaled_img,
- const int channels,
- uint8_t* cropped_data,
- const int crop,
- const bool mirror,
- std::mt19937* randgen,
- std::bernoulli_distribution* mirror_this_image,
- bool is_test = false) {
- CAFFE_ENFORCE_GE(
- scaled_img.rows, crop, "Image height must be bigger than crop.");
- CAFFE_ENFORCE_GE(
- scaled_img.cols, crop, "Image width must be bigger than crop.");
-
- // find the cropped region, and copy it to the destination matrix
- int width_offset, height_offset;
- if (is_test) {
- width_offset = (scaled_img.cols - crop) / 2;
- height_offset = (scaled_img.rows - crop) / 2;
- } else {
- width_offset =
- std::uniform_int_distribution<>(0, scaled_img.cols - crop)(*randgen);
- height_offset =
- std::uniform_int_distribution<>(0, scaled_img.rows - crop)(*randgen);
- }
-
- if (mirror && (*mirror_this_image)(*randgen)) {
- // Copy mirrored image.
- for (int h = height_offset; h < height_offset + crop; ++h) {
- for (int w = width_offset + crop - 1; w >= width_offset; --w) {
- const uint8_t* cv_data = scaled_img.ptr(h) + w * channels;
- for (const auto c : c10::irange(channels)) {
- *(cropped_data++) = cv_data[c];
- }
- }
- }
- } else {
- // Copy normally.
- for (int h = height_offset; h < height_offset + crop; ++h) {
- for (int w = width_offset; w < width_offset + crop; ++w) {
- const uint8_t* cv_data = scaled_img.ptr(h) + w * channels;
- for (const auto c : c10::irange(channels)) {
- *(cropped_data++) = cv_data[c];
- }
- }
- }
- }
-}
-
-// Parse datum, decode image, perform transform
-// Intended as entry point for binding to thread pool
-template <class Context>
-void ImageInputOp<Context>::DecodeAndTransform(
- const std::string& value,
- float* image_data,
- int item_id,
- const int channels,
- std::size_t thread_index) {
- CAFFE_ENFORCE((int)thread_index < num_decode_threads_);
-
- std::bernoulli_distribution mirror_this_image(0.5f);
- std::mt19937* randgen = &(randgen_per_thread_[thread_index]);
-
- cv::Mat img;
- // Decode the image
- PerImageArg info;
- CHECK(
- GetImageAndLabelAndInfoFromDBValue(value, &img, info, item_id, randgen));
- // Factor out the image transformation
- TransformImage<Context>(
- img,
- channels,
- image_data,
- color_jitter_,
- img_saturation_,
- img_brightness_,
- img_contrast_,
- color_lighting_,
- color_lighting_std_,
- color_lighting_eigvecs_,
- color_lighting_eigvals_,
- crop_,
- mirror_,
- mean_,
- std_,
- randgen,
- &mirror_this_image,
- is_test_);
-}
-
-template <class Context>
-void ImageInputOp<Context>::DecodeAndTransposeOnly(
- const std::string& value,
- uint8_t* image_data,
- int item_id,
- const int channels,
- std::size_t thread_index) {
- CAFFE_ENFORCE((int)thread_index < num_decode_threads_);
-
- std::bernoulli_distribution mirror_this_image(0.5f);
- std::mt19937* randgen = &(randgen_per_thread_[thread_index]);
-
- cv::Mat img;
- // Decode the image
- PerImageArg info;
- CHECK(
- GetImageAndLabelAndInfoFromDBValue(value, &img, info, item_id, randgen));
-
- // Factor out the image transformation
- CropTransposeImage<Context>(
- img,
- channels,
- image_data,
- crop_,
- mirror_,
- randgen,
- &mirror_this_image,
- is_test_);
-}
-
-template <class Context>
-bool ImageInputOp<Context>::Prefetch() {
- if (!owned_reader_.get()) {
- // if we are not owning the reader, we will get the reader pointer from
- // input. Otherwise the constructor should have already set the reader
- // pointer.
- reader_ = &OperatorBase::Input<db::DBReader>(0);
- }
- const int channels = color_ ? 3 : 1;
- // Call mutable_data() once to allocate the underlying memory.
- if (gpu_transform_) {
- // we'll transfer up in int8, then convert later
- prefetched_image_.mutable_data<uint8_t>();
- } else {
- prefetched_image_.mutable_data<float>();
- }
-
- prefetched_label_.mutable_data<int>();
- // Prefetching handled with a thread pool of "decode_threads" threads.
-
- for (const auto item_id : c10::irange(batch_size_)) {
- std::string key, value;
- cv::Mat img;
-
- // read data
- reader_->Read(&key, &value);
-
- // determine label type based on first item
- if (item_id == 0) {
- if (use_caffe_datum_) {
- prefetched_label_.mutable_data<int>();
- } else {
- TensorProtos protos;
- CAFFE_ENFORCE(protos.ParseFromString(value));
- TensorProto_DataType labeldt = protos.protos(1).data_type();
- if (labeldt == TensorProto::INT32) {
- prefetched_label_.mutable_data<int>();
- } else if (labeldt == TensorProto::FLOAT) {
- prefetched_label_.mutable_data<float>();
- } else {
- LOG(FATAL) << "Unsupported label type.";
- }
-
- for (const auto i : c10::irange(additional_inputs_count_)) {
- int index = additional_inputs_offset_ + i;
- TensorProto additional_output_proto = protos.protos(index);
- auto sizes =
- std::vector<int64_t>({batch_size_, additional_output_sizes_[i]});
- if (additional_output_proto.data_type() == TensorProto::FLOAT) {
- prefetched_additional_outputs_[i] =
- caffe2::empty(sizes, at::dtype<float>().device(CPU));
- } else if (
- additional_output_proto.data_type() == TensorProto::INT32) {
- prefetched_additional_outputs_[i] =
- caffe2::empty(sizes, at::dtype<int>().device(CPU));
- } else if (
- additional_output_proto.data_type() == TensorProto::INT64) {
- prefetched_additional_outputs_[i] =
- caffe2::empty(sizes, at::dtype<int64_t>().device(CPU));
- } else if (
- additional_output_proto.data_type() == TensorProto::UINT8) {
- prefetched_additional_outputs_[i] =
- caffe2::empty(sizes, at::dtype<uint8_t>().device(CPU));
- } else {
- LOG(FATAL) << "Unsupported output type.";
- }
- }
- }
- }
-
- // launch into thread pool for processing
- // TODO: support color jitter and color lighting in gpu_transform
- if (gpu_transform_) {
- // output of decode will still be int8
- uint8_t* image_data = prefetched_image_.mutable_data<uint8_t>() +
- crop_ * crop_ * channels * item_id;
- thread_pool_->runTaskWithID(std::bind(
- &ImageInputOp<Context>::DecodeAndTransposeOnly,
- this,
- std::string(value),
- image_data,
- item_id,
- channels,
- std::placeholders::_1));
- } else {
- float* image_data = prefetched_image_.mutable_data<float>() +
- crop_ * crop_ * channels * item_id;
- thread_pool_->runTaskWithID(std::bind(
- &ImageInputOp<Context>::DecodeAndTransform,
- this,
- std::string(value),
- image_data,
- item_id,
- channels,
- std::placeholders::_1));
- }
- }
- thread_pool_->waitWorkComplete();
-
-  // we allow at most a max_decode_error_ratio fraction of decode errors
-  // from opencv imdecode before raising a runtime exception
- if ((float)num_decode_errors_in_batch_ / batch_size_ >
- max_decode_error_ratio_) {
- throw std::runtime_error(
- "max_decode_error_ratio exceeded " +
- c10::to_string(max_decode_error_ratio_));
- }
-
- // If the context is not CPUContext, we will need to do a copy in the
- // prefetch function as well.
- auto device = at::device(Context::GetDeviceType());
- if (!std::is_same<Context, CPUContext>::value) {
- // do sync copies
- ReinitializeAndCopyFrom(
- &prefetched_image_on_device_, device, prefetched_image_);
- ReinitializeAndCopyFrom(
- &prefetched_label_on_device_, device, prefetched_label_);
-
- for (const auto i : c10::irange(prefetched_additional_outputs_on_device_.size())) {
- ReinitializeAndCopyFrom(
- &prefetched_additional_outputs_on_device_[i],
- device,
- prefetched_additional_outputs_[i]);
- }
- }
-
- num_decode_errors_in_batch_ = 0;
-
- return true;
-}
-
-template <class Context>
-bool ImageInputOp<Context>::CopyPrefetched() {
- auto type = Device(Context::GetDeviceType());
- auto options = at::device(type);
-
- // Note(jiayq): The if statement below should be optimized away by the
- // compiler since std::is_same is a constexpr.
- if (std::is_same<Context, CPUContext>::value) {
- OperatorBase::OutputTensorCopyFrom(
- 0, options, prefetched_image_, /* async */ true);
- OperatorBase::OutputTensorCopyFrom(
- 1, options, prefetched_label_, /* async */ true);
-
- for (const auto i : c10::irange(2, OutputSize())) {
- OperatorBase::OutputTensorCopyFrom(
- i, options, prefetched_additional_outputs_[i - 2], /* async */ true);
- }
- } else {
- // TODO: support color jitter and color lighting in gpu_transform
- if (gpu_transform_) {
- if (!mean_std_copied_) {
- ReinitializeTensor(
- &mean_gpu_,
- {static_cast<int64_t>(mean_.size())},
- at::dtype<float>().device(Context::GetDeviceType()));
- ReinitializeTensor(
- &std_gpu_,
- {static_cast<int64_t>(std_.size())},
- at::dtype<float>().device(Context::GetDeviceType()));
-
- context_.template CopyFromCPU<float>(
- mean_.size(),
- mean_.data(),
- mean_gpu_.template mutable_data<float>());
- context_.template CopyFromCPU<float>(
- std_.size(), std_.data(), std_gpu_.template mutable_data<float>());
- mean_std_copied_ = true;
- }
- const auto& X = prefetched_image_on_device_;
- // data comes in as NHWC
- const int N = X.dim32(0), C = X.dim32(3), H = X.dim32(1), W = X.dim32(2);
- // data goes out as NCHW
- auto dims = std::vector<int64_t>{N, C, H, W};
- if (!ApplyTransformOnGPU(dims, type)) {
- return false;
- }
-
- } else {
- OperatorBase::OutputTensorCopyFrom(
- 0, type, prefetched_image_on_device_, /* async */ true);
- }
- OperatorBase::OutputTensorCopyFrom(
- 1, type, prefetched_label_on_device_, /* async */ true);
-
- for (const auto i : c10::irange(2, OutputSize())) {
- OperatorBase::OutputTensorCopyFrom(
- i,
- type,
- prefetched_additional_outputs_on_device_[i - 2],
- /* async */ true);
- }
- }
- return true;
-}
-} // namespace caffe2
-
-#endif // CAFFE2_IMAGE_IMAGE_INPUT_OP_H_
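
For context, the heart of the removed CPU path (`ColorNormalization`, called at the end of `TransformImage` above) is a per-pixel, per-channel affine transform over an HWC float buffer. Below is a minimal standalone sketch of that step, assuming a square crop and BGR channel order; the names are illustrative and not part of the Caffe2 API. Note that the removed code multiplies by `std[c]`, i.e. it treats the value as a precomputed scale rather than a divisor, and the sketch keeps that convention.

```cpp
#include <vector>

// Normalize a square HWC float image in place:
//   img[p] = (img[p] - mean[c]) * scale[c]
// Mirrors the triple loop of the removed ColorNormalization helper.
void NormalizeHWC(float* img,
                  int crop,      // side length of the square crop
                  int channels,  // typically 3 (BGR)
                  const std::vector<float>& mean,
                  const std::vector<float>& scale) {
  int p = 0;
  for (int h = 0; h < crop; ++h) {
    for (int w = 0; w < crop; ++w) {
      for (int c = 0; c < channels; ++c) {
        img[p] = (img[p] - mean[c]) * scale[c];
        ++p;
      }
    }
  }
}
```
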
diff --git a/caffe2/image/image_input_op_gpu.cc b/caffe2/image/image_input_op_gpu.cc
deleted file mode 100644
index a484585770..0000000000
--- a/caffe2/image/image_input_op_gpu.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-#include "caffe2/core/common_gpu.h"
-#include "caffe2/core/context_gpu.h"
-#include "caffe2/image/image_input_op.h"
-
-namespace caffe2 {
-
-template <>
-bool ImageInputOp<CUDAContext>::ApplyTransformOnGPU(
- const std::vector<std::int64_t>& dims,
- const c10::Device& type) {
- // GPU transform kernel allows explicitly setting output type
- if (output_type_ == TensorProto_DataType_FLOAT) {
- auto* image_output =
- OperatorBase::OutputTensor(0, dims, at::dtype<float>().device(type));
- TransformOnGPU<uint8_t, float, CUDAContext>(
- prefetched_image_on_device_,
- image_output,
- mean_gpu_,
- std_gpu_,
- &context_);
- } else if (output_type_ == TensorProto_DataType_FLOAT16) {
- auto* image_output =
- OperatorBase::OutputTensor(0, dims, at::dtype<at::Half>().device(type));
- TransformOnGPU<uint8_t, at::Half, CUDAContext>(
- prefetched_image_on_device_,
- image_output,
- mean_gpu_,
- std_gpu_,
- &context_);
- } else {
- return false;
- }
- return true;
-}
-
-REGISTER_CUDA_OPERATOR(ImageInput, ImageInputOp<CUDAContext>);
-
-} // namespace caffe2
diff --git a/caffe2/image/transform_gpu.cu b/caffe2/image/transform_gpu.cu
deleted file mode 100644
index b89886f5fc..0000000000
--- a/caffe2/image/transform_gpu.cu
+++ /dev/null
@@ -1,85 +0,0 @@
-#include "caffe2/core/context_gpu.h"
-#include "caffe2/image/transform_gpu.h"
-#include "caffe2/utils/conversions.h"
-
-/**
- *
- * Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
- * Distributed under 2-clause BSD license; see accompanying LICENSE file
- *
- **/
-
-namespace caffe2 {
-
-namespace {
-
-// input in (int8, NHWC), output in (fp32, NCHW)
-template <typename In, typename Out>
-__global__ void transform_kernel(
- const int C,
- const int H,
- const int W,
- const float* mean,
- const float* std,
- const In* in,
- Out* out) {
- const auto n = blockIdx.x;
-
- const auto nStride = C*H*W;
-
- // pointers to data for this image
- const In *const input_ptr = &in[n*nStride];
- Out *const output_ptr = &out[n*nStride];
-
- // either read or write uncoalesced - try reading
- for (int c=0; c < C; ++c) {
- for (int h=threadIdx.y; h < H; h += blockDim.y) {
- for (int w=threadIdx.x; w < W; w += blockDim.x) {
- const int in_idx = c + C*w + C*W*h; // HWC
- const int out_idx = c*H*W + h*W + w; // CHW
-
- output_ptr[out_idx] = convert::To<float,Out>(
- (convert::To<In,float>(input_ptr[in_idx])-mean[c]) * std[c]);
- }
- }
- }
-}
-
-}
-
-template <typename T_IN, typename T_OUT, class Context>
-
-bool TransformOnGPU(
- Tensor& X,
- Tensor* Y,
- Tensor& mean,
- Tensor& std,
- Context* context) {
- const int N = X.dim32(0), C = X.dim32(3), H = X.dim32(1), W = X.dim32(2);
- auto* input_data = X.template data<T_IN>();
- auto* output_data = Y->template mutable_data<T_OUT>();
-
- transform_kernel<
- T_IN, T_OUT><<<N, dim3(16, 16), 0, context->cuda_stream()>>>(
- C, H, W, mean.template data<float>(), std.template data<float>(),
- input_data, output_data);
- C10_CUDA_KERNEL_LAUNCH_CHECK();
-
- return true;
-};
-
-template bool TransformOnGPU<uint8_t, float, CUDAContext>(
- Tensor& X,
- Tensor* Y,
- Tensor& mean,
- Tensor& std,
- CUDAContext* context);
-
-template bool TransformOnGPU<uint8_t, at::Half, CUDAContext>(
- Tensor& X,
- Tensor* Y,
- Tensor& mean,
- Tensor& std,
- CUDAContext* context);
-
-} // namespace caffe2
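
The removed kernel's job is easiest to see in scalar form: read uint8 pixels in NHWC order, apply the per-channel `(x - mean[c]) * std[c]` normalization, and write float pixels in NCHW order. The following single-image CPU reference is a sketch with illustrative names (it is not the CUDA kernel itself); the index arithmetic matches the `in_idx` / `out_idx` lines above.

```cpp
#include <cstdint>
#include <vector>

// CPU reference for the removed GPU transform: uint8 HWC in, float CHW out,
// with per-channel mean subtraction and scaling.
std::vector<float> NHWCToNCHWNormalize(const std::vector<uint8_t>& in,
                                       int C, int H, int W,
                                       const std::vector<float>& mean,
                                       const std::vector<float>& scale) {
  std::vector<float> out(static_cast<size_t>(C) * H * W);
  for (int c = 0; c < C; ++c) {
    for (int h = 0; h < H; ++h) {
      for (int w = 0; w < W; ++w) {
        const int in_idx = c + C * w + C * W * h;   // HWC offset
        const int out_idx = c * H * W + h * W + w;  // CHW offset
        out[out_idx] = (static_cast<float>(in[in_idx]) - mean[c]) * scale[c];
      }
    }
  }
  return out;
}
```
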
diff --git a/caffe2/image/transform_gpu.h b/caffe2/image/transform_gpu.h
deleted file mode 100644
index 3ca11ce159..0000000000
--- a/caffe2/image/transform_gpu.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef CAFFE2_IMAGE_TRANSFORM_GPU_H_
-#define CAFFE2_IMAGE_TRANSFORM_GPU_H_
-
-/**
- *
- * Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- **/
-
-#include "caffe2/core/context.h"
-
-namespace caffe2 {
-
-template <typename T_IN, typename T_OUT, class Context>
-bool TransformOnGPU(
- Tensor& X,
- Tensor* Y,
- Tensor& mean,
- Tensor& std,
- Context* context);
-
-} // namespace caffe2
-
-#endif
diff --git a/caffe2/video/CMakeLists.txt b/caffe2/video/CMakeLists.txt
deleted file mode 100644
index 195c3c04a4..0000000000
--- a/caffe2/video/CMakeLists.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-if(USE_OPENCV AND OpenCV_FOUND AND USE_FFMPEG AND FFMPEG_FOUND)
- message(STATUS "Including video processing operators")
- # ---[ GPU files
- # ------[ general GPU
- file(GLOB tmp *_gpu.cc)
- set(Caffe2_GPU_SRCS ${Caffe2_GPU_SRCS} ${tmp})
- # ------[ CUDA sources
- file(GLOB tmp *.cu)
- set(Caffe2_GPU_SRCS ${Caffe2_GPU_SRCS} ${tmp})
- # exclude test files
- file(GLOB tmp *_test.cc)
- exclude(Caffe2_GPU_SRCS "${Caffe2_GPU_SRCS}" ${tmp})
-
- # ---[ HIP files
- # ------[ general HIP
- file(GLOB tmp hip/*.cc)
- set(Caffe2_HIP_SRCS ${Caffe2_HIP_SRCS} ${tmp})
- # ------[ HIP sources
- file(GLOB tmp hip/*.hip)
- set(Caffe2_HIP_SRCS ${Caffe2_HIP_SRCS} ${tmp})
- # exclude test files
- file(GLOB tmp hip/*_test.cc)
- exclude(Caffe2_HIP_SRCS "${Caffe2_HIP_SRCS}" ${tmp})
-
- # ---[ CPU files.
- file(GLOB tmp *.cc)
- set(Caffe2_CPU_SRCS ${Caffe2_CPU_SRCS} ${tmp})
- # exclude test files and gpu files
- file(GLOB tmp *_test.cc)
- exclude(Caffe2_CPU_SRCS "${Caffe2_CPU_SRCS}" ${tmp})
- exclude(Caffe2_CPU_SRCS "${Caffe2_CPU_SRCS}" ${Caffe2_GPU_SRCS})
- exclude(Caffe2_CPU_SRCS "${Caffe2_CPU_SRCS}" ${Caffe2_HIP_SRCS})
-
- # ---[ GPU test files
- file(GLOB tmp *_gpu_test.cc)
- set(Caffe2_GPU_TEST_SRCS ${Caffe2_GPU_TEST_SRCS} ${tmp})
-
- # ---[ HIP test files
- file(GLOB tmp hip/*_test.cc)
- set(Caffe2_HIP_TEST_SRCS ${Caffe2_HIP_TEST_SRCS} ${tmp})
-
- # ---[ CPU test files
- file(GLOB tmp *_test.cc)
- set(Caffe2_CPU_TEST_SRCS ${Caffe2_CPU_TEST_SRCS} ${tmp})
- exclude(Caffe2_CPU_TEST_SRCS "${Caffe2_CPU_TEST_SRCS}"
- ${Caffe2_GPU_TEST_SRCS})
-  exclude(Caffe2_CPU_TEST_SRCS "${Caffe2_CPU_TEST_SRCS}"
-          ${Caffe2_HIP_TEST_SRCS})
-
- # ---[ Send the lists to the parent scope.
- set(Caffe2_CPU_SRCS ${Caffe2_CPU_SRCS} PARENT_SCOPE)
- set(Caffe2_GPU_SRCS ${Caffe2_GPU_SRCS} PARENT_SCOPE)
- set(Caffe2_HIP_SRCS ${Caffe2_HIP_SRCS} PARENT_SCOPE)
- set(Caffe2_CPU_TEST_SRCS ${Caffe2_CPU_TEST_SRCS} PARENT_SCOPE)
- set(Caffe2_GPU_TEST_SRCS ${Caffe2_GPU_TEST_SRCS} PARENT_SCOPE)
- set(Caffe2_HIP_TEST_SRCS ${Caffe2_HIP_TEST_SRCS} PARENT_SCOPE)
-else()
-  message(STATUS "Excluding video processing operators due to missing OpenCV and/or FFmpeg")
-endif()
diff --git a/caffe2/video/optical_flow.cc b/caffe2/video/optical_flow.cc
deleted file mode 100644
index 8d343042f1..0000000000
--- a/caffe2/video/optical_flow.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-#include <caffe2/video/optical_flow.h>
-
-namespace caffe2 {
-
-void OpticalFlowExtractor(
- const cv::Mat& prev_gray,
- const cv::Mat& curr_gray,
- const int flow_alg_type,
- cv::Mat& flow) {
-#if CV_MAJOR_VERSION >= 4
- cv::Ptr<cv::DISOpticalFlow> tvl1 = cv::DISOpticalFlow::create();
-#else
- cv::Ptr<cv::DualTVL1OpticalFlow> tvl1 = cv::DualTVL1OpticalFlow::create();
-#endif
- switch (flow_alg_type) {
- case FLowAlgType::FarnebackOpticalFlow:
- cv::calcOpticalFlowFarneback(
- prev_gray,
- curr_gray,
- flow,
- std::sqrt(2) / 2.0,
- 5,
- 10,
- 2,
- 7,
- 1.5,
- cv::OPTFLOW_FARNEBACK_GAUSSIAN);
- break;
- case FLowAlgType::DensePyrLKOpticalFlow:
- LOG(ERROR) << "DensePyrLKOpticalFlow only has sparse version on CPU";
- break;
- case FLowAlgType::BroxOpticalFlow:
- LOG(ERROR) << "BroxOpticalFlow on CPU is not available";
- break;
- case FLowAlgType::OpticalFlowDual_TVL1:
- tvl1->calc(prev_gray, curr_gray, flow);
- break;
- default:
- LOG(ERROR) << "Unsupported optical flow type " << flow_alg_type;
- break;
- }
-}
-
-void MergeOpticalFlow(cv::Mat& prev_flow, const cv::Mat& curr_flow) {
- const int rows = prev_flow.rows;
- const int cols = prev_flow.cols;
-
- // merge two optical flows into one
- for (int y = 0; y < rows; y++) {
- for (int x = 0; x < cols; x++) {
- cv::Point2f u = prev_flow.at<cv::Point2f>(y, x);
- // get the new location
- int x_new = std::min(cols - 1, std::max(0, cvRound(u.x + x)));
- int y_new = std::min(rows - 1, std::max(0, cvRound(u.y + y)));
- cv::Point2f u_new = curr_flow.at<cv::Point2f>(y_new, x_new);
-
- // update the flow
- prev_flow.at<cv::Point2f>(y, x) += u_new;
- }
- }
-}
-
-void MultiFrameOpticalFlowExtractor(
- const std::vector<cv::Mat>& grays,
- const int optical_flow_alg_type,
- cv::Mat& flow) {
- int num_frames = grays.size();
- CAFFE_ENFORCE_GE(num_frames, 2, "need at least 2 frames!");
-
- // compute optical flow for every two frames
- std::vector<cv::Mat> flows;
- for (int i = 0; i < num_frames - 1; i++) {
- cv::Mat tmp;
- OpticalFlowExtractor(grays[i], grays[i + 1], optical_flow_alg_type, tmp);
- flows.push_back(tmp);
- }
-
- flows[0].copyTo(flow);
-  // aggregate optical flow across multiple frames
- for (int i = 1; i < num_frames - 1; i++) {
- MergeOpticalFlow(flow, flows[i]);
- }
-}
-
-} // namespace caffe2
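
The non-obvious part of the removed file is `MergeOpticalFlow`: chaining flow A→B with flow B→C means following the first displacement to a rounded, clamped landing point, sampling the second field there, and adding it. Here is a dependency-free sketch of that step using plain arrays instead of `cv::Mat` (a simplification with the same nearest-pixel lookup and no sub-pixel interpolation; the struct and function names are illustrative).

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// Dense flow field stored as two row-major planes (dx, dy), one value per pixel.
struct Flow2D {
  int rows = 0, cols = 0;
  std::vector<float> dx, dy;
};

// Chain two flow fields: for every pixel, follow prev_flow to its clamped
// landing point, read curr_flow there, and accumulate it into prev_flow.
void ChainFlows(Flow2D& prev_flow, const Flow2D& curr_flow) {
  for (int y = 0; y < prev_flow.rows; ++y) {
    for (int x = 0; x < prev_flow.cols; ++x) {
      const int i = y * prev_flow.cols + x;
      const int x_new = std::clamp(
          static_cast<int>(std::lround(prev_flow.dx[i] + x)), 0, prev_flow.cols - 1);
      const int y_new = std::clamp(
          static_cast<int>(std::lround(prev_flow.dy[i] + y)), 0, prev_flow.rows - 1);
      const int j = y_new * curr_flow.cols + x_new;
      prev_flow.dx[i] += curr_flow.dx[j];
      prev_flow.dy[i] += curr_flow.dy[j];
    }
  }
}
```
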
diff --git a/caffe2/video/optical_flow.h b/caffe2/video/optical_flow.h
deleted file mode 100644
index 2dbd7e31e4..0000000000
--- a/caffe2/video/optical_flow.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef CAFFE2_VIDEO_OPTICAL_FLOW_H_
-#define CAFFE2_VIDEO_OPTICAL_FLOW_H_
-
-#include <opencv2/core.hpp>
-#include <opencv2/highgui.hpp>
-#include <opencv2/opencv.hpp>
-#include <opencv2/video.hpp>
-
-#include <caffe2/core/logging.h>
-
-namespace caffe2 {
-
-// Four different types of optical flow algorithms are supported;
-// BroxOpticalFlow doesn't have a CPU version;
-// DensePyrLKOpticalFlow only has a sparse CPU version;
-enum FLowAlgType {
- FarnebackOpticalFlow = 0,
- DensePyrLKOpticalFlow = 1,
- BroxOpticalFlow = 2,
- OpticalFlowDual_TVL1 = 3,
-};
-
-// Define different types of optical flow data type
-// 0: original two channel optical flow
-// 1: three channel optical flow with magnitude as the third channel
-// 2: two channel optical flow + one channel gray
-// 3: two channel optical flow + three channel rgb
-enum FlowDataType {
- Flow2C = 0,
- Flow3C = 1,
- FlowWithGray = 2,
- FlowWithRGB = 3,
-};
-
-void OpticalFlowExtractor(
- const cv::Mat& prev_gray,
- const cv::Mat& curr_gray,
- const int optical_flow_alg_type,
- cv::Mat& flow);
-
-void MergeOpticalFlow(cv::Mat& prev_flow, const cv::Mat& curr_flow);
-
-void MultiFrameOpticalFlowExtractor(
- const std::vector<cv::Mat>& grays,
- const int optical_flow_alg_type,
- cv::Mat& flow);
-
-} // namespace caffe2
-
-#endif // CAFFE2_VIDEO_OPTICAL_FLOW_H_
diff --git a/caffe2/video/video_decoder.cc b/caffe2/video/video_decoder.cc
deleted file mode 100644
index 03f15731c3..0000000000
--- a/caffe2/video/video_decoder.cc
+++ /dev/null
@@ -1,800 +0,0 @@
-#include <assert.h>
-#include <caffe2/core/logging.h>
-#include <caffe2/video/video_decoder.h>
-#include <array>
-#include <mutex>
-#include <random>
-
-namespace caffe2 {
-
-VideoDecoder::VideoDecoder() {
- static bool gInitialized = false;
- static std::mutex gMutex;
- std::unique_lock<std::mutex> lock(gMutex);
- if (!gInitialized) {
- av_register_all();
- avcodec_register_all();
- avformat_network_init();
- gInitialized = true;
- }
-}
-
-void VideoDecoder::getAudioSample(
- AVPacket& packet,
- AVCodecContext* audioCodecContext_,
- AVFrame* audioStreamFrame_,
- SwrContext* convertCtx_,
- Callback& callback,
- const Params& params) {
- int frame_finished = 0;
- auto result = avcodec_decode_audio4(
- audioCodecContext_, audioStreamFrame_, &frame_finished, &packet);
-
- if (frame_finished) {
- // from
- // https://www.ffmpeg.org/doxygen/2.3/decoding_encoding_8c-example.html#a57
- auto c = audioCodecContext_;
- int data_size = av_samples_get_buffer_size(
- nullptr, c->channels, audioStreamFrame_->nb_samples, c->sample_fmt, 1);
- if (data_size < 0) {
- // This should not occur, checking just for paranoia
- LOG(ERROR) << "Failed to calculate data size";
- }
-
- // from https://www.ffmpeg.org/doxygen/2.1/group__lswr.html#details
- uint8_t* output;
- auto swr = convertCtx_;
- auto inrate = audioCodecContext_->sample_rate;
- auto in_samples = audioStreamFrame_->nb_samples;
-
- int out_samples = av_rescale_rnd(
- swr_get_delay(swr, inrate) + in_samples,
- params.outrate_,
- inrate,
- AV_ROUND_UP);
-
- if (out_samples > 0) {
- auto input = (const uint8_t**)&audioStreamFrame_->data[0];
- av_samples_alloc(
- &output,
- nullptr,
- c->channels,
- out_samples,
- (AVSampleFormat)params.outfmt_,
- 0);
-
- // resample the audio data
- out_samples = swr_convert(swr, &output, out_samples, input, in_samples);
- auto sample_size = out_samples * c->channels * sizeof(float);
- auto buffer = std::make_unique<float[]>(sample_size);
- memcpy(buffer.get(), output, sample_size);
- av_freep(&output);
-
- unique_ptr<DecodedAudio> audio_sample = make_unique<DecodedAudio>();
- audio_sample->dataSize_ = data_size;
- audio_sample->outSampleSize_ = out_samples * c->channels;
- audio_sample->audio_data_ = std::move(buffer);
- callback.audioDecoded(std::move(audio_sample));
- }
- } else {
- result = packet.size;
- }
- packet.size -= result;
- packet.data += result;
-}
-
-void VideoDecoder::ResizeAndKeepAspectRatio(
- const int origWidth,
- const int origHeight,
- const int short_edge,
- const int long_edge,
- int& outWidth,
- int& outHeight) {
- if (origWidth < origHeight) {
- // dominant height
- if (short_edge > 0) {
- // use short_edge for rescale
- float ratio = short_edge / float(origWidth);
- outWidth = short_edge;
- outHeight = (int)round(ratio * origHeight);
- } else {
- // use long_edge for rescale
- float ratio = long_edge / float(origHeight);
- outHeight = long_edge;
- outWidth = (int)round(ratio * origWidth);
- }
- } else {
- // dominant width
- if (short_edge > 0) {
- // use short_edge for rescale
- float ratio = short_edge / float(origHeight);
- outHeight = short_edge;
- outWidth = (int)round(ratio * origWidth);
- } else {
- // use long_edge for rescale
- float ratio = long_edge / float(origWidth);
- outWidth = long_edge;
- outHeight = (int)round(ratio * origHeight);
- }
- }
-}
-
-void VideoDecoder::decodeLoop(
- const string& videoName,
- VideoIOContext& ioctx,
- const Params& params,
- const int start_frm,
- Callback& callback) {
- AVPixelFormat pixFormat = params.pixelFormat_;
- AVFormatContext* inputContext = avformat_alloc_context();
- AVStream* videoStream_ = nullptr;
- AVCodecContext* videoCodecContext_ = nullptr;
- AVCodecContext* audioCodecContext_ = nullptr;
- AVFrame* videoStreamFrame_ = nullptr;
- AVFrame* audioStreamFrame_ = nullptr;
- SwrContext* convertCtx_ = nullptr;
- AVPacket packet;
- av_init_packet(&packet); // init packet
- SwsContext* scaleContext_ = nullptr;
-
- try {
- inputContext->pb = ioctx.get_avio();
- inputContext->flags |= AVFMT_FLAG_CUSTOM_IO;
- int ret = 0;
-
- // Determining the input format:
- int probeSz = 1 * 1024 + AVPROBE_PADDING_SIZE;
- DecodedFrame::AvDataPtr probe((uint8_t*)av_malloc(probeSz));
- memset(probe.get(), 0, probeSz);
- int len = ioctx.read(probe.get(), probeSz - AVPROBE_PADDING_SIZE);
- if (len < probeSz - AVPROBE_PADDING_SIZE) {
- LOG(ERROR) << "Insufficient data to determine video format";
- return;
- }
- // seek back to start of stream
- ioctx.seek(0, SEEK_SET);
-
- unique_ptr<AVProbeData> probeData(new AVProbeData());
- probeData->buf = probe.get();
- probeData->buf_size = len;
- probeData->filename = "";
- // Determine the input-format:
- inputContext->iformat = av_probe_input_format(probeData.get(), 1);
- // this is to avoid the double-free error
- if (inputContext->iformat == nullptr) {
- LOG(ERROR) << "inputContext iformat is nullptr!";
- return;
- }
-
- ret = avformat_open_input(&inputContext, "", nullptr, nullptr);
- if (ret < 0) {
- LOG(ERROR) << "Unable to open stream : " << ffmpegErrorStr(ret);
- return;
- }
-
- ret = avformat_find_stream_info(inputContext, nullptr);
- if (ret < 0) {
- LOG(ERROR) << "Unable to find stream info in " << videoName << " "
- << ffmpegErrorStr(ret);
- return;
- }
-
- // Decode the first video stream
- int videoStreamIndex_ = params.streamIndex_;
- int audioStreamIndex_ = params.streamIndex_;
- if (params.streamIndex_ == -1) {
- for (int i = 0; i < inputContext->nb_streams; i++) {
- auto stream = inputContext->streams[i];
- if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
- videoStreamIndex_ == -1) {
- videoStreamIndex_ = i;
- videoStream_ = stream;
- } else if (
- stream->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
- audioStreamIndex_ == -1) {
- audioStreamIndex_ = i;
- }
- if (videoStreamIndex_ != -1 && audioStreamIndex_ != -1) {
- break;
- }
- }
- }
- if (videoStream_ == nullptr) {
- LOG(ERROR) << "Unable to find video stream in " << videoName << " "
- << ffmpegErrorStr(ret);
- return;
- }
-
- // Initialize codec
- AVDictionary* opts = nullptr;
- videoCodecContext_ = videoStream_->codec;
- try {
- ret = avcodec_open2(
- videoCodecContext_,
- avcodec_find_decoder(videoCodecContext_->codec_id),
- &opts);
- } catch (const std::exception&) {
- LOG(ERROR) << "Exception during open video codec";
- return;
- }
-
- if (ret < 0) {
- LOG(ERROR) << "Cannot open video codec : "
- << videoCodecContext_->codec->name;
- return;
- }
-
- if (params.getAudio_ && audioStreamIndex_ >= 0) {
- // see e.g. ridge/decoder/StreamDecoder.cpp
- audioCodecContext_ = inputContext->streams[audioStreamIndex_]->codec;
- ret = avcodec_open2(
- audioCodecContext_,
- avcodec_find_decoder(audioCodecContext_->codec_id),
- nullptr);
-
- if (ret < 0) {
- LOG(ERROR) << "Cannot open audio codec : "
- << audioCodecContext_->codec->name;
- return;
- }
-
- convertCtx_ = swr_alloc_set_opts(
- nullptr,
- params.outlayout_,
- (AVSampleFormat)params.outfmt_,
- params.outrate_,
- audioCodecContext_->channel_layout,
- audioCodecContext_->sample_fmt,
- audioCodecContext_->sample_rate,
- 0,
- nullptr);
-
- if (convertCtx_ == nullptr) {
- LOG(ERROR) << "Cannot setup sample format converter.";
- return;
- }
- if (swr_init(convertCtx_) < 0) {
- LOG(ERROR) << "Cannot init sample format converter.";
- return;
- }
- }
-
- // Calculate if we need to rescale the frames
- const int origWidth = videoCodecContext_->width;
- const int origHeight = videoCodecContext_->height;
- int outWidth = origWidth;
- int outHeight = origHeight;
-
- if (params.video_res_type_ == VideoResType::ORIGINAL_RES) {
- // if the original resolution is too low,
- // make it at least the same size as crop_size_
- if (params.crop_size_ > origWidth || params.crop_size_ > origHeight) {
- ResizeAndKeepAspectRatio(
- origWidth, origHeight, params.crop_size_, -1, outWidth, outHeight);
- }
- } else if (params.video_res_type_ == VideoResType::USE_SHORT_EDGE) {
- // resize the image to the predefined
- // short_edge_ resolution while keep the aspect ratio
- ResizeAndKeepAspectRatio(
- origWidth, origHeight, params.short_edge_, -1, outWidth, outHeight);
- } else if (params.video_res_type_ == VideoResType::USE_WIDTH_HEIGHT) {
- // resize the image to the predefined
- // resolution and ignore the aspect ratio
- outWidth = params.outputWidth_;
- outHeight = params.outputHeight_;
- } else {
- LOG(ERROR) << "Unknown VideoResType: " << params.video_res_type_;
- return;
- }
-
- // Make sure that we have a valid format
- if (videoCodecContext_->pix_fmt == AV_PIX_FMT_NONE) {
- LOG(ERROR) << "pixel format is not valid.";
- return;
- }
-
- // Create a scale context
- scaleContext_ = sws_getContext(
- videoCodecContext_->width,
- videoCodecContext_->height,
- videoCodecContext_->pix_fmt,
- outWidth,
- outHeight,
- pixFormat,
- SWS_FAST_BILINEAR,
- nullptr,
- nullptr,
- nullptr);
-
- // Getting video meta data
- VideoMeta videoMeta;
- videoMeta.codec_type = videoCodecContext_->codec_type;
- videoMeta.width = outWidth;
- videoMeta.height = outHeight;
- videoMeta.pixFormat = pixFormat;
-
- // avoid division by zero, code adapted from
- // https://www.ffmpeg.org/doxygen/0.6/rational_8h-source.html
- if (videoStream_->avg_frame_rate.num == 0 ||
- videoStream_->avg_frame_rate.den == 0) {
- LOG(ERROR) << "Frame rate is wrong. No data found.";
- return;
- }
-
- videoMeta.fps = av_q2d(videoStream_->avg_frame_rate);
- callback.videoDecodingStarted(videoMeta);
-
- if (params.intervals_.size() == 0) {
- LOG(ERROR) << "Empty sampling intervals.";
- return;
- }
-
- std::vector<SampleInterval>::const_iterator itvlIter =
- params.intervals_.begin();
- if (itvlIter->timestamp != 0) {
- LOG(ERROR) << "Sampling interval starting timestamp is not zero.";
- return;
- }
-
- double currFps = itvlIter->fps;
- if (currFps < 0 && currFps != SpecialFps::SAMPLE_ALL_FRAMES &&
- currFps != SpecialFps::SAMPLE_TIMESTAMP_ONLY) {
- // fps must be 0, -1, -2 or > 0
- LOG(ERROR) << "Invalid sampling fps.";
- return;
- }
-
- double prevTimestamp = itvlIter->timestamp;
- itvlIter++;
- if (itvlIter != params.intervals_.end() &&
- prevTimestamp >= itvlIter->timestamp) {
- LOG(ERROR) << "Sampling interval timestamps must be strictly ascending.";
- return;
- }
-
- double lastFrameTimestamp = -1.0;
- double timestamp = -1.0;
-
- // Initialize frame and packet.
- // These will be reused across calls.
- videoStreamFrame_ = av_frame_alloc();
- audioStreamFrame_ = av_frame_alloc();
-
- // frame index in video stream
- int frameIndex = -1;
-    // frame index of output frames
- int outputFrameIndex = -1;
-
- /* identify the starting point from where we must start decoding */
- std::mt19937 meta_randgen(time(nullptr));
- long int start_ts = -1;
- bool mustDecodeAll = false;
-
- if (videoStream_->duration > 0 && videoStream_->nb_frames > 0) {
- /* we have a valid duration and nb_frames. We can safely
- * detect an intermediate timestamp to start decoding from. */
-
-      // leave a margin of 10 frames to take into account the error
- // from av_seek_frame
- long int margin =
- int(ceil((10 * videoStream_->duration) / (videoStream_->nb_frames)));
- // if we need to do temporal jittering
- if (params.decode_type_ == DecodeType::DO_TMP_JITTER) {
- /* estimate the average duration for the required # of frames */
- double maxFramesDuration =
- (videoStream_->duration * params.num_of_required_frame_) /
- (videoStream_->nb_frames);
- int ts1 = 0;
- int ts2 = videoStream_->duration - int(ceil(maxFramesDuration));
- ts2 = ts2 > 0 ? ts2 : 0;
- // pick a random timestamp between ts1 and ts2. ts2 is selected such
- // that you have enough frames to satisfy the required # of frames.
- start_ts = std::uniform_int_distribution<>(ts1, ts2)(meta_randgen);
- // seek a frame at start_ts
- ret = av_seek_frame(
- inputContext,
- videoStreamIndex_,
- 0 > (start_ts - margin) ? 0 : (start_ts - margin),
- AVSEEK_FLAG_BACKWARD);
-
- // if we need to decode from the start_frm
- } else if (params.decode_type_ == DecodeType::USE_START_FRM) {
- if (videoStream_ == nullptr) {
- LOG(ERROR) << "Nullptr found at videoStream_";
- return;
- }
- start_ts = int(floor(
- (videoStream_->duration * start_frm) / (videoStream_->nb_frames)));
- // seek a frame at start_ts
- ret = av_seek_frame(
- inputContext,
- videoStreamIndex_,
- 0 > (start_ts - margin) ? 0 : (start_ts - margin),
- AVSEEK_FLAG_BACKWARD);
- } else {
- mustDecodeAll = true;
- }
-
- if (ret < 0) {
- LOG(INFO) << "Unable to decode from a random start point";
- /* fall back to default decoding of all frames from start */
- av_seek_frame(inputContext, videoStreamIndex_, 0, AVSEEK_FLAG_BACKWARD);
- mustDecodeAll = true;
- }
- } else {
- mustDecodeAll = true;
- }
-
- int gotPicture = 0;
- int eof = 0;
- int selectiveDecodedFrames = 0;
-
- int maxFrames = (params.decode_type_ == DecodeType::DO_UNIFORM_SMP)
- ? MAX_DECODING_FRAMES
- : params.num_of_required_frame_;
- // There is a delay between reading packets from the
- // transport and getting decoded frames back.
- // Therefore, after EOF, continue going while
- // the decoder is still giving us frames.
- while ((!eof || gotPicture) &&
- /* either you must decode all frames or decode up to maxFrames
- * based on status of the mustDecodeAll flag */
- (mustDecodeAll || (selectiveDecodedFrames < maxFrames)) &&
- /* If on the last interval and not autodecoding keyframes and a
- * SpecialFps indicates no more frames are needed, stop decoding */
- !((itvlIter == params.intervals_.end() &&
- (currFps == SpecialFps::SAMPLE_TIMESTAMP_ONLY ||
- currFps == SpecialFps::SAMPLE_NO_FRAME)) &&
- !params.keyFrames_)) {
- try {
- if (!eof) {
- ret = av_read_frame(inputContext, &packet);
- if (ret == AVERROR_EOF) {
- eof = 1;
- av_free_packet(&packet);
- packet.data = nullptr;
- packet.size = 0;
- // stay in the while loop to flush frames
- } else if (ret == AVERROR(EAGAIN)) {
- av_free_packet(&packet);
- continue;
- } else if (ret < 0) {
- LOG(ERROR) << "Error reading packet : " << ffmpegErrorStr(ret);
- return;
- }
-
- auto si = packet.stream_index;
- if (params.getAudio_ && audioStreamIndex_ >= 0 &&
- si == audioStreamIndex_) {
- // Audio packets can have multiple audio frames in a single packet
- while (packet.size > 0) {
- assert(audioCodecContext_ != nullptr);
- assert(convertCtx_ != nullptr);
- getAudioSample(
- packet,
- audioCodecContext_,
- audioStreamFrame_,
- convertCtx_,
- callback,
- params);
- }
- }
-
- if (si != videoStreamIndex_) {
- av_free_packet(&packet);
- continue;
- }
- }
-
- ret = avcodec_decode_video2(
- videoCodecContext_, videoStreamFrame_, &gotPicture, &packet);
- if (ret < 0) {
- LOG(ERROR) << "Error decoding video frame : " << ffmpegErrorStr(ret);
- return;
- }
- try {
- // Nothing to do without a picture
- if (!gotPicture) {
- av_free_packet(&packet);
- continue;
- }
- frameIndex++;
-
- long int frame_ts =
- av_frame_get_best_effort_timestamp(videoStreamFrame_);
- timestamp = frame_ts * av_q2d(videoStream_->time_base);
- if ((frame_ts >= start_ts && !mustDecodeAll) || mustDecodeAll) {
- /* process current frame if:
- * 1) We are not doing selective decoding and mustDecodeAll
- * OR
- * 2) We are doing selective decoding and current frame
- * timestamp is >= start_ts from where we start selective
- * decoding*/
- // if reaching the next interval, update the current fps
- // and reset lastFrameTimestamp so the current frame could be
- // sampled (unless fps == SpecialFps::SAMPLE_NO_FRAME)
- if (itvlIter != params.intervals_.end() &&
- timestamp >= itvlIter->timestamp) {
- lastFrameTimestamp = -1.0;
- currFps = itvlIter->fps;
- prevTimestamp = itvlIter->timestamp;
- itvlIter++;
- if (itvlIter != params.intervals_.end() &&
- prevTimestamp >= itvlIter->timestamp) {
- LOG(ERROR)
- << "Sampling interval timestamps must be strictly ascending.";
- return;
- }
- }
-
- // keyFrame will bypass all checks on fps sampling settings
- bool keyFrame = params.keyFrames_ && videoStreamFrame_->key_frame;
- if (!keyFrame) {
- // if fps == SpecialFps::SAMPLE_NO_FRAME (0), don't sample at all
- if (currFps == SpecialFps::SAMPLE_NO_FRAME) {
- av_free_packet(&packet);
- continue;
- }
-
- // fps is considered reached in the following cases:
- // 1. lastFrameTimestamp < 0 - start of a new interval
- // (or first frame)
- // 2. currFps == SpecialFps::SAMPLE_ALL_FRAMES (-1) - sample every
- // frame
- // 3. timestamp - lastFrameTimestamp has reached target fps and
- // currFps > 0 (not special fps setting)
- // different modes for fps:
-              // SpecialFps::SAMPLE_NO_FRAME (0):
- // disable fps sampling, no frame sampled at all
- // SpecialFps::SAMPLE_ALL_FRAMES (-1):
- // unlimited fps sampling, will sample at native video fps
- // SpecialFps::SAMPLE_TIMESTAMP_ONLY (-2):
- // disable fps sampling, but will get the frame at specific
- // timestamp
- // others (> 0): decoding at the specified fps
- bool fpsReached = lastFrameTimestamp < 0 ||
- currFps == SpecialFps::SAMPLE_ALL_FRAMES ||
- (currFps > 0 &&
- timestamp >= lastFrameTimestamp + (1 / currFps));
-
- if (!fpsReached) {
- av_free_packet(&packet);
- continue;
- }
- }
-
- lastFrameTimestamp = timestamp;
-
- outputFrameIndex++;
- if (params.maximumOutputFrames_ != -1 &&
- outputFrameIndex >= params.maximumOutputFrames_) {
- // enough frames
- av_free_packet(&packet);
- break;
- }
-
- AVFrame* rgbFrame = av_frame_alloc();
- if (!rgbFrame) {
- LOG(ERROR) << "Error allocating AVframe";
- return;
- }
-
- try {
- // Determine required buffer size and allocate buffer
- int numBytes = avpicture_get_size(pixFormat, outWidth, outHeight);
- DecodedFrame::AvDataPtr buffer(
- (uint8_t*)av_malloc(numBytes * sizeof(uint8_t)));
-
- int size = avpicture_fill(
- (AVPicture*)rgbFrame,
- buffer.get(),
- pixFormat,
- outWidth,
- outHeight);
-
- sws_scale(
- scaleContext_,
- videoStreamFrame_->data,
- videoStreamFrame_->linesize,
- 0,
- videoCodecContext_->height,
- rgbFrame->data,
- rgbFrame->linesize);
-
- unique_ptr<DecodedFrame> frame = make_unique<DecodedFrame>();
- frame->width_ = outWidth;
- frame->height_ = outHeight;
- frame->data_ = std::move(buffer);
- frame->size_ = size;
- frame->index_ = frameIndex;
- frame->outputFrameIndex_ = outputFrameIndex;
- frame->timestamp_ = timestamp;
- frame->keyFrame_ = videoStreamFrame_->key_frame;
-
- callback.frameDecoded(std::move(frame));
-
- selectiveDecodedFrames++;
- av_frame_free(&rgbFrame);
- } catch (const std::exception&) {
- av_frame_free(&rgbFrame);
- }
- }
- av_frame_unref(videoStreamFrame_);
- av_frame_unref(audioStreamFrame_);
- } catch (const std::exception&) {
- av_frame_unref(videoStreamFrame_);
- av_frame_unref(audioStreamFrame_);
- }
-
- av_free_packet(&packet);
- } catch (const std::exception&) {
- av_free_packet(&packet);
- }
- } // of while loop
- callback.videoDecodingEnded(timestamp);
-
-    // free all resources
- sws_freeContext(scaleContext_);
- swr_free(&convertCtx_);
- av_packet_unref(&packet);
- av_frame_free(&videoStreamFrame_);
- av_frame_free(&audioStreamFrame_);
- avcodec_close(videoCodecContext_);
- if (audioCodecContext_ != nullptr) {
- avcodec_close(audioCodecContext_);
- }
- avformat_close_input(&inputContext);
- avformat_free_context(inputContext);
- } catch (const std::exception&) {
- // In case of decoding error
-    // free all resources
- sws_freeContext(scaleContext_);
- swr_free(&convertCtx_);
- av_packet_unref(&packet);
- av_frame_free(&videoStreamFrame_);
- av_frame_free(&audioStreamFrame_);
- avcodec_close(videoCodecContext_);
- avcodec_close(audioCodecContext_);
- avformat_close_input(&inputContext);
- avformat_free_context(inputContext);
- }
-}
-
-void VideoDecoder::decodeMemory(
- const string& videoName,
- const char* buffer,
- const int size,
- const Params& params,
- const int start_frm,
- Callback& callback) {
- VideoIOContext ioctx(buffer, size);
- decodeLoop(videoName, ioctx, params, start_frm, callback);
-}
-
-void VideoDecoder::decodeFile(
- const string& file,
- const Params& params,
- const int start_frm,
- Callback& callback) {
- VideoIOContext ioctx(file);
- decodeLoop(file, ioctx, params, start_frm, callback);
-}
-
-string VideoDecoder::ffmpegErrorStr(int result) {
- std::array<char, 128> buf;
- av_strerror(result, buf.data(), buf.size());
- return string(buf.data());
-}
-
-void FreeDecodedData(
- std::vector<std::unique_ptr<DecodedFrame>>& sampledFrames,
- std::vector<std::unique_ptr<DecodedAudio>>& sampledAudio) {
- // free the sampledFrames and sampledAudio
- for (int i = 0; i < sampledFrames.size(); i++) {
- DecodedFrame* p = sampledFrames[i].release();
- delete p;
- }
- for (int i = 0; i < sampledAudio.size(); i++) {
- DecodedAudio* p = sampledAudio[i].release();
- delete p;
- }
- sampledFrames.clear();
- sampledAudio.clear();
-}
-
-bool DecodeMultipleClipsFromVideo(
- const char* video_buffer,
- const std::string& video_filename,
- const int encoded_size,
- const Params& params,
- const int start_frm,
- const int clip_per_video,
- const std::vector<int>& clip_start_positions,
- const bool use_local_file,
- int& height,
- int& width,
- std::vector<unsigned char*>& buffer_rgb) {
- std::vector<std::unique_ptr<DecodedFrame>> sampledFrames;
- std::vector<std::unique_ptr<DecodedAudio>> sampledAudio;
- VideoDecoder decoder;
-
- CallbackImpl callback;
- // decoding from buffer or file
- if (!use_local_file) {
- decoder.decodeMemory(
- string("Memory Buffer"),
- video_buffer,
- encoded_size,
- params,
- start_frm,
- callback);
- } else {
- decoder.decodeFile(video_filename, params, start_frm, callback);
- }
-
- for (auto& frame : callback.frames) {
- sampledFrames.push_back(std::move(frame));
- }
- for (auto& audio_sample : callback.audio_samples) {
- sampledAudio.push_back(std::move(audio_sample));
- }
-
- for (int i = 0; i < buffer_rgb.size(); i++) {
- unsigned char* buff = buffer_rgb[i];
- delete[] buff;
- }
- buffer_rgb.clear();
-
- if (sampledFrames.size() < params.num_of_required_frame_) {
- LOG(ERROR)
- << "The video seems faulty and we could not decode enough frames: "
- << sampledFrames.size() << " VS " << params.num_of_required_frame_;
- FreeDecodedData(sampledFrames, sampledAudio);
- return true;
- }
- if (sampledFrames.size() == 0) {
-    LOG(ERROR) << "The sampled frames have size 0, no frame to process";
- FreeDecodedData(sampledFrames, sampledAudio);
- return true;
- }
- height = sampledFrames[0]->height_;
- width = sampledFrames[0]->width_;
- float sample_stepsz = (clip_per_video <= 1)
- ? 0
- : (float(sampledFrames.size() - params.num_of_required_frame_) /
- (clip_per_video - 1));
-
- int image_size = 3 * height * width;
- int clip_size = params.num_of_required_frame_ * image_size;
- // get the RGB frames for each clip
- if (clip_start_positions.size() > 0) {
- for (int i = 0; i < clip_start_positions.size(); i++) {
- unsigned char* buffer_rgb_ptr = new unsigned char[clip_size];
- int clip_start = clip_start_positions[i];
- for (int j = 0; j < params.num_of_required_frame_; j++) {
- memcpy(
- buffer_rgb_ptr + j * image_size,
- (unsigned char*)sampledFrames[j + clip_start]->data_.get(),
- image_size * sizeof(unsigned char));
- }
- buffer_rgb.push_back(buffer_rgb_ptr);
- }
- } else {
- for (int i = 0; i < clip_per_video; i++) {
- unsigned char* buffer_rgb_ptr = new unsigned char[clip_size];
- int clip_start = floor(i * sample_stepsz);
- for (int j = 0; j < params.num_of_required_frame_; j++) {
- memcpy(
- buffer_rgb_ptr + j * image_size,
- (unsigned char*)sampledFrames[j + clip_start]->data_.get(),
- image_size * sizeof(unsigned char));
- }
- buffer_rgb.push_back(buffer_rgb_ptr);
- }
- }
- FreeDecodedData(sampledFrames, sampledAudio);
-
- return true;
-}
-
-} // namespace caffe2
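
When no explicit `clip_start_positions` are given, `DecodeMultipleClipsFromVideo` spaces `clip_per_video` clips evenly over the decoded frames. The stride arithmetic is compact but easy to misread, so here is an isolated sketch with a worked example; the helper name is illustrative, not part of the removed API.

```cpp
#include <cmath>
#include <vector>

// Uniformly spaced clip start indices over a decoded frame sequence, matching
// the stride used above: step = (num_frames - frames_per_clip) / (clips - 1),
// clip i starts at floor(i * step). A single clip starts at frame 0.
std::vector<int> UniformClipStarts(int num_frames, int frames_per_clip, int clips) {
  std::vector<int> starts;
  const float step = (clips <= 1)
      ? 0.0f
      : static_cast<float>(num_frames - frames_per_clip) / (clips - 1);
  for (int i = 0; i < clips; ++i) {
    starts.push_back(static_cast<int>(std::floor(i * step)));
  }
  return starts;
}
// Example: 300 decoded frames, 16-frame clips, 10 clips ->
// starts at 0, 31, 63, 94, 126, 157, 189, 220, 252, 284.
```
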
diff --git a/caffe2/video/video_decoder.h b/caffe2/video/video_decoder.h
deleted file mode 100644
index ba607fd8da..0000000000
--- a/caffe2/video/video_decoder.h
+++ /dev/null
@@ -1,525 +0,0 @@
-#ifndef CAFFE2_VIDEO_VIDEO_DECODER_H_
-#define CAFFE2_VIDEO_VIDEO_DECODER_H_
-
-#include <caffe2/core/logging.h>
-#include <stdio.h>
-#include <memory>
-#include <string>
-#include <vector>
-
-extern "C" {
-#include <libavcodec/avcodec.h>
-#include <libavformat/avformat.h>
-#include <libavformat/avio.h>
-#include <libavutil/log.h>
-#include <libavutil/motion_vector.h>
-#include <libswresample/swresample.h>
-#include <libswscale/swscale.h>
-}
-
-namespace caffe2 {
-
-#define VIO_BUFFER_SZ 32768
-#define MAX_DECODING_FRAMES 10000
-
-// enum to specify 3 special fps sampling behaviors:
-// 0: disable fps sampling, no frame sampled at all
-// -1: unlimited fps sampling, will sample at native video fps
-// -2: disable fps sampling, but will get the frame at specific timestamp
-enum SpecialFps {
- SAMPLE_NO_FRAME = 0,
- SAMPLE_ALL_FRAMES = -1,
- SAMPLE_TIMESTAMP_ONLY = -2,
-};
-
-// three different types of resolution when decoding the video
-// 0: resize to width x height and ignore the aspect ratio;
-// 1: resize to short_edge and keep the aspect ratio;
-// 2: using the original resolution of the video; if resolution
-// is smaller than crop_size x crop_size, resize to crop_size
-// and keep the aspect ratio;
-// 3: for xray video service
-enum VideoResType {
- USE_WIDTH_HEIGHT = 0,
- USE_SHORT_EDGE = 1,
- ORIGINAL_RES = 2,
-};
-
-// three different types of decoding behavior are supported
-// 0: do temporal jittering to sample a random clip from the video
-// 1: uniformly sample multiple clips from the video;
-// 2: sample a clip from a given starting frame
-// 3: for xray video service
-enum DecodeType {
- DO_TMP_JITTER = 0,
- DO_UNIFORM_SMP = 1,
- USE_START_FRM = 2,
-};
-
-// sampling interval for fps starting at specified timestamp
-// use enum SpecialFps to set special fps decoding behavior
-// note sampled fps will not always accurately follow the target fps,
-// because the sampled frame has to snap to an actual frame timestamp,
-// e.g. video fps = 25, sample fps = 4 will sample every 0.28s, not 0.25
-// video fps = 25, sample fps = 5 will sample every 0.24s, not 0.2,
-// because of floating-point division accuracy (1 / 5.0 is not exactly 0.2)
-struct SampleInterval {
- double timestamp;
- double fps;
- SampleInterval() : timestamp(-1), fps(SpecialFps::SAMPLE_ALL_FRAMES) {}
- SampleInterval(double ts, double f) : timestamp(ts), fps(f) {}
- bool operator<(const SampleInterval& itvl) const {
- return (timestamp < itvl.timestamp);
- }
-};
-
-class Params {
- public:
- // return all key-frames regardless of specified fps
- bool keyFrames_ = false;
-
- // return audio data while decoding the video
- bool getAudio_ = false;
-
- // for sampling audio data
- int outrate_ = 22000;
- int outfmt_ = AV_SAMPLE_FMT_FLT;
- int64_t outlayout_ = AV_CH_LAYOUT_MONO;
-
- // Output image pixel format
- AVPixelFormat pixelFormat_ = AVPixelFormat::AV_PIX_FMT_RGB24;
-
- // Index of stream to decode.
- // -1 will automatically decode the first video stream.
- int streamIndex_ = -1;
-
- // How many frames to output at most from the video
- // -1 no limit
- int maximumOutputFrames_ = -1;
-
- // params for video resolution
- int video_res_type_ = VideoResType::USE_WIDTH_HEIGHT;
- int crop_size_ = -1;
- int short_edge_ = -1;
-
-  // Output video size, -1 to preserve original dimension
- int outputWidth_ = -1;
- int outputHeight_ = -1;
-
- // max output dimension, -1 to preserve original size
- // the larger dimension of the video will be scaled to this size,
- // and the second dimension will be scaled to preserve aspect ratio
- int maxOutputDimension_ = -1;
-
- // params for decoding behavior
- int decode_type_ = DecodeType::DO_TMP_JITTER;
- int num_of_required_frame_ = -1;
-
- // intervals_ control variable sampling fps between different timestamps
- // intervals_ must be ordered strictly ascending by timestamps
- // the first interval must have a timestamp of zero
- // fps must be either the 3 special fps defined in SpecialFps, or > 0
- std::vector<SampleInterval> intervals_ = {{0, SpecialFps::SAMPLE_ALL_FRAMES}};
-
- Params() {}
-
- /**
- * FPS of output frames
- * setting here will reset intervals_ and force decoding at target FPS
-   * This can be used if the user just wants to decode at a steady fps
- */
- Params& fps(float v) {
- intervals_.clear();
- intervals_.emplace_back(0, v);
- return *this;
- }
-
- /**
- * Sample output frames at a specified list of timestamps
- * Timestamps must be in increasing order, and timestamps past the end of the
- * video will be ignored
- * Setting here will reset intervals_
- */
- Params& setSampleTimestamps(const std::vector<double>& timestamps) {
- intervals_.clear();
- // insert an interval per desired frame.
- for (auto& timestamp : timestamps) {
- intervals_.emplace_back(timestamp, SpecialFps::SAMPLE_TIMESTAMP_ONLY);
- }
- return *this;
- }
-
- /**
- * Pixel format of output buffer, default PIX_FMT_RGB24
- */
- Params& pixelFormat(AVPixelFormat pixelFormat) {
- pixelFormat_ = pixelFormat;
- return *this;
- }
-
- /**
- * Return all key-frames
- */
- Params& keyFrames(bool keyFrames) {
- keyFrames_ = keyFrames;
- return *this;
- }
-
- /**
- * Index of video stream to process, defaults to the first video stream
- */
- Params& streamIndex(int index) {
- streamIndex_ = index;
- return *this;
- }
-
- /**
- * Only output this many frames, default to no limit
- */
- Params& maxOutputFrames(int count) {
- maximumOutputFrames_ = count;
- return *this;
- }
-
- /**
- * Output frame width, default to video width
- */
- Params& outputWidth(int width) {
- outputWidth_ = width;
- return *this;
- }
-
- /**
- * Output frame height, default to video height
- */
- Params& outputHeight(int height) {
- outputHeight_ = height;
- return *this;
- }
-
- /**
-   * Max dimension of either width or height; if either is bigger,
-   * it will be scaled down to this and the second dimension
-   * will be scaled down to maintain the aspect ratio.
- */
- Params& maxOutputDimension(int size) {
- maxOutputDimension_ = size;
- return *this;
- }
-};
-
-// data structure for storing decoded video frames
-class DecodedFrame {
- public:
- struct avDeleter {
- void operator()(unsigned char* p) const {
- av_free(p);
- }
- };
- using AvDataPtr = std::unique_ptr<uint8_t, avDeleter>;
-
- // decoded data buffer
- AvDataPtr data_;
-
- // size in bytes
- int size_ = 0;
-
- // frame dimensions
- int width_ = 0;
- int height_ = 0;
-
- // timestamp in seconds since beginning of video
- double timestamp_ = 0;
-
- // true if this is a key frame.
- bool keyFrame_ = false;
-
- // index of frame in video
- int index_ = -1;
-
- // Sequential number of outputted frame
- int outputFrameIndex_ = -1;
-};
-
-// data structure for storing decoded audio data
-struct DecodedAudio {
- int dataSize_;
- int outSampleSize_;
- std::unique_ptr<float[]> audio_data_;
-
- explicit DecodedAudio(
- int dataSize = 0,
- int outSampleSize = 0,
- std::unique_ptr<float[]> audio_data = nullptr)
- : dataSize_(dataSize),
- outSampleSize_(outSampleSize),
- audio_data_(std::move(audio_data)) {}
-};
-
-class VideoIOContext {
- public:
- explicit VideoIOContext(const std::string& fname)
- : workBuffersize_(VIO_BUFFER_SZ),
- workBuffer_((uint8_t*)av_malloc(workBuffersize_)),
- inputFile_(nullptr),
- inputBuffer_(nullptr),
- inputBufferSize_(0) {
- inputFile_ = fopen(fname.c_str(), "rb");
- if (inputFile_ == nullptr) {
- LOG(ERROR) << "Error opening video file " << fname;
- return;
- }
- ctx_ = avio_alloc_context(
- static_cast<unsigned char*>(workBuffer_.get()),
- workBuffersize_,
- 0,
- this,
- &VideoIOContext::readFile,
- nullptr, // no write function
- &VideoIOContext::seekFile);
- }
-
- explicit VideoIOContext(const char* buffer, int size)
- : workBuffersize_(VIO_BUFFER_SZ),
- workBuffer_((uint8_t*)av_malloc(workBuffersize_)),
- inputFile_(nullptr),
- inputBuffer_(buffer),
- inputBufferSize_(size) {
- ctx_ = avio_alloc_context(
- static_cast<unsigned char*>(workBuffer_.get()),
- workBuffersize_,
- 0,
- this,
- &VideoIOContext::readMemory,
- nullptr, // no write function
- &VideoIOContext::seekMemory);
- }
-
- ~VideoIOContext() {
- av_free(ctx_);
- if (inputFile_) {
- fclose(inputFile_);
- }
- }
-
- int read(unsigned char* buf, int buf_size) {
- if (inputBuffer_) {
- return readMemory(this, buf, buf_size);
- } else if (inputFile_) {
- return readFile(this, buf, buf_size);
- } else {
- return -1;
- }
- }
-
- int64_t seek(int64_t offset, int whence) {
- if (inputBuffer_) {
- return seekMemory(this, offset, whence);
- } else if (inputFile_) {
- return seekFile(this, offset, whence);
- } else {
- return -1;
- }
- }
-
- static int readFile(void* opaque, unsigned char* buf, int buf_size) {
- VideoIOContext* h = static_cast<VideoIOContext*>(opaque);
- if (feof(h->inputFile_)) {
- return AVERROR_EOF;
- }
- size_t ret = fread(buf, 1, buf_size, h->inputFile_);
- if (ret < buf_size) {
- if (ferror(h->inputFile_)) {
- return -1;
- }
- }
- return ret;
- }
-
- static int64_t seekFile(void* opaque, int64_t offset, int whence) {
- VideoIOContext* h = static_cast<VideoIOContext*>(opaque);
- switch (whence) {
- case SEEK_CUR: // from current position
- case SEEK_END: // from eof
- case SEEK_SET: // from beginning of file
- return fseek(h->inputFile_, static_cast<long>(offset), whence);
- break;
- case AVSEEK_SIZE:
- int64_t cur = ftell(h->inputFile_);
- fseek(h->inputFile_, 0L, SEEK_END);
- int64_t size = ftell(h->inputFile_);
- fseek(h->inputFile_, cur, SEEK_SET);
- return size;
- }
-
- return -1;
- }
-
- static int readMemory(void* opaque, unsigned char* buf, int buf_size) {
- VideoIOContext* h = static_cast<VideoIOContext*>(opaque);
- if (buf_size < 0) {
- return -1;
- }
-
- int reminder = h->inputBufferSize_ - h->offset_;
- int r = buf_size < reminder ? buf_size : reminder;
- if (r < 0) {
- return AVERROR_EOF;
- }
-
- memcpy(buf, h->inputBuffer_ + h->offset_, r);
- h->offset_ += r;
- return r;
- }
-
- static int64_t seekMemory(void* opaque, int64_t offset, int whence) {
- VideoIOContext* h = static_cast<VideoIOContext*>(opaque);
- switch (whence) {
- case SEEK_CUR: // from current position
- h->offset_ += offset;
- break;
- case SEEK_END: // from eof
- h->offset_ = h->inputBufferSize_ + offset;
- break;
- case SEEK_SET: // from beginning of file
- h->offset_ = offset;
- break;
- case AVSEEK_SIZE:
- return h->inputBufferSize_;
- }
- return h->offset_;
- }
-
- AVIOContext* get_avio() {
- return ctx_;
- }
-
- private:
- int workBuffersize_;
- DecodedFrame::AvDataPtr workBuffer_;
- // for file mode
- FILE* inputFile_;
-
- // for memory mode
- const char* inputBuffer_;
- int inputBufferSize_;
- int offset_ = 0;
-
- AVIOContext* ctx_;
-};
-
-struct VideoMeta {
- double fps;
- int width;
- int height;
- enum AVMediaType codec_type;
- AVPixelFormat pixFormat;
- VideoMeta()
- : fps(-1),
- width(-1),
- height(-1),
- codec_type(AVMEDIA_TYPE_VIDEO),
- pixFormat(AVPixelFormat::AV_PIX_FMT_RGB24) {}
-};
-
-class Callback {
- public:
- virtual void frameDecoded(std::unique_ptr<DecodedFrame> img) = 0;
- virtual void audioDecoded(
- std::unique_ptr<DecodedAudio> /*decoded audio data*/) {}
- virtual void videoDecodingStarted(const VideoMeta& /*videoMeta*/) {}
- virtual void videoDecodingEnded(double /*lastFrameTimestamp*/) {}
- virtual ~Callback() {}
-};
-
-class VideoDecoder {
- public:
- VideoDecoder();
-
- void decodeFile(
- const std::string& filename,
- const Params& params,
- const int start_frm,
- Callback& callback);
-
- void decodeMemory(
- const std::string& filename,
- const char* buffer,
- const int size,
- const Params& params,
- const int start_frm,
- Callback& callback);
-
- private:
- std::string ffmpegErrorStr(int result);
-
- void ResizeAndKeepAspectRatio(
- const int origWidth,
- const int origHeight,
- const int short_edge,
- const int long_edge,
- int& outWidth,
- int& outHeight);
-
- void getAudioSample(
- AVPacket& packet,
- AVCodecContext* audioCodecContext_,
- AVFrame* audioStreamFrame_,
- SwrContext* convertCtx_,
- Callback& callback,
- const Params& params);
-
- void decodeLoop(
- const std::string& videoName,
- VideoIOContext& ioctx,
- const Params& params,
- const int start_frm,
- Callback& callback);
-};
-
-TORCH_API void FreeDecodedData(
- std::vector<std::unique_ptr<DecodedFrame>>& sampledFrames,
- std::vector<std::unique_ptr<DecodedAudio>>& sampledAudio);
-
-TORCH_API bool DecodeMultipleClipsFromVideo(
- const char* video_buffer,
- const std::string& video_filename,
- const int encoded_size,
- const Params& params,
- const int start_frm,
- const int clip_per_video,
- const std::vector<int>& clip_start_positions,
- const bool use_local_file,
- int& height,
- int& width,
- std::vector<unsigned char*>& buffer_rgb);
-
-class CallbackImpl : public Callback {
- public:
- std::vector<std::unique_ptr<DecodedFrame>> frames;
- std::vector<std::unique_ptr<DecodedAudio>> audio_samples;
-
- explicit CallbackImpl() {
- clear();
- }
-
- void clear() {
- FreeDecodedData(frames, audio_samples);
- }
-
- void frameDecoded(std::unique_ptr<DecodedFrame> frame) override {
- frames.push_back(std::move(frame));
- }
-
- void audioDecoded(std::unique_ptr<DecodedAudio> audio_sample) override {
- audio_samples.push_back(std::move(audio_sample));
- }
-
- void videoDecodingStarted(const VideoMeta& /*videoMeta*/) override {
- clear();
- }
-};
-
-} // namespace caffe2
-
-#endif // CAFFE2_VIDEO_VIDEO_DECODER_H_
diff --git a/caffe2/video/video_input_op.cc b/caffe2/video/video_input_op.cc
deleted file mode 100644
index 8a6530a207..0000000000
--- a/caffe2/video/video_input_op.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-#include <caffe2/video/video_input_op.h>
-
-namespace caffe2 {
-
-REGISTER_CPU_OPERATOR(VideoInput, VideoInputOp<CPUContext>);
-
-OPERATOR_SCHEMA(VideoInput)
- .NumInputs(0, 1)
- .NumOutputs(2, 5)
- .TensorInferenceFunction(
- [](const OperatorDef& def,
- const vector<TensorShape>& /* unused */ /*in*/) {
- ArgumentHelper helper(def);
- int batch_size = helper.GetSingleArgument<int>("batch_size", 0);
- int clip_per_video =
- helper.GetSingleArgument<int>("clip_per_video", 1);
- int crop_size = helper.GetSingleArgument<int>("crop_size", -1);
- int length_rgb = helper.GetSingleArgument<int>("length_rgb", 0);
- int channels_rgb = helper.GetSingleArgument<int>("channels_rgb", 3);
- int length_of = helper.GetSingleArgument<int>("length_of", 0);
- int channels_of = helper.GetSingleArgument<int>("channels_of", 2);
-
- // get the flags
- bool get_rgb = helper.GetSingleArgument<bool>("get_rgb", true);
- bool get_optical_flow =
- helper.GetSingleArgument<bool>("get_optical_flow", false);
- bool do_multi_label =
- helper.GetSingleArgument<bool>("do_multi_label", false);
- bool get_video_id =
- helper.GetSingleArgument<bool>("get_video_id", false);
- bool get_start_frame =
- helper.GetSingleArgument<bool>("get_start_frame", false);
- // get starting positions if available
- vector<int> clip_start_positions =
- helper.GetRepeatedArgument<int>("clip_start_positions", {});
- // In case clip_start_positions are given, set the clip_per_video arg
- if (clip_start_positions.size() > 0) {
- clip_per_video = clip_start_positions.size();
- }
-
- int output_size = 1;
- if (get_rgb) {
- output_size++;
- }
- if (get_optical_flow) {
- output_size++;
- }
- if (get_video_id) {
- output_size++;
- }
- if (get_start_frame) {
- output_size++;
- }
-
- int index = 0;
- vector<TensorShape> out(output_size);
- TORCH_CHECK_GT(crop_size, 0);
- batch_size *= clip_per_video;
- if (get_rgb) {
- out[index++] = CreateTensorShape(
- vector<int>{
- batch_size, channels_rgb, length_rgb, crop_size, crop_size},
- TensorProto::FLOAT);
- }
- if (get_optical_flow) {
- out[index++] = CreateTensorShape(
- vector<int>{
- batch_size, channels_of, length_of, crop_size, crop_size},
- TensorProto::FLOAT);
- }
- if (!do_multi_label) {
- out[index++] = CreateTensorShape(
- vector<int>{1, batch_size}, TensorProto::INT32);
- } else {
- int num_of_class = helper.GetSingleArgument<int>("num_of_class", 0);
- out[index++] = CreateTensorShape(
- vector<int>{batch_size, num_of_class}, TensorProto::INT32);
- }
- if (get_video_id) {
- out[index++] = CreateTensorShape(
- vector<int64_t>{1, batch_size}, TensorProto::INT64);
- }
- if (get_start_frame) {
- out[index] = CreateTensorShape(
- vector<int>{1, batch_size}, TensorProto::INT32);
- }
-
- return out;
- });
-
-NO_GRADIENT(VideoInput);
-
-} // namespace caffe2
diff --git a/caffe2/video/video_input_op.h b/caffe2/video/video_input_op.h
deleted file mode 100644
index 8da9d7c444..0000000000
--- a/caffe2/video/video_input_op.h
+++ /dev/null
@@ -1,1024 +0,0 @@
-#ifndef CAFFE2_VIDEO_VIDEO_INPUT_OP_H_
-#define CAFFE2_VIDEO_VIDEO_INPUT_OP_H_
-
-#include <exception>
-#include <istream>
-#include <ostream>
-#include <random>
-#include <string>
-
-#include <c10/core/thread_pool.h>
-#include <c10/util/irange.h>
-#include <caffe2/core/db.h>
-#include <caffe2/core/logging.h>
-#include <caffe2/operators/prefetch_op.h>
-#include <caffe2/utils/math.h>
-#include <caffe2/video/video_decoder.h>
-#include <caffe2/video/video_io.h>
-
-namespace caffe2 {
-
-template <class Context>
-class VideoInputOp final : public PrefetchOperator<Context> {
- public:
- using OperatorBase::OutputSize;
- using PrefetchOperator<Context>::context_;
- using PrefetchOperator<Context>::prefetch_thread_;
- explicit VideoInputOp(const OperatorDef& operator_def, Workspace* ws);
- ~VideoInputOp() {
- PrefetchOperator<Context>::Finalize();
- }
-
- // override methods
- bool Prefetch() override;
- bool CopyPrefetched() override;
-
- private:
- void CheckParamsAndPrint();
-
- bool GetClipsAndLabelsFromDBValue(
- const std::string& value,
- int& height,
- int& width,
- std::vector<unsigned char*>& buffer_rgb,
- int* label_data,
- int64_t* video_id_data,
- int* start_frame_data,
- std::mt19937* randgen);
-
- void DecodeAndTransform(
- const std::string& value,
- float* clip_rgb_data,
- float* clip_of_data,
- int* label_data,
- int64_t* video_id_data,
- int* start_frame_data,
- std::mt19937* randgen,
- std::bernoulli_distribution* mirror_this_clip);
-
- void GetLabelsFromProto(const TensorProto& label_proto, int* label_data);
-
- bool GetImageAndLabelsFromDBValue(
- const std::string& value,
- int& height,
- int& width,
- std::vector<unsigned char*>& buffer_rgb,
- int* label_data);
-
- const db::DBReader* reader_;
- CPUContext cpu_context_;
- Tensor prefetched_clip_rgb_;
- Tensor prefetched_clip_of_;
- Tensor prefetched_label_;
- Tensor prefetched_video_id_;
- Tensor prefetched_start_frame_;
- Tensor prefetched_clip_rgb_on_device_{Context::GetDeviceType()};
- Tensor prefetched_clip_of_on_device_{Context::GetDeviceType()};
- Tensor prefetched_label_on_device_{Context::GetDeviceType()};
- Tensor prefetched_video_id_on_device_{Context::GetDeviceType()};
- Tensor prefetched_start_frame_on_device_{Context::GetDeviceType()};
-
- int batch_size_;
- int clip_per_video_;
- std::vector<int> clip_start_positions_;
- std::vector<float> mean_rgb_;
- std::vector<float> inv_std_rgb_;
- std::vector<float> mean_of_;
- std::vector<float> inv_std_of_;
- int channels_rgb_;
- int channels_of_;
- int crop_size_;
- int scale_h_;
- int scale_w_;
- int short_edge_;
- std::vector<int> jitter_scales_;
- int length_rgb_;
- int sampling_rate_rgb_;
- int random_sampling_rate_;
- int num_of_required_frame_;
- int length_of_;
- int sampling_rate_of_;
- int frame_gap_of_;
- bool random_mirror_;
- int num_of_class_;
- bool use_local_file_;
- bool random_crop_;
- int crop_per_clip_;
- int flow_data_type_;
- int flow_alg_type_;
- int decode_type_;
- int video_res_type_;
- bool do_flow_aggregation_;
- bool image_as_input_;
- bool get_rgb_;
- bool get_optical_flow_;
- bool get_video_id_;
- bool get_start_frame_;
- bool do_multi_label_;
-
- // thread pool for parse + decode
- int num_decode_threads_;
- std::shared_ptr<TaskThreadPool> thread_pool_;
-};
-
-template <class Context>
-void VideoInputOp<Context>::CheckParamsAndPrint() {
- // check whether the input parameters are valid or not
- CAFFE_ENFORCE_GT(batch_size_, 0, "Batch size should be positive.");
- CAFFE_ENFORCE_GT(
- clip_per_video_, 0, "Number of clips per video should be positive.");
- CAFFE_ENFORCE_GT(crop_size_, 0, "Must provide the cropping value.");
-
- if (!image_as_input_) {
- CAFFE_ENFORCE_GT(
- num_of_required_frame_,
- 0,
- "Required number of frames must be positive.");
- }
-
- if (image_as_input_) {
- CAFFE_ENFORCE_EQ(
- video_res_type_,
- VideoResType::USE_WIDTH_HEIGHT,
- "Currently only USE_WIDTH_HEIGHT option is supported with images");
- }
-
- if (video_res_type_ == VideoResType::USE_SHORT_EDGE) {
- CAFFE_ENFORCE_GT(short_edge_, 0, "Must provide the short edge value.");
- CAFFE_ENFORCE_GE(
- short_edge_,
- crop_size_,
- "The short edge must be no smaller than the crop value.");
- } else if (video_res_type_ == VideoResType::USE_WIDTH_HEIGHT) {
- CAFFE_ENFORCE_GT(scale_h_, 0, "Must provide the scale height value.");
- CAFFE_ENFORCE_GT(scale_w_, 0, "Must provide the scale width value.");
- CAFFE_ENFORCE_GE(
- scale_h_,
- crop_size_,
- "The scaled height must be no smaller than the crop value.");
- CAFFE_ENFORCE_GE(
- scale_w_,
- crop_size_,
- "The scaled width must be no smaller than the crop value.");
- }
-
- if (jitter_scales_.size() > 0) {
- CAFFE_ENFORCE_GE(
- video_res_type_,
- VideoResType::USE_SHORT_EDGE,
- "Scale jittering is used with short_edge scaling only");
- }
-
- if (get_rgb_) {
- CAFFE_ENFORCE_GT(length_rgb_, 0, "Must provide rgb clip length.");
- CAFFE_ENFORCE_GT(
- sampling_rate_rgb_, 0, "4 frames for mc2; 2 frames for res3d.");
- CAFFE_ENFORCE_EQ(
- channels_rgb_, mean_rgb_.size(), "Number rgb channels is wrong!");
- CAFFE_ENFORCE_EQ(
- channels_rgb_, inv_std_rgb_.size(), "Number rgb channels is wrong!");
- }
-
- if (get_optical_flow_) {
- CAFFE_ENFORCE_GT(length_of_, 0, "Must provide optical flow clip length.");
- CAFFE_ENFORCE_GT(
- sampling_rate_of_, 0, "4 frames for mc2; 2 frames for res3d.");
- CAFFE_ENFORCE_EQ(
- channels_of_,
- mean_of_.size(),
- "Number of optical flow channels is wrong!");
- CAFFE_ENFORCE_EQ(
- channels_of_,
- inv_std_of_.size(),
- "Number of optical flow channels is wrong!");
- }
-
- if (clip_per_video_ > 1) {
- CAFFE_ENFORCE_EQ(
- decode_type_,
- DecodeType::DO_UNIFORM_SMP,
- "Only uniformly sampling is supported when sampling multiple clips!");
- }
-
- if (do_multi_label_) {
- CAFFE_ENFORCE_GT(
- num_of_class_,
- 0,
- "Number of classes must be set when using multiple labels.");
- }
-
- // print out the parameter settings
- LOG(INFO) << "Creating a clip input op with the following setting: ";
- LOG(INFO) << " Input Type: " << (image_as_input_ ? "Image" : "Video");
- LOG(INFO) << " Using " << num_decode_threads_ << " CPU threads;";
- LOG(INFO) << " Outputting in batches of " << batch_size_ << " videos;";
- LOG(INFO) << " Each video has " << clip_per_video_ << " clips;";
- LOG(INFO) << " Scaling image to " << scale_h_ << "x" << scale_w_;
- LOG(INFO) << " Cropping video frame to " << crop_size_
- << (random_mirror_ ? " with " : " without ") << "random mirroring;";
- LOG(INFO) << " Using " << (random_crop_ ? "random" : "center") << " crop";
- LOG(INFO) << " Using " << crop_per_clip_ << " spatial crop(s)";
-
- if (get_rgb_) {
- LOG(INFO) << " Using a clip of " << length_rgb_ << " rgb frames "
- << "with " << channels_rgb_ << " channels "
- << "and a sampling rate of 1:" << sampling_rate_rgb_;
- if (random_sampling_rate_) {
- LOG(INFO) << "random sampling with max:" << random_sampling_rate_;
- }
- for (const auto i : c10::irange(channels_rgb_)) {
- LOG(INFO) << " RGB " << i << "-th channel mean: " << mean_rgb_[i]
- << " std: " << 1.f / inv_std_rgb_[i];
- }
- }
-
- if (get_optical_flow_) {
- LOG(INFO) << " Using a clip of " << length_of_ << " optical flow frames "
- << "with " << channels_of_ << " channels "
- << "and a sampling rate of 1:" << sampling_rate_of_
- << " flow_data_type_: " << flow_data_type_
- << " flow_alg_type_: " << flow_alg_type_;
- for (const auto i : c10::irange(channels_of_)) {
- LOG(INFO) << " Optical flow" << i
- << "-th channel mean: " << mean_of_[i]
- << " std: " << 1.f / inv_std_of_[i];
- }
- }
-
- if (video_res_type_ == VideoResType::ORIGINAL_RES) {
- LOG(INFO) << " Use original resolution";
- } else if (video_res_type_ == VideoResType::USE_SHORT_EDGE) {
- LOG(INFO) << " Resize and keep aspect ratio";
- } else if (video_res_type_ == VideoResType::USE_WIDTH_HEIGHT) {
- LOG(INFO) << " Resize and ignore aspect ratio";
- } else {
- LOG(ERROR) << " Unknown video resolution type";
- }
-
- if (video_res_type_ == VideoResType::USE_SHORT_EDGE) {
- if (jitter_scales_.size() > 0) {
- LOG(INFO) << "Using scale jittering:";
- for (const auto idx : c10::irange(jitter_scales_.size())) {
- LOG(INFO) << "scale " << idx << ": " << jitter_scales_[idx];
- }
- } else {
- LOG(INFO) << "No scale jittering is used.";
- }
- }
-
- if (decode_type_ == DecodeType::DO_TMP_JITTER) {
- LOG(INFO) << " Do temporal jittering";
- } else if (decode_type_ == DecodeType::USE_START_FRM) {
- LOG(INFO) << " Use start_frm for decoding";
- } else if (decode_type_ == DecodeType::DO_UNIFORM_SMP) {
- LOG(INFO) << " Do uniformly sampling";
- } else {
- LOG(ERROR) << " Unknown video decoding type";
- }
- if (get_start_frame_) {
- CAFFE_ENFORCE_EQ(
- decode_type_,
- DecodeType::USE_START_FRM,
- "Only decoding with starting frame is supported w/ get start_frame!");
- CAFFE_ENFORCE_EQ(
- clip_per_video_, 1, "get start frame support only clip per video = 1");
- }
-}
-
-template <class Context>
-VideoInputOp<Context>::VideoInputOp(
- const OperatorDef& operator_def,
- Workspace* ws)
- : PrefetchOperator<Context>(operator_def, ws),
- reader_(nullptr),
- batch_size_(
- OperatorBase::template GetSingleArgument<int>("batch_size", 0)),
- clip_per_video_(
- OperatorBase::template GetSingleArgument<int>("clip_per_video", 1)),
- clip_start_positions_(OperatorBase::template GetRepeatedArgument<int>(
- "clip_start_positions",
- {})),
- channels_rgb_(
- OperatorBase::template GetSingleArgument<int>("channels_rgb", 3)),
- channels_of_(
- OperatorBase::template GetSingleArgument<int>("channels_of", 2)),
- crop_size_(OperatorBase::template GetSingleArgument<int>("crop_size", 0)),
- scale_h_(OperatorBase::template GetSingleArgument<int>("scale_h", 0)),
- scale_w_(OperatorBase::template GetSingleArgument<int>("scale_w", 0)),
- short_edge_(
- OperatorBase::template GetSingleArgument<int>("short_edge", 0)),
- jitter_scales_(
- OperatorBase::template GetRepeatedArgument<int>("jitter_scales", {})),
- length_rgb_(
- OperatorBase::template GetSingleArgument<int>("length_rgb", 0)),
- sampling_rate_rgb_(OperatorBase::template GetSingleArgument<int>(
- "sampling_rate_rgb",
- 1)),
- random_sampling_rate_(OperatorBase::template GetSingleArgument<int>(
- "random_sampling_rate",
- 0)),
- length_of_(OperatorBase::template GetSingleArgument<int>("length_of", 0)),
- sampling_rate_of_(
- OperatorBase::template GetSingleArgument<int>("sampling_rate_of", 1)),
- frame_gap_of_(
- OperatorBase::template GetSingleArgument<int>("frame_gap_of", 1)),
- random_mirror_(OperatorBase::template GetSingleArgument<bool>(
- "random_mirror",
- true)),
- num_of_class_(
- OperatorBase::template GetSingleArgument<int>("num_of_class", 0)),
- use_local_file_(OperatorBase::template GetSingleArgument<bool>(
- "use_local_file",
- false)),
- random_crop_(
- OperatorBase::template GetSingleArgument<bool>("random_crop", true)),
- crop_per_clip_(
- OperatorBase::template GetSingleArgument<int>("crop_per_clip", 1)),
- flow_data_type_(
- OperatorBase::template GetSingleArgument<int>("flow_data_type", 0)),
- flow_alg_type_(
- OperatorBase::template GetSingleArgument<int>("flow_alg_type", 0)),
- decode_type_(
- OperatorBase::template GetSingleArgument<int>("decode_type", 0)),
- video_res_type_(
- OperatorBase::template GetSingleArgument<int>("video_res_type", 0)),
- do_flow_aggregation_(OperatorBase::template GetSingleArgument<bool>(
- "do_flow_aggregation",
- true)),
- image_as_input_(OperatorBase::template GetSingleArgument<bool>(
- "image_as_input",
- false)),
- get_rgb_(OperatorBase::template GetSingleArgument<bool>("get_rgb", true)),
- get_optical_flow_(OperatorBase::template GetSingleArgument<bool>(
- "get_optical_flow",
- false)),
- get_video_id_(OperatorBase::template GetSingleArgument<bool>(
- "get_video_id",
- false)),
- get_start_frame_(OperatorBase::template GetSingleArgument<bool>(
- "get_start_frame",
- false)),
- do_multi_label_(OperatorBase::template GetSingleArgument<bool>(
- "do_multi_label",
- false)),
- num_decode_threads_(OperatorBase::template GetSingleArgument<int>(
- "num_decode_threads",
- 4)),
- thread_pool_(std::make_shared<TaskThreadPool>(num_decode_threads_)) {
- try {
- num_of_required_frame_ = 0;
-
- // mean and std for normalizing different optical flow data type;
- // Example statistics generated from SOA are shown below, and you may
- // want to change them if you are running on a different dataset;
-
-    // 7 channels: (flow_x, flow_y, flow_magnitude, gray, Red, Green, Blue)
- const std::vector<float> InputDataMean = {
- 0.0046635, 0.0046261, 0.963986, 102.976, 110.201, 100.64, 95.9966};
- const std::vector<float> InputDataStd = {
- 0.972347, 0.755146, 1.43588, 55.3691, 58.1489, 56.4701, 55.3324};
-
- // if we need RGB as an input
- if (get_rgb_ && !image_as_input_) {
- // how many frames we need for RGB
- num_of_required_frame_ = std::max(
- num_of_required_frame_, (length_rgb_ - 1) * sampling_rate_rgb_ + 1);
-
- if (random_sampling_rate_) {
- num_of_required_frame_ = std::max(
- num_of_required_frame_,
- (length_rgb_ - 1) * random_sampling_rate_ + 1);
- }
-
- channels_rgb_ = 3;
- for (const auto i : c10::irange(4, 7)) {
- mean_rgb_.push_back(InputDataMean[i]);
- inv_std_rgb_.push_back(1.f / InputDataStd[i]);
- }
- }
-
- if (image_as_input_) {
- channels_rgb_ = 3;
- length_rgb_ = 1;
- clip_per_video_ = 1;
- get_optical_flow_ = false;
- get_rgb_ = true;
- sampling_rate_rgb_ = 1;
- for (const auto i : c10::irange(4, 7)) {
- mean_rgb_.push_back(InputDataMean[i]);
- inv_std_rgb_.push_back(1.f / InputDataStd[i]);
- }
- }
-
- // if we need optical flow as an input
- if (get_optical_flow_) {
- // how many frames we need for optical flow
- num_of_required_frame_ = std::max(
- num_of_required_frame_,
- (length_of_ - 1) * sampling_rate_of_ + frame_gap_of_ + 1);
-
- // set the parameters for different input data types
- switch (flow_data_type_) {
- case FlowDataType::Flow2C:
- channels_of_ = 2;
- for (const auto i : c10::irange(channels_of_)) {
- mean_of_.push_back(InputDataMean[i]);
- inv_std_of_.push_back(1.f / InputDataStd[i]);
- }
- break;
-
- case FlowDataType::Flow3C:
- channels_of_ = 3;
- for (const auto i : c10::irange(channels_of_)) {
- mean_of_.push_back(InputDataMean[i]);
- inv_std_of_.push_back(1.f / InputDataStd[i]);
- }
- break;
-
- // early fusion with gray
- case FlowDataType::FlowWithGray:
- channels_of_ = 3;
- for (const auto i : c10::irange(2)) {
- mean_of_.push_back(InputDataMean[i]);
- inv_std_of_.push_back(1.f / InputDataStd[i]);
- }
- mean_of_.push_back(InputDataMean[3]);
- inv_std_of_.push_back(1.f / InputDataStd[3]);
- break;
-
- // early fusion with RGB
- case FlowDataType::FlowWithRGB:
- channels_of_ = 5;
- for (const auto i : c10::irange(2)) {
- mean_of_.push_back(InputDataMean[i]);
- inv_std_of_.push_back(1.f / InputDataStd[i]);
- }
- for (const auto i : c10::irange(4, 7)) {
- mean_of_.push_back(InputDataMean[i]);
- inv_std_of_.push_back(1.f / InputDataStd[i]);
- }
- break;
-
- default:
- LOG(ERROR) << "Unknown optical flow type " << flow_data_type_;
- break;
- }
- }
-
- CheckParamsAndPrint();
- // Always need a dbreader, even when using local video files
- CAFFE_ENFORCE_GT(
- operator_def.input_size(), 0, "Need to have a DBReader blob input");
-
- vector<int64_t> data_shape(5);
- vector<int64_t> label_shape(2);
-
- // In case clip_start_positions are given, set the clip_per_video arg
- if (clip_start_positions_.size() > 0) {
- clip_per_video_ = clip_start_positions_.size();
- }
-
- // for RGB data
- data_shape[0] = batch_size_ * clip_per_video_ * crop_per_clip_;
- data_shape[1] = channels_rgb_;
- data_shape[2] = length_rgb_;
- data_shape[3] = crop_size_;
- data_shape[4] = crop_size_;
- ReinitializeTensor(
- &prefetched_clip_rgb_, data_shape, at::dtype<float>().device(CPU));
-
- // for optical flow data
- data_shape[1] = channels_of_;
- data_shape[2] = length_of_;
- ReinitializeTensor(
- &prefetched_clip_of_, data_shape, at::dtype<float>().device(CPU));
-
- // If do_multi_label is used, output label is a binary vector
- // of length num_of_class indicating which labels present
- if (do_multi_label_) {
- label_shape[0] = batch_size_ * clip_per_video_ * crop_per_clip_;
- label_shape[1] = num_of_class_;
- ReinitializeTensor(
- &prefetched_label_, label_shape, at::dtype<int>().device(CPU));
- } else {
- ReinitializeTensor(
- &prefetched_label_,
- vector<int64_t>(1, batch_size_ * clip_per_video_ * crop_per_clip_),
- at::dtype<int>().device(CPU));
- }
-
- ReinitializeTensor(
- &prefetched_video_id_,
- vector<int64_t>(1, batch_size_ * clip_per_video_ * crop_per_clip_),
- at::dtype<int>().device(CPU));
- ReinitializeTensor(
- &prefetched_start_frame_,
- vector<int64_t>(1, batch_size_ * clip_per_video_ * crop_per_clip_),
- at::dtype<int>().device(CPU));
-
- } catch (const std::exception& exc) {
- std::cerr << "While calling VideoInputOp initialization\n";
- std::cerr << exc.what();
- }
-}
-
-template <class Context>
-void VideoInputOp<Context>::GetLabelsFromProto(
- const TensorProto& label_proto,
- int* label_data) {
- int num_clips = clip_per_video_ * crop_per_clip_;
- if (!do_multi_label_) {
- for (const auto i : c10::irange(num_clips)) {
- label_data[i] = label_proto.int32_data(0);
- }
- } else {
- // For multiple label case, output label is a binary vector
- // where presented concepts are marked 1
- memset(label_data, 0, sizeof(int) * num_of_class_ * num_clips);
- for (const auto i : c10::irange(num_clips)) {
- for (const auto j : c10::irange(label_proto.int32_data_size())) {
- CAFFE_ENFORCE_LT(
- label_proto.int32_data(j),
- num_of_class_,
- "Label should be less than the number of classes.");
- label_data[i * num_of_class_ + label_proto.int32_data(j)] = 1;
- }
- }
- }
-}
-
-template <class Context>
-bool VideoInputOp<Context>::GetImageAndLabelsFromDBValue(
- const std::string& value,
- int& height,
- int& width,
- std::vector<unsigned char*>& buffer_rgb,
- int* label_data) {
- TensorProtos protos;
- CAFFE_ENFORCE(protos.ParseFromString(value));
- const TensorProto& image_proto = protos.protos(0);
- const TensorProto& label_proto = protos.protos(1);
-
- GetLabelsFromProto(label_proto, label_data);
-
- cv::Mat src;
- if (image_proto.data_type() == TensorProto::STRING) {
- // encoded image string.
- TORCH_DCHECK_EQ(image_proto.string_data_size(), 1);
- const string& encoded_image_str = image_proto.string_data(0);
- int encoded_size = encoded_image_str.size();
- // We use a cv::Mat to wrap the encoded str so we do not need a copy.
- src = cv::imdecode(
- cv::Mat(
- 1,
- &encoded_size,
- CV_8UC1,
- const_cast<char*>(encoded_image_str.data())),
- cv::IMREAD_COLOR);
- if (src.rows == 0 || src.cols == 0) {
- throw std::runtime_error("Both rows and cols are 0 for image");
- }
- } else if (image_proto.data_type() == TensorProto::BYTE) {
- // raw image content.
- int src_c = (image_proto.dims_size() == 3) ? image_proto.dims(2) : 1;
- CAFFE_ENFORCE(src_c == 3 || src_c == 1);
-
- src.create(
- image_proto.dims(0),
- image_proto.dims(1),
- (src_c == 3) ? CV_8UC3 : CV_8UC1);
- memcpy(
- src.ptr<uchar>(0),
- image_proto.byte_data().data(),
- image_proto.byte_data().size());
- } else {
- throw std::runtime_error(
- "Unknown image data type: " +
- caffe2::to_string(image_proto.data_type()));
- }
- CAFFE_ENFORCE(src.isContinuous());
-
- cv::Mat scaled_img;
- cv::resize(
- src, scaled_img, cv::Size(scale_w_, scale_h_), 0, 0, cv::INTER_AREA);
-
- cv::Mat img;
- if (channels_rgb_ == src.channels()) {
- img = scaled_img;
- } else {
- cv::cvtColor(
- scaled_img,
- img,
- (channels_rgb_ == 1) ? cv::COLOR_BGR2GRAY : cv::COLOR_GRAY2BGR);
- }
-
- cv::Mat rgb_img;
-
- if (channels_rgb_ == 1) {
- cv::cvtColor(img, rgb_img, cv::COLOR_BGR2RGB);
- } else {
- rgb_img = img;
- }
- CAFFE_ENFORCE(rgb_img.isContinuous());
-
- unsigned char* data = new unsigned char[scale_h_ * scale_w_ * channels_rgb_];
- memcpy(
- data,
- rgb_img.data,
- scale_h_ * scale_w_ * channels_rgb_ * sizeof(unsigned char));
- buffer_rgb.push_back(data);
- width = scale_w_;
- height = scale_h_;
- return true;
-}
-
-template <class Context>
-bool VideoInputOp<Context>::GetClipsAndLabelsFromDBValue(
- const std::string& value,
- int& height,
- int& width,
- std::vector<unsigned char*>& buffer_rgb,
- int* label_data,
- int64_t* video_id_data,
- int* start_frame_data,
- std::mt19937* randgen) {
- TensorProtos protos;
- int curr_proto_idx = 0;
- CAFFE_ENFORCE(protos.ParseFromString(value));
- const TensorProto& video_proto = protos.protos(curr_proto_idx++);
- const TensorProto& label_proto = protos.protos(curr_proto_idx++);
-
- int start_frm = 0;
- int num_clips = clip_per_video_ * crop_per_clip_;
- // start_frm is only valid when sampling 1 clip per video without
- // temporal jitterring
- if (decode_type_ == DecodeType::USE_START_FRM) {
- CAFFE_ENFORCE_GE(
- protos.protos_size(),
- curr_proto_idx + 1,
- "Start frm proto not provided");
- const TensorProto& start_frm_proto = protos.protos(curr_proto_idx++);
- start_frm = start_frm_proto.int32_data(0);
- if (get_start_frame_) {
- for (const auto i : c10::irange(num_clips)) {
- start_frame_data[i] = start_frm;
- }
- }
- }
-
- if (get_video_id_) {
- CAFFE_ENFORCE_GE(
- protos.protos_size(), curr_proto_idx + 1, "Video Id not provided");
- const TensorProto& video_id_proto = protos.protos(curr_proto_idx);
- for (const auto i : c10::irange(num_clips)) {
- video_id_data[i] = video_id_proto.int64_data(0);
- }
- }
-
- // assign labels
- GetLabelsFromProto(label_proto, label_data);
-
- if (use_local_file_) {
- CAFFE_ENFORCE_EQ(
- video_proto.data_type(),
- TensorProto::STRING,
- "Database with a file_list is expected to be string data");
- }
-
- // initializing the decoding params
- Params params;
- params.maximumOutputFrames_ = MAX_DECODING_FRAMES;
- params.video_res_type_ = video_res_type_;
- params.crop_size_ = crop_size_;
- params.short_edge_ = short_edge_;
- params.outputWidth_ = scale_w_;
- params.outputHeight_ = scale_h_;
- params.decode_type_ = decode_type_;
- params.num_of_required_frame_ = num_of_required_frame_;
-
- if (jitter_scales_.size() > 0) {
- int select_idx =
- std::uniform_int_distribution<>(0, jitter_scales_.size() - 1)(*randgen);
- params.short_edge_ = jitter_scales_[select_idx];
- }
-
- char* video_buffer = nullptr; // for decoding from buffer
- std::string video_filename; // for decoding from file
- int encoded_size = 0;
- if (video_proto.data_type() == TensorProto::STRING) {
- const string& encoded_video_str = video_proto.string_data(0);
- if (!use_local_file_) {
- encoded_size = encoded_video_str.size();
- video_buffer = const_cast<char*>(encoded_video_str.data());
- } else {
- video_filename = encoded_video_str;
- }
- } else if (video_proto.data_type() == TensorProto::BYTE) {
- if (!use_local_file_) {
- encoded_size = video_proto.byte_data().size();
- video_buffer = const_cast<char*>(video_proto.byte_data().data());
- } else {
-      // TODO: does this work?
- video_filename = video_proto.string_data(0);
- }
- } else {
- CAFFE_ENFORCE(false, "Unknown video data type.");
- }
-
- DecodeMultipleClipsFromVideo(
- video_buffer,
- video_filename,
- encoded_size,
- params,
- start_frm,
- clip_per_video_,
- clip_start_positions_,
- use_local_file_,
- height,
- width,
- buffer_rgb);
- return true;
-}
-
-template <class Context>
-void VideoInputOp<Context>::DecodeAndTransform(
- const std::string& value,
- float* clip_rgb_data,
- float* clip_of_data,
- int* label_data,
- int64_t* video_id_data,
- int* start_frame_data,
- std::mt19937* randgen,
- std::bernoulli_distribution* mirror_this_clip) {
- try {
- std::vector<unsigned char*> buffer_rgb;
- // get the video resolution after decoding
- int height = 0;
- int width = 0;
-
- if (image_as_input_) {
- CHECK(GetImageAndLabelsFromDBValue(
- value, height, width, buffer_rgb, label_data));
- } else {
- // Decode the video from memory or read from a local file
- CHECK(GetClipsAndLabelsFromDBValue(
- value,
- height,
- width,
- buffer_rgb,
- label_data,
- video_id_data,
- start_frame_data,
- randgen));
- }
- int clip_offset_rgb = channels_rgb_ * length_rgb_ * crop_size_ * crop_size_;
- int clip_offset_of = channels_of_ * length_of_ * crop_size_ * crop_size_;
- for (int i = 0; i < std::min(clip_per_video_, int(buffer_rgb.size()));
- i++) {
- for (const auto j : c10::irange(crop_per_clip_)) {
- // get the rectangle for cropping
- int h_off = 0;
- int w_off = 0;
- if (crop_per_clip_ > 1) {
- CAFFE_ENFORCE(
- random_crop_ == false,
- "Only using multiple crops w/o random cropping");
- }
- if (random_crop_) {
- // using random crop for training
- h_off =
- std::uniform_int_distribution<>(0, height - crop_size_)(*randgen);
- w_off =
- std::uniform_int_distribution<>(0, width - crop_size_)(*randgen);
- } else {
- // using multiple spatial crops
- if (crop_per_clip_ > 1) { // normally 3 crops
- if (height < width) {
- h_off = (height - crop_size_) / 2;
- w_off = j * (width - crop_size_) / (crop_per_clip_ - 1);
- } else {
- h_off = j * (height - crop_size_) / (crop_per_clip_ - 1);
- w_off = (width - crop_size_) / 2;
- }
- // LOG(INFO) << "crop " << j << "-th " << h_off << " & " << w_off;
- } else { // using center crop for testing
- h_off = (height - crop_size_) / 2;
- w_off = (width - crop_size_) / 2;
- }
- }
- cv::Rect rect(w_off, h_off, crop_size_, crop_size_);
-
- int this_clip_sampling_rate;
- if (random_sampling_rate_) {
- this_clip_sampling_rate = std::uniform_int_distribution<>(
- 1, random_sampling_rate_)(*randgen);
- }
-
- // randomly mirror the image or not
- bool mirror_me = random_mirror_ && (*mirror_this_clip)(*randgen);
-
- if (get_rgb_ && clip_rgb_data) {
- ClipTransformRGB(
- buffer_rgb[i],
- crop_size_,
- length_rgb_,
- channels_rgb_,
- (random_sampling_rate_ == 0 ? sampling_rate_rgb_
- : this_clip_sampling_rate),
- height,
- width,
- h_off,
- w_off,
- mirror_me,
- mean_rgb_,
- inv_std_rgb_,
- clip_rgb_data + ((i * crop_per_clip_ + j) * clip_offset_rgb));
- }
-
- if (get_optical_flow_ && clip_of_data) {
- ClipTransformOpticalFlow(
- buffer_rgb[i],
- crop_size_,
- length_of_,
- channels_of_,
- sampling_rate_of_,
- height,
- width,
- rect,
- channels_rgb_,
- mirror_me,
- flow_alg_type_,
- flow_data_type_,
- frame_gap_of_,
- do_flow_aggregation_,
- mean_of_,
- inv_std_of_,
- clip_of_data + ((i * crop_per_clip_ + j) * clip_offset_of));
- }
- }
- }
- if (buffer_rgb.size() > 0) {
- for (const auto i : c10::irange(buffer_rgb.size())) {
- unsigned char* buff = buffer_rgb[i];
- delete[] buff;
- }
- }
- buffer_rgb.clear();
- } catch (const std::exception& exc) {
- std::cerr << "While calling DecodeAndTransform()\n";
- std::cerr << exc.what();
- }
-}
-
-template <class Context>
-bool VideoInputOp<Context>::Prefetch() {
- try {
- // We will get the reader pointer from input.
- // If we use local clips, db will store the list
- reader_ = &OperatorBase::Input<db::DBReader>(0);
-
- // Call mutable_data() once to allocate the underlying memory.
- prefetched_clip_rgb_.mutable_data<float>();
- prefetched_clip_of_.mutable_data<float>();
- prefetched_label_.mutable_data<int>();
- prefetched_video_id_.mutable_data<int64_t>();
- prefetched_start_frame_.mutable_data<int>();
-
- // Prefetching handled with a thread pool of "decode_threads" threads.
- std::mt19937 meta_randgen(time(nullptr));
- std::vector<std::mt19937> randgen_per_thread;
- for (const auto i : c10::irange(num_decode_threads_)) {
- randgen_per_thread.emplace_back(meta_randgen());
- }
-
- std::bernoulli_distribution mirror_this_clip(0.5);
- for (const auto item_id : c10::irange(batch_size_)) {
- std::mt19937* randgen =
- &randgen_per_thread[item_id % num_decode_threads_];
-
- int frame_size = crop_size_ * crop_size_;
- // get the clip data pointer for the item_id -th example
- float* clip_rgb_data = prefetched_clip_rgb_.mutable_data<float>() +
- frame_size * length_rgb_ * channels_rgb_ * item_id * clip_per_video_ *
- crop_per_clip_;
-
- // get the optical flow data for the current clip
- float* clip_of_data = prefetched_clip_of_.mutable_data<float>() +
- frame_size * length_of_ * channels_of_ * item_id * clip_per_video_ *
- crop_per_clip_;
-
- // get the label data pointer for the item_id -th example
- int* label_data = prefetched_label_.mutable_data<int>() +
- (do_multi_label_ ? num_of_class_ : 1) * item_id * clip_per_video_ *
- crop_per_clip_;
-
- // get the video id data pointer for the item_id -th example
- int64_t* video_id_data = prefetched_video_id_.mutable_data<int64_t>() +
- item_id * clip_per_video_ * crop_per_clip_;
-
- int* start_frame_data = prefetched_start_frame_.mutable_data<int>() +
- item_id * clip_per_video_ * crop_per_clip_;
-
- std::string key, value;
- // read data
- reader_->Read(&key, &value);
-
- thread_pool_->run(std::bind(
- &VideoInputOp<Context>::DecodeAndTransform,
- this,
- std::string(value),
- clip_rgb_data,
- clip_of_data,
- label_data,
- video_id_data,
- start_frame_data,
- randgen,
- &mirror_this_clip));
- } // for over the batch
- thread_pool_->waitWorkComplete();
-
- // If the context is not CPUContext, we will need to do a copy in the
- // prefetch function as well.
- if (!std::is_same<Context, CPUContext>::value) {
- if (get_rgb_) {
- prefetched_clip_rgb_on_device_.CopyFrom(
- prefetched_clip_rgb_, true);
- }
- if (get_optical_flow_) {
- prefetched_clip_of_on_device_.CopyFrom(prefetched_clip_of_, true);
- }
- prefetched_label_on_device_.CopyFrom(prefetched_label_, true);
- if (get_video_id_) {
- prefetched_video_id_on_device_.CopyFrom(
- prefetched_video_id_, true);
- }
- if (get_start_frame_) {
- prefetched_start_frame_on_device_.CopyFrom(
- prefetched_start_frame_, true);
- }
- }
- } catch (const std::exception& exc) {
- std::cerr << "While calling Prefetch()\n";
- std::cerr << exc.what();
- }
- return true;
-}
-
-template <class Context>
-bool VideoInputOp<Context>::CopyPrefetched() {
- try {
- int index = 0;
- auto type = Context::GetDeviceType();
- if (get_rgb_) {
- auto* clip_rgb_output = OperatorBase::Output<Tensor>(index++, type);
- if (std::is_same<Context, CPUContext>::value) {
- clip_rgb_output->CopyFrom(prefetched_clip_rgb_, true);
- } else {
- clip_rgb_output->CopyFrom(prefetched_clip_rgb_on_device_, true);
- }
- }
-
- if (get_optical_flow_) {
- auto* clip_of_output = OperatorBase::Output<Tensor>(index++, type);
- if (std::is_same<Context, CPUContext>::value) {
- clip_of_output->CopyFrom(prefetched_clip_of_, true);
- } else {
- clip_of_output->CopyFrom(prefetched_clip_of_on_device_, true);
- }
- }
-
- auto* label_output = OperatorBase::Output<Tensor>(index++, type);
- if (std::is_same<Context, CPUContext>::value) {
- label_output->CopyFrom(prefetched_label_, true);
- } else {
- label_output->CopyFrom(prefetched_label_on_device_, true);
- }
-
- if (get_video_id_) {
- auto* video_id_output = OperatorBase::Output<Tensor>(index++, type);
- if (std::is_same<Context, CPUContext>::value) {
- video_id_output->CopyFrom(prefetched_video_id_, true);
- } else {
- video_id_output->CopyFrom(prefetched_video_id_on_device_, true);
- }
- }
- if (get_start_frame_) {
- auto* start_frame_output = OperatorBase::Output<Tensor>(index, type);
- if (std::is_same<Context, CPUContext>::value) {
- start_frame_output->CopyFrom(prefetched_start_frame_, true);
- } else {
- start_frame_output->CopyFrom(
- prefetched_start_frame_on_device_, true);
- }
- }
- } catch (const std::exception& exc) {
- std::cerr << "While calling CopyPrefetched()\n";
- std::cerr << exc.what();
- }
-
- return true;
-}
-
-} // namespace caffe2
-
-#endif // CAFFE2_VIDEO_VIDEO_INPUT_OP_H_
diff --git a/caffe2/video/video_input_op_gpu.cc b/caffe2/video/video_input_op_gpu.cc
deleted file mode 100644
index b9e60e6309..0000000000
--- a/caffe2/video/video_input_op_gpu.cc
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <caffe2/core/common_gpu.h>
-#include <caffe2/core/context_gpu.h>
-#include <caffe2/video/video_input_op.h>
-
-namespace caffe2 {
-
-REGISTER_CUDA_OPERATOR(VideoInput, VideoInputOp<CUDAContext>);
-
-} // namespace caffe2
diff --git a/caffe2/video/video_io.cc b/caffe2/video/video_io.cc
deleted file mode 100644
index 0b70dc5676..0000000000
--- a/caffe2/video/video_io.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-#include <caffe2/core/logging.h>
-#include <caffe2/video/video_io.h>
-#include <algorithm>
-#include <random>
-#include <string>
-
-namespace caffe2 {
-
-void ClipTransformRGB(
- const unsigned char* buffer_rgb,
- const int crop_size,
- const int length_rgb,
- const int channels_rgb,
- const int sampling_rate_rgb,
- const int height,
- const int width,
- const int h_off,
- const int w_off,
- const bool mirror_me,
- const std::vector<float>& mean_rgb,
- const std::vector<float>& inv_std_rgb,
- float* transformed_clip) {
- // The order of output dimensions is C, L, H, W
- int orig_index, tran_index;
- for (int c = 0; c < channels_rgb; ++c) {
- for (int l = 0; l < length_rgb; ++l) {
- int orig_index_l = l * sampling_rate_rgb * height * width * channels_rgb;
- int tran_index_l = (c * length_rgb + l) * crop_size;
-
- for (int h = 0; h < crop_size; ++h) {
- int orig_index_h = orig_index_l + (h + h_off) * width * channels_rgb;
- int tran_index_h = (tran_index_l + h) * crop_size;
-
- for (int w = 0; w < crop_size; ++w) {
- orig_index = orig_index_h + (w + w_off) * channels_rgb + c;
-
- // mirror the frame
- if (mirror_me) {
- tran_index = tran_index_h + (crop_size - 1 - w);
- } else {
- tran_index = tran_index_h + w;
- }
-
- // normalize and transform the clip
- transformed_clip[tran_index] =
- (buffer_rgb[orig_index] - mean_rgb[c]) * inv_std_rgb[c];
- }
- }
- }
- }
-}
-
-void ClipTransformOpticalFlow(
- const unsigned char* buffer_rgb,
- const int crop_size,
- const int length_of,
- const int channels_of,
- const int sampling_rate_of,
- const int height,
- const int width,
- const cv::Rect& rect,
- const int channels_rgb,
- const bool mirror_me,
- const int flow_alg_type,
- const int flow_data_type,
- const int frame_gap_of,
- const bool do_flow_aggregation,
- const std::vector<float>& mean_of,
- const std::vector<float>& inv_std_of,
- float* transformed_clip) {
- const int frame_size = crop_size * crop_size;
- const int channel_size_flow = length_of * frame_size;
-
- // for get the mean and std of the input data
- bool extract_statistics = false;
- static std::vector<double> mean_static(channels_of, 0.f);
- static std::vector<double> std_static(channels_of, 0.f);
- static long long count = 0;
- cv::Scalar mean_img, std_img;
-
- for (int l = 0; l < length_of; l++) {
- // get the grayscale frames
- std::vector<cv::Mat> grays, rgbs;
- int step_size = do_flow_aggregation ? 1 : frame_gap_of;
- for (int j = 0; j <= frame_gap_of; j += step_size) {
- // get the current frame
- const unsigned char* curr_frame = buffer_rgb +
- (l * sampling_rate_of + j) * height * width * channels_rgb;
- cv::Mat img = cv::Mat::zeros(height, width, CV_8UC3);
- memcpy(
- img.data,
- curr_frame,
- height * width * channels_rgb * sizeof(unsigned char));
-
- // crop and mirror the frame
- cv::Mat img_cropped = img(rect);
- if (mirror_me) {
- cv::flip(img_cropped, img_cropped, 1);
- }
-
- cv::Mat gray;
- cv::cvtColor(img_cropped, gray, cv::COLOR_RGB2GRAY);
- grays.push_back(gray);
- rgbs.push_back(img_cropped);
- }
-
- cv::Mat first_gray, first_rgb;
- cv::Mat flow = cv::Mat::zeros(crop_size, crop_size, CV_32FC2);
- MultiFrameOpticalFlowExtractor(grays, flow_alg_type, flow);
-
- std::vector<cv::Mat> imgs;
- cv::split(flow, imgs);
- // save the 2-channel optical flow first
- int c = 0;
- for (; c < 2; c++) {
- if (extract_statistics) {
- cv::meanStdDev(imgs[c], mean_img, std_img);
- mean_static[c] += mean_img[0];
- std_static[c] += std_img[0];
- }
-
- imgs[c] -= mean_of[c];
- imgs[c] *= inv_std_of[c];
- memcpy(
- transformed_clip + c * channel_size_flow + l * frame_size,
- imgs[c].data,
- frame_size * sizeof(float));
- }
-
- cv::Mat mag;
- std::vector<cv::Mat> chans;
- // augment the optical flow with more channels
- switch (flow_data_type) {
- case FlowDataType::Flow2C:
- // nothing to do if we only need two channels
- break;
-
- case FlowDataType::Flow3C:
- // use magnitude as the third channel
- mag = cv::abs(imgs[0]) + cv::abs(imgs[1]);
- if (extract_statistics) {
- cv::meanStdDev(mag, mean_img, std_img);
- mean_static[c] += mean_img[0];
- std_static[c] += std_img[0];
- }
-
- mag -= mean_of[c];
- mag *= inv_std_of[c];
- memcpy(
- transformed_clip + c * channel_size_flow + l * frame_size,
- mag.data,
- frame_size * sizeof(float));
- break;
-
- case FlowDataType::FlowWithGray:
- // add grayscale image as the third channel
- grays[0].convertTo(first_gray, CV_32FC1);
- if (extract_statistics) {
- cv::meanStdDev(first_gray, mean_img, std_img);
- mean_static[c] += mean_img[0];
- std_static[c] += std_img[0];
- }
-
- first_gray -= mean_of[c];
- first_gray *= inv_std_of[c];
- memcpy(
- transformed_clip + c * channel_size_flow + l * frame_size,
- first_gray.data,
- frame_size * sizeof(float));
- break;
-
- case FlowDataType::FlowWithRGB:
- // add all three rgb channels
- rgbs[0].convertTo(first_rgb, CV_32FC3);
- cv::split(first_rgb, chans);
- for (; c < channels_of; c++) {
- if (extract_statistics) {
- cv::meanStdDev(chans[c - 2], mean_img, std_img);
- mean_static[c] += mean_img[0];
- std_static[c] += std_img[0];
- }
-
- chans[c - 2] -= mean_of[c];
- chans[c - 2] *= inv_std_of[c];
- memcpy(
- transformed_clip + c * channel_size_flow + l * frame_size,
- chans[c - 2].data,
- frame_size * sizeof(float));
- }
- break;
-
- default:
- LOG(ERROR) << "Unsupported optical flow data type " << flow_data_type;
- break;
- }
-
- if (extract_statistics) {
- count++;
- if (count % 1000 == 1) {
- for (int i = 0; i < channels_of; i++) {
- LOG(INFO) << i
- << "-th channel mean: " << mean_static[i] / float(count)
- << " std: " << std_static[i] / float(count);
- }
- }
- }
- }
-}
-
-} // namespace caffe2
diff --git a/caffe2/video/video_io.h b/caffe2/video/video_io.h
deleted file mode 100644
index beefd7b078..0000000000
--- a/caffe2/video/video_io.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef CAFFE2_VIDEO_VIDEO_IO_H_
-#define CAFFE2_VIDEO_VIDEO_IO_H_
-
-#include <caffe2/core/common.h>
-#include <caffe2/video/optical_flow.h>
-#include <caffe2/video/video_decoder.h>
-#include <opencv2/opencv.hpp>
-#include <random>
-
-#include <istream>
-#include <ostream>
-
-namespace caffe2 {
-
-TORCH_API void ClipTransformRGB(
- const unsigned char* buffer_rgb,
- const int crop_size,
- const int length_rgb,
- const int channels_rgb,
- const int sampling_rate_rgb,
- const int height,
- const int width,
- const int h_off,
- const int w_off,
- const bool mirror_me,
- const std::vector<float>& mean_rgb,
- const std::vector<float>& inv_std_rgb,
- float* transformed_clip);
-
-TORCH_API void ClipTransformOpticalFlow(
- const unsigned char* buffer_rgb,
- const int crop_size,
- const int length_of,
- const int channels_of,
- const int sampling_rate_of,
- const int height,
- const int width,
- const cv::Rect& rect,
- const int channels_rgb,
- const bool mirror_me,
- const int flow_alg_type,
- const int flow_data_type,
- const int frame_gap_of,
- const bool do_flow_aggregation,
- const std::vector<float>& mean_of,
- const std::vector<float>& inv_std_of,
- float* transformed_clip);
-
-} // namespace caffe2
-
-#endif // CAFFE2_VIDEO_VIDEO_IO_H_
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index f696d94549..b6f46fb043 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -932,45 +932,6 @@ if(USE_REDIS)
endif()
endif()
-
-# ---[ OpenCV
-if(USE_OPENCV)
- # OpenCV 4
- find_package(OpenCV 4 QUIET COMPONENTS core highgui imgproc imgcodecs optflow videoio video)
- if(NOT OpenCV_FOUND)
- # OpenCV 3
- find_package(OpenCV 3 QUIET COMPONENTS core highgui imgproc imgcodecs videoio video)
- if(NOT OpenCV_FOUND)
- # OpenCV 2
- find_package(OpenCV QUIET COMPONENTS core highgui imgproc)
- endif()
- endif()
- if(OpenCV_FOUND)
- include_directories(SYSTEM ${OpenCV_INCLUDE_DIRS})
- list(APPEND Caffe2_DEPENDENCY_LIBS ${OpenCV_LIBS})
- if(MSVC AND USE_CUDA)
- list(APPEND Caffe2_CUDA_DEPENDENCY_LIBS ${OpenCV_LIBS})
- endif()
- message(STATUS "OpenCV found (${OpenCV_CONFIG_PATH})")
- else()
- message(WARNING "Not compiling with OpenCV. Suppress this warning with -DUSE_OPENCV=OFF")
- caffe2_update_option(USE_OPENCV OFF)
- endif()
-endif()
-
-# ---[ FFMPEG
-if(USE_FFMPEG)
- find_package(FFmpeg REQUIRED)
- if(FFMPEG_FOUND)
- message("Found FFMPEG/LibAV libraries")
- include_directories(SYSTEM ${FFMPEG_INCLUDE_DIR})
- list(APPEND Caffe2_DEPENDENCY_LIBS ${FFMPEG_LIBRARIES})
- else()
- message("Not compiling with FFmpeg. Suppress this warning with -DUSE_FFMPEG=OFF")
- caffe2_update_option(USE_FFMPEG OFF)
- endif()
-endif()
-
if(USE_ITT)
find_package(ITT)
if(ITT_FOUND)
diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake
deleted file mode 100644
index 04437562ee..0000000000
--- a/cmake/Modules/FindFFmpeg.cmake
+++ /dev/null
@@ -1,71 +0,0 @@
-# - Try to find ffmpeg libraries
-# (libavcodec, libavformat, libavutil, libswscale)
-# Once done this will define
-#
-# FFMPEG_FOUND - system has ffmpeg or libav
-# FFMPEG_INCLUDE_DIR - the ffmpeg include directory
-# FFMPEG_LIBRARIES - Link these to use ffmpeg
-#
-
-if (FFMPEG_LIBRARIES AND FFMPEG_INCLUDE_DIR)
- # in cache already
- set(FFMPEG_FOUND TRUE)
-else (FFMPEG_LIBRARIES AND FFMPEG_INCLUDE_DIR)
-
- find_path(FFMPEG_AVCODEC_INCLUDE_DIR
- NAMES libavcodec/avcodec.h
- PATHS ${_FFMPEG_AVCODEC_INCLUDE_DIRS} /usr/include /usr/local/include /opt/local/include /sw/include
- PATH_SUFFIXES ffmpeg libav
- )
-
- find_library(FFMPEG_LIBAVCODEC
- NAMES avcodec
- PATHS ${_FFMPEG_AVCODEC_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
- )
-
- find_library(FFMPEG_LIBAVFORMAT
- NAMES avformat
- PATHS ${_FFMPEG_AVFORMAT_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
- )
-
- find_library(FFMPEG_LIBAVUTIL
- NAMES avutil
- PATHS ${_FFMPEG_AVUTIL_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
- )
-
-
- find_library(FFMPEG_LIBSWSCALE
- NAMES swscale
- PATHS ${_FFMPEG_SWSCALE_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
- )
-
- find_library(FFMPEG_LIBSWRESAMPLE
- NAMES swresample
- PATHS ${_FFMPEG_SWSCALE_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
- )
-
- if (FFMPEG_LIBAVCODEC AND FFMPEG_LIBAVFORMAT)
- set(FFMPEG_FOUND TRUE)
- endif()
-
- if (FFMPEG_FOUND)
- set(FFMPEG_INCLUDE_DIR ${FFMPEG_AVCODEC_INCLUDE_DIR})
-
- set(FFMPEG_LIBRARIES
- ${FFMPEG_LIBAVCODEC}
- ${FFMPEG_LIBAVFORMAT}
- ${FFMPEG_LIBAVUTIL}
- ${FFMPEG_LIBSWSCALE}
- ${FFMPEG_LIBSWRESAMPLE}
- )
-
- if (NOT FFMPEG_FIND_QUIETLY)
- message(STATUS "Found FFMPEG or Libav: ${FFMPEG_LIBRARIES}, ${FFMPEG_INCLUDE_DIR}")
- endif (NOT FFMPEG_FIND_QUIETLY)
- else (FFMPEG_FOUND)
- if (FFMPEG_FIND_REQUIRED)
- message(FATAL_ERROR "Could not find libavcodec or libavformat or libavutil")
- endif (FFMPEG_FIND_REQUIRED)
- endif (FFMPEG_FOUND)
-
-endif (FFMPEG_LIBRARIES AND FFMPEG_INCLUDE_DIR)
diff --git a/cmake/Summary.cmake b/cmake/Summary.cmake
index b6781da3f4..8a0ae2e38d 100644
--- a/cmake/Summary.cmake
+++ b/cmake/Summary.cmake
@@ -128,7 +128,6 @@ function(caffe2_print_configuration_summary)
message(STATUS " USE_FBGEMM : ${USE_FBGEMM}")
message(STATUS " USE_FAKELOWP : ${USE_FAKELOWP}")
message(STATUS " USE_KINETO : ${USE_KINETO}")
- message(STATUS " USE_FFMPEG : ${USE_FFMPEG}")
message(STATUS " USE_GFLAGS : ${USE_GFLAGS}")
message(STATUS " USE_GLOG : ${USE_GLOG}")
message(STATUS " USE_LEVELDB : ${USE_LEVELDB}")
@@ -164,10 +163,6 @@ function(caffe2_print_configuration_summary)
message(STATUS " USE_NUMPY : ${USE_NUMPY}")
message(STATUS " USE_OBSERVERS : ${USE_OBSERVERS}")
message(STATUS " USE_OPENCL : ${USE_OPENCL}")
- message(STATUS " USE_OPENCV : ${USE_OPENCV}")
- if(${USE_OPENCV})
- message(STATUS " OpenCV version : ${OpenCV_VERSION}")
- endif()
message(STATUS " USE_OPENMP : ${USE_OPENMP}")
message(STATUS " USE_TBB : ${USE_TBB}")
if(${USE_TBB})
|
2.41.0
|
0046c315bbffc1feb8c94762e4f5d0d0ed323bf
|
Tue, 30 Apr 2024 17:40:44 +0000
|
[PATCH 0844/1000] Add templated attention BLOCK_M & BLOCK_N default size for different head_dim (#125139)
|
Run different head_dims [64, 128], which are the most popular ones across major GPT models. Enumerate different `BLOCK_M` and `BLOCK_N` candidates [16, 32, 64, 128], and pick the best config as the default one.

## Before
```
| Type    | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod   | dtype          |
|---------|---------|------------|-----------|-----------|-----------|----------|-------------|----------------|
| Average | 0.704   |            |           |           |           |          |             |                |
| Max     | 0.953   | 1          | 16        | 512       | 512       | 64       | noop        | torch.bfloat16 |
| Min     | 0.482   | 1          | 16        | 4096      | 4096      | 128      | causal_mask | torch.bfloat16 |
```

## After
```
| Type    | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod   | dtype          |
|---------|---------|------------|-----------|-----------|-----------|----------|-------------|----------------|
| Average | 0.823   |            |           |           |           |          |             |                |
| Max     | 0.926   | 1          | 16        | 512       | 512       | 64       | noop        | torch.bfloat16 |
| Min     | 0.723   | 1          | 16        | 512       | 512       | 128      | causal_mask | torch.bfloat16 |
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/125139
Approved by: https://github.com/Chillee
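For quick reference, below is a minimal, self-contained sketch of the head_dim-aware default-config policy this patch adds. The helper name `pick_default_sdpa_config`, its plain-value signature, and the 160 KB example are illustrative assumptions for this note only; the real `_get_default_config` (see the diff below) reads head_dim and dtype from the inductor query buffer and the shared-memory size from `utils.get_gpu_shared_memory()`, and the non-fp32 branch for smaller-shared-memory GPUs is not visible in the hunk, so it is left as a placeholder here.

```python
import torch

def pick_default_sdpa_config(head_dim: int, dtype: torch.dtype,
                             shared_mem_bytes: int):
    """Return (BLOCK_M, BLOCK_N, num_warps, num_stages) for the sdpa template.

    Hypothetical standalone helper mirroring the branching added in this patch.
    """
    if shared_mem_bytes > 128 * 1024:  # "big shared mem" GPUs
        if dtype == torch.float32:
            return (128, 32, 4, 3)
        # fp16/bf16: the wider BLOCK_N tile only pays off for head_dim == 64
        return (128, 64, 4, 3) if head_dim == 64 else (128, 32, 4, 3)
    # Smaller shared memory: fp32 default from the diff; the other-dtype branch
    # is not shown in the hunk, so reuse the conservative tile as a placeholder.
    return (32, 32, 4, 3)

# Example (assumed 160 KB of shared memory, i.e. an A100/H100-class GPU):
print(pick_default_sdpa_config(128, torch.bfloat16, 160 * 1024))  # (128, 32, 4, 3)
```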
|
diff --git a/benchmarks/transformer/score_mod.py b/benchmarks/transformer/score_mod.py
index 067f18f8e3..2f49f234e8 100644
--- a/benchmarks/transformer/score_mod.py
+++ b/benchmarks/transformer/score_mod.py
@@ -211,7 +211,7 @@ def generate_experiment_configs() -> List[ExperimentConfig]:
batch_sizes = [1, 8, 16]
num_heads = [16]
q_kv_seq_lens = [(512, 512), (1024, 1024), (4096, 4096)]
- head_dims = [64]
+ head_dims = [64, 128]
dtypes = [
torch.bfloat16,
]
diff --git a/torch/_inductor/kernel/templated_attention.py b/torch/_inductor/kernel/templated_attention.py
index 0b62dfbd68..b1c2571267 100644
--- a/torch/_inductor/kernel/templated_attention.py
+++ b/torch/_inductor/kernel/templated_attention.py
@@ -174,14 +174,18 @@ sdpa_template = TritonTemplate(
def _get_default_config(query):
+ head_dim = query.get_size()[-1]
default_config = None
is_big_shared_mem = utils.get_gpu_shared_memory() > 128 * 1024
if is_big_shared_mem:
if query.get_dtype() == torch.float32:
- default_config = (64, 64, 4, 3)
+ default_config = (128, 32, 4, 3)
else:
- default_config = (128, 64, 4, 3)
+ if head_dim == 64:
+ default_config = (128, 64, 4, 3)
+ else:
+ default_config = (128, 32, 4, 3)
else:
if query.get_dtype() == torch.float32:
default_config = (32, 32, 4, 3)
|
2.41.0
|
5be0fb62d1f1509000b4e142aafe05896ba7232
|
Tue, 30 Apr 2024 18:02:46 +0000
|
[PATCH 0845/1000] [minimizer] Add exclusion function to minimizer base (#124504)
|
Summary: Add an exclusion list to the minimizer:
1. Some operations cannot be lowered when constructing subgraphs; this usually happens when they are isolated from their operation group.
2. Exclude them in the search strategies for automation.

Reviewed By: jimone1

Differential Revision: D56327289

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124504
Approved by: https://github.com/jfix71
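As a usage illustration, here is a minimal sketch of a user-supplied `exclusion_fn` under the new interface. Only the `Callable[[NodeList, int, int], None]` signature and the in-place mutation of the node list come from the diff below; the `make_exclusion_fn` helper, the excluded node names, and the `MyNetMinimizer` subclass are hypothetical placeholders.

```python
from typing import List, Set

import torch.fx

def make_exclusion_fn(excluded_names: Set[str]):
    """Build an exclusion_fn with the (NodeList, int, int) -> None signature
    added in this patch: it mutates the node list in place so the minimizer
    never places the excluded nodes into a lowered subgraph."""
    def exclusion_fn(nodes: List[torch.fx.Node], start_idx: int, end_idx: int) -> None:
        # For binary search, (start_idx, end_idx) delimit the current range;
        # the per-node and user-defined strategies pass (-1, -1).
        nodes[:] = [n for n in nodes if n.name not in excluded_names]
    return exclusion_fn

# Hypothetical wiring with a _MinimizerBase subclass (other args elided):
# minimizer = MyNetMinimizer(module, sample_input, compare_fn, settings,
#                            exclusion_fn=make_exclusion_fn({"conv_3", "relu_7"}))
```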
|
diff --git a/torch/fx/passes/net_min_base.py b/torch/fx/passes/net_min_base.py
index 98aef4eb54..e6ab95225b 100644
--- a/torch/fx/passes/net_min_base.py
+++ b/torch/fx/passes/net_min_base.py
@@ -112,10 +112,13 @@ class _MinimizerBase:
settings: _MinimizerSettingBase,
module_exporter: Optional[
Callable[
- [List[torch.Tensor], torch.fx.GraphModule, str],
+ [Tensors, torch.fx.GraphModule, str],
None
]
] = None,
+ exclusion_fn: Optional[
+ Callable[[NodeList, int, int], None]
+ ] = None,
):
assert isinstance(module, torch.fx.GraphModule)
@@ -124,6 +127,7 @@ class _MinimizerBase:
self.compare_fn = compare_fn
self.module_exporter = module_exporter
self.settings = settings
+ self.exclusion_fn = exclusion_fn
# Stores outputs of run_a function
self.a_outputs: Dict[str, Any] = {}
@@ -382,10 +386,10 @@ class _MinimizerBase:
report.append(f"Result mismatch for {result_key}")
if self.module_exporter:
self.module_exporter(
- List[torch.Tensor](a_input), submodule, str(result_key[0]) + "_cpu",
+ a_input, submodule, str(result_key[0]) + "_cpu",
)
self.module_exporter(
- List[torch.Tensor](b_input), submodule, str(result_key[0]) + "_acc",
+ b_input, submodule, str(result_key[0]) + "_acc",
)
raise FxNetMinimizerResultMismatchError(f"Result mismatch for {result_key}")
@@ -395,26 +399,32 @@ class _MinimizerBase:
"""
Recursive binary search implementation.
"""
+ culprits: NodeSet = set()
nodes: NodeList = all_nodes[start_idx:end_idx]
report: List[str] = []
- self.reports.append(report)
+ if self.exclusion_fn is not None:
+ self.exclusion_fn(nodes, start_idx, end_idx)
+ if len(nodes) == 0:
+ report = ["All nodes are excluded by user"]
+ self.reports.append(report)
+ return culprits
+
+ first_node_name = nodes[0].name
+ output_node_name = nodes[-1].name
self.iteration += 1
- report.append(f"Binary search iteration {self.iteration}.")
+ self.reports.append(report)
+ report.append(f"Binary search iteration {self.iteration}")
report.append(
- f"From node index {start_idx} to {end_idx-1}. "
+ f"From node index {start_idx}:{first_node_name} to {end_idx-1}:{output_node_name}. "
f"Size of the interested node list is {len(nodes)}"
)
-
cur_nodes: NodeSet = set(nodes)
- for node in nodes:
- if node in self.fusions:
- cur_nodes.update(self.fusions[node])
-
try:
split_module, submod_name = self._build_submodule(cur_nodes)
- self._run_and_compare(split_module, submod_name, [])
+ self._run_and_compare(split_module, submod_name, [output_node_name])
+
except (FxNetMinimizerRunFuncError, FxNetMinimizerResultMismatchError):
if len(nodes) == 1:
@@ -472,6 +482,14 @@ class _MinimizerBase:
report.append(f"Visit node: {node.name}")
_LOGGER.info("Visit node: %s", node.name)
+ node_list: NodeList = [node]
+ if self.exclusion_fn is not None:
+ self.exclusion_fn(node_list, -1, -1)
+ if len(node_list) == 0:
+ report.append(f"User exclusion : {node.name}")
+ self.print_report(report)
+ return culprits
+
cur_nodes: NodeSet = {node}
if node in self.fusions:
@@ -501,6 +519,12 @@ class _MinimizerBase:
run user defined `nodes` and determine if it is a culprit.
"""
culprits: NodeSet = set()
+ if self.exclusion_fn is not None:
+ self.exclusion_fn(nodes, -1, -1)
+ if len(nodes) == 0:
+ report = ["All nodes are excluded by user"]
+ self.reports.append(report)
+ return culprits
first_node_name = nodes[0].name
output_node_name = nodes[-1].name
@@ -562,7 +586,14 @@ class _MinimizerBase:
"""
culprits: NodeSet = set()
nodes: NodeList = all_nodes[start_idx:end_idx]
-
+ cur_nodes: NodeSet = set(nodes)
+ if self.exclusion_fn is not None:
+ self.exclusion_fn(nodes, start_idx, end_idx)
+ cur_nodes = set(nodes)
+ else:
+ for node in nodes:
+ if node in self.fusions:
+ cur_nodes.update(self.fusions[node])
report: List[str] = []
self.reports.append(report)
self.iteration += 1
@@ -572,12 +603,6 @@ class _MinimizerBase:
f"Size of the interested node list is {len(nodes)}"
)
- cur_nodes: NodeSet = set(nodes)
-
- for node in nodes:
- if node in self.fusions:
- cur_nodes.update(self.fusions[node])
-
try:
split_module, submod_name = self._build_submodule(cur_nodes)
self._run_and_compare(split_module, submod_name, [])
@@ -588,7 +613,7 @@ class _MinimizerBase:
return culprits
except (FxNetMinimizerRunFuncError):
culprits.update(cur_nodes)
- report.append(f"Found culprit from run error: {node}")
+ report.append(f"Found culprit from run error: {cur_nodes}")
self.print_report(report)
return culprits
else:
|
2.41.0
|
242fb62a7e69c97933202a3f76460e4cf81678e
|
Mon, 29 Apr 2024 19:37:55 -0700
|
[PATCH 0846/1000] [quant][pt2e] Fix conv-bn weight + bias per channel QAT (#125208)
|
Summary: This commit fixes the pattern matching for conv-bn during QAT fusion where both weight and bias are quantized per channel. Previously this failed because weights and biases used the same example kwargs for their scales and zero points, causing these qparams to be tied during pattern matching. Test Plan: python test/test_quantization.py TestQuantizePT2EQAT_ConvBn1d.test_qat_conv_bn_per_channel_weight_bias python test/test_quantization.py TestQuantizePT2EQAT_ConvBn2d.test_qat_conv_bn_per_channel_weight_bias Reviewers: jerryzh168, angelayi Subscribers: jerryzh168, angelayi, supriyar Differential Revision: [D56740694](https://our.internmc.facebook.com/intern/diff/D56740694) Pull Request resolved: https://github.com/pytorch/pytorch/pull/125208 Approved by: https://github.com/angelayi
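As a quick reference, a minimal sketch of the per-channel derived-bias math the new test exercises; it mirrors the `is_per_channel` branch added to `_derive_bias_qparams_from_act_and_weight_qparams` in the diff below, with made-up scale values.

```python
import torch

act_scale = torch.tensor(0.02)                  # per-tensor activation scale
weight_scale = torch.tensor([0.1, 0.05, 0.2])   # one scale per output channel

# Bias qparams derived element-wise from activation and weight scales.
bias_scale = act_scale * weight_scale
bias_zero_point = torch.zeros_like(bias_scale, dtype=torch.int32)

print(bias_scale)       # -> tensor([0.0020, 0.0010, 0.0040])
print(bias_zero_point)  # -> tensor([0, 0, 0], dtype=torch.int32)
```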
|
diff --git a/test/quantization/pt2e/test_quantize_pt2e_qat.py b/test/quantization/pt2e/test_quantize_pt2e_qat.py
index e016ba4e49..d0398652d8 100644
--- a/test/quantization/pt2e/test_quantize_pt2e_qat.py
+++ b/test/quantization/pt2e/test_quantize_pt2e_qat.py
@@ -855,6 +855,60 @@ class TestQuantizePT2EQAT_ConvBn_Base(PT2EQATTestCase):
def test_qat_conv_transpose_bn_relu(self):
self._do_test_qat_conv_transpose_bn(has_relu=True)
+ def test_qat_conv_bn_per_channel_weight_bias(self):
+ m = self._get_conv_bn_model()
+ example_inputs = self.example_inputs
+ m = capture_pre_autograd_graph(m, example_inputs)
+ quantizer = ConvBnDerivedBiasQuantizer(is_per_channel=True)
+ m = prepare_qat_pt2e(m, quantizer)
+ m(*example_inputs)
+ m = convert_pt2e(m)
+ m(*example_inputs)
+
+ # Expected graph:
+ # x -> q_tensor -> dq_tensor -> conv -> q_tensor -> dq_tensor -> output
+ # weight -> q_channel -> dq_channel /
+ # bias -> q_channel -> dq_channel /
+
+ (conv_node, _, _) = _get_conv_bn_getitem_nodes(m)
+ conv_op = conv_node.target
+ conv_weight_dq_op = (
+ torch.ops.quantized_decomposed.dequantize_per_channel.default
+ )
+ node_occurrence = {
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ): 2,
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_channel.default
+ ): 2,
+ }
+ node_list = [
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
+ ns.call_function(conv_weight_dq_op),
+ ns.call_function(conv_weight_dq_op),
+ ns.call_function(conv_op),
+ ns.call_function(
+ torch.ops.quantized_decomposed.quantize_per_tensor.default
+ ),
+ ns.call_function(
+ torch.ops.quantized_decomposed.dequantize_per_tensor.default
+ ),
+ ]
+ self.checkGraphModuleNodes(
+ m,
+ expected_node_list=node_list,
+ expected_node_occurrence=node_occurrence,
+ )
+
@skipIfNoQNNPACK
class TestQuantizePT2EQAT_ConvBn1d(TestQuantizePT2EQAT_ConvBn_Base):
@@ -952,22 +1006,45 @@ class ConvBnDerivedBiasQuantizer(Quantizer):
derived from the conv input activation and weight qparams.
"""
+ def __init__(self, is_per_channel: bool = False):
+ super().__init__()
+ self.is_per_channel = is_per_channel
+
def _derive_bias_qparams_from_act_and_weight_qparams(self, obs_or_fqs):
act_scale, _ = obs_or_fqs[0].calculate_qparams()
weight_scale, _ = obs_or_fqs[1].calculate_qparams()
- bias_scale = torch.tensor([act_scale * weight_scale], dtype=torch.float32)
- bias_zero_point = torch.tensor([0], dtype=torch.int32)
+ if self.is_per_channel:
+ bias_scale = act_scale * weight_scale
+ bias_zero_point = torch.zeros_like(bias_scale, dtype=torch.int32)
+ else:
+ bias_scale = torch.tensor([act_scale * weight_scale], dtype=torch.float32)
+ bias_zero_point = torch.tensor([0], dtype=torch.int32)
return bias_scale, bias_zero_point
def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule:
+ if self.is_per_channel:
+ weight_qscheme = torch.per_channel_symmetric
+ weight_fq = FusedMovingAvgObsFakeQuantize.with_args(
+ observer=MovingAveragePerChannelMinMaxObserver,
+ )
+ else:
+ weight_qscheme = torch.per_tensor_affine
+ weight_fq = default_fake_quant
conv_node, _, getitem_node = _get_conv_bn_getitem_nodes(model)
- act_and_weight_qspec = QuantizationSpec(
+ act_qspec = QuantizationSpec(
dtype=torch.uint8,
quant_min=0,
quant_max=255,
qscheme=torch.per_tensor_affine,
observer_or_fake_quant_ctr=default_fake_quant,
)
+ weight_qspec = QuantizationSpec(
+ dtype=torch.uint8,
+ quant_min=0,
+ quant_max=255,
+ qscheme=weight_qscheme,
+ observer_or_fake_quant_ctr=weight_fq,
+ )
bias_qspec = DerivedQuantizationSpec(
derived_from=[
(conv_node.args[0], conv_node),
@@ -977,18 +1054,19 @@ class ConvBnDerivedBiasQuantizer(Quantizer):
dtype=torch.int32,
quant_min=-(2**31),
quant_max=2**31 - 1,
- qscheme=torch.per_tensor_affine,
+ qscheme=weight_qscheme,
+ ch_axis=0 if self.is_per_channel else None,
)
conv_node.meta["quantization_annotation"] = QuantizationAnnotation(
input_qspec_map={
- conv_node.args[0]: act_and_weight_qspec,
- conv_node.args[1]: act_and_weight_qspec,
+ conv_node.args[0]: act_qspec,
+ conv_node.args[1]: weight_qspec,
conv_node.args[2]: bias_qspec,
},
_annotated=True,
)
getitem_node.meta["quantization_annotation"] = QuantizationAnnotation(
- output_qspec=act_and_weight_qspec,
+ output_qspec=act_qspec,
_annotated=True,
)
return model
diff --git a/torch/ao/quantization/pt2e/qat_utils.py b/torch/ao/quantization/pt2e/qat_utils.py
index 1434a75df6..1f89dbe11c 100644
--- a/torch/ao/quantization/pt2e/qat_utils.py
+++ b/torch/ao/quantization/pt2e/qat_utils.py
@@ -58,6 +58,7 @@ _quantized_conv2d_bn_example_inputs = (
def _get_quantized_conv_bn_example_inputs_kwargs(
is_per_channel: bool,
has_bias: bool,
+ bias_is_quantized: bool,
is_cuda: bool,
) -> Dict[str, Any]:
"""
@@ -68,8 +69,11 @@ def _get_quantized_conv_bn_example_inputs_kwargs(
# Per tensor quantization uses literals to represent scale and zero
# point, so there is no need to include them here as kwargs
if is_per_channel:
- kwargs["scale"] = torch.tensor([1], dtype=torch.float)
- kwargs["zero_point"] = torch.tensor([0], dtype=torch.int)
+ kwargs["weight_scale"] = torch.tensor([1], dtype=torch.float)
+ kwargs["weight_zero_point"] = torch.tensor([0], dtype=torch.int)
+ if has_bias and bias_is_quantized:
+ kwargs["bias_scale"] = torch.tensor([1], dtype=torch.float)
+ kwargs["bias_zero_point"] = torch.tensor([0], dtype=torch.int)
if has_bias:
kwargs["conv_bias"] = torch.randn(1)
if is_cuda:
@@ -157,7 +161,7 @@ def _get_qat_conv_bn_pattern_no_conv_bias(conv_fn: Callable) -> Callable:
return x
return _WrapperModule(_qat_conv_bn_pattern_no_conv_bias)
-def _append_qdq(x, is_per_channel, kwargs):
+def _append_qdq(x, is_per_channel, is_bias, kwargs):
"""
Helper function to append q-dq ops after `x`, using dummy values for the qparams
and qmin/qmax. We use dummy values here because we match with `ignore_literals=True`
@@ -167,8 +171,10 @@ def _append_qdq(x, is_per_channel, kwargs):
"""
# Dummy args to be passed into q-dq ops
per_channel_axis = 0
- scale = kwargs["scale"] if is_per_channel else 1.0
- zp = kwargs["zero_point"] if is_per_channel else 0
+ scale_key = "bias_scale" if is_bias else "weight_scale"
+ zp_key = "bias_zero_point" if is_bias else "weight_zero_point"
+ scale = kwargs[scale_key] if is_per_channel else 1.0
+ zp = kwargs[zp_key] if is_per_channel else 0
qmin = -127
qmax = 127
dtype = torch.int8
@@ -215,11 +221,15 @@ def _get_quantized_qat_conv_bn_pattern(
bias_shape = [1] * len(conv_weight.shape)
bias_shape[1] = -1
scaled_weight = conv_weight * scale_factor.reshape(weight_shape)
- scaled_weight = _append_qdq(scaled_weight, is_per_channel, kwargs)
+ scaled_weight = _append_qdq(
+ scaled_weight, is_per_channel, is_bias=False, kwargs=kwargs,
+ )
if has_bias:
zero_bias = torch.zeros_like(kwargs["conv_bias"], dtype=x.dtype)
if bias_is_quantized:
- zero_bias = _append_qdq(zero_bias, is_per_channel, kwargs)
+ zero_bias = _append_qdq(
+ zero_bias, is_per_channel, is_bias=True, kwargs=kwargs,
+ )
x = conv_fn(x, scaled_weight, zero_bias)
else:
x = conv_fn(x, scaled_weight, None)
@@ -252,11 +262,15 @@ def _get_folded_quantized_qat_conv_bn_pattern(
bn_running_var: torch.Tensor,
**kwargs,
) -> torch.Tensor:
- conv_weight = _append_qdq(conv_weight, is_per_channel, kwargs)
+ conv_weight = _append_qdq(
+ conv_weight, is_per_channel, is_bias=False, kwargs=kwargs,
+ )
if has_bias:
bias = kwargs["conv_bias"]
if bias_is_quantized:
- bias = _append_qdq(bias, is_per_channel, kwargs)
+ bias = _append_qdq(
+ bias, is_per_channel, is_bias=True, kwargs=kwargs,
+ )
else:
bias = None
x = conv_fn(x, conv_weight, bias)
@@ -739,7 +753,7 @@ def _fold_conv_bn_qat_helper(
# filter out one of the values for this flag to avoid having duplicate patterns
if not has_bias and bias_is_quantized:
continue
- kwargs = _get_quantized_conv_bn_example_inputs_kwargs(is_per_channel, has_bias, is_cuda)
+ kwargs = _get_quantized_conv_bn_example_inputs_kwargs(is_per_channel, has_bias, bias_is_quantized, is_cuda)
match_pattern = _get_quantized_qat_conv_bn_pattern(
is_per_channel, has_bias, bias_is_quantized, conv_fn, bn_is_training
)
|
2.41.0
|
7958c538cb91adae1b6c00aaa46217a5c4166ed
|
Mon, 29 Apr 2024 21:02:40 -0700
|
[PATCH 0847/1000] Setup initial testing harness and cache key generation for AOTAutograd Cache (#124642)
|
This doesn't introduce any new behavior, but sets up a basic cache key generation mechanism that I can test. From here I will: - Add checks on the ops in an input FXGraph to make sure they are safe to cache. We'll be conservative in the first version here. - Add serialization for FX graphs - Save these FX graphs to disk in the cache - Support graphs with more complicated ops like higher order ops and specialized nn modules Pull Request resolved: https://github.com/pytorch/pytorch/pull/124642 Approved by: https://github.com/aorenste
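A minimal sketch of the general idea, assuming a toy config rather than the real `AOTConfig` (field names below are illustrative): reduce the config to a stable tuple of cache-relevant fields, pickle it, and hash the bytes, so identical inputs produce identical keys and any relevant change produces a new one.

```python
import hashlib
import pickle
from dataclasses import dataclass

@dataclass
class ToyConfig:
    num_params_buffers: int = 0
    dynamic_shapes: bool = True
    is_export: bool = False

def cache_key(cfg: ToyConfig) -> str:
    # Only fields that affect compilation participate in the key.
    reduced = (cfg.num_params_buffers, cfg.dynamic_shapes, cfg.is_export)
    return "a" + hashlib.sha256(pickle.dumps(reduced)).hexdigest()

a = cache_key(ToyConfig())
b = cache_key(ToyConfig())
c = cache_key(ToyConfig(dynamic_shapes=False))
assert a == b and a != c  # stable across runs, sensitive to config changes
```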
|
diff --git a/test/dynamo/test_aot_autograd_cache.py b/test/dynamo/test_aot_autograd_cache.py
new file mode 100644
index 0000000000..4164bfe346
--- /dev/null
+++ b/test/dynamo/test_aot_autograd_cache.py
@@ -0,0 +1,105 @@
+# Owner(s): ["module: dynamo"]
+
+import torch
+import torch._dynamo
+import torch._dynamo.test_case
+
+import torch._functorch._aot_autograd
+from torch._functorch._aot_autograd.autograd_cache import autograd_cache_hash
+from torch._functorch._aot_autograd.schemas import AOTConfig
+
+
+class AOTAutogradCachePicklerTests(torch._dynamo.test_case.TestCase):
+ def default_config(self):
+ return AOTConfig(
+ fw_compiler=None,
+ bw_compiler=None,
+ inference_compiler=None,
+ partition_fn=None,
+ decompositions={},
+ num_params_buffers=0,
+ aot_id=0,
+ keep_inference_input_mutations=False,
+ dynamic_shapes=True,
+ aot_autograd_arg_pos_to_source=None,
+ is_export=False,
+ no_tangents=False,
+ enable_log=False,
+ )
+
+ def _get_dynamo_output(self, fn, *args, **kwargs):
+ # Reset dynamo between runs
+ torch._dynamo.reset()
+ fx_graph = None
+
+ def compiler(gm, inputs, **kwargs):
+ nonlocal fx_graph
+ fx_graph = gm
+ return gm
+
+ g = torch.compile(fn, backend=compiler, fullgraph=True)
+ result = g(*args, **kwargs)
+ return (result, fx_graph)
+
+ def gen_cache_key(self, f, config, inputs=None):
+ if inputs is None:
+ inputs = [torch.randn(3)]
+ _, fx_g = self._get_dynamo_output(f, *inputs)
+ return autograd_cache_hash(fx_g, config)
+
+ def test_basic_hash_key(self):
+ def fn(x):
+ return x.sin().cos()
+
+ config = self.default_config()
+ # Check hash is stable on multiple runs
+ c1 = self.gen_cache_key(fn, config)
+ c2 = self.gen_cache_key(fn, config)
+ self.assertEqual(c1, c2)
+
+ def test_identical_graphs_and_configs(self):
+ def fn(x):
+ return x.sin().cos()
+
+ def fn2(x):
+ y = x.sin()
+ z = y.cos()
+ return z
+
+ # Make the id different, but otherwise identical
+ config = self.default_config()
+ config2 = self.default_config()
+ config2.aot_id = 1
+
+ c1 = self.gen_cache_key(fn, config)
+ c2 = self.gen_cache_key(fn, config2)
+ self.assertEqual(c1, c2)
+
+ def test_different_graphs(self):
+ def fn(x):
+ return x.cos().sin()
+
+ def fn2(x):
+ return x.sin().cos()
+
+ config = self.default_config()
+ c1 = self.gen_cache_key(fn, config)
+ c2 = self.gen_cache_key(fn2, config)
+ self.assertNotEqual(c1, c2)
+
+ def test_different_configs(self):
+ def fn(x):
+ return x.cos().sin()
+
+ config = self.default_config()
+ config2 = self.default_config()
+ config2.dynamic_shapes = False
+ c1 = self.gen_cache_key(fn, config)
+ c2 = self.gen_cache_key(fn, config2)
+ self.assertNotEqual(c1, c2)
+
+
+if __name__ == "__main__":
+ from torch._dynamo.test_case import run_tests
+
+ run_tests()
diff --git a/torch/_functorch/_aot_autograd/autograd_cache.py b/torch/_functorch/_aot_autograd/autograd_cache.py
new file mode 100644
index 0000000000..20fc61d2f1
--- /dev/null
+++ b/torch/_functorch/_aot_autograd/autograd_cache.py
@@ -0,0 +1,67 @@
+"""
+Utils for caching the outputs of AOTAutograd
+"""
+from __future__ import annotations
+
+import logging
+
+import torch
+from torch._inductor.codecache import _ident, FxGraphCachePickler
+
+from .schemas import AOTConfig # noqa: F401
+
+log = logging.getLogger(__name__)
+
+
+class AOTAutogradCacheDetails:
+ """
+ Object to capture all the details for a dynamo graph module relevant to computing
+ a safe and stable cache key for AOTAutograd.
+ """
+
+ def __init__(self, gm: torch.fx.GraphModule, config: AOTConfig):
+ self.gm = gm # TODO: we'll handle different parts of the graph module
+ # TODO: We'll want to handle the full_args passed in as well
+ self.config = config # Gets reduced by the Pickler
+
+ def debug_str(self) -> str:
+ return AOTAutogradCachePickler.debug_str(self)
+
+
+def _reduce_aot_config(config: AOTConfig):
+ """
+ Reduce the config to a stable key for caching.
+ """
+ return (
+ _ident,
+ (
+ config.num_params_buffers,
+ config.keep_inference_input_mutations,
+ config.is_export,
+ config.no_tangents,
+ config.dynamic_shapes,
+ config.aot_autograd_arg_pos_to_source,
+ config.enable_log,
+ config.pre_dispatch,
+ ),
+ )
+
+
+class AOTAutogradCachePickler(FxGraphCachePickler):
+ dispatch_table = FxGraphCachePickler.dispatch_table.copy()
+ dispatch_table[AOTConfig] = _reduce_aot_config
+
+
+def autograd_cache_hash(
+ gm: torch.fx.GraphModule,
+ config: AOTConfig,
+ # TODO: add args and parameters
+) -> str:
+ """
+ Generate a unique hash of the FX graph for caching.
+ """
+ details = AOTAutogradCacheDetails(gm, config)
+ # The prefix distinguishes among the other kinds of objects we cache
+ key = "a" + AOTAutogradCachePickler.get_hash(details)
+ log.debug("FX graph cache hash details for key %s:\n%s", key, details.debug_str())
+ return key
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 3b5a9597d1..ef3946cec2 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -495,25 +495,56 @@ class FxGraphCachePickler(pickle.Pickler):
dispatch_table[torch.Tensor] = _reduce_tensor
dispatch_table[torch.SymInt] = _reduce_symint
- @staticmethod
- def dumps(obj) -> bytes:
+ @classmethod
+ def dumps(cls, obj) -> bytes:
"""
Pickle an object using the FxGraphCachePickler.
"""
with io.BytesIO() as stream:
- pickler = FxGraphCachePickler(stream)
+ pickler = cls(stream)
pickler.dump(obj)
return stream.getvalue()
- @staticmethod
- def get_hash(obj: Any) -> str:
+ @classmethod
+ def get_hash(cls, obj: Any) -> str:
"""
Serialize an object using the FxGraphCachePickler and return a hash
of the pickled object.
"""
- serialized_data = FxGraphCachePickler.dumps(obj)
+ serialized_data = cls.dumps(obj)
return sha256_hash(serialized_data)
+ @classmethod
+ def debug_str(cls, inp: Any) -> str:
+ """
+ Get a printable string describing in more detail all the attributes
+ comprising an object. Useful for debugging when one graph hashes
+ to a different value than another.
+ """
+
+ def get_str(obj) -> str:
+ if isinstance(obj, torch.Tensor):
+ return str(extract_tensor_metadata(obj))
+ elif isinstance(obj, bytes):
+ return "<bytes>"
+ else:
+ return str(obj)
+
+ lines = []
+ for attr, obj in vars(inp).items():
+ if isinstance(obj, list):
+ for ii in range(len(obj)):
+ h = cls.get_hash(obj[ii])
+ lines.append(f"[{h}] {attr}[{ii}]: {get_str(obj[ii])}")
+ elif isinstance(obj, dict):
+ for k, v in obj.items():
+ h = cls.get_hash(v)
+ lines.append(f"[{h}] {attr}[{k}]: {get_str(v)}")
+ else:
+ h = cls.get_hash(obj)
+ lines.append(f"[{h}] {attr}: {get_str(obj)}")
+ return "\n".join(lines)
+
@functools.lru_cache(None)
def get_inductor_code_hash() -> bytes:
@@ -616,29 +647,7 @@ class FxGraphHashDetails:
comprising this object. Useful for debugging when one graph hashes
to a different value than another.
"""
-
- def get_str(obj) -> str:
- if isinstance(obj, torch.Tensor):
- return str(extract_tensor_metadata(obj))
- elif isinstance(obj, bytes):
- return "<bytes>"
- else:
- return str(obj)
-
- lines = []
- for attr, obj in vars(self).items():
- if isinstance(obj, list):
- for ii in range(len(obj)):
- h = FxGraphCachePickler.get_hash(obj[ii])
- lines.append(f"[{h}] {attr}[{ii}]: {get_str(obj[ii])}")
- elif isinstance(obj, dict):
- for k, v in obj.items():
- h = FxGraphCachePickler.get_hash(v)
- lines.append(f"[{h}] {attr}[{k}]: {get_str(v)}")
- else:
- h = FxGraphCachePickler.get_hash(obj)
- lines.append(f"[{h}] {attr}: {get_str(obj)}")
- return "\n".join(lines)
+ return FxGraphCachePickler.debug_str(self)
def compiled_fx_graph_hash(
@@ -653,7 +662,11 @@ def compiled_fx_graph_hash(
# The prefix distinguishes among the other kinds of objects we
# cache in this module.
key = "f" + FxGraphCachePickler.get_hash(details)
- log.debug("FX graph cache hash details for key %s:\n%s", key, details.debug_str())
+ log.debug(
+ "FX graph cache hash details for key %s:\n%s",
+ key,
+ details.debug_str(),
+ )
return key
|
2.41.0
|
946fa1c1247055d6b491e912b9fab02516d8d42
|
Tue, 30 Apr 2024 18:21:29 +0000
|
[PATCH 0848/1000] Fix bug in get_update_constraint (#125194)
|
Summary: Title Test Plan: CI Differential Revision: D56726321 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125194 Approved by: https://github.com/pianpwk
|
diff --git a/torch/export/exported_program.py b/torch/export/exported_program.py
index 7a829df900..d5dd400a11 100644
--- a/torch/export/exported_program.py
+++ b/torch/export/exported_program.py
@@ -823,15 +823,15 @@ def _get_updated_range_constraints(
fake_mode = detect_fake_mode(vals)
if fake_mode is not None:
- return fake_mode.shape_env, fake_mode
+ return fake_mode.shape_env
for v in vals:
if isinstance(v, torch.SymInt):
- return v.node.shape_env, fake_mode
+ return v.node.shape_env
# FIXME(tmanlaibaatar) Remove this whole branch once https://github.com/pytorch/pytorch/pull/123764
if _is_executorch:
assert old_range_constraints is None
- shape_env, _ = get_shape_env(gm)
+ shape_env = get_shape_env(gm)
if shape_env is None:
return {}
range_constraints = {
@@ -849,7 +849,7 @@ def _get_updated_range_constraints(
assert old_range_constraints is not None
- shape_env, fake_mode = get_shape_env(gm)
+ shape_env = get_shape_env(gm)
if shape_env is None:
return {}
|
2.41.0
|
2e7800b3f1ad9fac962a8c438809e16ce4ef8fe
|
Tue, 30 Apr 2024 18:28:49 +0000
|
[PATCH 0849/1000] [Torch][Timer] Skip expired timer logging for empty expired timers (#125039)
|
Summary: same as title Test Plan: unit tests Differential Revision: D56636566 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125039 Approved by: https://github.com/kurman
|
diff --git a/torch/distributed/elastic/timer/debug_info_logging.py b/torch/distributed/elastic/timer/debug_info_logging.py
index 8c8645d108..87af84e281 100644
--- a/torch/distributed/elastic/timer/debug_info_logging.py
+++ b/torch/distributed/elastic/timer/debug_info_logging.py
@@ -17,4 +17,5 @@ def log_debug_info_for_expired_timers(
run_id: str,
expired_timers: Dict[int, List[str]],
):
- logger.info("Timers expired for run:[%s] [%s].", run_id, expired_timers)
+ if expired_timers:
+ logger.info("Timers expired for run:[%s] [%s].", run_id, expired_timers)
|
2.41.0
|
0df0d3e941e13e4a8a1a30aef1dd81fabd5dacd
|
Mon, 29 Apr 2024 21:34:45 -0700
|
[PATCH 0850/1000] [dtensor] implement shard dim change with alltoall (#124872)
|
As titled: we implement a dedicated communication op that changes the sharding dimension efficiently using alltoall, replacing the previous allgather + local chunk approach. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124872 Approved by: https://github.com/XilunWu, https://github.com/yifuwang ghstack dependencies: #124871
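For intuition, a hedged single-process sketch of the data movement behind a Shard(0) -> Shard(1) redistribute: the real op issues a process-group alltoall, while this sketch only shuffles local chunks with made-up sizes.

```python
import torch

world_size = 4
full = torch.arange(16 * 8).reshape(16, 8).float()

# Current state: each rank holds a row shard (Shard(0)).
row_shards = list(full.chunk(world_size, dim=0))

# Each rank splits its shard along the new dim and "sends" piece j to rank j.
send = [list(s.chunk(world_size, dim=1)) for s in row_shards]
recv = [[send[src][dst] for src in range(world_size)] for dst in range(world_size)]

# Each rank concatenates what it received along the old shard dim (dim 0).
col_shards = [torch.cat(pieces, dim=0) for pieces in recv]

assert torch.equal(torch.cat(col_shards, dim=1), full)  # now Shard(1)
```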
|
diff --git a/test/distributed/_tensor/test_redistribute.py b/test/distributed/_tensor/test_redistribute.py
index b073783759..9192f815b4 100644
--- a/test/distributed/_tensor/test_redistribute.py
+++ b/test/distributed/_tensor/test_redistribute.py
@@ -334,6 +334,89 @@ class RedistributeTest(DTensorTestBase):
dt_full_tensor = dt.full_tensor()
self.assertEqual(dt_full_tensor, input_tensor)
+ @with_comms
+ def test_redistribute_shard_dim_change(self):
+ # test 1d device mesh
+ mesh_1d = DeviceMesh(self.device_type, torch.arange(self.world_size))
+ data_to_test = [
+ # evenly sharded case
+ torch.randn((8, 8), device=self.device_type),
+ # 3d or more dims
+ torch.randn((8, 8, 8), device=self.device_type),
+ # uneven case 1
+ torch.randn((8, 5), device=self.device_type),
+ # uneven case 2
+ torch.randn((5, 8), device=self.device_type),
+ # uneven case 3
+ torch.randn((5, 5), device=self.device_type),
+ ]
+
+ sharding_src_dst_pairs = [([Shard(0)], [Shard(1)]), ([Shard(1)], [Shard(0)])]
+
+ comm_mode = CommDebugMode()
+
+ for input_data in data_to_test:
+ for src, dst in sharding_src_dst_pairs:
+ expected_dt = distribute_tensor(input_data.clone(), mesh_1d, dst)
+ sharded_dt = distribute_tensor(input_data, mesh_1d, src)
+ with comm_mode:
+ out_dt = sharded_dt.redistribute(mesh_1d, dst)
+ self.assertEqual(out_dt.placements, expected_dt.placements)
+ local_out_dt = out_dt.to_local()
+ local_expected_dt = expected_dt.to_local()
+ self.assertEqual(out_dt.to_local(), expected_dt.to_local())
+ if self.device_type == "cuda":
+ self.assertEqual(
+ comm_mode.get_comm_counts()[
+ torch.ops._dtensor.shard_dim_alltoall
+ ],
+ 1,
+ )
+ else:
+ self.assertEqual(
+ comm_mode.get_comm_counts()[funcol.all_gather_into_tensor],
+ 1,
+ )
+
+ # test 2d device mesh
+ mesh_2d = DeviceMesh(
+ self.device_type, torch.arange(self.world_size).reshape(2, 2)
+ )
+ data_to_test_2d = [
+ # evenly sharded case
+ torch.randn((8, 8), device=self.device_type),
+ # 3d or more dims
+ torch.randn((8, 8, 8), device=self.device_type),
+ # uneven case 1
+ torch.randn((8, 5), device=self.device_type),
+ # uneven case 2
+ torch.randn((5, 8), device=self.device_type),
+ # uneven case 3
+ torch.randn((5, 5), device=self.device_type),
+ ]
+ sharding_src_dst_pairs_2d = [
+ ([Shard(0), Shard(1)], [Shard(0), Shard(0)]),
+ ([Shard(0), Shard(1)], [Shard(1), Shard(0)]),
+ ([Shard(0), Shard(0)], [Shard(1), Shard(1)]),
+ ]
+
+ for input_data in data_to_test_2d:
+ if input_data.ndim > 2:
+ sharding_spec_combs = sharding_src_dst_pairs_2d + [
+ ([Shard(0), Shard(2)], [Shard(1), Shard(0)])
+ ]
+ else:
+ sharding_spec_combs = sharding_src_dst_pairs_2d
+ for src, dst in sharding_spec_combs:
+ expected_dt = distribute_tensor(input_data.clone(), mesh_2d, dst)
+ sharded_dt = distribute_tensor(input_data, mesh_2d, src)
+ out_dt = sharded_dt.redistribute(mesh_2d, dst)
+
+ self.assertEqual(out_dt.placements, expected_dt.placements)
+ local_out_dt = out_dt.to_local()
+ local_expected_dt = expected_dt.to_local()
+ self.assertEqual(out_dt.to_local(), expected_dt.to_local())
+
class MultiDimRedistributeTest(DTensorTestBase):
@property
diff --git a/test/distributed/tensor/parallel/test_tp_examples.py b/test/distributed/tensor/parallel/test_tp_examples.py
index c85032fe2f..b35e5c7564 100644
--- a/test/distributed/tensor/parallel/test_tp_examples.py
+++ b/test/distributed/tensor/parallel/test_tp_examples.py
@@ -230,17 +230,16 @@ class DistTensorParallelExampleTest(DTensorTestBase):
self.assertDictEqual(
comm_mode.get_comm_counts(),
{
- c10d_functional.all_reduce: 1,
- c10d_functional.reduce_scatter_tensor: 4,
- c10d_functional.all_gather_into_tensor: 7,
+ c10d_functional.reduce_scatter_tensor: 6,
+ c10d_functional.all_gather_into_tensor: 6,
},
)
else:
self.assertDictEqual(
comm_mode.get_comm_counts(),
{
- c10d_functional.all_reduce: 5,
- c10d_functional.all_gather_into_tensor: 2,
+ c10d_functional.all_reduce: 6,
+ c10d_functional.all_gather_into_tensor: 1,
},
)
@@ -253,16 +252,15 @@ class DistTensorParallelExampleTest(DTensorTestBase):
self.assertDictEqual(
comm_mode.get_comm_counts(),
{
- c10d_functional.reduce_scatter_tensor: 4,
- c10d_functional.all_gather_into_tensor: 7,
+ c10d_functional.reduce_scatter_tensor: 5,
+ c10d_functional.all_gather_into_tensor: 6,
},
)
else:
self.assertDictEqual(
comm_mode.get_comm_counts(),
{
- c10d_functional.all_reduce: 8,
- c10d_functional.all_gather_into_tensor: 1,
+ c10d_functional.all_reduce: 9,
},
)
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index a1e5de75f9..4b219fc517 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -6002,6 +6002,18 @@ try:
ir._WaitKernel.create_wait(_c10d_functional.wait_tensor.default, inp)
return inp
+ @register_lowering(torch.ops._dtensor.shard_dim_alltoall)
+ def _shard_dim_alltoall(inp, gather_dim, shard_dim, group_name):
+ return ir.TensorBox.create(
+ ir._CollectiveKernel.create_out_of_place(
+ torch.ops._dtensor.shard_dim_alltoall.default,
+ inp,
+ gather_dim,
+ shard_dim,
+ group_name,
+ )
+ )
+
except ImportError:
log.info(
"Inductor support for distributed collectives depends on building torch.distributed"
diff --git a/torch/csrc/distributed/c10d/Functional.cpp b/torch/csrc/distributed/c10d/Functional.cpp
index d633429bb3..d392c0213b 100644
--- a/torch/csrc/distributed/c10d/Functional.cpp
+++ b/torch/csrc/distributed/c10d/Functional.cpp
@@ -579,3 +579,50 @@ TORCH_LIBRARY(_c10d_functional_autograd, m) {
c10::DispatchKey::Autograd, ::all_gather_into_tensor_autograd),
{at::Tag::pt2_compliant_tag});
}
+
+namespace {
+// DTensor related comm operations, sharing code with functional collective for
+// now
+at::Tensor shard_dim_alltoall(
+ const at::Tensor& input,
+ int64_t gather_dim,
+ int64_t shard_dim,
+ std::string group_name) {
+ auto group = c10d::resolve_process_group(group_name);
+ auto group_size = group->getSize();
+ std::vector<int64_t> output_sizes = input.sizes().vec();
+ if (output_sizes[shard_dim] % group_size != 0) {
+ LOG(WARNING) << "The first dimension of the shard_dim_alltoall input ("
+ << output_sizes[shard_dim]
+ << ") is not divisible by the group size (" << group_size
+ << ").";
+ }
+ output_sizes[shard_dim] = output_sizes[shard_dim] / group_size;
+ std::vector<at::Tensor> inputs;
+ auto length = output_sizes[shard_dim];
+ for (int i = 0; i < group_size; i++) {
+ inputs.push_back(input.narrow(shard_dim, i * length, length).contiguous());
+ }
+ // allocate outputs
+ std::vector<at::Tensor> outputs;
+ for (int i = 0; i < group_size; i++) {
+ outputs.push_back(input.new_empty(output_sizes).contiguous());
+ }
+ auto work = group->alltoall(outputs, inputs);
+
+ work->wait();
+ // TODO: it's very tricky to get the current async behavior work for shard dim
+ // alltoall so for now we just keep this comm op to be synchronous. We can
+ // revisit later how to support the async case with the Work registry.
+ return at::cat(outputs, gather_dim);
+}
+} // namespace
+
+// DTensor comm op registry
+TORCH_LIBRARY(_dtensor, m) {
+ m.def(
+ "shard_dim_alltoall(Tensor input, int gather_dim, int shard_dim, str group_name) -> Tensor",
+ torch::dispatch(
+ c10::DispatchKey::CompositeExplicitAutograd, ::shard_dim_alltoall),
+ {at::Tag::pt2_compliant_tag});
+}
diff --git a/torch/distributed/_tensor/_collective_utils.py b/torch/distributed/_tensor/_collective_utils.py
index 603ac09f4a..51c1379625 100644
--- a/torch/distributed/_tensor/_collective_utils.py
+++ b/torch/distributed/_tensor/_collective_utils.py
@@ -6,9 +6,11 @@ from functools import lru_cache
from typing import List, Optional
import torch
+import torch.distributed._functional_collectives as funcol
import torch.distributed._tensor.placement_types as placement_types
from torch.distributed.device_mesh import _mesh_resources, DeviceMesh
from torch.distributed.distributed_c10d import (
+ _get_group_size_by_name,
all_to_all,
broadcast,
get_global_rank,
@@ -23,7 +25,33 @@ from torch.distributed.distributed_c10d import (
logger = logging.getLogger(__name__)
-# TODO: we need to migrate these APIs to be functional collectives
+@torch.library.register_fake("_dtensor::shard_dim_alltoall")
+def _shard_dim_alltoall_meta(input, gather_dim, shard_dim, group_name):
+ group_size = _get_group_size_by_name(group_name)
+ stacked_list = [torch.empty_like(input) for _ in range(group_size)]
+ return torch.cat(stacked_list, dim=gather_dim).chunk(group_size, dim=shard_dim)
+
+
+def shard_dim_alltoall(input, gather_dim, shard_dim, mesh, mesh_dim):
+ if mesh.device_type == "cpu":
+ # Gloo does not support alltoall, so falling back to allgather + chunk
+ logger.warning(
+ "CPU process group does not support alltoall yet, falling back with allgather + chunk!"
+ )
+ out = funcol.all_gather_tensor(input, gather_dim, (mesh, mesh_dim))
+ if isinstance(out, funcol.AsyncCollectiveTensor):
+ # stick to the same behavior for the alltoall case, remove this once we enable alltoall async
+ out = out.wait()
+ out = torch.chunk(out, mesh.size(mesh_dim), dim=shard_dim)[
+ mesh.get_local_rank(mesh_dim)
+ ]
+ return out.contiguous() if not out.is_contiguous() else out
+
+ group_name = funcol._resolve_group_name((mesh, mesh_dim))
+ # TODO: enable async op for shard_dim_alltoall
+ return torch.ops._dtensor.shard_dim_alltoall(
+ input, gather_dim, shard_dim, group_name
+ )
def mesh_scatter(
diff --git a/torch/distributed/_tensor/debug/comm_mode.py b/torch/distributed/_tensor/debug/comm_mode.py
index b195b30154..604380a37f 100644
--- a/torch/distributed/_tensor/debug/comm_mode.py
+++ b/torch/distributed/_tensor/debug/comm_mode.py
@@ -50,6 +50,8 @@ class CommDebugMode(TorchDispatchMode):
self.comm_registry.add(native_op)
self.comm_registry.add(py_op)
+ self.comm_registry.add(torch.ops._dtensor.shard_dim_alltoall)
+
def get_total_counts(self) -> int:
return sum(self.comm_counts.values())
diff --git a/torch/distributed/_tensor/ops/embedding_ops.py b/torch/distributed/_tensor/ops/embedding_ops.py
index 763f122549..a993f90fa4 100644
--- a/torch/distributed/_tensor/ops/embedding_ops.py
+++ b/torch/distributed/_tensor/ops/embedding_ops.py
@@ -174,7 +174,6 @@ def embedding_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
"""
This strategy handles embedding op. We have two possible embedding shardings:
rowwise and colwise
- # TODO: implement rowwise sharding
"""
weight_strategy = cast(OpStrategy, op_schema.args_schema[0])
indices_strategy = cast(OpStrategy, op_schema.args_schema[1])
@@ -250,7 +249,6 @@ def embedding_dense_backward_strategy(
"""
This strategy handles embedding op. We have two possible embedding shardings:
rowwise and colwise
- # TODO: implement rowwise sharding backward
"""
grad_out_strategy = cast(OpStrategy, op_schema.args_schema[0])
indices_strategy = cast(OpStrategy, op_schema.args_schema[1])
diff --git a/torch/distributed/_tensor/placement_types.py b/torch/distributed/_tensor/placement_types.py
index b955208d81..0bb5d95ac3 100644
--- a/torch/distributed/_tensor/placement_types.py
+++ b/torch/distributed/_tensor/placement_types.py
@@ -10,6 +10,7 @@ from torch.distributed._tensor._collective_utils import (
mesh_broadcast,
mesh_scatter,
pad_tensor,
+ shard_dim_alltoall,
unpad_tensor,
)
from torch.distributed.device_mesh import DeviceMesh
@@ -240,6 +241,67 @@ class Shard(Placement):
)
return shards[shard_index].clone()
+ def _to_new_shard_dim(
+ self,
+ local_tensor: torch.Tensor,
+ mesh: DeviceMesh,
+ mesh_dim: int,
+ current_logical_shape: List[int],
+ new_shard_dim: int,
+ ) -> torch.Tensor:
+ """
+ transform from existing sharded tensor to a new sharded tensor on
+ that shard on a new dimension, which performs an alltoall
+ """
+ my_coordinate = mesh.get_coordinate()
+ if my_coordinate is None:
+ # if rank is not part of mesh, we simply return local_tensor,
+ # which should be an empty tensor
+ return local_tensor
+
+ num_chunks = mesh.size(mesh_dim=mesh_dim)
+
+ old_dim_logical_size = current_logical_shape[self.dim]
+ new_dim_logical_size = current_logical_shape[new_shard_dim]
+ old_dim_padding = old_dim_logical_size % num_chunks != 0
+ new_dim_padding = new_dim_logical_size % num_chunks != 0
+ if old_dim_padding:
+ old_dim_full_chunk_size = (
+ old_dim_logical_size + num_chunks - 1
+ ) // num_chunks
+ old_dim_pad_size = old_dim_full_chunk_size - local_tensor.size(self.dim)
+ local_tensor = pad_tensor(local_tensor, self.dim, old_dim_pad_size)
+ if new_dim_padding:
+ new_dim_full_chunk_size = (
+ new_dim_logical_size + num_chunks - 1
+ ) // num_chunks
+ new_dim_pad_size = new_dim_full_chunk_size * num_chunks - local_tensor.size(
+ new_shard_dim
+ )
+ local_tensor = pad_tensor(local_tensor, new_shard_dim, new_dim_pad_size)
+
+ if not local_tensor.is_contiguous():
+ local_tensor = local_tensor.contiguous()
+
+ new_tensor = shard_dim_alltoall(
+ local_tensor, self.dim, new_shard_dim, mesh, mesh_dim
+ )
+
+ if old_dim_padding:
+ old_dim_unpad_size = (
+ old_dim_full_chunk_size * num_chunks - current_logical_shape[self.dim] # type: ignore[possibly-undefined]
+ )
+ new_tensor = unpad_tensor(new_tensor, self.dim, old_dim_unpad_size) # type: ignore[possibly-undefined]
+
+ if new_dim_padding:
+ local_shard_size_on_new_dim = self._local_shard_size_on_dim(
+ new_dim_logical_size, num_chunks, my_coordinate[mesh_dim]
+ )[0]
+ new_dim_unpad_size = new_dim_full_chunk_size - local_shard_size_on_new_dim # type: ignore[possibly-undefined]
+ new_tensor = unpad_tensor(new_tensor, new_shard_dim, new_dim_unpad_size) # type: ignore[possibly-undefined]
+
+ return new_tensor
+
def __eq__(self, other: object) -> bool:
if not isinstance(other, Shard):
return False
diff --git a/torch/distributed/_tensor/redistribute.py b/torch/distributed/_tensor/redistribute.py
index 1e4249f3fe..5cef7dbb04 100644
--- a/torch/distributed/_tensor/redistribute.py
+++ b/torch/distributed/_tensor/redistribute.py
@@ -49,17 +49,15 @@ def _gen_transform_infos(
dst_spec: DTensorSpec,
) -> List[_TransformInfo]:
"""
- Generate the transform infos from the source placements to the target placements, to
- transform from source to target placement it might have multipl steps, i.e. it might
- decompose Si -> Sj into Si -> R -> Sj.
+ Generate the transform infos from the source placements to the target placements.
+
+ To transform from source to target placement it might have multiple steps, i.e. it
+ might decompose Si -> Sj into Si -> R -> Sj.
This would detects if there're mis-aligned shardings between src/dst placements.
i.e. (Shard(0), Shard(0)) -> (Replicate(), Shard(0)), in this case Shard(0) -> Shard(0)
for mesh dimension 1 actually needs reshard, because in the first case it's a sub-sharding
of an already tensor dimension 0, and in the second case, it's the first sharding on tensor
dimension 0.
-
- Note that we also currently handles sharding on different tensor dimensions, e.g.
- Shard(0) -> Shard(1) in this pass
"""
src_dim_counts: Dict[int, int] = {}
dst_dim_counts: Dict[int, int] = {}
@@ -103,10 +101,10 @@ def _gen_transform_infos(
if (
isinstance(src, Shard)
and isinstance(dst, Shard)
- and (
- src.dim != dst.dim or src_dim_counts[src.dim] != dst_dim_counts[dst.dim]
- )
+ and (mesh_ndim > 1 or src_dim_counts[src.dim] != dst_dim_counts[dst.dim])
):
+ # for the case when mesh ndim > 1 or shard dim counts are different
+ # TODO: see if we can optimize the mesh_ndim > 1 case
# decompose Shard(i) -> Shard(j) into Shard(i) -> Replicate() -> Shard(j)
transform_infos.append(
_TransformInfo(
@@ -207,24 +205,18 @@ def redistribute_local_tensor(
local_tensor, device_mesh, i, my_coordinate[i]
)
else:
- # NOTE: we don't support this case efficiently yet, the fallback path we are going here is
- # to decompose Shard(0) -> Shard(1) into Shard(0) -> Replicate -> Shard(1)
- # TODO: enable this with all_to_all
assert (
current.is_shard()
), f"Current placement should be shard but found {current}"
shard_spec = cast(Shard, current)
if shard_spec.dim != target_placement.dim:
- new_local_tensor = shard_spec._to_replicate_tensor(
- local_tensor, device_mesh, i, transform_info.logical_shape
- )
- shards, _ = target_placement._split_tensor(
- new_local_tensor,
- num_chunks,
- with_padding=False,
- contiguous=False,
+ new_local_tensor = shard_spec._to_new_shard_dim(
+ local_tensor,
+ device_mesh,
+ i,
+ transform_info.logical_shape,
+ target_placement.dim,
)
- new_local_tensor = shards[my_coordinate[i]]
elif target.is_partial():
if current.is_replicate():
partial_spec = cast(_Partial, target)
diff --git a/torch/testing/_internal/distributed/_tensor/common_dtensor.py b/torch/testing/_internal/distributed/_tensor/common_dtensor.py
index 19e2da755a..b194485951 100644
--- a/torch/testing/_internal/distributed/_tensor/common_dtensor.py
+++ b/torch/testing/_internal/distributed/_tensor/common_dtensor.py
@@ -214,14 +214,14 @@ class Transformer(nn.Module):
# Parallelize the root submodules.
if use_seq_parallel:
root_plan = {
- "tok_embeddings": ColwiseParallel(output_layouts=Shard(1)),
- "pos_embeddings": ColwiseParallel(output_layouts=Shard(0)),
+ "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(1)),
+ "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(0)),
"norm": SequenceParallel(),
}
else:
root_plan = {
- "tok_embeddings": ColwiseParallel(output_layouts=Replicate()),
- "pos_embeddings": ColwiseParallel(output_layouts=Replicate()),
+ "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()),
+ "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()),
}
module_tp = parallelize_module(module, device_mesh, root_plan)
@@ -261,25 +261,14 @@ class Transformer(nn.Module):
# Parallelize the output submodule. If weight tying is enabled, we need to
# make sure output.weight is sharded consistently as tok_embeddings.weight,
# at the cost of the all_reduce operation using RowwiseParallel.
- output_parallelize_plan = None
- if not module_tp.model_args.weight_tying:
- output_parallelize_plan = (
- ColwiseParallel(
- input_layouts=Shard(1),
- output_layouts=Replicate(),
- )
- if use_seq_parallel
- else ColwiseParallel(output_layouts=Replicate())
- )
- else:
- output_parallelize_plan = (
- RowwiseParallel(
- input_layouts=Shard(1),
- output_layouts=Replicate(),
- )
- if use_seq_parallel
- else RowwiseParallel(input_layouts=Replicate())
+ output_parallelize_plan = (
+ ColwiseParallel(
+ input_layouts=Shard(1),
+ output_layouts=Replicate(),
)
+ if use_seq_parallel
+ else ColwiseParallel(output_layouts=Replicate())
+ )
parallelize_module(module_tp.output, device_mesh, output_parallelize_plan)
# Do manual setup on features that DTensor does not support yet.
|
2.41.0
|
4a241947ae9beecabed84bb36698552b82575f7
|
Mon, 29 Apr 2024 21:34:45 -0700
|
[PATCH 0851/1000] [dtensor] delete the old unused mesh_alltoall (#124879)
|
As titled: now that we have a dedicated comm op, this is no longer needed. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124879 Approved by: https://github.com/XilunWu, https://github.com/wz337 ghstack dependencies: #124871, #124872
|
diff --git a/test/distributed/test_device_mesh.py b/test/distributed/test_device_mesh.py
index 9c54cfa312..d04fcf938c 100644
--- a/test/distributed/test_device_mesh.py
+++ b/test/distributed/test_device_mesh.py
@@ -6,7 +6,6 @@ import torch
import torch.distributed._functional_collectives as funcol
from torch.distributed._tensor import DTensor
from torch.distributed._tensor._collective_utils import (
- mesh_all_to_all,
mesh_broadcast,
mesh_scatter,
unpad_tensor,
@@ -700,70 +699,6 @@ class DeviceMeshCollectiveTest(DTensorTestBase):
mesh_scatter(received_tensor, scattered_tensors, mesh, mesh_dim=dim)
self.assertEqual(received_tensor, torch.ones(3, 3) * self.rank)
- @with_comms
- def test_all_to_all_1d(self):
- # transpose on a 2D tensor distributed over N nodes:
- mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
- tensor_shape = [3, 3]
- input_tensor_list = [
- torch.ones(*tensor_shape, device=self.device_type)
- * (rank + self.rank * self.world_size)
- for rank in range(self.world_size)
- ]
- expected_tensor_list = [
- torch.ones(tensor_shape, device=self.device_type)
- * (self.rank + rank * self.world_size) # i.e. transpose
- for rank in range(self.world_size)
- ]
- for scatter_dim in range(len(tensor_shape)):
- output_tensor_list = [
- torch.empty_like(input_tensor_list[idx])
- for idx in range(len(input_tensor_list))
- ]
- # scatter on dim > 0 would generate non-contiguous tensor, verify that works
- mesh_all_to_all(output_tensor_list, input_tensor_list, mesh, mesh_dim=0)
- output_tensor = torch.cat(output_tensor_list, dim=scatter_dim)
- expected_tensor = torch.cat(expected_tensor_list, dim=scatter_dim)
-
- self.assertEqual(output_tensor, expected_tensor)
-
- @with_comms
- def test_all_to_all_nd(self):
- mesh_tensor = torch.arange(8).reshape(2, 2, 2)
- mesh = DeviceMesh(self.device_type, mesh_tensor)
- tensor_shape = [3, 3, 3]
- # check all dim groups
- dim_to_subgroups = mesh.get_group()
- for dim, dim_group in enumerate(dim_to_subgroups):
- my_coordinate = mesh.get_coordinate()[dim]
- dim_group_size = get_world_size(dim_group)
- global_ranks = [
- get_global_rank(dim_group, i) for i in range(dim_group_size)
- ]
- input_tensor_list = [
- torch.ones(*tensor_shape, device=self.device_type)
- * (i + self.rank * dim_group_size)
- for i in range(dim_group_size)
- ]
- expected_tensor_list = [
- torch.ones(*tensor_shape, device=self.device_type)
- * (my_coordinate + global_rank * dim_group_size) # i.e. transpose
- for global_rank in global_ranks
- ]
- for scatter_dim in range(len(tensor_shape)):
- # input_tensor = torch.cat(input_tensor_list, dim=scatter_dim)
- output_tensor_list = [
- torch.empty_like(input_tensor_list[idx])
- for idx in range(len(input_tensor_list))
- ]
- # scatter on dim > 0 would generate non-contiguous tensor, verify that works
- mesh_all_to_all(
- output_tensor_list, input_tensor_list, mesh, mesh_dim=dim
- )
- output_tensor = torch.cat(output_tensor_list, dim=scatter_dim)
- expected_tensor = torch.cat(expected_tensor_list, dim=scatter_dim)
- self.assertEqual(output_tensor, expected_tensor)
-
if __name__ == "__main__":
run_tests()
diff --git a/torch/distributed/_tensor/_collective_utils.py b/torch/distributed/_tensor/_collective_utils.py
index 51c1379625..ce4809d996 100644
--- a/torch/distributed/_tensor/_collective_utils.py
+++ b/torch/distributed/_tensor/_collective_utils.py
@@ -11,11 +11,9 @@ import torch.distributed._tensor.placement_types as placement_types
from torch.distributed.device_mesh import _mesh_resources, DeviceMesh
from torch.distributed.distributed_c10d import (
_get_group_size_by_name,
- all_to_all,
broadcast,
get_global_rank,
get_rank,
- get_world_size,
GroupMember,
ProcessGroup,
scatter,
@@ -150,48 +148,6 @@ def mesh_broadcast(
return broadcast(tensor, src=src_for_dim, group=dim_group, async_op=async_op)
-# TODO: test uneven split on GLOO and NCCL
-def mesh_all_to_all(
- output_tensor_list: List[torch.Tensor],
- input_tensor_list: List[torch.Tensor],
- mesh: DeviceMesh,
- mesh_dim: int = 0,
- async_op: bool = False,
-) -> Optional[Work]:
- dim_group = mesh.get_group(mesh_dim)
- assert isinstance(dim_group, ProcessGroup)
-
- work = None
- # no direct dist.all_to_all support on 'gloo' so we manually do scatters
- if mesh.device_type == "cpu":
- logger.warning(
- "ProcessGroupGloo does not support all_to_all, falling back with scatters!"
- )
- # TODO: pull the handle of uneven case in #492
- dim_group_size = get_world_size(dim_group)
- for i in range(dim_group_size):
- # src need to be global rank
- src_for_dim = i
- if dim_group is not GroupMember.WORLD:
- src_for_dim = get_global_rank(dim_group, i)
-
- work = scatter(
- output_tensor_list[i],
- input_tensor_list if mesh.get_rank() == src_for_dim else [],
- group=dim_group,
- src=src_for_dim,
- async_op=async_op,
- )
- else:
- work = all_to_all(
- output_tensor_list,
- input_tensor_list,
- dim_group,
- async_op=async_op,
- )
- return work
-
-
def pad_tensor(tensor: torch.Tensor, pad_dim: int, pad_size: int) -> torch.Tensor:
if pad_size == 0:
return tensor
|
2.41.0
|
0258e836965325af332f69574600c2e3cfde765
|
Tue, 30 Apr 2024 18:38:31 +0000
|
[PATCH 0852/1000] forward fix preferred blas backend and windows CI (#125080)
|
PR #122106 broke windows tests. The feature should have been disabled for Windows but was not disabled correctly. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125080 Approved by: https://github.com/clee2000
|
diff --git a/aten/src/ATen/Context.cpp b/aten/src/ATen/Context.cpp
index 7aa7542749..7fd191ef3f 100644
--- a/aten/src/ATen/Context.cpp
+++ b/aten/src/ATen/Context.cpp
@@ -268,6 +268,12 @@ at::BlasBackend Context::blasPreferredBackend() const {
}
void Context::setBlasPreferredBackend(at::BlasBackend b) {
+#ifdef _MSC_VER
+ TORCH_WARN_ONCE(
+ "torch.backends.cuda.preferred_blas_library is an experimental feature. "
+ "It is not supported on Windows."
+ );
+#else
TORCH_CHECK((b != at::BlasBackend::Cublaslt) || hasCuBLASLt(),
"Cannot set preferred backend to cuBLASLt if PyTorch has not been compiled with cuBLASLt.");
if (b != at::BlasBackend::Cublas) {
@@ -278,6 +284,7 @@ void Context::setBlasPreferredBackend(at::BlasBackend b) {
);
}
blas_preferred_backend = b;
+#endif
}
bool Context::allowFP16ReductionCuBLAS() const {
|
2.41.0
|
6e4cbc69d3158f35eae1bd87a7483a7d9c04c65
|
Tue, 30 Apr 2024 18:58:48 +0000
|
[PATCH 0853/1000] Fixes two build problems on ROCM 6.1 + Ubuntu 22.04 (#118216)
|
Fixes two build problems on ROCM 6.1 + Ubuntu 22.04 ### Inconsistent value of CMAKE_PREFIX_PATH between `.ci/pytorch/build.sh` and Build Instructions Currently `CMAKE_PREFIX_PATH` points to the conda base environment (commonly `/opt/conda`). However, the conda environment used in the CI should be `/opt/conda/envs/py_<VERSION>`, which is supplied by `$CONDA_PREFIX`. This divergence may cause libstdc++ version conflicts because the base conda environment may ship a different libstdc++ than the `py_<VERSION>` environment and/or the system default environment. One notable issue is that, on our internal CI system, this script failed to build the AOTriton library on Ubuntu 22.04 due to libstdc++ version conflicts between the HIP compiler and the conda base environment. This PR fixes this and makes sure the CI script follows the official build instructions. ### Incorrect `tinfo` linkage on Ubuntu 22.04 due to a flaw in parsing `os-release` The code that parses /etc/os-release is incorrect: the distribution name was parsed as `PRETTY_Ubuntu` instead of `Ubuntu`. Because of this flaw, `libtinfo` is not linked into the binary, so the C++ unit tests failed to build due to missing symbols from `libtinfo`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/118216 Approved by: https://github.com/jeffdaily, https://github.com/jithunnair-amd, https://github.com/pruthvistony, https://github.com/malfet, https://github.com/atalman
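To illustrate the parsing fix (sketched in Python for brevity; the actual change below is in CMake), the key point is matching `NAME=` and `VERSION_ID=` anchored at the start of each line so `PRETTY_NAME=` can no longer shadow `NAME=`.

```python
import re

def parse_os_release(text: str):
    """Return (NAME, VERSION_ID) from /etc/os-release contents, line by line."""
    name, version = "", ""
    for line in text.splitlines():
        if m := re.match(r'^NAME="?(.*?)"?$', line):
            name = m.group(1)
        elif m := re.match(r'^VERSION_ID="?(.*?)"?$', line):
            version = m.group(1)
    return name, version

sample = 'PRETTY_NAME="Ubuntu 22.04.4 LTS"\nNAME="Ubuntu"\nVERSION_ID="22.04"\n'
assert parse_os_release(sample) == ("Ubuntu", "22.04")
```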
|
diff --git a/.ci/pytorch/build.sh b/.ci/pytorch/build.sh
index f7eee9fe9a..b81caa0513 100755
--- a/.ci/pytorch/build.sh
+++ b/.ci/pytorch/build.sh
@@ -81,7 +81,22 @@ if ! which conda; then
export USE_MKLDNN=0
fi
else
- export CMAKE_PREFIX_PATH=/opt/conda
+ # CMAKE_PREFIX_PATH precedences
+ # 1. $CONDA_PREFIX, if defined. This follows the pytorch official build instructions.
+ # 2. /opt/conda/envs/py_${ANACONDA_PYTHON_VERSION}, if ANACONDA_PYTHON_VERSION defined.
+ # This is for CI, which defines ANACONDA_PYTHON_VERSION but not CONDA_PREFIX.
+ # 3. $(conda info --base). The fallback value of pytorch official build
+ # instructions actually refers to this.
+ # Commonly this is /opt/conda/
+ if [[ -v CONDA_PREFIX ]]; then
+ export CMAKE_PREFIX_PATH=${CONDA_PREFIX}
+ elif [[ -v ANACONDA_PYTHON_VERSION ]]; then
+ export CMAKE_PREFIX_PATH="/opt/conda/envs/py_${ANACONDA_PYTHON_VERSION}"
+ else
+ # already checked by `! which conda`
+ CMAKE_PREFIX_PATH="$(conda info --base)"
+ export CMAKE_PREFIX_PATH
+ fi
# Workaround required for MKL library linkage
# https://github.com/pytorch/pytorch/issues/119557
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index b6f46fb043..5b30bef4fc 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -1214,9 +1214,19 @@ if(USE_ROCM)
# Currently only active for Ubuntu 20.04 and greater versions.
if(UNIX AND EXISTS "/etc/os-release")
file(STRINGS /etc/os-release OS_RELEASE)
- string(REGEX REPLACE "NAME=\"([A-Za-z]+).*" "\\1" OS_DISTRO ${OS_RELEASE})
- string(REGEX REPLACE ".*VERSION_ID=\"([0-9\.]+).*" "\\1" OS_VERSION ${OS_RELEASE})
- if(OS_DISTRO STREQUAL "Ubuntu" AND OS_VERSION VERSION_GREATER_EQUAL "20.04")
+ set(DISTRO_NAME "")
+ set(DISTRO_VERSION "")
+ foreach(line ${OS_RELEASE})
+ string(REGEX MATCH "^NAME=" DISTRO_NAME_MATCH ${line})
+ if(NOT DISTRO_NAME_MATCH STREQUAL "")
+ string(REGEX REPLACE "^NAME=\"(.*)\"" "\\1" DISTRO_NAME ${line})
+ endif()
+ string(REGEX MATCH "^VERSION_ID=" DISTRO_VERSION_MATCH ${line})
+ if(NOT DISTRO_VERSION_MATCH STREQUAL "")
+ string(REGEX REPLACE "^VERSION_ID=\"(.*)\"" "\\1" DISTRO_VERSION ${line})
+ endif()
+ endforeach()
+ if(DISTRO_NAME STREQUAL "Ubuntu" AND DISTRO_VERSION VERSION_GREATER_EQUAL "20.04")
find_library(LIBTINFO_LOC tinfo NO_CMAKE_PATH NO_CMAKE_ENVIRONMENT_PATH)
if(LIBTINFO_LOC)
get_filename_component(LIBTINFO_LOC_PARENT ${LIBTINFO_LOC} DIRECTORY)
|
2.41.0
|
5fa54a9d1117e67f3ac2ebb3f4187fd47101acf
|
Tue, 30 Apr 2024 19:05:53 +0000
|
[PATCH 0854/1000] Revert "Convert `ForeachFuncInfo` to `dataclass` (#125001)"
|
This reverts commit 9466335ae4cb049efd3f4c2b32b2115ba00694f3. Reverted https://github.com/pytorch/pytorch/pull/125001 on behalf of https://github.com/huydhn due to Sorry for reverting your change but I think it is breaking on ROCm https://hud.pytorch.org/pytorch/pytorch/commit/9466335ae4cb049efd3f4c2b32b2115ba00694f3 ([comment](https://github.com/pytorch/pytorch/pull/125001#issuecomment-2086640674))
|
diff --git a/test/test_foreach.py b/test/test_foreach.py
index e9fc4ffc77..792d757a88 100644
--- a/test/test_foreach.py
+++ b/test/test_foreach.py
@@ -164,22 +164,20 @@ class TestForeach(TestCase):
wrapped_op, _, inplace_op, _ = self._get_funcs(op)
for sample in op.sample_zero_size_inputs(device, dtype):
- if op.method_variant is not None:
+ if op.supports_out:
wrapped_op(
(sample.input, *sample.args),
is_cuda=self.is_cuda,
expect_fastpath=True,
zero_size=True,
)
-
- if op.inplace_variant is not None:
- with InplaceForeachVersionBumpCheck(self, sample.input):
- inplace_op(
- (sample.input, *sample.args),
- is_cuda=self.is_cuda,
- expect_fastpath=True,
- zero_size=True,
- )
+ with InplaceForeachVersionBumpCheck(self, sample.input):
+ inplace_op(
+ (sample.input, *sample.args),
+ is_cuda=self.is_cuda,
+ expect_fastpath=True,
+ zero_size=True,
+ )
@skipIfRocmVersionLessThan((6, 0))
@ops(
@@ -1227,16 +1225,12 @@ class TestForeach(TestCase):
"inplace", (False, True), name_fn=lambda x: "inplace" if x else "outplace"
)
def test_autodiff(self, device, dtype, op, inplace):
+ if not (op.supports_autograd or op.supports_forward_ad):
+ self.skipTest("neither reverse mode nor forward mode supported")
if (not inplace) and not op.supports_out:
self.skipTest("out-of-place not implemented")
if inplace and op.has_no_in_place:
self.skipTest("in-place not implemented")
- if not (
- op.supports_autograd
- or op.supports_inplace_autograd
- or op.supports_forward_ad
- ):
- self.skipTest("neither reverse mode nor forward mode supported")
# note(crcrpar): without this, some unary functions fail, unlike inplace and/or complex.
if (
diff --git a/torch/testing/_internal/common_device_type.py b/torch/testing/_internal/common_device_type.py
index d5285a6d0d..048ae83f72 100644
--- a/torch/testing/_internal/common_device_type.py
+++ b/torch/testing/_internal/common_device_type.py
@@ -921,7 +921,7 @@ class ops(_TestParametrizer):
elif self.opinfo_dtypes == OpDTypes.unsupported:
dtypes = set(get_all_dtypes()).difference(op.supported_dtypes(device_cls.device_type))
elif self.opinfo_dtypes == OpDTypes.supported:
- dtypes = set(op.supported_dtypes(device_cls.device_type))
+ dtypes = op.supported_dtypes(device_cls.device_type)
elif self.opinfo_dtypes == OpDTypes.any_one:
# Tries to pick a dtype that supports both forward or backward
supported = op.supported_dtypes(device_cls.device_type)
@@ -936,7 +936,7 @@ class ops(_TestParametrizer):
dtypes = {}
elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one:
# Tries to pick a dtype that supports both CPU and CUDA
- supported = set(op.dtypes).intersection(op.dtypesIfCUDA)
+ supported = op.dtypes.intersection(op.dtypesIfCUDA)
if supported:
dtypes = {next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported)}
else:
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index ff519abbec..ab73b5baf3 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -9296,7 +9296,7 @@ class foreach_inputs_sample_func:
opinfo, ForeachRightmostArgType.TensorList, device, dtype, NUM_SIZE0_TENSORS,
**zero_size_foreach_inputs_kwargs)[0])
kwargs = self._sample_kwargs(
- opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype)
+ opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype, zero_size=True)
else:
args = []
kwargs = {}
@@ -9475,95 +9475,55 @@ class foreach_pointwise_sample_func(foreach_inputs_sample_func):
foreach_unary_op_db: List[OpInfo] = [
ForeachFuncInfo(
'exp',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
backward_requires_result=True,
- dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'acos',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
- dtypes=floating_and_complex_types(),
+ foreach_inputs_sample_func(1, False, False),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'asin',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
- dtypes=floating_and_complex_types(),
+ foreach_inputs_sample_func(1, False, False),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'atan',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
- dtypes=floating_and_complex_types(),
+ foreach_inputs_sample_func(1, False, False),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'cos',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
- dtypes=floating_and_complex_types(),
+ foreach_inputs_sample_func(1, False, False),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'cosh',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
- dtypes=floating_and_complex_types(),
+ foreach_inputs_sample_func(1, False, False),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'log',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
- dtypes=floating_and_complex_types(),
+ foreach_inputs_sample_func(1, False, False),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'log10',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
- dtypes=floating_and_complex_types(),
+ foreach_inputs_sample_func(1, False, False),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'log2',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
- dtypes=floating_and_complex_types(),
+ foreach_inputs_sample_func(1, False, False),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'tan',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
backward_requires_result=True,
- dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
decorators=(
# due to https://github.com/pytorch/pytorch/pull/102427 enabling jiterator for complex
DecorateInfo(
@@ -9580,13 +9540,9 @@ foreach_unary_op_db: List[OpInfo] = [
),
ForeachFuncInfo(
'tanh',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
backward_requires_result=True,
- dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
decorators=(
DecorateInfo(
toleranceOverride(
@@ -9600,186 +9556,129 @@ foreach_unary_op_db: List[OpInfo] = [
),
ForeachFuncInfo(
'sin',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
- dtypes=floating_and_complex_types(),
+ foreach_inputs_sample_func(1, False, False),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'sinh',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
- dtypes=floating_and_complex_types(),
+ foreach_inputs_sample_func(1, False, False),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'neg',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex(),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'sqrt',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
backward_requires_result=True,
),
ForeachFuncInfo(
'ceil',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'erf',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'erfc',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'expm1',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
backward_requires_result=True,
),
ForeachFuncInfo(
'floor',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'log1p',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'round',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'frac',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'reciprocal',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
backward_requires_result=True,
),
ForeachFuncInfo(
'sigmoid',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
backward_requires_result=True,
),
ForeachFuncInfo(
'trunc',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'abs',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
- decorators=(
- DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types()),
- DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types()),
- DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types()),
+ skips=(
+ DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta",
+ "test_dispatch_symbolic_meta_inplace", dtypes=complex_types()),
+ DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta",
+ "test_dispatch_meta_inplace", dtypes=complex_types()),
+ DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestMeta",
+ "test_meta_inplace", dtypes=complex_types()),
),
),
ForeachFuncInfo(
'zero',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
- dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
supports_out=False,
),
ForeachFuncInfo(
'sign',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
),
ForeachFuncInfo(
'lgamma',
- sample_inputs_func=foreach_inputs_sample_func(1, False, False),
+ foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta",
"test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool)),
DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta",
@@ -9793,14 +9692,11 @@ foreach_unary_op_db: List[OpInfo] = [
foreach_binary_op_db: List[OpInfo] = [
ForeachFuncInfo(
"add",
- sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
+ foreach_inputs_sample_func(2, True, True, True),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
# These tests fail with aten._local_scalar_dense not being implemented.
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9815,14 +9711,11 @@ foreach_binary_op_db: List[OpInfo] = [
),
ForeachFuncInfo(
"sub",
- sample_inputs_func=foreach_inputs_sample_func(2, True, True),
+ foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9838,10 +9731,7 @@ foreach_binary_op_db: List[OpInfo] = [
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
# Samples have complex types and inplace only works if the dtype is complex.
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)),
@@ -9858,10 +9748,7 @@ foreach_binary_op_db: List[OpInfo] = [
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
# Samples have complex types and inplace only works if the dtype is complex.
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)),
@@ -9884,13 +9771,10 @@ foreach_binary_op_db: List[OpInfo] = [
),
ForeachFuncInfo(
"clamp_min",
- sample_inputs_func=foreach_inputs_sample_func(2, True, True),
+ foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9903,13 +9787,10 @@ foreach_binary_op_db: List[OpInfo] = [
),
ForeachFuncInfo(
"clamp_max",
- sample_inputs_func=foreach_inputs_sample_func(2, True, True),
+ foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9923,13 +9804,12 @@ foreach_binary_op_db: List[OpInfo] = [
# note(crcrpar): forward ad not implemented.
ForeachFuncInfo(
"minimum",
- sample_inputs_func=foreach_inputs_sample_func(2, True, True),
+ foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- supports_autograd=True,
- supports_inplace_autograd=False,
supports_forward_ad=False,
- decorators=(
+ supports_inplace_autograd=False,
+ skips=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9943,13 +9823,12 @@ foreach_binary_op_db: List[OpInfo] = [
# note(crcrpar): forward ad not implemented.
ForeachFuncInfo(
"maximum",
- sample_inputs_func=foreach_inputs_sample_func(2, True, True),
+ foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- supports_autograd=True,
supports_forward_ad=False,
supports_inplace_autograd=False,
- decorators=(
+ skips=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9968,9 +9847,7 @@ foreach_binary_op_db: List[OpInfo] = [
supports_scalar_self_arg=True,
sample_inputs_func=foreach_inputs_sample_func(2, True, True),
supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -9980,30 +9857,27 @@ foreach_binary_op_db: List[OpInfo] = [
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"),
),
+ supports_forward_ad=True,
backward_requires_result=True,
),
ForeachFuncInfo(
"copy",
- sample_inputs_func=foreach_inputs_sample_func(2, False, False),
+ foreach_inputs_sample_func(2, False, False),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=False,
supports_autograd=False,
- supports_inplace_autograd=False,
)
]
foreach_pointwise_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"addcmul",
- sample_inputs_func=foreach_pointwise_sample_func(4, True, True),
+ foreach_pointwise_sample_func(4, True, True),
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"),
@@ -10024,10 +9898,7 @@ foreach_pointwise_op_db: List[ForeachFuncInfo] = [
sample_inputs_func=foreach_pointwise_sample_func(4, True, True),
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
# Samples have complex types and inplace only works if the dtype is complex.
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace",
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16)),
@@ -10049,13 +9920,10 @@ foreach_pointwise_op_db: List[ForeachFuncInfo] = [
foreach_reduce_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"norm",
- sample_inputs_func=foreach_norm_sample_func(1, False, False),
+ foreach_norm_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
- decorators=(
+ skips=(
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"),
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"),
@@ -10067,10 +9935,7 @@ foreach_reduce_op_db: List[ForeachFuncInfo] = [
foreach_other_op_db: List[ForeachFuncInfo] = [
ForeachFuncInfo(
"lerp",
- sample_inputs_func=foreach_lerp_sample_func(3, True, False),
- supports_autograd=True,
- supports_inplace_autograd=True,
- supports_forward_ad=True,
+ foreach_lerp_sample_func(3, True, False),
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
diff --git a/torch/testing/_internal/opinfo/core.py b/torch/testing/_internal/opinfo/core.py
index 2237da1c19..87a57f7678 100644
--- a/torch/testing/_internal/opinfo/core.py
+++ b/torch/testing/_internal/opinfo/core.py
@@ -2705,22 +2705,33 @@ def get_foreach_method_names(name):
return op, inplace_op, ref, ref_inplace
-@dataclass
class ForeachFuncInfo(OpInfo):
"""Early version of a specialized OpInfo for foreach functions"""
- supports_alpha_param: bool = False
- supports_scalar_self_arg: bool = False
- backward_requires_result: bool = False
-
- def __post_init__(self):
+ def __init__(
+ self,
+ name,
+ sample_inputs_func,
+ *,
+ dtypes=floating_and_complex_types(),
+ dtypesIfCUDA=None,
+ dtypesIfROCM=None,
+ supports_alpha_param=False,
+ supports_autograd=True,
+ supports_inplace_autograd=True,
+ supports_scalar_self_arg=False,
+ supports_forward_ad=True,
+ backward_requires_result=False,
+ supports_out=True,
+ **kwargs,
+ ):
(
foreach_method,
foreach_method_inplace,
torch_ref_method,
torch_ref_inplace,
- ) = get_foreach_method_names(self.name)
- if not self.supports_out:
+ ) = get_foreach_method_names(name)
+ if not supports_out:
# note(crcrpar): `foreach_method` for `"zero"` is `None` but `None` would call
# `_getattr_qual` in `OpInfo.__post_init__` which should fail since `_foreach_zero`
# is not defined at the moment. Thus to skip the qualification, set a similar torch
@@ -2729,16 +2740,29 @@ class ForeachFuncInfo(OpInfo):
assert torch_ref_method is None
foreach_method = foreach_method_inplace
torch_ref_method = torch_ref_inplace
+ super().__init__(
+ name="_foreach_" + name,
+ op=foreach_method,
+ ref=torch_ref_method,
+ method_variant=foreach_method,
+ inplace_variant=foreach_method_inplace,
+ dtypes=dtypes,
+ dtypesIfCUDA=dtypesIfCUDA,
+ dtypesIfROCM=dtypesIfROCM,
+ sample_inputs_func=sample_inputs_func,
+ supports_autograd=supports_autograd,
+ supports_forward_ad=supports_forward_ad,
+ supports_out=supports_out,
+ **kwargs,
+ )
+ self.supports_scalar_self_arg = supports_scalar_self_arg
- self.op = foreach_method
- self.method_variant = foreach_method
- self.ref = torch_ref_method
- self.inplace_variant = foreach_method_inplace
self.ref_inplace = torch_ref_inplace
+ self.supports_alpha_param = supports_alpha_param
+ self.backward_requires_result = backward_requires_result
self.has_no_in_place = self.inplace_variant is None
+ self.supports_inplace_autograd = supports_inplace_autograd
- name = self.name
- self.name = f"_foreach_{name}"
if name == "norm":
self.ref = torch.linalg.vector_norm
elif name == "minimum":
|
2.41.0
|
1a3fcfa479d6388e4b815ced7391d3f0bb696df
|
Tue, 30 Apr 2024 12:38:12 -0700
|
[PATCH 0855/1000] [pipelining] Add util and debug facilities (#124875)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124875 Approved by: https://github.com/H-Huang ghstack dependencies: #124776
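
The new `torch/distributed/pipelining/_debug.py` and `_utils.py` modules added here expose a `PIPPY_VERBOSITY` environment variable (read at import time) plus small helpers such as `map_debug_info`, `flatten_args`, and `flatten_args_detach`. A minimal usage sketch, with the module paths taken from this diff and the sample arguments purely illustrative:

```python
import os

# PIPPY_VERBOSITY is read when _debug.py is imported, so set it first.
# Accepted values: WARNING (default), INFO, DEBUG; anything else falls back to WARNING.
os.environ["PIPPY_VERBOSITY"] = "DEBUG"

import torch
from torch.distributed.pipelining._debug import map_debug_info
from torch.distributed.pipelining._utils import flatten_args, flatten_args_detach

args = (torch.randn(2, 3, requires_grad=True), {"mask": torch.ones(2, 3)}, 42)

# Tensors are rendered as "Tensor(shape, grad=...)" instead of dumping their values.
print(map_debug_info(args))

# flatten_args collects every leaf (tensor or not) into a flat list.
flat = flatten_args(args)
print(len(flat))  # 3 leaves

# flatten_args_detach also detaches the tensors (keeping requires_grad) and
# returns both the rebuilt structure and the flat detached list.
new_args, flat_detached = flatten_args_detach(args)
```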
|
diff --git a/torch/distributed/pipelining/_debug.py b/torch/distributed/pipelining/_debug.py
new file mode 100644
index 0000000000..90f232fdf1
--- /dev/null
+++ b/torch/distributed/pipelining/_debug.py
@@ -0,0 +1,39 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+import logging
+import os
+
+import torch
+
+
+# PIPPY_VERBOSITY is an environment variable that controls the logging level.
+# It can be set to one of the following:
+# - WARNING (default)
+# - INFO
+# - DEBUG
+PIPPY_VERBOSITY = os.getenv("PIPPY_VERBOSITY", "WARNING")
+if PIPPY_VERBOSITY not in ["WARNING", "INFO", "DEBUG"]:
+ print(f"Unsupported PIPPY_VERBOSITY level: {PIPPY_VERBOSITY}")
+ PIPPY_VERBOSITY = "WARNING"
+
+logging.getLogger("pippy").setLevel(PIPPY_VERBOSITY)
+# It seems we need to print something to make the level setting effective
+# for child loggers. Doing it here.
+print(f"Setting PiPPy logging level to: {PIPPY_VERBOSITY}")
+
+
+def friendly_debug_info(v):
+ """
+ Helper function to print out debug info in a friendly way.
+ """
+ if isinstance(v, torch.Tensor):
+ return f"Tensor({v.shape}, grad={v.requires_grad})"
+ else:
+ return str(v)
+
+
+def map_debug_info(a):
+ """
+ Helper function to apply `friendly_debug_info` to items in `a`.
+ `a` may be a list, tuple, or dict.
+ """
+ return torch.fx.node.map_aggregate(a, friendly_debug_info)
diff --git a/torch/distributed/pipelining/_utils.py b/torch/distributed/pipelining/_utils.py
new file mode 100644
index 0000000000..b1c8de4347
--- /dev/null
+++ b/torch/distributed/pipelining/_utils.py
@@ -0,0 +1,134 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+import logging
+from typing import Dict, Optional
+
+import torch
+from torch import fx
+from torch.export.unflatten import InterpreterModule
+
+
+logger = logging.getLogger(__name__)
+
+
+def flatten_args_detach(args):
+ """
+ Flatten the args into a list form and detach the tensors from computational graph.
+ """
+ flat_detached_args = []
+
+ def extract_tensor_args(a):
+ nonlocal flat_detached_args
+ if isinstance(a, torch.Tensor):
+ val = a.detach().requires_grad_(a.requires_grad)
+ flat_detached_args.append(val)
+ return val
+ else:
+ flat_detached_args.append(a)
+ return a
+
+ new_args = fx.node.map_aggregate(
+ args,
+ extract_tensor_args,
+ )
+
+ return new_args, flat_detached_args
+
+
+def flatten_args(args):
+ """
+ Flatten the args into a list form.
+ """
+ flat_args = []
+
+ def extract_tensor_args(a):
+ nonlocal flat_args
+ flat_args.append(a)
+ return a
+
+ fx.node.map_aggregate(
+ args,
+ extract_tensor_args,
+ )
+
+ return flat_args
+
+
+def modify_graph_op_device(
+ gm: torch.fx.GraphModule,
+ new_device: torch.device,
+):
+ """
+ Modify the device argument of all "call_function" nodes in the graph. This
+ is useful for moving the graph to a different device. In particular for
+ generator ops, like torch.ones.
+ """
+ modified = False
+ for node in gm.graph.nodes:
+ if node.op == "call_function":
+ if "device" in node.kwargs and node.kwargs["device"] != new_device:
+ logger.debug(
+ f"Changing device of Node {node.name} from {node.kwargs['device']} to {new_device}" # noqa: G004
+ )
+ node.update_kwarg("device", new_device)
+ modified = True
+ elif node.op == "call_module":
+ # Recursively modify "device" in submodules
+ submod = gm.get_submodule(node.target)
+ if isinstance(submod, torch.fx.GraphModule):
+ modify_graph_op_device(submod, new_device)
+ elif isinstance(submod, InterpreterModule):
+ # If unflattening has been performed, we need to access its graph module by `.graph_module`
+ modify_graph_op_device(submod.graph_module, new_device)
+ else:
+ logger.warning(
+ f"Skipping device modification for submodule {node.target} because it is a {type(submod)}" # noqa: G004
+ )
+
+ if modified:
+ gm.recompile()
+
+
+class QualnameMapMixin:
+ """
+ A mixin class that helps a `Pipe` object to remap its qualnames back to
+ original qualnames.
+ """
+
+ def __init__(
+ self,
+ splitter_qualname_map: Optional[Dict[str, str]] = None,
+ tracer_qualname_map: Optional[Dict[str, str]] = None,
+ ):
+ self.new_to_old_qualname_mapping: Dict[str, str] = splitter_qualname_map or {}
+ self.tracer_qualname_map = tracer_qualname_map
+
+ def remap_qualname(self, qualname: str):
+ # TODO: annoying
+ if qualname.startswith("split_gm."):
+ qualname = qualname[len("split_gm.") :]
+
+ name_before_split = None
+ if qualname in self.new_to_old_qualname_mapping:
+ name_before_split = self.new_to_old_qualname_mapping[qualname]
+ else:
+ # The qualname map does not store recursive items, thus,
+ # when passed a qualname with leaves, we need to perform longest prefix match
+ # Split from the right, one each time
+ split_names = qualname.rsplit(".", 1)
+ leaf = split_names[-1]
+ while len(split_names) > 1:
+ prefix = split_names[0]
+ if prefix in self.new_to_old_qualname_mapping:
+ old_prefix = self.new_to_old_qualname_mapping[prefix]
+ name_before_split = ".".join([old_prefix, leaf])
+ break
+ split_names = prefix.rsplit(".", 1)
+ leaf = ".".join([split_names[-1], leaf])
+
+ if name_before_split is None:
+ raise RuntimeError(f"Could not find mapping for {qualname}")
+
+ if self.tracer_qualname_map is not None:
+ return self.tracer_qualname_map[name_before_split]
+ else:
+ return name_before_split
|
2.41.0
|
a347fa6cef24cc0938b1d689f4e68c096877b81
|
Tue, 30 Apr 2024 20:00:37 +0000
|
[PATCH 0856/1000] Revert "Fix & optimze open device registration test. (#124712)"
|
This reverts commit f03cf9d4dc8ebe85552f450678988cac4e959da3. Reverted https://github.com/pytorch/pytorch/pull/124712 on behalf of https://github.com/kit1980 due to breaking internal builds ([comment](https://github.com/pytorch/pytorch/pull/124712#issuecomment-2086971499))
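
For reference, the reverted test goes back to describing the custom `foo` backend with a plain `DummyModule` class of staticmethods instead of a dynamically built module object. A minimal sketch of that registration pattern, assuming a stock PyTorch build (actually allocating tensors on the `foo` device additionally requires the C++ extension the test compiles, and both registration calls are one-shot per process):

```python
from typing import Union

import torch


# Stand-in device module for the renamed PrivateUse1 backend, mirroring the
# DummyModule restored by this revert.
class DummyModule:
    @staticmethod
    def device_count() -> int:
        return 1

    @staticmethod
    def get_rng_state(device: Union[int, str, torch.device] = "foo") -> torch.Tensor:
        # Needs the custom C++ extension loaded to actually allocate on "foo".
        return torch.empty(4, 4, device="foo")

    @staticmethod
    def set_rng_state(new_state: torch.Tensor, device: Union[int, str, torch.device] = "foo") -> None:
        pass

    @staticmethod
    def is_available():
        return True

    @staticmethod
    def current_device():
        return 0

    @staticmethod
    def is_initialized():
        return True


# Rename PrivateUse1 to "foo", expose DummyModule as torch.foo, and generate the
# is_foo()/foo() helpers on Tensor/Storage/Module. Each of these calls may only
# be made once per process, which is why the test orders its sub-tests carefully.
torch.utils.rename_privateuse1_backend("foo")
torch._register_device_module("foo", DummyModule)
torch.utils.generate_methods_for_privateuse1_backend(for_storage=True)
```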
|
diff --git a/test/test_cpp_extensions_open_device_registration.py b/test/test_cpp_extensions_open_device_registration.py
index d1134d8182..3511070ce3 100644
--- a/test/test_cpp_extensions_open_device_registration.py
+++ b/test/test_cpp_extensions_open_device_registration.py
@@ -4,7 +4,6 @@ import os
import shutil
import sys
import tempfile
-import types
import unittest
from typing import Union
@@ -12,7 +11,7 @@ import torch
import torch.testing._internal.common_utils as common
import torch.utils.cpp_extension
-from torch.testing._internal.common_utils import IS_ARM64, skipIfTorchDynamo, TEST_CUDA
+from torch.testing._internal.common_utils import IS_ARM64, TEST_CUDA
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
@@ -29,37 +28,33 @@ def remove_build_path():
shutil.rmtree(default_build_root, ignore_errors=True)
-def generate_faked_module():
+class DummyModule:
+ @staticmethod
def device_count() -> int:
return 1
+ @staticmethod
def get_rng_state(device: Union[int, str, torch.device] = "foo") -> torch.Tensor:
# create a tensor using our custom device object.
return torch.empty(4, 4, device="foo")
+ @staticmethod
def set_rng_state(
new_state: torch.Tensor, device: Union[int, str, torch.device] = "foo"
) -> None:
pass
+ @staticmethod
def is_available():
return True
+ @staticmethod
def current_device():
return 0
- # create a new module to fake torch.foo dynamicaly
- foo = types.ModuleType("foo")
-
- foo.device_count = device_count
- foo.get_rng_state = get_rng_state
- foo.set_rng_state = set_rng_state
- foo.is_available = is_available
- foo.current_device = current_device
- foo._lazy_init = lambda: None
- foo.is_initialized = lambda: True
-
- return foo
+ @staticmethod
+ def is_initialized():
+ return True
@unittest.skipIf(IS_ARM64, "Does not work on arm")
@@ -71,24 +66,20 @@ class TestCppExtensionOpenRgistration(common.TestCase):
def setUp(self):
super().setUp()
-
# cpp extensions use relative paths. Those paths are relative to
# this file, so we'll change the working directory temporarily
self.old_working_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
-
assert self.module is not None
def tearDown(self):
super().tearDown()
-
# return the working directory (see setUp)
os.chdir(self.old_working_dir)
@classmethod
def setUpClass(cls):
remove_build_path()
-
cls.module = torch.utils.cpp_extension.load(
name="custom_device_extension",
sources=[
@@ -99,475 +90,509 @@ class TestCppExtensionOpenRgistration(common.TestCase):
verbose=True,
)
- # register torch.foo module and foo device to torch
- torch.utils.rename_privateuse1_backend("foo")
- torch.utils.generate_methods_for_privateuse1_backend(for_storage=True)
- torch._register_device_module("foo", generate_faked_module())
-
- def test_base_device_registration(self):
- self.assertFalse(self.module.custom_add_called())
- # create a tensor using our custom device object
- device = self.module.custom_device()
- x = torch.empty(4, 4, device=device)
- y = torch.empty(4, 4, device=device)
- # Check that our device is correct.
- self.assertTrue(x.device == device)
- self.assertFalse(x.is_cpu)
- self.assertFalse(self.module.custom_add_called())
- # calls out custom add kernel, registered to the dispatcher
- z = x + y
- # check that it was called
- self.assertTrue(self.module.custom_add_called())
- z_cpu = z.to(device="cpu")
- # Check that our cross-device copy correctly copied the data to cpu
- self.assertTrue(z_cpu.is_cpu)
- self.assertFalse(z.is_cpu)
- self.assertTrue(z.device == device)
- self.assertEqual(z, z_cpu)
-
- def test_common_registration(self):
- # check unsupported device and duplicated registration
- with self.assertRaisesRegex(RuntimeError, "Expected one of cpu"):
- torch._register_device_module("dev", generate_faked_module())
- with self.assertRaisesRegex(RuntimeError, "The runtime module of"):
- torch._register_device_module("foo", generate_faked_module())
-
- # backend name can be renamed to the same name multiple times
- torch.utils.rename_privateuse1_backend("foo")
-
- # backend name can't be renamed multiple times to different names.
- with self.assertRaisesRegex(
- RuntimeError, "torch.register_privateuse1_backend()"
- ):
- torch.utils.rename_privateuse1_backend("dev")
-
- # generator tensor and module can be registered only once
- with self.assertRaisesRegex(RuntimeError, "The custom device module of"):
- torch.utils.generate_methods_for_privateuse1_backend()
-
- # check whether torch.foo have been registered correctly
- self.assertTrue(
- torch.utils.backend_registration._get_custom_mod_func("device_count")() == 1
- )
- with self.assertRaisesRegex(RuntimeError, "Try to call torch.foo"):
- torch.utils.backend_registration._get_custom_mod_func("func_name_")
-
- # check attributes after registered
- self.assertTrue(hasattr(torch.Tensor, "is_foo"))
- self.assertTrue(hasattr(torch.Tensor, "foo"))
- self.assertTrue(hasattr(torch.TypedStorage, "is_foo"))
- self.assertTrue(hasattr(torch.TypedStorage, "foo"))
- self.assertTrue(hasattr(torch.UntypedStorage, "is_foo"))
- self.assertTrue(hasattr(torch.UntypedStorage, "foo"))
- self.assertTrue(hasattr(torch.nn.Module, "foo"))
- self.assertTrue(hasattr(torch.nn.utils.rnn.PackedSequence, "is_foo"))
- self.assertTrue(hasattr(torch.nn.utils.rnn.PackedSequence, "foo"))
-
- def test_open_device_generator_registration_and_hooks(self):
- device = self.module.custom_device()
- # None of our CPU operations should call the custom add function.
- self.assertFalse(self.module.custom_add_called())
-
- # check generator registered before using
- with self.assertRaisesRegex(
- RuntimeError,
- "Please register a generator to the PrivateUse1 dispatch key",
- ):
- torch.Generator(device=device)
-
- self.module.register_generator_first()
- gen = torch.Generator(device=device)
- self.assertTrue(gen.device == device)
-
- # generator can be registered only once
- with self.assertRaisesRegex(
- RuntimeError,
- "Only can register a generator to the PrivateUse1 dispatch key once",
- ):
- self.module.register_generator_second()
-
- self.module.register_hook()
- default_gen = self.module.default_generator(0)
- self.assertTrue(
- default_gen.device.type == torch._C._get_privateuse1_backend_name()
- )
+ @classmethod
+ def tearDownClass(cls):
+ remove_build_path()
- def test_open_device_dispatchstub(self):
- # test kernels could be reused by privateuse1 backend through dispatchstub
- input_data = torch.randn(2, 2, 3, dtype=torch.float32, device="cpu")
- foo_input_data = input_data.to("foo")
- output_data = torch.abs(input_data)
- foo_output_data = torch.abs(foo_input_data)
- self.assertEqual(output_data, foo_output_data.cpu())
-
- output_data = torch.randn(2, 2, 6, dtype=torch.float32, device="cpu")
- # output operand will resize flag is True in TensorIterator.
- foo_input_data = input_data.to("foo")
- foo_output_data = output_data.to("foo")
- # output operand will resize flag is False in TensorIterator.
- torch.abs(input_data, out=output_data[:, :, 0:6:2])
- torch.abs(foo_input_data, out=foo_output_data[:, :, 0:6:2])
- self.assertEqual(output_data, foo_output_data.cpu())
-
- # output operand will resize flag is True in TensorIterator.
- # and convert output to contiguous tensor in TensorIterator.
- output_data = torch.randn(2, 2, 6, dtype=torch.float32, device="cpu")
- foo_input_data = input_data.to("foo")
- foo_output_data = output_data.to("foo")
- torch.abs(input_data, out=output_data[:, :, 0:6:3])
- torch.abs(foo_input_data, out=foo_output_data[:, :, 0:6:3])
- self.assertEqual(output_data, foo_output_data.cpu())
-
- def test_open_device_quantized(self):
- input_data = torch.randn(3, 4, 5, dtype=torch.float32, device="cpu").to("foo")
- quantized_tensor = torch.quantize_per_tensor(input_data, 0.1, 10, torch.qint8)
- self.assertEqual(quantized_tensor.device, torch.device("foo:0"))
- self.assertEqual(quantized_tensor.dtype, torch.qint8)
-
- def test_open_device_random(self):
- # check if torch.foo have implemented get_rng_state
- with torch.random.fork_rng(device_type="foo"):
- pass
-
- def test_open_device_tensor(self):
- device = self.module.custom_device()
-
- # check whether print tensor.type() meets the expectation
- dtypes = {
- torch.bool: "torch.foo.BoolTensor",
- torch.double: "torch.foo.DoubleTensor",
- torch.float32: "torch.foo.FloatTensor",
- torch.half: "torch.foo.HalfTensor",
- torch.int32: "torch.foo.IntTensor",
- torch.int64: "torch.foo.LongTensor",
- torch.int8: "torch.foo.CharTensor",
- torch.short: "torch.foo.ShortTensor",
- torch.uint8: "torch.foo.ByteTensor",
- }
- for tt, dt in dtypes.items():
- test_tensor = torch.empty(4, 4, dtype=tt, device=device)
- self.assertTrue(test_tensor.type() == dt)
-
- # check whether the attributes and methods of the corresponding custom backend are generated correctly
- x = torch.empty(4, 4)
- self.assertFalse(x.is_foo)
-
- x = x.foo(torch.device("foo"))
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(x.is_foo)
-
- # test different device type input
- y = torch.empty(4, 4)
- self.assertFalse(y.is_foo)
-
- y = y.foo(torch.device("foo:0"))
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(y.is_foo)
-
- # test different device type input
- z = torch.empty(4, 4)
- self.assertFalse(z.is_foo)
-
- z = z.foo(0)
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(z.is_foo)
-
- def test_open_device_packed_sequence(self):
- device = self.module.custom_device()
- a = torch.rand(5, 3)
- b = torch.tensor([1, 1, 1, 1, 1])
- input = torch.nn.utils.rnn.PackedSequence(a, b)
- self.assertFalse(input.is_foo)
- input_foo = input.foo()
- self.assertTrue(input_foo.is_foo)
-
- def test_open_device_storage(self):
- # check whether the attributes and methods for storage of the corresponding custom backend are generated correctly
- x = torch.empty(4, 4)
- z1 = x.storage()
- self.assertFalse(z1.is_foo)
-
- z1 = z1.foo()
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(z1.is_foo)
-
- with self.assertRaisesRegex(RuntimeError, "Invalid device"):
- z1.foo(torch.device("cpu"))
-
- z1 = z1.cpu()
- self.assertFalse(self.module.custom_add_called())
- self.assertFalse(z1.is_foo)
-
- z1 = z1.foo(device="foo:0", non_blocking=False)
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(z1.is_foo)
-
- with self.assertRaisesRegex(RuntimeError, "Invalid device"):
- z1.foo(device="cuda:0", non_blocking=False)
-
- # check UntypedStorage
- y = torch.empty(4, 4)
- z2 = y.untyped_storage()
- self.assertFalse(z2.is_foo)
-
- z2 = z2.foo()
- self.assertFalse(self.module.custom_add_called())
- self.assertTrue(z2.is_foo)
-
- # check custom StorageImpl create
- self.module.custom_storage_registry()
-
- z3 = y.untyped_storage()
- self.assertFalse(self.module.custom_storageImpl_called())
-
- z3 = z3.foo()
- self.assertTrue(self.module.custom_storageImpl_called())
- self.assertFalse(self.module.custom_storageImpl_called())
-
- z3 = z3[0:3]
- self.assertTrue(self.module.custom_storageImpl_called())
-
- @skipIfTorchDynamo("unsupported aten.is_pinned.default")
- def test_open_device_storage_pin_memory(self):
- # Check if the pin_memory is functioning properly on custom device
- cpu_tensor = torch.empty(3)
- self.assertFalse(cpu_tensor.is_foo)
- self.assertFalse(cpu_tensor.is_pinned("foo"))
-
- cpu_tensor_pin = cpu_tensor.pin_memory("foo")
- self.assertTrue(cpu_tensor_pin.is_pinned("foo"))
-
- # Test storage pin_memory on custom device string
- cpu_storage = cpu_tensor.storage()
- foo_device = torch.device("foo")
- self.assertFalse(cpu_storage.is_pinned("foo"))
-
- cpu_storage_pin = cpu_storage.pin_memory("foo")
- self.assertFalse(cpu_storage.is_pinned())
- self.assertFalse(cpu_storage.is_pinned("foo"))
- self.assertFalse(cpu_storage.is_pinned(foo_device))
- self.assertFalse(cpu_storage_pin.is_pinned())
- self.assertTrue(cpu_storage_pin.is_pinned("foo"))
- self.assertTrue(cpu_storage_pin.is_pinned(foo_device))
-
- cpu_storage_pin_already = cpu_storage_pin.pin_memory("foo")
- self.assertTrue(cpu_storage_pin.is_pinned("foo"))
- self.assertTrue(cpu_storage_pin.is_pinned(foo_device))
- self.assertTrue(cpu_storage_pin_already.is_pinned("foo"))
- self.assertTrue(cpu_storage_pin_already.is_pinned(foo_device))
- self.assertFalse(cpu_storage.is_pinned("foo"))
-
- cpu_storage_pinned = cpu_storage.pin_memory(foo_device)
- self.assertFalse(cpu_storage.is_pinned())
- self.assertFalse(cpu_storage.is_pinned("foo"))
- self.assertFalse(cpu_storage.is_pinned(foo_device))
- self.assertFalse(cpu_storage_pinned.is_pinned())
- self.assertTrue(cpu_storage_pinned.is_pinned("foo"))
- self.assertTrue(cpu_storage_pinned.is_pinned(foo_device))
-
- # Test untyped storage pin_memory and is_pin
- cpu_tensor = torch.randn([3, 2, 1, 4])
- cpu_untyped_storage = cpu_tensor.untyped_storage()
- self.assertFalse(cpu_untyped_storage.is_pinned())
- self.assertFalse(cpu_untyped_storage.is_pinned("foo"))
-
- cpu_untyped_storage_pinned = cpu_untyped_storage.pin_memory("foo")
- self.assertFalse(cpu_untyped_storage_pinned.is_pinned())
- self.assertTrue(cpu_untyped_storage_pinned.is_pinned("foo"))
- self.assertTrue(cpu_untyped_storage_pinned.is_pinned(foo_device))
-
- cpu_untyped_storage_pinned = cpu_untyped_storage.pin_memory(foo_device)
- self.assertFalse(cpu_untyped_storage_pinned.is_pinned())
- self.assertTrue(cpu_untyped_storage_pinned.is_pinned("foo"))
- self.assertTrue(cpu_untyped_storage_pinned.is_pinned(foo_device))
-
- with self.assertRaisesRegex(TypeError, "positional arguments but 3 were given"):
- cpu_untyped_storage_pinned.is_pinned("foo1", "foo2")
-
- # Test storage pin_memory on error device
- self.assertFalse(cpu_storage_pinned.is_pinned("hpu"))
- self.assertFalse(cpu_untyped_storage_pinned.is_pinned("hpu"))
- invalid_device = torch.device("hpu")
- self.assertFalse(cpu_untyped_storage_pinned.is_pinned(invalid_device))
-
- with self.assertRaisesRegex(
- NotImplementedError, "with arguments from the 'HPU' backend"
- ):
- cpu_storage.pin_memory("hpu")
- with self.assertRaisesRegex(
- NotImplementedError, "with arguments from the 'HPU' backend"
- ):
- cpu_untyped_storage.pin_memory("hpu")
- with self.assertRaisesRegex(
- NotImplementedError, "with arguments from the 'HPU' backend"
- ):
- cpu_untyped_storage.pin_memory(invalid_device)
-
- def test_open_device_serialization(self):
- self.module.set_custom_device_index(-1)
- storage = torch.UntypedStorage(4, device=torch.device("foo"))
- self.assertEqual(torch.serialization.location_tag(storage), "foo")
-
- self.module.set_custom_device_index(0)
- storage = torch.UntypedStorage(4, device=torch.device("foo"))
- self.assertEqual(torch.serialization.location_tag(storage), "foo:0")
-
- cpu_storage = torch.empty(4, 4).storage()
- foo_storage = torch.serialization.default_restore_location(cpu_storage, "foo:0")
- self.assertTrue(foo_storage.is_foo)
-
- # test tensor MetaData serialization
- x = torch.empty(4, 4).long()
- y = x.foo()
- self.assertFalse(self.module.check_backend_meta(y))
- self.module.custom_set_backend_meta(y)
- self.assertTrue(self.module.check_backend_meta(y))
-
- self.module.custom_serialization_registry()
- with tempfile.TemporaryDirectory() as tmpdir:
- path = os.path.join(tmpdir, "data.pt")
- torch.save(y, path)
- z1 = torch.load(path)
- # loads correctly onto the foo backend device
+ def test_open_device_registration(self):
+ def test_base_device_registration():
+ torch.utils.rename_privateuse1_backend("foo")
+ self.assertFalse(self.module.custom_add_called())
+ # create a tensor using our custom device object
+ device = self.module.custom_device()
+ # register foo module, torch.foo. This is for lazy
+ # init check.
+ torch._register_device_module("foo", DummyModule)
+ x = torch.empty(4, 4, device=device)
+ y = torch.empty(4, 4, device=device)
+ # Check that our device is correct.
+ self.assertTrue(x.device == device)
+ self.assertFalse(x.is_cpu)
+ self.assertFalse(self.module.custom_add_called())
+ # calls out custom add kernel, registered to the dispatcher
+ z = x + y
+ # check that it was called
+ self.assertTrue(self.module.custom_add_called())
+ z_cpu = z.to(device="cpu")
+ # Check that our cross-device copy correctly copied the data to cpu
+ self.assertTrue(z_cpu.is_cpu)
+ self.assertFalse(z.is_cpu)
+ self.assertTrue(z.device == device)
+ self.assertEqual(z, z_cpu)
+ z2 = z_cpu + z_cpu
+ del torch.foo
+
+ # check whether the error can be reported correctly
+ def test_before_common_registration():
+ # check that register module name should be the same as custom backend
+ with self.assertRaisesRegex(RuntimeError, "Expected one of cpu"):
+ torch._register_device_module("xxx", DummyModule)
+ # check generator registered before using
+ torch.utils.rename_privateuse1_backend("foo")
+ with self.assertRaisesRegex(RuntimeError, "torch has no module of"):
+ with torch.random.fork_rng(device_type="foo"):
+ pass
+ # check attributes before registered
+ self.assertFalse(hasattr(torch.Tensor, "is_foo"))
+ self.assertFalse(hasattr(torch.Tensor, "foo"))
+ self.assertFalse(hasattr(torch.TypedStorage, "is_foo"))
+ self.assertFalse(hasattr(torch.TypedStorage, "foo"))
+ self.assertFalse(hasattr(torch.UntypedStorage, "is_foo"))
+ self.assertFalse(hasattr(torch.UntypedStorage, "foo"))
+ self.assertFalse(hasattr(torch.nn.Module, "foo"))
+ self.assertFalse(hasattr(torch.nn.utils.rnn.PackedSequence, "is_foo"))
+ self.assertFalse(hasattr(torch.nn.utils.rnn.PackedSequence, "foo"))
+
+ def test_after_common_registration():
+ # check attributes after registered
+ self.assertTrue(hasattr(torch.Tensor, "is_foo"))
+ self.assertTrue(hasattr(torch.Tensor, "foo"))
+ self.assertTrue(hasattr(torch.TypedStorage, "is_foo"))
+ self.assertTrue(hasattr(torch.TypedStorage, "foo"))
+ self.assertTrue(hasattr(torch.UntypedStorage, "is_foo"))
+ self.assertTrue(hasattr(torch.UntypedStorage, "foo"))
+ self.assertTrue(hasattr(torch.nn.Module, "foo"))
+ self.assertTrue(hasattr(torch.nn.utils.rnn.PackedSequence, "is_foo"))
+ self.assertTrue(hasattr(torch.nn.utils.rnn.PackedSequence, "foo"))
+
+ def test_common_registration():
+ # first rename custom backend
+ torch.utils.rename_privateuse1_backend("foo")
+ # backend name can only rename once
+ with self.assertRaisesRegex(
+ RuntimeError, "torch.register_privateuse1_backend()"
+ ):
+ torch.utils.rename_privateuse1_backend("xxx")
+ # register foo module, torch.foo
+ torch._register_device_module("foo", DummyModule)
+ self.assertTrue(
+ torch.utils.backend_registration._get_custom_mod_func("device_count")()
+ == 1
+ )
+ with self.assertRaisesRegex(RuntimeError, "Try to call torch.foo"):
+ torch.utils.backend_registration._get_custom_mod_func("func_name_")
+ # default set for_tensor and for_module are True, so only set for_storage is True
+ torch.utils.generate_methods_for_privateuse1_backend(for_storage=True)
+ # generator tensor and module can be registered only once
+ with self.assertRaisesRegex(RuntimeError, "The custom device module of"):
+ torch.utils.generate_methods_for_privateuse1_backend()
+
+ def test_open_device_generator_registration_and_hooks():
+ device = self.module.custom_device()
+ # None of our CPU operations should call the custom add function.
+ self.assertFalse(self.module.custom_add_called())
+ # check generator registered before using
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Please register a generator to the PrivateUse1 dispatch key",
+ ):
+ gen_ = torch.Generator(device=device)
+ self.module.register_generator_first()
+ gen = torch.Generator(device=device)
+ self.assertTrue(gen.device == device)
+ # generator can be registered only once
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Only can register a generator to the PrivateUse1 dispatch key once",
+ ):
+ self.module.register_generator_second()
+ self.module.register_hook()
+ default_gen = self.module.default_generator(0)
+ self.assertTrue(
+ default_gen.device.type == torch._C._get_privateuse1_backend_name()
+ )
+
+ def test_open_device_dispatchstub():
+ # test kernels could be reused by privateuse1 backend through dispatchstub
+ torch.utils.rename_privateuse1_backend("foo")
+ input_data = torch.randn(2, 2, 3, dtype=torch.float32, device="cpu")
+ foo_input_data = input_data.to("foo")
+ output_data = torch.abs(input_data)
+ foo_output_data = torch.abs(foo_input_data)
+ self.assertEqual(output_data, foo_output_data.cpu())
+ output_data = torch.randn(2, 2, 6, dtype=torch.float32, device="cpu")
+ # output operand will resize flag is True in TensorIterator.
+ foo_input_data = input_data.to("foo")
+ foo_output_data = output_data.to("foo")
+ # output operand will resize flag is False in TensorIterator.
+ torch.abs(input_data, out=output_data[:, :, 0:6:2])
+ torch.abs(foo_input_data, out=foo_output_data[:, :, 0:6:2])
+ self.assertEqual(output_data, foo_output_data.cpu())
+ # output operand will resize flag is True in TensorIterator.
+ # and convert output to contiguous tensor in TensorIterator.
+ output_data = torch.randn(2, 2, 6, dtype=torch.float32, device="cpu")
+ foo_input_data = input_data.to("foo")
+ foo_output_data = output_data.to("foo")
+ torch.abs(input_data, out=output_data[:, :, 0:6:3])
+ torch.abs(foo_input_data, out=foo_output_data[:, :, 0:6:3])
+ self.assertEqual(output_data, foo_output_data.cpu())
+
+ def test_open_device_quantized():
+ torch.utils.rename_privateuse1_backend("foo")
+ input_data = torch.randn(3, 4, 5, dtype=torch.float32, device="cpu").to(
+ "foo"
+ )
+ quantized_tensor = torch.quantize_per_tensor(
+ input_data, 0.1, 10, torch.qint8
+ )
+ self.assertEqual(quantized_tensor.device, torch.device("foo:0"))
+ self.assertEqual(quantized_tensor.dtype, torch.qint8)
+
+ def test_open_device_random():
+ with torch.random.fork_rng(device_type="foo"):
+ pass
+
+ def test_open_device_tensor():
+ device = self.module.custom_device()
+ # check whether print tensor.type() meets the expectation
+ dtypes = {
+ torch.bool: "torch.foo.BoolTensor",
+ torch.double: "torch.foo.DoubleTensor",
+ torch.float32: "torch.foo.FloatTensor",
+ torch.half: "torch.foo.HalfTensor",
+ torch.int32: "torch.foo.IntTensor",
+ torch.int64: "torch.foo.LongTensor",
+ torch.int8: "torch.foo.CharTensor",
+ torch.short: "torch.foo.ShortTensor",
+ torch.uint8: "torch.foo.ByteTensor",
+ }
+ for tt, dt in dtypes.items():
+ test_tensor = torch.empty(4, 4, dtype=tt, device=device)
+ self.assertTrue(test_tensor.type() == dt)
+ # check whether the attributes and methods of the corresponding custom backend are generated correctly
+ x = torch.empty(4, 4)
+ self.assertFalse(x.is_foo)
+ x = x.foo(torch.device("foo"))
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(x.is_foo)
+ # test different device type input
+ y = torch.empty(4, 4)
+ self.assertFalse(y.is_foo)
+ y = y.foo(torch.device("foo:0"))
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(y.is_foo)
+ # test different device type input
+ z = torch.empty(4, 4)
+ self.assertFalse(z.is_foo)
+ z = z.foo(0)
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(z.is_foo)
+
+ def test_open_device_packed_sequence():
+ device = self.module.custom_device()
+ a = torch.rand(5, 3)
+ b = torch.tensor([1, 1, 1, 1, 1])
+ input = torch.nn.utils.rnn.PackedSequence(a, b)
+ self.assertFalse(input.is_foo)
+ input_foo = input.foo()
+ self.assertTrue(input_foo.is_foo)
+
+ def test_open_device_storage():
+ # check whether the attributes and methods for storage of the corresponding custom backend are generated correctly
+ x = torch.empty(4, 4)
+ z1 = x.storage()
+ self.assertFalse(z1.is_foo)
+ z1 = z1.foo()
+ self.assertFalse(self.module.custom_add_called())
self.assertTrue(z1.is_foo)
- # loads BackendMeta data correctly
- self.assertTrue(self.module.check_backend_meta(z1))
-
- # cross-backend
- z2 = torch.load(path, map_location="cpu")
- # loads correctly onto the cpu backend device
+ with self.assertRaisesRegex(RuntimeError, "Invalid device"):
+ z1.foo(torch.device("cpu"))
+ z1 = z1.cpu()
+ self.assertFalse(self.module.custom_add_called())
+ self.assertFalse(z1.is_foo)
+ z1 = z1.foo(device="foo:0", non_blocking=False)
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(z1.is_foo)
+ with self.assertRaisesRegex(RuntimeError, "Invalid device"):
+ z1.foo(device="cuda:0", non_blocking=False)
+ # check UntypedStorage
+ y = torch.empty(4, 4)
+ z2 = y.untyped_storage()
self.assertFalse(z2.is_foo)
- # loads BackendMeta data correctly
- self.assertFalse(self.module.check_backend_meta(z2))
-
- def test_open_device_storage_resize(self):
- cpu_tensor = torch.randn([8])
- foo_tensor = cpu_tensor.foo()
- foo_storage = foo_tensor.storage()
- self.assertTrue(foo_storage.size() == 8)
-
- # Only register tensor resize_ function.
- foo_tensor.resize_(8)
- self.assertTrue(foo_storage.size() == 8)
-
- with self.assertRaisesRegex(TypeError, "Overflow"):
- foo_tensor.resize_(8**29)
-
- def test_open_device_storage_type(self):
- # test cpu float storage
- cpu_tensor = torch.randn([8]).float()
- cpu_storage = cpu_tensor.storage()
- self.assertEqual(cpu_storage.type(), "torch.FloatStorage")
-
- # test custom float storage before defining FloatStorage
- foo_tensor = cpu_tensor.foo()
- foo_storage = foo_tensor.storage()
- self.assertEqual(foo_storage.type(), "torch.storage.TypedStorage")
-
- class CustomFloatStorage:
- @property
- def __module__(self):
- return "torch." + torch._C._get_privateuse1_backend_name()
-
- @property
- def __name__(self):
- return "FloatStorage"
-
- # test custom float storage after defining FloatStorage
- try:
- torch.foo.FloatStorage = CustomFloatStorage()
- self.assertEqual(foo_storage.type(), "torch.foo.FloatStorage")
-
- # test custom int storage after defining FloatStorage
- foo_tensor2 = torch.randn([8]).int().foo()
- foo_storage2 = foo_tensor2.storage()
- self.assertEqual(foo_storage2.type(), "torch.storage.TypedStorage")
- finally:
- torch.foo.FloatStorage = None
-
- def test_open_device_faketensor(self):
- with torch._subclasses.fake_tensor.FakeTensorMode.push():
- a = torch.empty(1, device="foo")
- b = torch.empty(1, device="foo:0")
- result = a + b
-
- def test_open_device_named_tensor(self):
- torch.empty([2, 3, 4, 5], device="foo", names=["N", "C", "H", "W"])
-
- # Not an open registration test - this file is just very convenient
- # for testing torch.compile on custom C++ operators
- def test_compile_autograd_function_returns_self(self):
- x_ref = torch.randn(4, requires_grad=True)
- out_ref = self.module.custom_autograd_fn_returns_self(x_ref)
- out_ref.sum().backward()
-
- x_test = x_ref.clone().detach().requires_grad_(True)
- f_compiled = torch.compile(self.module.custom_autograd_fn_returns_self)
- out_test = f_compiled(x_test)
- out_test.sum().backward()
-
- self.assertEqual(out_ref, out_test)
- self.assertEqual(x_ref.grad, x_test.grad)
-
- # Not an open registration test - this file is just very convenient
- # for testing torch.compile on custom C++ operators
- @skipIfTorchDynamo("Temporary disabled due to torch._ops.OpOverloadPacket")
- def test_compile_autograd_function_aliasing(self):
- x_ref = torch.randn(4, requires_grad=True)
- out_ref = torch.ops._test_funcs.custom_autograd_fn_aliasing(x_ref)
- out_ref.sum().backward()
-
- x_test = x_ref.clone().detach().requires_grad_(True)
- f_compiled = torch.compile(torch.ops._test_funcs.custom_autograd_fn_aliasing)
- out_test = f_compiled(x_test)
- out_test.sum().backward()
-
- self.assertEqual(out_ref, out_test)
- self.assertEqual(x_ref.grad, x_test.grad)
-
- def test_open_device_scalar_type_fallback(self):
- z_cpu = torch.Tensor([[0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]).to(torch.int64)
- z = torch.triu_indices(3, 3, device="foo")
- self.assertEqual(z_cpu, z)
-
- def test_open_device_tensor_type_fallback(self):
- # create tensors located in custom device
- x = torch.Tensor([[1, 2, 3], [2, 3, 4]]).to("foo")
- y = torch.Tensor([1, 0, 2]).to("foo")
- # create result tensor located in cpu
- z_cpu = torch.Tensor([[0, 2, 1], [1, 3, 2]])
- # Check that our device is correct.
- device = self.module.custom_device()
- self.assertTrue(x.device == device)
- self.assertFalse(x.is_cpu)
-
- # call sub op, which will fallback to cpu
- z = torch.sub(x, y)
- self.assertEqual(z_cpu, z)
-
- # call index op, which will fallback to cpu
- z_cpu = torch.Tensor([3, 1])
- y = torch.Tensor([1, 0]).long().to("foo")
- z = x[y, y]
- self.assertEqual(z_cpu, z)
-
- def test_open_device_tensorlist_type_fallback(self):
- # create tensors located in custom device
- v_foo = torch.Tensor([1, 2, 3]).to("foo")
- # create result tensor located in cpu
- z_cpu = torch.Tensor([2, 4, 6])
- # create tensorlist for foreach_add op
- x = (v_foo, v_foo)
- y = (v_foo, v_foo)
- # Check that our device is correct.
- device = self.module.custom_device()
- self.assertTrue(v_foo.device == device)
- self.assertFalse(v_foo.is_cpu)
-
- # call _foreach_add op, which will fallback to cpu
- z = torch._foreach_add(x, y)
- self.assertEqual(z_cpu, z[0])
- self.assertEqual(z_cpu, z[1])
+ z2 = z2.foo()
+ self.assertFalse(self.module.custom_add_called())
+ self.assertTrue(z2.is_foo)
+ # check custom StorageImpl create
+ self.module.custom_storage_registry()
+ z3 = y.untyped_storage()
+ self.assertFalse(self.module.custom_storageImpl_called())
+ z3 = z3.foo()
+ self.assertTrue(self.module.custom_storageImpl_called())
+ self.assertFalse(self.module.custom_storageImpl_called())
+ z3 = z3[0:3]
+ self.assertTrue(self.module.custom_storageImpl_called())
+
+ def test_open_device_storage_pin_memory():
+ torch.utils.rename_privateuse1_backend("foo")
+ with self.assertRaisesRegex(RuntimeError, "The custom device module of"):
+ torch.utils.generate_methods_for_privateuse1_backend(
+ for_tensor=False, for_module=False, for_storage=True
+ )
+ # Check if the pin_memory is functioning properly on custom device
+ cpu_tensor = torch.empty(3)
+ self.assertFalse(cpu_tensor.is_foo)
+ self.assertFalse(cpu_tensor.is_pinned("foo"))
+ cpu_tensor_pin = cpu_tensor.pin_memory("foo")
+ self.assertTrue(cpu_tensor_pin.is_pinned("foo"))
+ # Test storage pin_memory on custom device string
+ cpu_storage = cpu_tensor.storage()
+ foo_device = torch.device("foo")
+ self.assertFalse(cpu_storage.is_pinned("foo"))
+ cpu_storage_pin = cpu_storage.pin_memory("foo")
+ self.assertFalse(cpu_storage.is_pinned())
+ self.assertFalse(cpu_storage.is_pinned("foo"))
+ self.assertFalse(cpu_storage.is_pinned(foo_device))
+ self.assertFalse(cpu_storage_pin.is_pinned())
+ self.assertTrue(cpu_storage_pin.is_pinned("foo"))
+ self.assertTrue(cpu_storage_pin.is_pinned(foo_device))
+ cpu_storage_pin_already = cpu_storage_pin.pin_memory("foo")
+ self.assertTrue(cpu_storage_pin.is_pinned("foo"))
+ self.assertTrue(cpu_storage_pin.is_pinned(foo_device))
+ self.assertTrue(cpu_storage_pin_already.is_pinned("foo"))
+ self.assertTrue(cpu_storage_pin_already.is_pinned(foo_device))
+
+ # Test storage pin_memory on torch.device
+ self.assertFalse(cpu_storage.is_pinned("foo"))
+ cpu_storage_pinned = cpu_storage.pin_memory(foo_device)
+ self.assertFalse(cpu_storage.is_pinned())
+ self.assertFalse(cpu_storage.is_pinned("foo"))
+ self.assertFalse(cpu_storage.is_pinned(foo_device))
+ self.assertFalse(cpu_storage_pinned.is_pinned())
+ self.assertTrue(cpu_storage_pinned.is_pinned("foo"))
+ self.assertTrue(cpu_storage_pinned.is_pinned(foo_device))
+
+ # Test untyped storage pin_memory and is_pin
+ cpu_tensor = torch.randn([3, 2, 1, 4])
+ cpu_untyped_storage = cpu_tensor.untyped_storage()
+ self.assertFalse(cpu_untyped_storage.is_pinned())
+ self.assertFalse(cpu_untyped_storage.is_pinned("foo"))
+ cpu_untyped_storage_pinned = cpu_untyped_storage.pin_memory("foo")
+ self.assertFalse(cpu_untyped_storage_pinned.is_pinned())
+ self.assertTrue(cpu_untyped_storage_pinned.is_pinned("foo"))
+ self.assertTrue(cpu_untyped_storage_pinned.is_pinned(foo_device))
+ cpu_untyped_storage_pinned = cpu_untyped_storage.pin_memory(foo_device)
+ self.assertFalse(cpu_untyped_storage_pinned.is_pinned())
+ self.assertTrue(cpu_untyped_storage_pinned.is_pinned("foo"))
+ self.assertTrue(cpu_untyped_storage_pinned.is_pinned(foo_device))
+ with self.assertRaisesRegex(
+ TypeError, "positional arguments but 3 were given"
+ ):
+ cpu_untyped_storage_pinned.is_pinned("foo1", "foo2")
+
+ # Test storage pin_memory on error device
+ self.assertFalse(cpu_storage_pinned.is_pinned("hpu"))
+ with self.assertRaisesRegex(
+ NotImplementedError, "with arguments from the 'HPU' backend"
+ ):
+ cpu_storage.pin_memory("hpu")
+ self.assertFalse(cpu_untyped_storage_pinned.is_pinned("hpu"))
+ with self.assertRaisesRegex(
+ NotImplementedError, "with arguments from the 'HPU' backend"
+ ):
+ cpu_untyped_storage.pin_memory("hpu")
+ invalid_device = torch.device("hpu")
+ self.assertFalse(cpu_untyped_storage_pinned.is_pinned(invalid_device))
+ with self.assertRaisesRegex(
+ NotImplementedError, "with arguments from the 'HPU' backend"
+ ):
+ cpu_untyped_storage.pin_memory(invalid_device)
+
+ def test_open_device_serialization():
+ self.module.set_custom_device_index(-1)
+ storage = torch.UntypedStorage(4, device=torch.device("foo"))
+ self.assertEqual(torch.serialization.location_tag(storage), "foo")
+
+ self.module.set_custom_device_index(0)
+ storage = torch.UntypedStorage(4, device=torch.device("foo"))
+ self.assertEqual(torch.serialization.location_tag(storage), "foo:0")
+
+ cpu_storage = torch.empty(4, 4).storage()
+ foo_storage = torch.serialization.default_restore_location(
+ cpu_storage, "foo:0"
+ )
+ self.assertTrue(foo_storage.is_foo)
+ # test tensor MetaData serialization
+ x = torch.empty(4, 4).long()
+ y = x.foo()
+ self.assertFalse(self.module.check_backend_meta(y))
+ self.module.custom_set_backend_meta(y)
+ self.assertTrue(self.module.check_backend_meta(y))
+
+ self.module.custom_serialization_registry()
+ with tempfile.TemporaryDirectory() as tmpdir:
+ path = os.path.join(tmpdir, "data.pt")
+ torch.save(y, path)
+ z1 = torch.load(path)
+ # loads correctly onto the foo backend device
+ self.assertTrue(z1.is_foo)
+ # loads BackendMeta data correctly
+ self.assertTrue(self.module.check_backend_meta(z1))
+ # cross-backend
+ z2 = torch.load(path, map_location="cpu")
+ # loads correctly onto the cpu backend device
+ self.assertFalse(z2.is_foo)
+ # loads BackendMeta data correctly
+ self.assertFalse(self.module.check_backend_meta(z2))
+
+ def test_open_device_storage_resize():
+ torch.utils.rename_privateuse1_backend("foo")
+ cpu_tensor = torch.randn([8])
+ foo_tensor = cpu_tensor.foo()
+ foo_storage = foo_tensor.storage()
+ self.assertTrue(foo_storage.size() == 8)
+ # Only register tensor resize_ function.
+ foo_tensor.resize_(8)
+ self.assertTrue(foo_storage.size() == 8)
+ with self.assertRaisesRegex(TypeError, "Overflow"):
+ foo_tensor.resize_(8**29)
+
+ def test_open_device_storage_type():
+ torch.utils.rename_privateuse1_backend("foo")
+ # test cpu float storage
+ cpu_tensor = torch.randn([8]).float()
+ cpu_storage = cpu_tensor.storage()
+ self.assertEqual(cpu_storage.type(), "torch.FloatStorage")
+
+ # test custom float storage before defining FloatStorage
+ foo_tensor = cpu_tensor.foo()
+ foo_storage = foo_tensor.storage()
+ self.assertEqual(foo_storage.type(), "torch.storage.TypedStorage")
+
+ class CustomFloatStorage:
+ @property
+ def __module__(self):
+ return "torch." + torch._C._get_privateuse1_backend_name()
+
+ @property
+ def __name__(self):
+ return "FloatStorage"
+
+ # test custom float storage after defining FloatStorage
+ try:
+ torch.foo.FloatStorage = CustomFloatStorage()
+ self.assertEqual(foo_storage.type(), "torch.foo.FloatStorage")
+
+ # test custom int storage after defining FloatStorage
+ foo_tensor2 = torch.randn([8]).int().foo()
+ foo_storage2 = foo_tensor2.storage()
+ self.assertEqual(foo_storage2.type(), "torch.storage.TypedStorage")
+ finally:
+ torch.foo.FloatStorage = None
+
+ def test_open_device_faketensor():
+ torch.utils.rename_privateuse1_backend("foo")
+ with torch._subclasses.fake_tensor.FakeTensorMode.push():
+ a = torch.empty(1, device="foo")
+ b = torch.empty(1, device="foo:0")
+ result = a + b
+
+ def test_open_device_named_tensor():
+ torch.utils.rename_privateuse1_backend("foo")
+ a = torch.empty([2, 3, 4, 5], device="foo", names=["N", "C", "H", "W"])
+
+ # Not an open registration test - this file is just very convenient
+ # for testing torch.compile on custom C++ operators
+ def test_compile_autograd_function_returns_self():
+ x_ref = torch.randn(4, requires_grad=True)
+ out_ref = self.module.custom_autograd_fn_returns_self(x_ref)
+ out_ref.sum().backward()
+
+ x_test = x_ref.clone().detach().requires_grad_(True)
+ f_compiled = torch.compile(self.module.custom_autograd_fn_returns_self)
+ out_test = f_compiled(x_test)
+ out_test.sum().backward()
+
+ self.assertEqual(out_ref, out_test)
+ self.assertEqual(x_ref.grad, x_test.grad)
+
+ # Not an open registration test - this file is just very convenient
+ # for testing torch.compile on custom C++ operators
+ def test_compile_autograd_function_aliasing():
+ x_ref = torch.randn(4, requires_grad=True)
+ out_ref = torch.ops._test_funcs.custom_autograd_fn_aliasing(x_ref)
+ out_ref.sum().backward()
+
+ x_test = x_ref.clone().detach().requires_grad_(True)
+ f_compiled = torch.compile(
+ torch.ops._test_funcs.custom_autograd_fn_aliasing
+ )
+ out_test = f_compiled(x_test)
+ out_test.sum().backward()
+
+ self.assertEqual(out_ref, out_test)
+ self.assertEqual(x_ref.grad, x_test.grad)
+
+ def test_open_device_scalar_type_fallback():
+ torch.utils.rename_privateuse1_backend("foo")
+ z_cpu = torch.Tensor([[0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]).to(
+ torch.int64
+ )
+ z = torch.triu_indices(3, 3, device="foo")
+ self.assertEqual(z_cpu, z)
+
+ def test_open_device_tensor_type_fallback():
+ torch.utils.rename_privateuse1_backend("foo")
+ # create tensors located in custom device
+ x = torch.Tensor([[1, 2, 3], [2, 3, 4]]).to("foo")
+ y = torch.Tensor([1, 0, 2]).to("foo")
+ # create result tensor located in cpu
+ z_cpu = torch.Tensor([[0, 2, 1], [1, 3, 2]])
+ # Check that our device is correct.
+ device = self.module.custom_device()
+ self.assertTrue(x.device == device)
+ self.assertFalse(x.is_cpu)
+ # call sub op, which will fallback to cpu
+ z = torch.sub(x, y)
+ self.assertEqual(z_cpu, z)
+ # call index op, which will fallback to cpu
+ z_cpu = torch.Tensor([3, 1])
+ y = torch.Tensor([1, 0]).long().to("foo")
+ z = x[y, y]
+ self.assertEqual(z_cpu, z)
+
+ def test_open_device_tensorlist_type_fallback():
+ torch.utils.rename_privateuse1_backend("foo")
+ # create tensors located in custom device
+ v_foo = torch.Tensor([1, 2, 3]).to("foo")
+ # create result tensor located in cpu
+ z_cpu = torch.Tensor([2, 4, 6])
+ # create tensorlist for foreach_add op
+ x = (v_foo, v_foo)
+ y = (v_foo, v_foo)
+ # Check that our device is correct.
+ device = self.module.custom_device()
+ self.assertTrue(v_foo.device == device)
+ self.assertFalse(v_foo.is_cpu)
+ # call _foreach_add op, which will fallback to cpu
+ z = torch._foreach_add(x, y)
+
+ self.assertEqual(z_cpu, z[0])
+ self.assertEqual(z_cpu, z[1])
+
+ test_base_device_registration()
+ test_before_common_registration()
+ test_common_registration()
+ test_after_common_registration()
+ test_open_device_generator_registration_and_hooks()
+ test_open_device_dispatchstub()
+ test_open_device_random()
+ test_open_device_tensor()
+ test_open_device_packed_sequence()
+ test_open_device_storage()
+ test_open_device_storage_pin_memory()
+ test_open_device_serialization()
+ test_open_device_storage_resize()
+ test_open_device_storage_type()
+ test_open_device_faketensor()
+ test_open_device_named_tensor()
+ test_open_device_quantized()
+
+ test_compile_autograd_function_returns_self()
+ test_compile_autograd_function_aliasing()
+
+ test_open_device_scalar_type_fallback()
+ test_open_device_tensor_type_fallback()
+ test_open_device_tensorlist_type_fallback()
if __name__ == "__main__":
diff --git a/torch/csrc/jit/serialization/pickler.cpp b/torch/csrc/jit/serialization/pickler.cpp
index 0300dd6169..6e1b399e40 100644
--- a/torch/csrc/jit/serialization/pickler.cpp
+++ b/torch/csrc/jit/serialization/pickler.cpp
@@ -803,18 +803,4 @@ bool checkHasValidSetGetState(const std::shared_ptr<c10::ClassType>& cls) {
return true;
}
-std::array<
- c10::optional<std::pair<BackendMetaPtr, BackendMetaPtr>>,
- at::COMPILE_TIME_MAX_DEVICE_TYPES>&
-GetBackendMetaSerialization() {
- // The array to save function pointer for BackendMeta serialization.
- // key is the DeviceType, value is std::pair obj.
- // value.first represent get function and value.seconde represent set function
- static std::array<
- c10::optional<std::pair<BackendMetaPtr, BackendMetaPtr>>,
- at::COMPILE_TIME_MAX_DEVICE_TYPES>
- BackendMetaSerialization;
- return BackendMetaSerialization;
-}
-
} // namespace torch::jit
diff --git a/torch/csrc/jit/serialization/pickler.h b/torch/csrc/jit/serialization/pickler.h
index fc0f0231dd..4f553b6f7c 100644
--- a/torch/csrc/jit/serialization/pickler.h
+++ b/torch/csrc/jit/serialization/pickler.h
@@ -310,10 +310,19 @@ inline std::unordered_set<c10::DeviceType>& GetBackendMetaAllowlist() {
// Dynamically obtain serialization function pairs
// that require the corresponding backend.
-TORCH_API std::array<
+inline std::array<
c10::optional<std::pair<BackendMetaPtr, BackendMetaPtr>>,
at::COMPILE_TIME_MAX_DEVICE_TYPES>&
-GetBackendMetaSerialization();
+GetBackendMetaSerialization() {
+ // The array to save function pointer for BackendMeta serialization.
+ // key is the DeviceType, value is std::pair obj.
+  // value.first represents the get function and value.second represents the set function
+ static std::array<
+ c10::optional<std::pair<BackendMetaPtr, BackendMetaPtr>>,
+ at::COMPILE_TIME_MAX_DEVICE_TYPES>
+ BackendMetaSerialization;
+ return BackendMetaSerialization;
+}
// Register function pointer of Tensor BackendMetadata for serialization.
TORCH_API inline void TensorBackendMetaRegistry(
|
2.41.0
|
a0f070065e8bf4575107ccdecd6785dea6d9010
|
Tue, 30 Apr 2024 22:13:04 +0000
|
[PATCH 0858/1000] Revert "Add registration API for torch.compile-eager (#121387)"
|
This reverts commit 61e937f3d6b904d6706594c1b3cfd7d0e56f9663. Reverted https://github.com/pytorch/pytorch/pull/121387 on behalf of https://github.com/kit1980 due to breaking internal builds ([comment](https://github.com/pytorch/pytorch/pull/121387#issuecomment-2087541956))
|
diff --git a/build_variables.bzl b/build_variables.bzl
index f28131023c..5939da825c 100644
--- a/build_variables.bzl
+++ b/build_variables.bzl
@@ -824,7 +824,6 @@ libtorch_python_core_sources = [
"torch/csrc/mps/Module.cpp",
"torch/csrc/mtia/Module.cpp",
"torch/csrc/inductor/aoti_runner/pybind.cpp",
- "torch/csrc/inductor/aoti_eager/kernel_holder.cpp",
"torch/csrc/jit/backends/backend_init.cpp",
"torch/csrc/jit/python/init.cpp",
"torch/csrc/jit/passes/onnx.cpp",
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index fe1cf93768..cd3bb458a4 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -46,7 +46,6 @@ from torch._inductor.utils import (
from torch._inductor.virtualized import V
from torch._prims_common import is_integer_dtype
from torch.fx.experimental.proxy_tensor import make_fx
-from torch.library import _scoped_library
from torch.nn import functional as F
from torch.testing import FileCheck, make_tensor
from torch.testing._internal.common_cuda import (
@@ -760,70 +759,6 @@ class CommonTemplate:
),
)
- @skipCUDAIf(not SM80OrLater, "Requires sm80")
- def test_torch_compile_override_registration(self):
- dynamic = False
- namespace_name = "aten"
- dispatch_key = "CPU"
- device = torch.device("cpu")
- if self.device.lower() == "cuda":
- dispatch_key = "CUDA"
- device = torch.device("cuda")
-
- unary_op_set = ["abs", "acos"]
-
- def fn(x, op_name=""):
- return getattr(torch, op_name)(x)
-
- # Invoke torch.compile directly to get referent results
- x = torch.randn(3, 4, device=device)
-
- ref_array = []
- for unary_op_name in unary_op_set:
- opt_fn = torch.compile(functools.partial(fn, op_name=unary_op_name))
- ref = opt_fn(x)
- ref_array.append(ref)
-
- def register_ops(op_set, dispatch_key, torch_compile_op_lib_impl):
- for _op_name in op_set:
- qualified_op_name = f"{namespace_name}::{_op_name}"
- _, overload_names = torch._C._jit_get_operation(qualified_op_name)
- for overload_name in overload_names:
- try:
- reg_op_name = qualified_op_name
- schema = torch._C._get_schema(qualified_op_name, overload_name)
- if schema.overload_name:
- reg_op_name = f"{qualified_op_name}.{schema.overload_name}"
- torch_compile_op_lib_impl._impl_with_aoti_compile( # noqa: F821
- reg_op_name, dispatch_key
- )
- except Exception as e:
- continue
-
- with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
- register_ops(unary_op_set, dispatch_key, torch_compile_op_lib_impl)
-
- res_array = []
- for unary_op_name in unary_op_set:
- res_array.append(getattr(torch, unary_op_name)(x))
-
- for ref, res in zip(ref_array, res_array):
- self.assertEqual(ref, res)
-
- a = torch.randn(128, device=device)
- min_tensor = torch.randn(128, device=device)
- max_tensor = min_tensor + 0.5
-
- ref_with_min = torch.ops.aten.clamp(a, min_tensor)
- ref_with_min_max = torch.ops.aten.clamp(a, min_tensor, max_tensor)
-
- with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
- register_ops(["clamp"], dispatch_key, torch_compile_op_lib_impl)
- res_with_min = torch.ops.aten.clamp(a, min_tensor)
- res_with_min_max = torch.ops.aten.clamp(a, min_tensor, max_tensor)
- self.assertEqual(ref_with_min, res_with_min)
- self.assertEqual(ref_with_min_max, res_with_min_max)
-
def test_add_const_int(self):
def fn(a):
return (a + 1, torch.add(a, 1, alpha=2))
diff --git a/torch/csrc/inductor/aoti_eager/kernel_holder.cpp b/torch/csrc/inductor/aoti_eager/kernel_holder.cpp
deleted file mode 100644
index 55c0d71c55..0000000000
--- a/torch/csrc/inductor/aoti_eager/kernel_holder.cpp
+++ /dev/null
@@ -1,246 +0,0 @@
-#if !defined(C10_MOBILE) && !defined(ANDROID)
-#include <torch/csrc/inductor/aoti_eager/kernel_holder.h>
-
-#include <ATen/ATen.h>
-
-#include <ATen/core/dispatch/Dispatcher.h>
-#include <torch/csrc/PyInterpreter.h>
-#include <torch/csrc/autograd/python_variable.h>
-#include <torch/csrc/inductor/aoti_runner/model_container_runner_cpu.h>
-#ifdef USE_CUDA
-#include <torch/csrc/inductor/aoti_runner/model_container_runner_cuda.h>
-#endif
-#include <torch/csrc/jit/frontend/function_schema_parser.h>
-
-namespace torch::inductor {
-
-namespace {
-
-inline void unpack_tensor_ivalue(
- const c10::IValue& ivalue,
- const c10::Device& device,
- std::vector<at::Tensor>& inputs) {
- inputs.push_back(ivalue.toTensor());
-}
-
-inline void unpack_optional_tensor_ivalue(
- const c10::IValue& ivalue,
- const c10::Device& device,
- std::vector<at::Tensor>& inputs) {
- auto ivalue_opt_tensor = ivalue.toOptional<at::Tensor>();
- if (ivalue_opt_tensor.has_value()) {
- inputs.push_back(ivalue_opt_tensor.value());
- }
-}
-
-inline void unpack_tensor_list_ivalue(
- const c10::IValue& ivalue,
- const c10::Device& device,
- std::vector<at::Tensor>& inputs) {
- for (const auto& item : ivalue.toListRef()) {
- inputs.push_back(item.toTensor());
- }
-}
-
-inline void unpack_optional_tensor_list_ivalue(
- const c10::IValue& ivalue,
- const c10::Device& device,
- std::vector<at::Tensor>& inputs) {
- for (const auto& item : ivalue.toListRef()) {
- unpack_optional_tensor_ivalue(item, device, inputs);
- }
-}
-
-inline void unpack_scalar_ivalue(
- const c10::IValue& ivalue,
- const c10::Device& device,
- std::vector<at::Tensor>& inputs) {
- inputs.push_back(at::scalar_tensor(
- ivalue.toScalar(),
- c10::TensorOptions().device(device).dtype(ivalue.toScalar().type())));
-}
-
-bool unpack_ivalue(
- const c10::Argument& argument,
- const c10::IValue& ivalue,
- const c10::Device& device,
- std::vector<at::Tensor>& inputs) {
- if (ivalue.isTensor()) {
- unpack_tensor_ivalue(ivalue, device, inputs);
- } else if (ivalue.isTensorList()) {
- unpack_tensor_list_ivalue(ivalue, device, inputs);
- } else if (ivalue.isOptionalTensorList()) {
- unpack_optional_tensor_list_ivalue(ivalue, device, inputs);
- } else if (ivalue.isScalar()) {
- // ivalue is scalar
- unpack_scalar_ivalue(ivalue, device, inputs);
- } else if (
- *argument.real_type() == *c10::getTypePtr<c10::optional<at::Tensor>>()) {
- // ivalue is c10::optional<at::Tensor>
- unpack_optional_tensor_ivalue(ivalue, device, inputs);
- } else {
- // Unsupport IValue type.
- return false;
- }
-
- return true;
-}
-
-bool unpack_tensors(
- const std::vector<c10::Argument>& arguments,
- const torch::jit::Stack& stack,
- const c10::Device& device,
- std::vector<at::Tensor>& inputs) {
- for (size_t idx = 0; idx < stack.size(); idx++) {
- if (!unpack_ivalue(arguments[idx], stack[idx], device, inputs)) {
- return false;
- }
- }
-
- return true;
-}
-
-} // namespace
-
-AOTIPythonKernelHolder::AOTIPythonKernelHolder(
- c10::DispatchKey dispatch_key,
- c10::string_view ns,
- c10::string_view op_name_with_overload)
- : dispatch_key_(dispatch_key),
- ns_(std::string(ns)),
- op_name_with_overload_(std::string(op_name_with_overload)),
- device_(c10::dispatchKeyToDeviceType(dispatch_key_), 0),
- pyinterpreter_(getPyInterpreter()) {
- TORCH_CHECK(
- (device_.type() == c10::DeviceType::CPU) ||
- (device_.type() == c10::DeviceType::CUDA),
- "Unsupported device type");
-}
-
-void AOTIPythonKernelHolder::operator()(
- const c10::OperatorHandle& op,
- c10::DispatchKeySet keyset,
- torch::jit::Stack* stack) {
- if (cache_lookup(op, keyset, stack)) {
- cache_hit(op, keyset, stack);
- } else {
- cache_miss(op, keyset, stack);
- }
-}
-
-bool AOTIPythonKernelHolder::cache_lookup(
- const c10::OperatorHandle& op,
- c10::DispatchKeySet keyset,
- torch::jit::Stack* stack) {
- // TODO: Always return false now to implement cache_miss. Later, we will add
- // cache lookup and implement cache hit.
- return false;
-}
-
-void AOTIPythonKernelHolder::cache_hit(
- const c10::OperatorHandle& op,
- c10::DispatchKeySet keyset,
- torch::jit::Stack* stack) {
- TORCH_INTERNAL_ASSERT(false);
-}
-
-void AOTIPythonKernelHolder::cache_miss(
- const c10::OperatorHandle& op,
- c10::DispatchKeySet keyset,
- torch::jit::Stack* stack) {
- auto kernel_lib_path = produce_aoti_kernel_lib(op, keyset, stack);
- std::shared_ptr<AOTIModelContainerRunner> kernel = nullptr;
- // TODO: To enable the plugin mechanism to allow registration for other
- // backends
- if (device_.type() == c10::DeviceType::CPU) {
- kernel = std::make_shared<AOTIModelContainerRunnerCpu>(kernel_lib_path);
- } else {
-#ifdef USE_CUDA
- kernel = std::make_shared<AOTIModelContainerRunnerCuda>(kernel_lib_path);
-#else
- TORCH_CHECK(false, "Unsupported CUDA device type");
-#endif
- }
-
- std::vector<at::Tensor> inputs;
- TORCH_INTERNAL_ASSERT(
- unpack_tensors(op.schema().arguments(), *stack, device_, inputs),
- "Failed to unpack tensors for the stack to run the AOTI kernel.");
- auto outputs = kernel->run(inputs);
- if (outputs.size() > 0) {
- torch::jit::drop(*stack, op.schema().arguments().size());
- // TODO: Get the output type of this operation and then convert to the
- // output type.
- for (auto& output : outputs) {
- torch::jit::push(*stack, std::move(output));
- }
- }
-}
-
-std::string AOTIPythonKernelHolder::produce_aoti_kernel_lib(
- const c10::OperatorHandle& op,
- c10::DispatchKeySet keyset,
- torch::jit::Stack* stack) {
- auto arguments = torch::jit::last(*stack, op.schema().arguments().size());
-
- py::gil_scoped_acquire gil;
-
- // Get the corresponding python operation for the current operator and the
- // python operation will pass to the AOT Inductor to generate the kernel
- // library.
- const auto& schema = op.schema();
- const auto& qualified_name = op.operator_name().name;
- const auto& overload_name =
- schema.overload_name().empty() ? "default" : schema.overload_name();
- auto pos = qualified_name.find("::");
- TORCH_INTERNAL_ASSERT(pos != std::string::npos, qualified_name);
- // Make me some null terminated strings
- std::string ns_str = qualified_name.substr(0, pos);
- const char* ns = ns_str.c_str();
- const char* func_name = qualified_name.c_str() + pos + strlen("::");
- py::handle op_py_func = op.getPythonOp(pyinterpreter_, [&]() -> PyObject* {
- py::handle torch_api_function =
- py::module::import("torch").attr("ops").attr(ns).attr(func_name);
- return torch_api_function.attr(overload_name.c_str()).ptr();
- });
-
- TORCH_INTERNAL_ASSERT(
- op_py_func.ptr() != nullptr && op_py_func.ptr() != Py_None,
- "Failed to get python operation. Operator Name is ",
- op.operator_name().name,
- ", Overload Name is ",
- overload_name);
-
- py::handle aot_compile_function =
- py::module::import("torch._export").attr("aot_compile");
- TORCH_INTERNAL_ASSERT(
- aot_compile_function.ptr() != nullptr &&
- aot_compile_function.ptr() != Py_None,
- "Failed to import - torch._export.aot_compile");
-
- // Pass the python operation to the AOT Inductor to generate the kernel
- // library.
- auto args_kwargs = parseIValuesToPyArgsKwargs(op, arguments.vec());
- auto result = py::reinterpret_steal<py::object>(PyObject_CallFunctionObjArgs(
- aot_compile_function.ptr(),
- op_py_func.ptr(),
- args_kwargs.first.ptr(),
- args_kwargs.second.ptr(),
- nullptr));
- TORCH_INTERNAL_ASSERT(result.ptr() != nullptr && result.ptr() != Py_None);
-
- auto kernel_lib_path = py::cast<std::string>(result);
- TORCH_CHECK(
- !kernel_lib_path.empty(),
- "Failed to produce kernel libarary by using AOTI for ",
- c10::DeviceTypeName(device_.type()),
- ". Operator Name is ",
- op.operator_name().name,
- ", Overload Name is ",
- op.schema().overload_name());
-
- return kernel_lib_path;
-}
-
-} // namespace torch::inductor
-#endif
diff --git a/torch/csrc/inductor/aoti_eager/kernel_holder.h b/torch/csrc/inductor/aoti_eager/kernel_holder.h
deleted file mode 100644
index f7a886eb26..0000000000
--- a/torch/csrc/inductor/aoti_eager/kernel_holder.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#if !defined(C10_MOBILE) && !defined(ANDROID)
-#pragma once
-
-#include <ATen/ATen.h>
-#include <ATen/core/boxing/KernelFunction.h>
-
-#include <torch/csrc/inductor/aoti_runner/model_container_runner.h>
-#include <torch/csrc/utils/pybind.h>
-
-#include <string>
-
-namespace torch::inductor {
-
-// The AOTIPythonKernelHolder class uses the AOT Inductor to generate a kernel
-// for a specified operation. To speed up this process, the generated kernel
-// library is cached on disk. Detailed information from the input tensors is
-// used as the key for caching the kernel library. On subsequent runs, these
-// input tensors are used to search the cache. If a cache hit occurs, the cached
-// kernel library is loaded and executed. If a cache miss occurs, the AOT
-// Inductor is called again to generate the kernel library.
-class AOTIPythonKernelHolder : public c10::OperatorKernel {
- // A DispatchKey object that represents the dispatch key for the kernel.
- c10::DispatchKey dispatch_key_;
- // Namespace of the kernel.
- std::string ns_;
- // Name of the operation the kernel performs.
- std::string op_name_with_overload_;
- // The device on which the kernel is to be executed.
- c10::Device device_;
- // The Python interpreter to get OpOverload object with the given op_name and
- // op_overload_name.
- c10::impl::PyInterpreter* pyinterpreter_;
-
- public:
- AOTIPythonKernelHolder(
- c10::DispatchKey dispatch_key,
- c10::string_view ns,
- c10::string_view op_name_with_overload);
-
- void operator()(
- const c10::OperatorHandle& op,
- c10::DispatchKeySet keyset,
- torch::jit::Stack* stack);
-
- private:
- bool cache_lookup(
- const c10::OperatorHandle& op,
- c10::DispatchKeySet keyset,
- torch::jit::Stack* stack);
- void cache_miss(
- const c10::OperatorHandle& op,
- c10::DispatchKeySet keyset,
- torch::jit::Stack* stack);
- void cache_hit(
- const c10::OperatorHandle& op,
- c10::DispatchKeySet keyset,
- torch::jit::Stack* stack);
- std::string produce_aoti_kernel_lib(
- const c10::OperatorHandle& op,
- c10::DispatchKeySet keyset,
- torch::jit::Stack* stack);
-};
-
-} // namespace torch::inductor
-#endif
diff --git a/torch/csrc/utils/python_dispatch.cpp b/torch/csrc/utils/python_dispatch.cpp
index a3e71a2542..2d115a8228 100644
--- a/torch/csrc/utils/python_dispatch.cpp
+++ b/torch/csrc/utils/python_dispatch.cpp
@@ -21,7 +21,6 @@
#include <c10/util/flat_hash_map.h>
#include <pybind11/operators.h>
#include <pybind11/stl.h>
-#include <torch/csrc/inductor/aoti_eager/kernel_holder.h>
#include <torch/csrc/utils/pybind.h>
#include <torch/csrc/utils/python_raii.h>
@@ -373,32 +372,6 @@ void initDispatchBindings(PyObject* module) {
py::arg("name"),
py::arg("dispatch") = "",
py::arg("debug") = "impl_t_t")
- .def(
- "impl_with_aoti_compile",
- [](const py::object& self,
- const char* ns,
- const char* op_name_with_overload,
- c10::DispatchKey dispatch) {
- HANDLE_TH_ERRORS
- std::string reg_op_name =
- std::string(ns).append("::").append(op_name_with_overload);
-
- auto& lib = self.cast<torch::Library&>();
- lib.impl(
- reg_op_name.c_str(),
- torch::dispatch(
- dispatch,
- CppFunction::makeFromBoxedFunctor(
- std::make_unique<
- torch::inductor::AOTIPythonKernelHolder>(
- dispatch, ns, op_name_with_overload))),
- register_or_verify());
- END_HANDLE_TH_ERRORS_PYBIND
- },
- "",
- py::arg("ns"),
- py::arg("op_name_with_overload"),
- py::arg("dispatch"))
.def(
"impl",
[](const py::object& self,
diff --git a/torch/library.py b/torch/library.py
index 1d6886aeb6..6bd4bd8110 100644
--- a/torch/library.py
+++ b/torch/library.py
@@ -139,48 +139,6 @@ class Library:
handle = entry.abstract_impl.register(func_to_register, source)
self._registration_handles.append(handle)
- def _impl_with_aoti_compile(self, op_name, dispatch_key=''):
- r'''Register the operator to use the AOTI-compiled implementation.
-
- Args:
- op_name: operator name (along with the overload) or OpOverload object.
- dispatch_key: dispatch key that the input function should be registered for. By default, it uses
- the dispatch key that the library was created with.
-
- Example::
- >>> my_lib = Library("aten", "IMPL")
- >>> my_lib._impl_with_aoti_compile("div.Tensor", "CPU")
- '''
- if dispatch_key == '':
- dispatch_key = self.dispatch_key
- assert torch.DispatchKeySet(dispatch_key).has(torch._C.DispatchKey.Dense)
-
- if isinstance(op_name, str):
- name = op_name
- elif isinstance(op_name, OpOverload):
- name = op_name._schema.name
- overload_name = op_name._schema.overload_name
- if overload_name != '':
- name = name + '.' + overload_name
- else:
- raise RuntimeError("_impl_with_aoti_compile should be passed either a name or an OpOverload object "
- "as the first argument")
-
- key = self.ns + "/" + name.split("::")[-1] + "/" + dispatch_key
- if key in _impls:
- # TODO: in future, add more info about where the existing function is registered (this info is
- # today already returned by the C++ warning when _impl_with_aoti_compile is called but we error out before that)
- raise RuntimeError("This is not allowed since there's already a kernel registered from python overriding {}"
- "'s behavior for {} dispatch key and {} namespace.".
- format(name.split("::")[-1], dispatch_key, self.ns))
-
- assert self.m is not None
- impl_fn: Callable = self.m.impl_with_aoti_compile
- impl_fn(self.ns, name.split("::")[-1], dispatch_key)
-
- _impls.add(key)
- self._op_impls.add(key)
-
def impl(self, op_name, fn, dispatch_key='', *, with_keyset=False):
r'''Registers the function implementation for an operator defined in the library.
|
2.41.0
|
fec26e231433ec5a639adb1c15e354d37fd2e89
|
Tue, 30 Apr 2024 22:28:52 +0000
|
[PATCH 0859/1000] Fix typo under torch/_inductor directory (#119658)
|
This PR fixes typos in comments and msgs under the `torch/_inductor` directory, and also changes the corresponding test. Pull Request resolved: https://github.com/pytorch/pytorch/pull/119658 Approved by: https://github.com/colesbury
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index ef3946cec2..31d83b3172 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -391,7 +391,7 @@ def write(
specified_dir: str = "",
) -> Tuple[str, str]:
# use striped content to compute hash so we don't end up with different
- # hashes just because the content begins/ends with differnet number of
+ # hashes just because the content begins/ends with different number of
# spaces.
key: str = get_hash(content.strip(), extra, hash_type)
basename, subdir, path = get_path(key, extension, specified_dir)
@@ -1304,7 +1304,7 @@ def pick_vec_isa() -> VecISA:
if not _valid_vec_isa_list:
return invalid_vec_isa
- # If the simdlen is None, it indicates determin the vectorization length automatically
+ # If the simdlen is None, it indicates determine the vectorization length automatically
if config.cpp.simdlen is None:
assert _valid_vec_isa_list
return _valid_vec_isa_list[0]
diff --git a/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py b/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py
index 6a15b183f8..d8bf408dc2 100644
--- a/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py
+++ b/torch/_inductor/codegen/cuda/cutlass_epilogue_gen.py
@@ -15,7 +15,7 @@ _MAGIC_SYMPY_ERROR_STRING = "[!sympy: unsupported expr!]"
def _arg_str(a):
if isinstance(a, sympy.Expr):
- # If this return value containting the _MAGIC_SYMPY_ERROR_STRING
+ # If this return value containing the _MAGIC_SYMPY_ERROR_STRING
# is used as part of the final generated C++ code,
# a CUTLASSEVTOpNotImplementedError is raised to indicate that
# the op could not be converted to a valid EVT expression.
diff --git a/torch/_inductor/codegen/multi_kernel.py b/torch/_inductor/codegen/multi_kernel.py
index e4fc396c64..c2ff415171 100644
--- a/torch/_inductor/codegen/multi_kernel.py
+++ b/torch/_inductor/codegen/multi_kernel.py
@@ -54,7 +54,7 @@ def get_all_call_args(call_args_list):
It will fail if any kernel has the same argument passed in multiple times.
Check test_pass_same_arg_multi_times in test_multi_kernel.py
- Instead, we pick the longest call args and assert that otehr call args are
+ Instead, we pick the longest call args and assert that other call args are
a subset of it.
"""
return _get_all_args(call_args_list)
@@ -128,7 +128,7 @@ class MultiKernelState:
)
# add subkernel src code hashes to the multi-kernel source code so changing a
- # subkernel implementation will result in a differnt py file for
+ # subkernel implementation will result in a different py file for
# multi-kernel. This makes cache implementation straightforward since
# we can decide cache file name based on multi-kernel py file name
# directly.
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index 5e1d938fe2..b49aa3aa32 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -1360,7 +1360,7 @@ class TritonKernel(Kernel):
}.get(self.reduction_hint, 64)
# If multi_kernel is enabled, we do more aggressive persistent reduction.
- # This may result in some persisent reductions slower than the
+ # This may result in some persistent reductions slower than the
# corresponding non-persistent reductions. MultiKernel will do benchmarking
# to pick the faster one.
if config.triton.multi_kernel:
diff --git a/torch/_inductor/decomposition.py b/torch/_inductor/decomposition.py
index c483753e2a..6541152fdb 100644
--- a/torch/_inductor/decomposition.py
+++ b/torch/_inductor/decomposition.py
@@ -103,7 +103,7 @@ def register_decomposition(ops):
# TODO: for now, inductor doesn't handle asserts
-# because the condition is symbool -> tensor in the graph.
+# because the condition is symbol -> tensor in the graph.
@register_decomposition([aten._assert_async.msg])
def assert_async_msg_decomp(tensor, msg):
return
diff --git a/torch/_inductor/fx_passes/group_batch_fusion.py b/torch/_inductor/fx_passes/group_batch_fusion.py
index 880ba9df98..e58f288ba3 100644
--- a/torch/_inductor/fx_passes/group_batch_fusion.py
+++ b/torch/_inductor/fx_passes/group_batch_fusion.py
@@ -753,7 +753,7 @@ class BatchLayernormFusion(BatchFusion):
class BatchPointwiseOpsPreGradFusion(BatchPointwiseOpsFusionFactory):
"""
- Batch poinwise ops (e.g., sigmoid, relu, tanh) fusion in pre grad pass.
+ Batch pointwise ops (e.g., sigmoid, relu, tanh) fusion in pre grad pass.
We fuse it in random place, and the introduced stack node may be merged in split cat.
"""
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index 80b33026c3..c4915bcec4 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -1766,7 +1766,7 @@ class Scan(Loops):
)
-# This signifies a scan op that should go through TritonSplitScanKernel codgen on CUDA.
+# This signifies a scan op that should go through TritonSplitScanKernel codegen on CUDA.
@dataclasses.dataclass
class SplitScan(Scan):
pass
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index 4b219fc517..ab45e1782b 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -548,7 +548,7 @@ def _convert_element_type(x: TensorBox, dtype: torch.dtype):
if dtype.is_complex or x.get_dtype().is_complex:
if x.get_size():
# Decompose since aa aten fallback is more friendly for c++ codegen.
- # This decompostion doesn't work for empty tensor, which needs more investigation.
+ # This decomposition doesn't work for empty tensor, which needs more investigation.
dst = empty_like(x, dtype=dtype)
ir.InplaceCopyFallback.create(dst, x)
return dst
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index c85ea37133..6d49c60b0d 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -718,7 +718,7 @@ class CachingAutotuner(KernelInterface):
E.g., assuming regular autotune only get one config C1; while max-autotune get 4 configs C1, C2, C3, C4
and max-autotune figure out C3 is the best.
- Then if coordinate descnt tuning is run with max-autotune disabled, it will start from C1;
+        Then if coordinate descent tuning is run with max-autotune disabled, it will start from C1;
while if coordinate descent tuning is run with max-autotune enabled, it will start from C3.
"""
if (
|
2.41.0
|
12c85e9199c683fe047883323c61d82fdbbd262
|
Tue, 30 Apr 2024 10:16:41 -0700
|
[PATCH 0860/1000] Revert "[benchmark][cudagraph] Explicitly call aten.div with CUDA denominator for cudagraphs (#119729)" (#125246)
|
This reverts commit 62b5738a8bf325d79468b839b8412b87cb9951c1. https://github.com/pytorch/pytorch/pull/119729/ regresses cudagraph dashboard. Moving the one-time per iteration loss from CPU to CUDA is somehow causing a lot of copies: current (top) vs with revert (bottom)  Pull Request resolved: https://github.com/pytorch/pytorch/pull/125246 Approved by: https://github.com/eellison
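A minimal sketch (not part of this revert) contrasting the two denominator choices in `reduce_to_scalar_loss`; the wrapper function names are illustrative only:
```python
import torch

def loss_with_python_scalar(out: torch.Tensor) -> torch.Tensor:
    # Behavior restored by this revert: the denominator is a plain Python int,
    # so the division is an ordinary tensor-scalar op on out's device.
    return out.sum() / out.numel()

def loss_with_device_tensor(out: torch.Tensor) -> torch.Tensor:
    # Behavior being reverted: the denominator is materialized as a tensor on
    # out's device, which was meant to help cudagraphs but showed up as extra
    # per-iteration copies on the dashboard.
    return out.sum() / torch.tensor(out.numel(), device=out.device)

out = torch.randn(8, 8)
assert torch.allclose(loss_with_python_scalar(out), loss_with_device_tensor(out))
```
Both variants compute the same value; only how the denominator is represented differs.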
|
diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index 2a94e4cee5..d0e6949450 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -1,6 +1,5 @@
# Owner(s): ["module: inductor"]
import functools
-import io
import re
import sys
import unittest
@@ -1344,24 +1343,6 @@ TORCH_LIBRARY(test_autograd_cpp_node_data_dependent, m) {
out = compiled_fn(activations)
self.assertTrue(len(activations) == 0)
- @unittest.skipIf(not HAS_CUDA, "requires cuda")
- def test_cudagraphs_cpu_division(self):
- from torch._dynamo.testing import reduce_to_scalar_loss
-
- model = torch.nn.Linear(10, 10, dtype=torch.float16).cuda()
- inputs = torch.randn(10, 10, dtype=torch.float16).cuda()
- out = model(inputs)
- loss = reduce_to_scalar_loss(out)
- torch._inductor.config.triton.cudagraphs = True
-
- stderr_msgs = io.StringIO()
- with mock.patch("sys.stderr", stderr_msgs), compiled_autograd.enable(
- compiler_fn
- ):
- loss.backward()
-
- self.assertFalse("skipping cudagraphs" in stderr_msgs.getvalue())
-
def test_verbose_logs_graph(self):
torch._logging.set_logs(compiled_autograd_verbose=True)
diff --git a/torch/_dynamo/testing.py b/torch/_dynamo/testing.py
index 2dd384f4d8..c115e1cc09 100644
--- a/torch/_dynamo/testing.py
+++ b/torch/_dynamo/testing.py
@@ -103,7 +103,7 @@ def reduce_to_scalar_loss(out):
"""Reduce the output of a model to get scalar loss"""
if isinstance(out, torch.Tensor):
# Mean does not work on integer tensors
- return out.sum() / torch.tensor(out.numel(), device=out.device)
+ return out.sum() / out.numel()
elif isinstance(out, (list, tuple)):
return sum(reduce_to_scalar_loss(x) for x in out) / len(out)
elif type(out).__name__ in (
|
2.41.0
|
7023b89f88b9049383503f22608a40a2b96b919
|
Mon, 29 Apr 2024 13:32:03 -0700
|
[PATCH 0863/1000] Use torch._check for safety assert in _reshape_view_helper (#125187)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/125187 Approved by: https://github.com/albanD
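A hedged sketch (not from the patch) of the `assert` -> `torch._check` swap made below in `_reshape_view_helper`; the standalone helper name is hypothetical:
```python
import torch

def squeeze_trailing_ones(a: torch.Tensor, idx: int) -> torch.Tensor:
    # Hypothetical standalone version of the tail-squeeze loop touched by the diff.
    while idx < a.ndim:
        # torch._check raises RuntimeError (not AssertionError), and the message
        # lambda is only evaluated if the condition actually fails.
        torch._check(
            a.shape[idx] == 1,
            lambda: f"a.size({idx}) expected to be 1 but got {a.shape[idx]}",
        )
        a = a.squeeze(idx)
    return a

print(squeeze_trailing_ones(torch.randn(2, 3, 1, 1), 2).shape)  # torch.Size([2, 3])
```
Unlike a bare assert, `torch._check` also survives in traced/compiled code as a proper runtime check, which is part of the motivation for preferring it here.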
|
diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index b209ebb1a9..f9d6aafd68 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -3715,7 +3715,10 @@ def _reshape_view_helper(a: TensorLikeType, *shape, allow_copy: bool) -> TensorL
# Squeezes tail
while idx < a_.ndim:
- assert a_.shape[idx] == 1
+ torch._check(
+ a_.shape[idx] == 1,
+ lambda: f"a.size({idx}) expected to be 1 but got {a_.shape[idx]}",
+ )
a_ = squeeze(a_, idx)
return a_
|
2.41.0
|
5691558d9e370572d846e421acfb73b6a6318df
|
Wed, 1 May 2024 01:08:43 +0000
|
[PATCH 0864/1000] Change templated_attention -> flex_attention (#125251)
|
# Summary Change all the names Pull Request resolved: https://github.com/pytorch/pytorch/pull/125251 Approved by: https://github.com/Chillee, https://github.com/yanboliang
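A hedged usage sketch (not from this patch) of the renamed entry point, assuming the private import path and the `(query, key, value, score_mod)` calling convention shown in the benchmark and test changes below; the score_mod body, shapes, and CUDA device are illustrative, and the tests suggest an SM80+ GPU is required:
```python
import torch
from torch.nn.attention._flex_attention import _flex_attention  # private API at this commit

def relative_bias(score, batch, head, q_idx, kv_idx):
    # score_mod receives the raw attention score plus batch/head/query/key indices.
    return score + (kv_idx - q_idx)

q = torch.randn(2, 2, 1024, 64, device="cuda", dtype=torch.float16)
k = torch.randn(2, 2, 1024, 64, device="cuda", dtype=torch.float16)
v = torch.randn(2, 2, 1024, 64, device="cuda", dtype=torch.float16)

compiled_flex = torch.compile(_flex_attention)  # lowers the flex_attention HOP through inductor
out = compiled_flex(q, k, v, relative_bias)
print(out.shape)  # torch.Size([2, 2, 1024, 64])
```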
|
diff --git a/benchmarks/transformer/score_mod.py b/benchmarks/transformer/score_mod.py
index 2f49f234e8..e337049707 100644
--- a/benchmarks/transformer/score_mod.py
+++ b/benchmarks/transformer/score_mod.py
@@ -8,7 +8,7 @@ import numpy as np
import torch
import torch.nn.functional as F
from tabulate import tabulate
-from torch.nn.attention._templated_attention import _templated_attention
+from torch.nn.attention._flex_attention import _flex_attention
from tqdm import tqdm
torch._dynamo.config.automatic_dynamic_shapes = False
@@ -113,7 +113,7 @@ def run_single_experiment(config: ExperimentConfig) -> ExperimentResults:
def eager_sdpa(query, key, value, _):
return F.scaled_dot_product_attention(query, key, value)
- compiled_sdpa = torch.compile(_templated_attention)
+ compiled_sdpa = torch.compile(_flex_attention)
score_mod = config.score_mod
diff --git a/test/inductor/test_templated_attention.py b/test/inductor/test_flex_attention.py
similarity index 91%
rename from test/inductor/test_templated_attention.py
rename to test/inductor/test_flex_attention.py
index 42381fea08..e5f31e7bcb 100644
--- a/test/inductor/test_templated_attention.py
+++ b/test/inductor/test_flex_attention.py
@@ -10,19 +10,17 @@ from unittest.mock import patch
import torch
from torch._dynamo.testing import CompileCounterWithBackend, normalize_gm
-from torch._higher_order_ops.templated_attention import (
- templated_attention as templated_attention_hop,
-)
+from torch._higher_order_ops.flex_attention import flex_attention as flex_attention_hop
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import run_and_get_code
-from torch.nn.attention._templated_attention import (
+from torch.nn.attention._flex_attention import (
_causal,
_compose,
+ _flex_attention,
_generate_alibi_bias,
_identity,
_rel_bias,
_rel_causal,
- _templated_attention,
)
from torch.testing import FileCheck
from torch.testing._internal import common_utils
@@ -42,7 +40,7 @@ index = torch.ops.aten.index
def create_attention(score_mod):
- return functools.partial(_templated_attention, score_mod=score_mod)
+ return functools.partial(_flex_attention, score_mod=score_mod)
test_dtypes = (
@@ -324,9 +322,9 @@ class TestTemplatedSDPA(InductorTestCase):
requires_grad=True,
)
q, k, v = make_tensor(), make_tensor(), make_tensor()
- func = torch.compile(_templated_attention, backend="inductor", fullgraph=True)
+ func = torch.compile(_flex_attention, backend="inductor", fullgraph=True)
with self.assertRaisesRegex(
- AssertionError, "templated_attention_backward is not an OpOverload"
+ AssertionError, "flex_attention_backward is not an OpOverload"
):
out = func(q, k, v, _identity)
out.backward(torch.ones_like(out))
@@ -339,7 +337,7 @@ class TestTemplatedSDPA(InductorTestCase):
with self.assertRaisesRegex(
ValueError, "Expected query, key, and value to have the same dtype"
):
- _templated_attention(query, key, value, _identity)
+ _flex_attention(query, key, value, _identity)
@supported_platform
def test_different_sequence_length_fails(self):
@@ -347,7 +345,7 @@ class TestTemplatedSDPA(InductorTestCase):
key = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
value = torch.randn((1, 1, 1024, 64), dtype=torch.float32, device="cuda")
with self.assertRaisesRegex(ValueError, "NYI: The target sequence length"):
- _templated_attention(query, key, value, _identity)
+ _flex_attention(query, key, value, _identity)
@supported_platform
@patch.object(torch._inductor.config, "max_autotune", True)
@@ -378,7 +376,7 @@ class TestTemplatedSDPA(InductorTestCase):
def test_logsumexp_correctness(self, dtype, score_mod):
@torch.compile
def sdpa_hop(q, k, v, score_mod):
- return templated_attention_hop(q, k, v, score_mod)
+ return flex_attention_hop(q, k, v, score_mod)
@torch.compile(backend="aot_eager")
def eager_sdpa_hop(q, k, v, score_mod):
@@ -386,7 +384,7 @@ class TestTemplatedSDPA(InductorTestCase):
Besides dropping LSE it also ensures that the hop is compiled with aot-eager
backend. We need to replicate this.
"""
- return templated_attention_hop(q, k, v, score_mod)
+ return flex_attention_hop(q, k, v, score_mod)
make_tensor = functools.partial(
torch.randn,
@@ -441,7 +439,7 @@ class TestTemplatedSDPA(InductorTestCase):
@torch.compile
def func(q, k, v, score_mod):
- _, lse = templated_attention_hop(q, k, v, score_mod)
+ _, lse = flex_attention_hop(q, k, v, score_mod)
lse_2 = lse * 2
return lse_2
@@ -462,7 +460,7 @@ class TestTemplatedSDPA(InductorTestCase):
@torch.compile
def func(q, k, v, score_mod):
- out, lse = templated_attention_hop(q, k, v, score_mod)
+ out, lse = flex_attention_hop(q, k, v, score_mod)
lse_2 = lse * 2
return out, lse_2
@@ -484,7 +482,7 @@ class TestTemplatedSDPA(InductorTestCase):
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
- func = torch.compile(_templated_attention, backend="aot_eager", fullgraph=True)
+ func = torch.compile(_flex_attention, backend="aot_eager", fullgraph=True)
self.assertTrue(
torch.autograd.gradcheck(
@@ -507,7 +505,7 @@ class TestTemplatedSDPA(InductorTestCase):
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
- func = torch.compile(_templated_attention, backend=mode, fullgraph=True)
+ func = torch.compile(_flex_attention, backend=mode, fullgraph=True)
score_mod = captured_buffers_map[score_mod_name](torch.float64)
self.assertTrue(
@@ -528,7 +526,7 @@ class TestTemplatedSDPA(InductorTestCase):
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
- func = torch.compile(_templated_attention, backend=cnt, fullgraph=True)
+ func = torch.compile(_flex_attention, backend=cnt, fullgraph=True)
out = func(query, key, value, _squared)
out.sum().backward()
self.assertEqual(cnt.frame_count, 1)
@@ -549,10 +547,10 @@ class GraphModule(torch.nn.Module):
new_empty_2 = l_args_0_.new_empty([], dtype = torch.int32)
new_empty_3 = l_args_0_.new_empty([], dtype = torch.int32)
new_empty_4 = l_args_0_.new_empty([], dtype = torch.int32)
- templated_attention_0 = self.templated_attention_0
- templated_attention = torch.ops.higher_order.templated_attention(l_args_0_, """
- + """l_args_1_, l_args_2_, templated_attention_0); l_args_0_ = l_args_1_ = l_args_2_ = templated_attention_0 = None
- out = templated_attention[0]; templated_attention = None
+ flex_attention_0 = self.flex_attention_0
+ flex_attention = torch.ops.higher_order.flex_attention(l_args_0_, """
+ + """l_args_1_, l_args_2_, flex_attention_0); l_args_0_ = l_args_1_ = l_args_2_ = flex_attention_0 = None
+ out = flex_attention[0]; flex_attention = None
return (out,)
class GraphModule(torch.nn.Module):
@@ -586,12 +584,12 @@ class GraphModule(torch.nn.Module):
+ """alias_5: "f64[2, 2, 8, 4]", alias_7: "f32[2, 2, 8]", tangents_1: "f64[2, 2, 8, 4]"):
fw_graph = self.fw_graph
joint_graph = self.joint_graph
- templated_attention_backward = torch.ops.higher_order.templated_attention_backward(primals_1, primals_2, """
+ flex_attention_backward = torch.ops.higher_order.flex_attention_backward(primals_1, primals_2, """
+ """primals_3, alias_5, alias_7, tangents_1, fw_graph, joint_graph); primals_1 = primals_2 = primals_3 = alias_5 """
+ """= alias_7 = tangents_1 = fw_graph = joint_graph = None
- getitem_2: "f64[2, 2, 8, 4]" = templated_attention_backward[0]
- getitem_3: "f64[2, 2, 8, 4]" = templated_attention_backward[1]
- getitem_4: "f64[2, 2, 8, 4]" = templated_attention_backward[2]; templated_attention_backward = None
+ getitem_2: "f64[2, 2, 8, 4]" = flex_attention_backward[0]
+ getitem_3: "f64[2, 2, 8, 4]" = flex_attention_backward[1]
+ getitem_4: "f64[2, 2, 8, 4]" = flex_attention_backward[2]; flex_attention_backward = None
return [getitem_2, getitem_3, getitem_4]
class <lambda>(torch.nn.Module):
diff --git a/torch/_dynamo/variables/higher_order_ops.py b/torch/_dynamo/variables/higher_order_ops.py
index da9ef7db58..8e1f3dbb42 100644
--- a/torch/_dynamo/variables/higher_order_ops.py
+++ b/torch/_dynamo/variables/higher_order_ops.py
@@ -530,7 +530,7 @@ class TorchHigherOrderOperatorVariable(VariableTracker):
return OutDtypeHigherOrderVariable(value, source, **kwargs)
elif value.__name__ == "wrap":
return WrapHigherOrderVariable(value, source, **kwargs)
- elif value.__name__ == "templated_attention":
+ elif value.__name__ == "flex_attention":
return TemplatedAttentionHigherOrderVariable(value, source, **kwargs)
elif value.__name__ in (
"wrap_activation_checkpoint",
@@ -1475,7 +1475,7 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
self, tx, query: "VariableTracker", score_function: "VariableTracker"
):
from torch._dynamo.symbolic_convert import InstructionTranslator
- from torch._higher_order_ops.templated_attention import TransformGetItemToIndex
+ from torch._higher_order_ops.flex_attention import TransformGetItemToIndex
from .builder import SourcelessBuilder
tx: InstructionTranslator = tx
@@ -1511,14 +1511,14 @@ class TemplatedAttentionHigherOrderVariable(TorchHigherOrderOperatorVariable):
score_function,
new_args,
{}, # expect only args no kwargs for now
- description="templated_attention",
+ description="flex_attention",
source_target=self.value,
set_subgraph_inputs="flatten_manual",
)
body_name = add_subgraph(
tx,
- "templated_attention",
+ "flex_attention",
torch.fx.GraphModule(tx.output.nn_modules, body_graph),
)
diff --git a/torch/_higher_order_ops/__init__.py b/torch/_higher_order_ops/__init__.py
index d8c560e124..99b3577c2c 100644
--- a/torch/_higher_order_ops/__init__.py
+++ b/torch/_higher_order_ops/__init__.py
@@ -1,3 +1,3 @@
from .cond import cond
from .while_loop import while_loop
-from .templated_attention import templated_attention, templated_attention_backward
+from .flex_attention import flex_attention, flex_attention_backward
diff --git a/torch/_higher_order_ops/templated_attention.py b/torch/_higher_order_ops/flex_attention.py
similarity index 89%
rename from torch/_higher_order_ops/templated_attention.py
rename to torch/_higher_order_ops/flex_attention.py
index d79408a1e3..95152fe518 100644
--- a/torch/_higher_order_ops/templated_attention.py
+++ b/torch/_higher_order_ops/flex_attention.py
@@ -41,9 +41,9 @@ class TransformGetItemToIndex(TorchFunctionMode):
return func(*args, **(kwargs or {}))
-class TemplatedAttentionHOP(HigherOrderOperator):
+class FlexAttentionHOP(HigherOrderOperator):
def __init__(self):
- super().__init__("templated_attention")
+ super().__init__("flex_attention")
def __call__(
self,
@@ -58,13 +58,13 @@ class TemplatedAttentionHOP(HigherOrderOperator):
return super().__call__(query, key, value, score_mod, *other_buffers)
-templated_attention = TemplatedAttentionHOP()
-templated_attention.__module__ = "torch.ops.higher_order"
+flex_attention = FlexAttentionHOP()
+flex_attention.__module__ = "torch.ops.higher_order"
-class TemplatedAttentionBackwardHOP(HigherOrderOperator):
+class FlexAttentionBackwardHOP(HigherOrderOperator):
def __init__(self):
- super().__init__("templated_attention_backward")
+ super().__init__("flex_attention_backward")
def __call__(
self,
@@ -93,8 +93,8 @@ class TemplatedAttentionBackwardHOP(HigherOrderOperator):
)
-templated_attention_backward = TemplatedAttentionBackwardHOP()
-templated_attention_backward.__module__ = "torch.ops.higher_order"
+flex_attention_backward = FlexAttentionBackwardHOP()
+flex_attention_backward.__module__ = "torch.ops.higher_order"
def math_attention(
@@ -146,7 +146,7 @@ def math_attention(
return scores.to(query.dtype) @ value, logsumexp
-@templated_attention.py_impl(DispatchKey.CompositeExplicitAutograd)
+@flex_attention.py_impl(DispatchKey.CompositeExplicitAutograd)
def sdpa_dense(
query: torch.Tensor,
key: torch.Tensor,
@@ -159,7 +159,7 @@ def sdpa_dense(
return out, lse
-def trace_templated_attention(
+def trace_flex_attention(
proxy_mode: ProxyTorchDispatchMode,
query: torch.Tensor,
key: torch.Tensor,
@@ -167,13 +167,13 @@ def trace_templated_attention(
score_mod: Callable,
*other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
- """Traces the templated_attention operator with the given score_mod function and other_buffers.
+ """Traces the flex_attention operator with the given score_mod function and other_buffers.
Trace SDPA will call make_fx with "fake" example vals and then trace the score_mod function
This will produce a GraphModule that will be stored on the root tracer as "sdpa_score". We
access this graph module in inductor to inline the score_mod function to the triton template.
"""
- example_out = templated_attention(query, key, value, score_mod, *other_buffers)
+ example_out = flex_attention(query, key, value, score_mod, *other_buffers)
example_vals = [
torch.zeros((), dtype=query.dtype, requires_grad=query.requires_grad)
] + [torch.zeros((), dtype=torch.int) for _ in range(4)]
@@ -183,15 +183,15 @@ def trace_templated_attention(
node_args = (query, key, value, score_graph, *other_buffers)
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
out_proxy = proxy_mode.tracer.create_proxy(
- "call_function", templated_attention, proxy_args, {}, name="templated_attention"
+ "call_function", flex_attention, proxy_args, {}, name="flex_attention"
)
return track_tensor_tree(
example_out, out_proxy, constant=None, tracer=proxy_mode.tracer
)
-@templated_attention.py_impl(ProxyTorchDispatchMode)
-def templated_attention_proxy_torch_dispatch_mode(
+@flex_attention.py_impl(ProxyTorchDispatchMode)
+def flex_attention_proxy_torch_dispatch_mode(
mode: ProxyTorchDispatchMode,
query: torch.Tensor,
key: torch.Tensor,
@@ -201,15 +201,13 @@ def templated_attention_proxy_torch_dispatch_mode(
) -> Tuple[torch.Tensor, torch.Tensor]:
assert mode is not None, "Mode should always be enabled for python fallback key"
if mode.enable_tracing:
- return trace_templated_attention(
- mode, query, key, value, score_mod, *other_buffers
- )
+ return trace_flex_attention(mode, query, key, value, score_mod, *other_buffers)
else:
- return templated_attention(query, key, value, score_mod, *other_buffers)
+ return flex_attention(query, key, value, score_mod, *other_buffers)
-@templated_attention.py_functionalize_impl
-def templated_attention_functionalize(
+@flex_attention.py_functionalize_impl
+def flex_attention_functionalize(
ctx: torch._subclasses.functional_tensor.BaseFunctionalizeAPI,
query: torch.Tensor,
key: torch.Tensor,
@@ -217,7 +215,7 @@ def templated_attention_functionalize(
score_mod: Callable,
*other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
- """Defines the functionalization rules for the templated_attention operator.
+ """Defines the functionalization rules for the flex_attention operator.
Write now we are unwrapping each tensor and then redispatching to the next, however we want to
guard against any mutations in the score_mod function, to the other_buffers since those
@@ -252,7 +250,7 @@ def templated_attention_functionalize(
if mutates:
raise UnsupportedAliasMutationException("Mutations detected in score_mod")
- out = templated_attention(
+ out = flex_attention(
query_unwrapped,
key_unwrapped,
value_unwrapped,
@@ -262,8 +260,8 @@ def templated_attention_functionalize(
return ctx.wrap_tensors(out) # type: ignore[return-value, arg-type]
-@templated_attention.py_impl(FakeTensorMode)
-def templated_attention_fake_tensor_mode(
+@flex_attention.py_impl(FakeTensorMode)
+def flex_attention_fake_tensor_mode(
mode: FakeTensorMode,
query: torch.Tensor,
key: torch.Tensor,
@@ -359,7 +357,7 @@ def create_fw_bw_graph(score_mod, index_values, other_buffers):
return score_mod, joint_graph
-class TemplatedAttentionAutogradOp(torch.autograd.Function):
+class FlexAttentionAutogradOp(torch.autograd.Function):
@staticmethod
def forward(
ctx, query, key, value, fw_graph, joint_graph, *other_buffers
@@ -371,9 +369,7 @@ class TemplatedAttentionAutogradOp(torch.autograd.Function):
ctx._fw_graph = fw_graph
ctx._joint_graph = joint_graph
with torch._C._AutoDispatchBelowAutograd():
- out, logsumexp = templated_attention(
- query, key, value, fw_graph, *other_buffers
- )
+ out, logsumexp = flex_attention(query, key, value, fw_graph, *other_buffers)
ctx.save_for_backward(query, key, value, out, logsumexp, *other_buffers)
return out, logsumexp
@@ -386,7 +382,7 @@ class TemplatedAttentionAutogradOp(torch.autograd.Function):
joint_graph = ctx._joint_graph
# We have asserted that other_buffers do not require grad in the forward
none_grads = [None] * (2 + len(other_buffers))
- grad_query, grad_key, grad_value = templated_attention_backward(
+ grad_query, grad_key, grad_value = flex_attention_backward(
query,
key,
value,
@@ -400,8 +396,8 @@ class TemplatedAttentionAutogradOp(torch.autograd.Function):
return grad_query, grad_key, grad_value, *none_grads
-@templated_attention.py_impl(DispatchKey.Autograd)
-def templated_attention_autograd(
+@flex_attention.py_impl(DispatchKey.Autograd)
+def flex_attention_autograd(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
@@ -416,7 +412,7 @@ def templated_attention_autograd(
fw_graph, bw_graph = create_fw_bw_graph(score_mod, example_vals, other_buffers)
else:
fw_graph, bw_graph = score_mod, None
- out, logsumexp = TemplatedAttentionAutogradOp.apply(
+ out, logsumexp = FlexAttentionAutogradOp.apply(
query, key, value, fw_graph, bw_graph, *other_buffers
)
return out, logsumexp
@@ -425,7 +421,7 @@ def templated_attention_autograd(
# ---------------------------- Backward HOP Implementation ----------------------------
-@templated_attention_backward.py_impl(DispatchKey.CompositeExplicitAutograd)
+@flex_attention_backward.py_impl(DispatchKey.CompositeExplicitAutograd)
def sdpa_dense_backward(
query: torch.Tensor,
key: torch.Tensor,
@@ -497,7 +493,7 @@ def sdpa_dense_backward(
return grad_query.contiguous(), grad_key.contiguous(), grad_value.contiguous()
-def trace_templated_attention_backward(
+def trace_flex_attention_backward(
proxy_mode: ProxyTorchDispatchMode,
query: torch.Tensor,
key: torch.Tensor,
@@ -510,7 +506,7 @@ def trace_templated_attention_backward(
*other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""We already have the forward graph and joint graph from the forward pass, so we create a proxy attach both graphs"""
- example_out = templated_attention_backward(
+ example_out = flex_attention_backward(
query,
key,
value,
@@ -544,18 +540,18 @@ def trace_templated_attention_backward(
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args)
out_proxy = proxy_mode.tracer.create_proxy(
"call_function",
- templated_attention_backward,
+ flex_attention_backward,
proxy_args,
{},
- name="templated_attention_backward",
+ name="flex_attention_backward",
)
return track_tensor_tree(
example_out, out_proxy, constant=None, tracer=proxy_mode.tracer
)
-@templated_attention_backward.py_impl(ProxyTorchDispatchMode)
-def templated_attention_backward_proxy_torch_dispatch_mode(
+@flex_attention_backward.py_impl(ProxyTorchDispatchMode)
+def flex_attention_backward_proxy_torch_dispatch_mode(
mode: ProxyTorchDispatchMode,
query: torch.Tensor,
key: torch.Tensor,
@@ -569,7 +565,7 @@ def templated_attention_backward_proxy_torch_dispatch_mode(
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
assert mode is not None, "Mode should always be enabled for python fallback key"
if mode.enable_tracing:
- return trace_templated_attention_backward(
+ return trace_flex_attention_backward(
mode,
query,
key,
@@ -582,7 +578,7 @@ def templated_attention_backward_proxy_torch_dispatch_mode(
*other_buffers,
)
else:
- return templated_attention_backward(
+ return flex_attention_backward(
query,
key,
value,
@@ -595,8 +591,8 @@ def templated_attention_backward_proxy_torch_dispatch_mode(
)
-@templated_attention_backward.py_functionalize_impl
-def templated_attention_backward_functionalize(
+@flex_attention_backward.py_functionalize_impl
+def flex_attention_backward_functionalize(
ctx: torch._subclasses.functional_tensor.BaseFunctionalizeAPI,
query: torch.Tensor,
key: torch.Tensor,
@@ -608,7 +604,7 @@ def templated_attention_backward_functionalize(
joint_graph: GraphModule,
*other_buffers: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
- """Defines the functionalization rules for the templated_attention operator.
+ """Defines the functionalization rules for the flex_attention operator.
Write now we are unwrapping each tensor and then redispatching to the next,
since we know that the forward score mod function is assured to be free of mutations
@@ -636,7 +632,7 @@ def templated_attention_backward_functionalize(
functional_fw_graph = ctx.functionalize(fw_graph)
functional_joint_graph = ctx.functionalize(joint_graph)
- grad_query, grad_key, grad_value = templated_attention_backward(
+ grad_query, grad_key, grad_value = flex_attention_backward(
query_unwrapped,
key_unwrapped,
value_unwrapped,
@@ -651,8 +647,8 @@ def templated_attention_backward_functionalize(
return ctx.wrap_tensors((grad_query, grad_key, grad_value)) # type: ignore[return-value,arg-type]
-@templated_attention_backward.py_impl(FakeTensorMode)
-def templated_attention_backward_fake_tensor_mode(
+@flex_attention_backward.py_impl(FakeTensorMode)
+def flex_attention_backward_fake_tensor_mode(
mode: FakeTensorMode,
query: torch.Tensor,
key: torch.Tensor,
@@ -671,6 +667,6 @@ def templated_attention_backward_fake_tensor_mode(
return grad_query, grad_key, grad_value
-templated_attention_backward.py_impl(DispatchKey.Autograd)(
- autograd_not_implemented(templated_attention_backward, deferred_error=True)
+flex_attention_backward.py_impl(DispatchKey.Autograd)(
+ autograd_not_implemented(flex_attention_backward, deferred_error=True)
)
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index c4915bcec4..537b66185d 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -3587,7 +3587,7 @@ class TritonTemplateBuffer(TemplateBuffer):
self.mutated_inputs = mutated_inputs
if mutated_inputs is not None:
# Ensure that the mutated inputs are only allowed for certain nodes
- allowed_set = {"templated_attention"}
+ allowed_set = {"flex_attention"}
current_node = str(V.graph.current_node)
assert (
current_node in allowed_set
diff --git a/torch/_inductor/kernel/templated_attention.py b/torch/_inductor/kernel/flex_attention.py
similarity index 96%
rename from torch/_inductor/kernel/templated_attention.py
rename to torch/_inductor/kernel/flex_attention.py
index b1c2571267..e31dfe0977 100644
--- a/torch/_inductor/kernel/templated_attention.py
+++ b/torch/_inductor/kernel/flex_attention.py
@@ -1,4 +1,4 @@
-""" Triton Implementation of the Templated SDPA Kernel"""
+""" Triton Implementation of the flex_attention Kernel"""
import logging
from typing import Any, List
@@ -196,8 +196,8 @@ def _get_default_config(query):
# TODO: We probably also need a layout constraint?
-@register_lowering(torch.ops.higher_order.templated_attention, type_promotion_kind=None)
-def templated_attention(*args, **kwargs):
+@register_lowering(torch.ops.higher_order.flex_attention, type_promotion_kind=None)
+def flex_attention(*args, **kwargs):
from torch._prims_common import make_contiguous_strides_for
from ..ir import (
ComputedBuffer,
@@ -244,7 +244,7 @@ def templated_attention(*args, **kwargs):
# There are two classes of placeholder inpts that we need
# to handle differently. For the first n_scalar_inps inputs
# we expect that these placeholders were generated by the make_fx call
- # in the templated Attention HOP. So we need to create a new placeholder
+ # in the flex Attention HOP. So we need to create a new placeholder
# TensorBox for each of these inputs. For the rest of the inputs we
# expect that these are lifted inputs that fill up the '*other_buffers'
# tuple and already have corresponding TensorBoxes passed in as args.
@@ -266,7 +266,7 @@ def templated_attention(*args, **kwargs):
output_buffer = env[node.args[0]]
assert isinstance(output_buffer.data, StorageBox), (
- "The output node for the templated attention subgraph must be a StorageBox, but got: ",
+ "The output node for the flex attention subgraph must be a StorageBox, but got: ",
type(output_buffer),
)
# Create the ComputedBuffer directly that will be inlined into the modification block
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index ab45e1782b..99fe0a9f03 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -5661,123 +5661,6 @@ def while_loop(cond_fn, body_fn, carried_inputs, additional_inputs):
return list(map(TensorBox.create, result))
-@register_lowering(torch.ops.higher_order.templated_attention)
-def templated_attention(*args, **kwargs):
- from torch._prims_common import make_contiguous_strides_for
- from .ir import (
- ComputedBuffer,
- FixedLayout,
- FlexibleLayout,
- InputBuffer,
- StorageBox,
- TensorBox,
- )
-
- query, key, value, subgraph = args
-
- def create_placeholder(name: str, dtype: torch.dtype) -> InputBuffer:
- return TensorBox.create(
- InputBuffer(
- name,
- FixedLayout(
- query.get_device(),
- dtype,
- [
- 1,
- ],
- [
- 1,
- ],
- ),
- )
- )
-
- scalar_inps = ["score", "b", "h", "m", "n"]
- env = {}
- cnt = 0
- placeholder_inps = [
- create_placeholder(name, dtype)
- for name, dtype in [
- ("score", query.get_dtype()),
- ("b", torch.int64),
- ("h", torch.int64),
- ("m", torch.int64),
- ("n", torch.int64),
- ]
- ]
- for node in subgraph.graph_module.graph.nodes:
- # There are two classes of placeholder inpts that we need
- # to handle differently. For the first n_scalar_inps inputs
- # we expect that these placeholders were generated by the make_fx call
- # in the templated Attention HOP. So we need to create a new placeholder
- # TensorBox for each of these inputs. For the rest of the inputs we
- # expect that these are lifted inputs that fill up the '*other_buffers'
- # tuple and already have corresponding TensorBoxes passed in as args.
- if node.op == "placeholder":
- is_lifted_input = cnt >= len(scalar_inps)
- env[node] = args[cnt - 1] if is_lifted_input else placeholder_inps[cnt]
- cnt += 1
- elif node.op == "call_function":
- # For call_function we use the defulat lowerings and pass in the
- # already created TensorBoxes as args
- from torch.utils._pytree import tree_map
-
- env[node] = lowerings[node.target](
- *tree_map(lambda x: env[x] if x in env else x, node.args)
- )
- elif node.op == "output":
- # For the output node we need to create a ComputedBuffer
- # which represents the actual score modification
-
- output_buffer = env[node.args[0]]
- assert isinstance(output_buffer.data, StorageBox), (
- "The output node for the templated attention subgraph must be a StorageBox, but got: ",
- type(output_buffer),
- )
- # Create the ComputedBuffere directly that will be inlined into the modfication block
- subgraph_buffer = ComputedBuffer(
- name=None,
- layout=FlexibleLayout(
- device=output_buffer.data.get_device(),
- dtype=output_buffer.data.get_dtype(),
- size=output_buffer.data.get_size(),
- ),
- data=output_buffer.data.data, # type: ignore[arg-type]
- )
- from .kernel.templated_attention import sdpa_template
-
- layout = FixedLayout(
- output_buffer.get_device(),
- query.get_dtype(),
- query.get_size(),
- make_contiguous_strides_for(query.get_size()),
- )
- choices: List[Any] = []
- from .select_algorithm import autotune_select_algorithm
-
- for BLOCK_M, BLOCK_N, num_warps, num_stages in [
- (128, 64, 4, 3),
- (128, 128, 4, 3),
- (128, 128, 8, 2),
- (64, 128, 4, 3),
- ]:
- sdpa_template.maybe_append_choice(
- choices=choices,
- input_nodes=(query, key, value),
- layout=layout,
- subgraphs=subgraph_buffer,
- num_stages=num_stages,
- num_warps=num_warps,
- BLOCK_M=BLOCK_M,
- BLOCK_N=BLOCK_N,
- BLOCK_DMODEL=query.get_size()[-1],
- )
- return autotune_select_algorithm(
- "sdpa", choices, [query, key, value], layout
- )
- raise ValueError("TemplatedAttention was passed a subgraph with no output node!")
-
-
@register_lowering(associative_scan_op, type_promotion_kind=None)
def associative_scan(combine_fn: ir.Subgraph, input, dim: int):
from .subgraph_lowering import InputDescriptor, lower_pointwise_subgraph
diff --git a/torch/nn/attention/_templated_attention.py b/torch/nn/attention/_flex_attention.py
similarity index 88%
rename from torch/nn/attention/_templated_attention.py
rename to torch/nn/attention/_flex_attention.py
index 872c6fc29b..ee131dfac8 100644
--- a/torch/nn/attention/_templated_attention.py
+++ b/torch/nn/attention/_flex_attention.py
@@ -1,11 +1,9 @@
-"""This module implements the user facing API for templated attention in PyTorch."""
+"""This module implements the user facing API for flex_attention in PyTorch."""
import functools
from typing import Callable
import torch
-from torch._higher_order_ops.templated_attention import (
- templated_attention as templated_attention_hop,
-)
+from torch._higher_order_ops.flex_attention import flex_attention as flex_attention_hop
from torch._higher_order_ops.utils import _set_compilation_env
from torch.fx.experimental.proxy_tensor import (
_temp_remove_pre_dispatch_torch_function_mode,
@@ -30,7 +28,7 @@ _score_mod_signature = Callable[
]
-def _templated_attention(
+def _flex_attention(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
@@ -78,14 +76,14 @@ def _templated_attention(
- :math:`Ev: \text{Embedding dimension of the value}`
.. warning::
- `torch.nn.attention.templated_attention` is a prototype feature in PyTorch. It doesn't support training currently.
+ `torch.nn.attention.flex_attention` is a prototype feature in PyTorch. It doesn't support training currently.
Please look forward to a more stable implementation in a future version of PyTorch.
Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype
"""
if torch.compiler.is_dynamo_compiling():
- out, _ = templated_attention_hop(query, key, value, score_mod)
+ out, _ = flex_attention_hop(query, key, value, score_mod)
return out
# Some basic input validation
@@ -97,18 +95,18 @@ def _templated_attention(
)
if not torch._dynamo.is_dynamo_supported():
- raise RuntimeError("templated attention requires dynamo support.")
+ raise RuntimeError("flex_attention requires dynamo support.")
with _set_compilation_env():
with torch._dynamo.utils.disable_cache_limit():
with _temp_remove_pre_dispatch_torch_function_mode():
out, _ = torch.compile(
- templated_attention_hop, backend="eager", fullgraph=True
+ flex_attention_hop, backend="eager", fullgraph=True
)(query, key, value, score_mod)
return out
-"""Some common used score_mod functions for templated attention in PyTorch."""
+"""Some common used score_mod functions for flex_attention in PyTorch."""
def _identity(
diff --git a/torch/testing/_internal/hop_db.py b/torch/testing/_internal/hop_db.py
index 05d9722871..67ea2fa826 100644
--- a/torch/testing/_internal/hop_db.py
+++ b/torch/testing/_internal/hop_db.py
@@ -11,7 +11,7 @@ from torch.testing._internal.opinfo.core import (
)
from torch.testing._internal.common_dtype import all_types_and, custom_types
from torch.testing._internal.opinfo.core import DecorateInfo
-from torch.nn.attention._templated_attention import _templated_attention
+from torch.nn.attention._flex_attention import _flex_attention
def sample_inputs_map(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = functools.partial(
@@ -108,7 +108,7 @@ def simple_auto_functionalize(x, z):
return torch.ops.testlib.mutating_custom_op(x, z)
-def sample_inputs_templated_attention(opinfo, device, dtype, requires_grad, **kwargs):
+def sample_inputs_flex_attention(opinfo, device, dtype, requires_grad, **kwargs):
make_arg = functools.partial(
make_tensor, device=device, dtype=dtype, requires_grad=requires_grad
)
@@ -219,10 +219,10 @@ hop_db = [
supports_autograd=False,
),
OpInfo(
- name="templated_attention",
+ name="flex_attention",
variant_test_name="simple",
- op=_templated_attention,
- sample_inputs_func=sample_inputs_templated_attention,
+ op=_flex_attention,
+ sample_inputs_func=sample_inputs_flex_attention,
dtypes=custom_types(torch.float16, torch.float32),
supports_out=False,
check_batched_grad=False,
@@ -237,10 +237,10 @@ hop_db = [
)
),
OpInfo(
- name="templated_attention_backward",
+ name="flex_attention_backward",
variant_test_name="simple",
- op=_templated_attention,
- sample_inputs_func=sample_inputs_templated_attention,
+ op=_flex_attention,
+ sample_inputs_func=sample_inputs_flex_attention,
dtypes=custom_types(torch.float16, torch.float32),
supports_out=False,
check_batched_grad=False,
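For reference, a minimal sketch of how the renamed user-facing API is meant to be called. The `rel_bias` score_mod below is made up for illustration; the argument order `(score, batch, head, q_idx, kv_idx)` follows the scalar example values used when tracing the HOP, and a 4-D `(batch, heads, seq_len, head_dim)` layout is assumed:

```python
import torch
from torch.nn.attention._flex_attention import _flex_attention

# hypothetical score_mod: every argument arrives as a scalar tensor
def rel_bias(score, b, h, q_idx, kv_idx):
    return score + (q_idx - kv_idx)  # simple relative-position bias

q, k, v = (torch.randn(2, 4, 128, 64) for _ in range(3))
out = _flex_attention(q, k, v, rel_bias)  # prototype API; no training support yet
```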
|
2.41.0
|
ee5c141631616dd208eeb86262de982fe9f6bc8
|
Wed, 1 May 2024 01:35:30 +0000
|
[PATCH 0865/1000] [PT2][Optimus] Read the patterns from the config instead of hard-coded passes (#125136)
|
Summary: Due to compatibility concerns, we previously hard-coded which passes perform the pattern optimizations. Since those changes have now been in production packages for a while, we revisit the approach and instead read from the config to decide whether a specific pattern optimization runs, which makes adding follow-up patterns easier. Differential Revision: D56659934 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125136 Approved by: https://github.com/jackiexu1992
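To illustrate the new config-driven behavior, here is a sketch (mirroring the test patches below) that enables only the split/cat pattern passes by listing them in `pre_grad_fusion_options`; unlisted passes, and everything in the empty `post_grad_fusion_options`, are skipped:

```python
import torch

def split_with_cat(x):
    fs = torch.split(x, [4, 4, 24], dim=-1)
    return torch.cat(fs[0:2], dim=-1)

@torch._inductor.config.patch(
    pre_grad_fusion_options={
        "normalization_pass": {},
        "merge_splits_pass": {},
        "split_cat_pass": {},
        "unbind_stack_pass": {},
    },
    post_grad_fusion_options={},
)
def run(x):
    return torch.compile(split_with_cat)(x)

# run(torch.randn(8, 32))
```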
|
diff --git a/test/inductor/test_aot_inductor.py b/test/inductor/test_aot_inductor.py
index ce4f9ad193..93c74aa2a6 100644
--- a/test/inductor/test_aot_inductor.py
+++ b/test/inductor/test_aot_inductor.py
@@ -326,6 +326,19 @@ class AOTInductorTestsTemplate:
with config.patch({"freezing": True}):
self.check_model(Model(self.device), example_inputs)
+ @torch._inductor.config.patch(
+ pre_grad_fusion_options={
+ "normalization_pass": {},
+ "remove_split_with_size_one_pass": {},
+ "merge_getitem_cat_pass": {},
+ "merge_stack_tahn_unbind_pass": {},
+ "merge_splits_pass": {},
+ "mutate_cat_pass": {},
+ "split_cat_pass": {},
+ "unbind_stack_pass": {},
+ },
+ post_grad_fusion_options={},
+ )
def test_simple_split(self):
class Model(torch.nn.Module):
def __init__(self):
diff --git a/test/inductor/test_decompose_mem_bound_mm.py b/test/inductor/test_decompose_mem_bound_mm.py
index c26233785e..6bfb5a9228 100644
--- a/test/inductor/test_decompose_mem_bound_mm.py
+++ b/test/inductor/test_decompose_mem_bound_mm.py
@@ -50,7 +50,9 @@ class MyModule3(torch.nn.Module):
@requires_cuda
@torch._inductor.config.patch(
- decompose_mem_bound_mm=True,
+ post_grad_fusion_options={
+ "decompose_mm_pass": {},
+ }
)
@instantiate_parametrized_tests
class TestDecomposeMemMM(TestCase):
diff --git a/test/inductor/test_pattern_matcher.py b/test/inductor/test_pattern_matcher.py
index c54c07c3d0..fc1a9a5ec5 100644
--- a/test/inductor/test_pattern_matcher.py
+++ b/test/inductor/test_pattern_matcher.py
@@ -520,7 +520,7 @@ class TestPatternMatcher(TestCase):
torch.randn(16, 16, device="cuda"),
torch.randn(16, 16, device="cuda"),
]
- self.common(fn, args, 2, 5)
+ self.common(fn, args, 1, 4)
def test_cat_addmm(self):
def fn(a, b, c):
@@ -538,7 +538,7 @@ class TestPatternMatcher(TestCase):
torch.randn(16, 16, device="cuda"),
torch.randn(16, 16, device="cuda"),
]
- self.common(fn, args, 2, 5)
+ self.common(fn, args, 1, 4)
def test_cat_slice_cat_cuda(self):
def fn(a, b):
@@ -839,7 +839,9 @@ class TestPatternMatcher(TestCase):
def test_match_with_mutation(self):
counter = 0
- test_pass = PatternMatcherPass(prevent_match_across_mutations=True)
+ test_pass = PatternMatcherPass(
+ prevent_match_across_mutations=True, pass_name="test"
+ )
@register_graph_pattern(
CallFunction(
@@ -892,7 +894,14 @@ class TestPatternMatcher(TestCase):
]
with unittest.mock.patch(
- "torch._inductor.fx_passes.pre_grad.pattern_matcher_passes", [test_pass]
+ "torch._inductor.fx_passes.pre_grad.config.pre_grad_fusion_options",
+ {"test": {}},
+ ), unittest.mock.patch(
+ "torch._inductor.fx_passes.pre_grad.PRE_GRAD_FUSIONS",
+ [],
+ ), unittest.mock.patch(
+ "torch._inductor.fx_passes.pre_grad.PRE_GRAD_PATTERNS",
+ {"test": test_pass},
):
for fn in (fn0, fn1, fn2, fn3, fn4, fn5):
counter = 0
diff --git a/test/inductor/test_split_cat_fx_passes.py b/test/inductor/test_split_cat_fx_passes.py
index 6457547784..b0cc28205d 100644
--- a/test/inductor/test_split_cat_fx_passes.py
+++ b/test/inductor/test_split_cat_fx_passes.py
@@ -13,7 +13,19 @@ requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
def patch(f):
- f = torch._inductor.config.patch(split_cat_fx_passes=True)(f)
+ f = torch._inductor.config.patch(
+ pre_grad_fusion_options={
+ "normalization_pass": {},
+ "remove_split_with_size_one_pass": {},
+ "merge_getitem_cat_pass": {},
+ "merge_stack_tahn_unbind_pass": {},
+ "merge_splits_pass": {},
+ "mutate_cat_pass": {},
+ "split_cat_pass": {},
+ "unbind_stack_pass": {},
+ },
+ post_grad_fusion_options={},
+ )(f)
return f
@@ -605,7 +617,10 @@ class TestSplitCatFxPasses(TestCase):
)
counters.clear()
- @torch._inductor.config.patch(split_cat_fx_passes=False)
+ @torch._inductor.config.patch(
+ pre_grad_fusion_options={},
+ post_grad_fusion_options={},
+ )
def test_config_flag_is_respected(self):
def split_with_cat(x):
fs = torch.split(x, [4, 4, 24], dim=-1)
diff --git a/torch/_inductor/fx_passes/decompose_mem_bound_mm.py b/torch/_inductor/fx_passes/decompose_mem_bound_mm.py
index f63bf552fa..793d29383f 100644
--- a/torch/_inductor/fx_passes/decompose_mem_bound_mm.py
+++ b/torch/_inductor/fx_passes/decompose_mem_bound_mm.py
@@ -8,7 +8,7 @@ from torch._dynamo.utils import counters
from .. import config
from ..pattern_matcher import Arg, CallFunction, Match, register_graph_pattern
-from .split_cat import construct_pattern_matcher_pass, get_config_flag
+from .split_cat import construct_pattern_matcher_pass
aten = torch.ops.aten
log = logging.getLogger(__name__)
@@ -94,7 +94,6 @@ def print_decompose_pattern(match: Match, inputs: List[torch.fx.Node]):
@register_graph_pattern(
CallFunction(aten.bmm, Arg(), Arg()),
pass_dict=construct_pattern_matcher_pass("decompose_mm_pass"),
- extra_check=get_config_flag("decompose_mm_pass", "decompose_mem_bound_mm"),
)
def decompose_bmm(match: Match, mat1: torch.fx.Node, mat2: torch.fx.Node):
def repl(mat1, mat2):
@@ -111,7 +110,6 @@ def decompose_bmm(match: Match, mat1: torch.fx.Node, mat2: torch.fx.Node):
@register_graph_pattern(
CallFunction(aten.addmm, Arg(), Arg(), Arg()),
pass_dict=construct_pattern_matcher_pass("decompose_mm_pass"),
- extra_check=get_config_flag("decompose_mm_pass", "decompose_mem_bound_mm"),
)
def decompose_addmm(
match: Match,
@@ -133,7 +131,6 @@ def decompose_addmm(
@register_graph_pattern(
CallFunction(aten.mm, Arg(), Arg()),
pass_dict=construct_pattern_matcher_pass("decompose_mm_pass"),
- extra_check=get_config_flag("decompose_mm_pass", "decompose_mem_bound_mm"),
)
def decompose_mm(
match: Match,
diff --git a/torch/_inductor/fx_passes/group_batch_fusion.py b/torch/_inductor/fx_passes/group_batch_fusion.py
index e58f288ba3..90c59a06ba 100644
--- a/torch/_inductor/fx_passes/group_batch_fusion.py
+++ b/torch/_inductor/fx_passes/group_batch_fusion.py
@@ -302,7 +302,6 @@ class GroupLinearFusion(GroupFusion):
if all(bias is None for bias in group_biases):
group_biases = None # type: ignore[assignment]
- group_biases: Optional[List[Any]]
with graph.inserting_before(subset[0]):
fused_mm = graph.call_function(
@@ -649,10 +648,8 @@ class BatchLayernormFusion(BatchFusion):
if all(bias is None for bias in group_biases):
group_biases = None # type: ignore[assignment]
- group_biases: Optional[List[Any]]
if all(weight is None for weight in group_weights):
group_weights = None # type: ignore[assignment]
- group_weights: Optional[List[Any]]
assert all(
eps == group_epss[0] for eps in group_epss
), "all epsilon values must be equal"
diff --git a/torch/_inductor/fx_passes/post_grad.py b/torch/_inductor/fx_passes/post_grad.py
index 89a3978845..d85c3440c0 100644
--- a/torch/_inductor/fx_passes/post_grad.py
+++ b/torch/_inductor/fx_passes/post_grad.py
@@ -44,7 +44,7 @@ from ..pattern_matcher import (
from ..utils import decode_device, is_pointwise_use
from ..virtualized import V
from .ddp_fusion import fuse_ddp_communication
-from .group_batch_fusion import group_batch_fusion_passes
+from .group_batch_fusion import group_batch_fusion_passes, POST_GRAD_FUSIONS
from .pre_grad import is_same_dict, save_inductor_dict
from .reinplace import reinplace_inplaceable_ops
from .split_cat import POST_GRAD_PATTERNS
@@ -54,7 +54,6 @@ log = logging.getLogger(__name__)
aten = torch.ops.aten
prims = torch.ops.prims
-pattern_matcher_passes = POST_GRAD_PATTERNS.values()
# First pass_patterns[0] are applied, then [1], then [2]
pass_patterns = [
PatternMatcherPass(),
@@ -89,7 +88,11 @@ def post_grad_passes(gm: torch.fx.GraphModule, is_inference: bool):
remove_noop_ops(gm.graph)
for patterns in pass_patterns:
patterns.apply(gm.graph) # type: ignore[arg-type]
- for pattern_matcher_pass in pattern_matcher_passes:
+ for pass_name in config.post_grad_fusion_options:
+ # skip all patterns for group batch fusions
+ if pass_name in POST_GRAD_FUSIONS:
+ continue
+ pattern_matcher_pass = POST_GRAD_PATTERNS[pass_name]
inductor_before_change = save_inductor_dict(
[pattern_matcher_pass.pass_name]
)
diff --git a/torch/_inductor/fx_passes/pre_grad.py b/torch/_inductor/fx_passes/pre_grad.py
index dadede9f0f..9af2440eb8 100644
--- a/torch/_inductor/fx_passes/pre_grad.py
+++ b/torch/_inductor/fx_passes/pre_grad.py
@@ -24,7 +24,7 @@ from ..pattern_matcher import (
stable_topological_sort,
)
from ..utils import is_cpu_device, pass_execution_and_save
-from .group_batch_fusion import group_batch_fusion_passes
+from .group_batch_fusion import group_batch_fusion_passes, PRE_GRAD_FUSIONS
from .misc_patterns import numpy_compat_normalization
from .split_cat import PRE_GRAD_PATTERNS
@@ -85,12 +85,6 @@ def remove_split_ops(graph, shape_prop):
return None
-# split_cat related fusions
-pattern_matcher_passes = list(PRE_GRAD_PATTERNS.values())
-# non-split_cat related fusions
-# TODO: move them to the fusions dict too.
-pattern_matcher_passes.append(efficient_conv_bn_eval_pass)
-
pattern_matcher_passes_aten: List[PatternMatcherPass] = [
remove_split_with_size_one_pass_aten,
merge_getitem_cat_pass_aten,
@@ -134,6 +128,7 @@ def pre_grad_passes(gm: torch.fx.GraphModule, example_inputs=None):
def shape_prop(mod) -> None:
ShapeProp(
gm=mod,
+ # pyre-fixme[16]: Module `torch._dynamo.utils` has no attribute `detect_fake_mode`
fake_mode=detect_fake_mode(example_inputs),
).propagate(*example_inputs)
@@ -202,10 +197,13 @@ def pre_grad_passes(gm: torch.fx.GraphModule, example_inputs=None):
if example_inputs is not None:
gm = fuse_fx(gm, example_inputs)
numpy_compat_normalization(gm.graph)
-
optimus_scuba_log["before_recompile_pre_grad"] = upload_graph(gm.graph)
group_batch_fusion_passes(gm.graph, pre_grad=True)
- for pattern_matcher_pass in pattern_matcher_passes:
+ for pass_name in config.pre_grad_fusion_options:
+ # skip all patterns for group batch fusions
+ if pass_name in PRE_GRAD_FUSIONS:
+ continue
+ pattern_matcher_pass = PRE_GRAD_PATTERNS[pass_name]
inductor_before_change = save_inductor_dict(
[pattern_matcher_pass.pass_name]
)
@@ -214,6 +212,8 @@ def pre_grad_passes(gm: torch.fx.GraphModule, example_inputs=None):
optimus_scuba_log[
f"{pattern_matcher_pass.pass_name}_pre_grad"
] = upload_graph(gm.graph)
+ # TODO: move efficient_conv_bn_eval_pass to the fusions dict too.
+ efficient_conv_bn_eval_pass.apply(gm.graph) # type: ignore[arg-type]
if config.pre_grad_custom_pass is not None:
config.pre_grad_custom_pass(gm.graph)
@@ -249,7 +249,7 @@ def pre_grad_passes(gm: torch.fx.GraphModule, example_inputs=None):
def fuse_fx(gm: torch.fx.GraphModule, example_inputs) -> torch.fx.GraphModule:
is_cpu = is_cpu_device(example_inputs)
-
+ # pyre-fixme[16]: Module `torch._dynamo.utils` has no attribute `detect_fake_mode`
fake_mode = detect_fake_mode(example_inputs)
gm = sink_cat_after_pointwise(gm)
diff --git a/torch/_inductor/fx_passes/split_cat.py b/torch/_inductor/fx_passes/split_cat.py
index 79b36fb54c..0fc217abbc 100644
--- a/torch/_inductor/fx_passes/split_cat.py
+++ b/torch/_inductor/fx_passes/split_cat.py
@@ -7,7 +7,6 @@ from typing_extensions import TypeAlias
import torch
from torch._dynamo.utils import counters
-from .. import config
from ..pattern_matcher import (
Arg,
@@ -44,8 +43,7 @@ _Range: TypeAlias = Tuple[int, int]
PRE_GRAD_PATTERNS: Dict[str, PatternMatcherPass] = dict()
POST_GRAD_PATTERNS: Dict[str, PatternMatcherPass] = dict()
-# TODO: read the pass_names from the config after the frontend change
-pass_names = [
+pre_grad_pass_names = [
"normalization_pass",
"remove_split_with_size_one_pass",
"merge_getitem_cat_pass",
@@ -54,25 +52,31 @@ pass_names = [
"mutate_cat_pass",
"split_cat_pass",
"unbind_stack_pass",
- # must be the last pass
+]
+
+post_grad_pass_names = [
"decompose_mm_pass",
]
-for pass_name in pass_names:
+for pass_name in pre_grad_pass_names:
# exclude all passes from the group batch fusion
# they do not use pattern matcher
- if pass_name in PRE_GRAD_FUSIONS or pass_name in POST_GRAD_FUSIONS:
+ if pass_name in PRE_GRAD_FUSIONS:
continue
- if pass_name != "decompose_mm_pass":
- PRE_GRAD_PATTERNS[pass_name] = PatternMatcherPass(
- prevent_match_across_mutations=True,
- pass_name=pass_name,
- )
- else:
- POST_GRAD_PATTERNS[pass_name] = PatternMatcherPass(
- prevent_match_across_mutations=True,
- pass_name=pass_name,
- )
+ PRE_GRAD_PATTERNS[pass_name] = PatternMatcherPass(
+ prevent_match_across_mutations=True,
+ pass_name=pass_name,
+ )
+
+for pass_name in post_grad_pass_names:
+ # exclude all passes from the group batch fusion
+ # they do not use pattern matcher
+ if pass_name in POST_GRAD_FUSIONS:
+ continue
+ POST_GRAD_PATTERNS[pass_name] = PatternMatcherPass(
+ prevent_match_across_mutations=True,
+ pass_name=pass_name,
+ )
def construct_pattern_matcher_pass(pass_name: str) -> PatternMatcherPass:
@@ -81,30 +85,8 @@ def construct_pattern_matcher_pass(pass_name: str) -> PatternMatcherPass:
"""
if pass_name in PRE_GRAD_PATTERNS:
return PRE_GRAD_PATTERNS[pass_name]
- elif pass_name in POST_GRAD_PATTERNS:
- return POST_GRAD_PATTERNS[pass_name]
else:
- # pattern that does not in the config, will
- # not be conduted in the optimization
- return PatternMatcherPass(
- prevent_match_across_mutations=True,
- pass_name=pass_name,
- )
-
-
-def get_config_flag(pass_name: str, flag="split_cat_fx_passes"):
- def flag_check(match):
- # TODO: remove the flag config check after we have the front end change
- # currently, pre_grad_fusion_options and post_grad_fusion_options are only have batch fusion
- # options controlled by the batch_fusion flag, after we extend it to indluce other fusions,
- # we can only check if the pass_name is in the config
- return (
- getattr(config, flag)
- or pass_name in config.pre_grad_fusion_options
- or pass_name in config.post_grad_fusion_options
- )
-
- return flag_check
+ return POST_GRAD_PATTERNS[pass_name]
def _get_split_args_default(split_node):
@@ -121,7 +103,7 @@ def _get_split_args_default(split_node):
)
-def _get_dim(node: Any) -> int:
+def _get_dim(node: Any):
assert isinstance(node, torch.fx.Node)
if "dim" in node.kwargs:
assert isinstance(node.kwargs["dim"], int)
@@ -215,12 +197,10 @@ def normalize_split_base(
@register_graph_pattern(
CallFunctionVarArgs(torch.split, users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("normalization_pass"),
- extra_check=get_config_flag("normalization_pass"),
)
@register_graph_pattern(
CallMethodVarArgs("split", users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("normalization_pass"),
- extra_check=get_config_flag("normalization_pass"),
)
def normalize_split_default(match: Match, *args, **kwargs):
return normalize_split_base(match, _get_split_args_default)
@@ -229,12 +209,10 @@ def normalize_split_default(match: Match, *args, **kwargs):
@register_graph_pattern(
CallFunctionVarArgs(torch.split, users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("remove_split_with_size_one_pass"),
- extra_check=get_config_flag("remove_split_with_size_one_pass"),
)
@register_graph_pattern(
CallMethodVarArgs("split", users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("remove_split_with_size_one_pass"),
- extra_check=get_config_flag("remove_split_with_size_one_pass"),
)
def remove_split_with_size_one(match: Match, *args, **kwargs):
graph = match.graph
@@ -269,12 +247,10 @@ def remove_split_with_size_one(match: Match, *args, **kwargs):
@register_graph_pattern(
CallFunctionVarArgs(torch.unbind, users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("normalization_pass"),
- extra_check=get_config_flag("normalization_pass"),
)
@register_graph_pattern(
CallMethodVarArgs("unbind", users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("normalization_pass"),
- extra_check=get_config_flag("normalization_pass"),
)
def normalize_unbind_default(match: Match, *args, **kwargs):
node = match.nodes[0]
@@ -311,7 +287,6 @@ def normalize_unbind_default(match: Match, *args, **kwargs):
@register_graph_pattern(
CallFunctionVarArgs(torch.cat, users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("normalization_pass"),
- extra_check=get_config_flag("normalization_pass"),
)
def normalize_cat_default(match: Match, *args, **kwargs):
from torch.fx.experimental.symbolic_shapes import guard_size_oblivious
@@ -373,7 +348,6 @@ def normalize_cat_default(match: Match, *args, **kwargs):
@register_graph_pattern(
CallFunctionVarArgs(torch.stack, users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("normalization_pass"),
- extra_check=get_config_flag("normalization_pass"),
)
def normalize_stack_default(match: Match, *args, **kwargs):
node = match.nodes[0]
@@ -419,7 +393,6 @@ def find_next_users(split_node: torch.fx.Node) -> List[torch.fx.Node]:
@register_graph_pattern(
CallMethodVarArgs("squeeze", users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("normalization_pass"),
- extra_check=get_config_flag("normalization_pass"),
)
def normalize_squeeze_default(match: Match, *args, **kwargs):
squeeze_node = match.nodes[0]
@@ -502,7 +475,6 @@ class TorchSplit(CallFunction):
KeywordArg("next_split_sections"),
),
pass_dict=construct_pattern_matcher_pass("merge_splits_pass"),
- extra_check=get_config_flag("merge_splits_pass"),
)
def merge_splits(
match: Match,
@@ -1020,7 +992,7 @@ class UnbindCatRemover(SplitCatSimplifier):
def get_transform_params(
self,
- unbind_node: torch.fx.Node,
+ split_node: torch.fx.Node,
next_users: List[torch.fx.Node],
user_inputs_list: List[List[Union[torch.fx.Node, _Range]]],
) -> Optional[List[List[_TransformParam]]]:
@@ -1045,7 +1017,7 @@ class UnbindCatRemover(SplitCatSimplifier):
"""
- split_dim = _get_dim(unbind_node)
+ split_dim = _get_dim(split_node)
transform_params_list: List[List[_TransformParam]] = []
for user_node, user_inputs in zip(next_users, user_inputs_list):
cat_dim = get_arg_value(user_node, 1, "dim") or 0
@@ -1108,7 +1080,6 @@ class GetItem(CallFunction):
),
),
pass_dict=construct_pattern_matcher_pass("split_cat_pass"),
- extra_check=get_config_flag("split_cat_pass"),
)
@register_graph_pattern(
RepeatedExpr(
@@ -1126,7 +1097,6 @@ class GetItem(CallFunction):
)
),
pass_dict=construct_pattern_matcher_pass("split_cat_pass"),
- extra_check=get_config_flag("split_cat_pass"),
)
def merge_split_squeeze(
match: Match, split_input: torch.fx.Node, split_sizes: List[int], dim: int
@@ -1180,21 +1150,18 @@ getitem_unbind = ListOf(
@register_graph_pattern(
CallFunction([torch.stack, torch.cat], getitem_unbind, Ignored(), _users=MULTIPLE),
pass_dict=construct_pattern_matcher_pass("unbind_stack_pass"),
- extra_check=get_config_flag("unbind_stack_pass"),
)
@register_graph_pattern(
CallFunction(
[torch.stack, torch.cat], getitem_unbind, dim=Ignored(), _users=MULTIPLE
),
pass_dict=construct_pattern_matcher_pass("unbind_stack_pass"),
- extra_check=get_config_flag("unbind_stack_pass"),
)
@register_graph_pattern(
CallFunction(
[torch.stack, torch.cat], tensors=getitem_unbind, dim=Ignored(), _users=MULTIPLE
),
pass_dict=construct_pattern_matcher_pass("unbind_stack_pass"),
- extra_check=get_config_flag("unbind_stack_pass"),
)
def merge_unbind_stack(match: Match, unbind_input: torch.fx.Node, dim: int):
unbind_node = next(node for node in match.nodes if node.target == torch.unbind)
@@ -1223,7 +1190,6 @@ getitem_split = ListOf(
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("split_cat_pass"),
- extra_check=get_config_flag("split_cat_pass"),
)
@register_graph_pattern(
CallFunction(
@@ -1233,7 +1199,6 @@ getitem_split = ListOf(
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("split_cat_pass"),
- extra_check=get_config_flag("split_cat_pass"),
)
@register_graph_pattern(
CallFunction(
@@ -1243,7 +1208,6 @@ getitem_split = ListOf(
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("split_cat_pass"),
- extra_check=get_config_flag("split_cat_pass"),
)
def simplify_split_cat(match: Match, split_sections: List[int], dim: int):
if not isinstance(split_sections, (list, tuple)): # Unnormalized split
@@ -1328,7 +1292,6 @@ def calculate_fused_tensor_size(split_node: torch.fx.Node, indices: List[int]) -
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("merge_getitem_cat_pass"),
- extra_check=get_config_flag("merge_getitem_cat_pass"),
)
def merge_getitem_cat(match: Match, split_sections: List[int], dim: int):
if not isinstance(split_sections, (list, tuple)): # Unnormalized split
@@ -1436,7 +1399,6 @@ def merge_getitem_cat(match: Match, split_sections: List[int], dim: int):
_users=MULTIPLE,
),
pass_dict=construct_pattern_matcher_pass("mutate_cat_pass"),
- extra_check=get_config_flag("mutate_cat_pass"),
)
def mutate_cat_node(match: Match, split_sections: List[int], dim: int):
if not isinstance(split_sections, (list, tuple)): # Unnormalized split
@@ -1532,7 +1494,6 @@ def mutate_cat_node(match: Match, split_sections: List[int], dim: int):
),
),
pass_dict=construct_pattern_matcher_pass("merge_stack_tahn_unbind_pass"),
- extra_check=get_config_flag("merge_stack_tahn_unbind_pass"),
)
@register_graph_pattern(
CallFunction(
@@ -1544,7 +1505,6 @@ def mutate_cat_node(match: Match, split_sections: List[int], dim: int):
),
),
pass_dict=construct_pattern_matcher_pass("merge_stack_tahn_unbind_pass"),
- extra_check=get_config_flag("merge_stack_tahn_unbind_pass"),
)
@register_graph_pattern(
CallFunction(
@@ -1556,7 +1516,6 @@ def mutate_cat_node(match: Match, split_sections: List[int], dim: int):
),
),
pass_dict=construct_pattern_matcher_pass("merge_stack_tahn_unbind_pass"),
- extra_check=get_config_flag("merge_stack_tahn_unbind_pass"),
)
def merge_stack_tahn_unbind(match: Match, split_sections: List[int], dim: int):
if not isinstance(split_sections, (list, tuple)): # Unnormalized split
|
2.41.0
|
59dc148770398de6f9ff3d7a8216cddaedd2fb0
|
Wed, 1 May 2024 01:38:28 +0000
|
[PATCH 0866/1000] Keep node.meta when fusing subgraph (#125261)
|
Summary: When CapabilityBasedPartitioner created the fused subgraph as a call_module node, it did not populate the node.meta["val"] field; this change fills it in from the original outputs' values. Test Plan: OSS CI Differential Revision: D56789259 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125261 Approved by: https://github.com/zhxchen17
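A rough sketch of the effect (the partitioner setup here is only illustrative and assumes the stock `CapabilityBasedPartitioner` API; the point is that the fused `call_module` node now carries a `meta["val"]` tuple built from the original outputs):

```python
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
from torch.fx.passes.operator_support import OperatorSupportBase

class SupportAdd(OperatorSupportBase):
    def is_node_supported(self, submodules, node):
        return node.op == "call_function" and node.target == torch.ops.aten.add.Tensor

gm = make_fx(lambda x: x + 1 + 2)(torch.randn(4))  # make_fx records node.meta["val"]
fused = CapabilityBasedPartitioner(gm, SupportAdd()).partition_and_fuse()
for node in fused.graph.nodes:
    if node.op == "call_module":
        print(node.meta.get("val"))  # tuple of the original outputs' vals (None where absent)
```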
|
diff --git a/torch/fx/passes/utils/fuser_utils.py b/torch/fx/passes/utils/fuser_utils.py
index 8fb328e859..8976690ed7 100644
--- a/torch/fx/passes/utils/fuser_utils.py
+++ b/torch/fx/passes/utils/fuser_utils.py
@@ -205,6 +205,8 @@ def insert_subgm(gm: GraphModule, sub_gm: GraphModule, orig_inputs: Tuple[Node,
# Use Proxy to record getitem access.
proxy_out = torch.fx.Proxy(module_node)[i].node # type: ignore[index]
orig_output.replace_all_uses_with(proxy_out, propagate_meta=True)
+
+ module_node.meta["val"] = tuple(orig_output.meta.get("val", None) for orig_output in orig_outputs)
return gm
@compatibility(is_backward_compatible=False)
|
2.41.0
|
b9d353e4fcd6be1ec530d5a0cba1ab1d376c543
|
Wed, 1 May 2024 01:40:20 +0000
|
[PATCH 0867/1000] [Torch] Add more mm kernel choices (#125000)
|
Differential Revision: D56616836 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125000 Approved by: https://github.com/htyu
|
diff --git a/torch/_inductor/kernel/mm_common.py b/torch/_inductor/kernel/mm_common.py
index e04f87c523..5a7f60e591 100644
--- a/torch/_inductor/kernel/mm_common.py
+++ b/torch/_inductor/kernel/mm_common.py
@@ -116,18 +116,35 @@ def filtered_configs(
# will be utilised on the target platform
mm_kernel_configs = [
# "BLOCK_M", "BLOCK_N", "BLOCK_K", "num_stages", "num_warps"
+ {"config": (16, 32, 16, 3, 2), "cond": True},
+ {"config": (16, 32, 32, 4, 2), "cond": True},
+ {"config": (16, 32, 32, 5, 2), "cond": True},
+ {"config": (32, 32, 16, 1, 2), "cond": True},
+ {"config": (32, 32, 128, 2, 4), "cond": torch.version.hip is None},
+ {"config": (32, 64, 32, 5, 8), "cond": True},
+ {"config": (64, 32, 32, 5, 8), "cond": True},
+ {"config": (64, 32, 128, 5, 4), "cond": True},
+ {"config": (64, 64, 16, 2, 4), "cond": True},
{"config": (64, 64, 32, 2, 4), "cond": True},
+ {"config": (64, 64, 64, 3, 8), "cond": True},
+ {"config": (64, 64, 128, 3, 4), "cond": True},
+ {"config": (64, 64, 128, 5, 4), "cond": True},
{"config": (64, 128, 32, 3, 4), "cond": True},
- {"config": (128, 64, 32, 3, 4), "cond": True},
{"config": (64, 128, 32, 4, 8), "cond": True},
+ {"config": (64, 128, 64, 4, 4), "cond": True},
+ {"config": (64, 128, 128, 4, 4), "cond": True},
+ {"config": (128, 64, 32, 2, 2), "cond": True},
+ {"config": (128, 64, 32, 3, 4), "cond": True},
{"config": (128, 64, 32, 4, 8), "cond": True},
- {"config": (64, 32, 32, 5, 8), "cond": True},
- {"config": (32, 64, 32, 5, 8), "cond": True},
+ {"config": (128, 64, 64, 3, 8), "cond": True},
+ {"config": (128, 64, 128, 4, 8), "cond": True},
{"config": (128, 128, 32, 2, 8), "cond": True},
- {"config": (64, 64, 64, 3, 8), "cond": True},
- {"config": (32, 32, 128, 2, 4), "cond": torch.version.hip is None},
- {"config": (64, 64, 16, 2, 4), "cond": True},
- {"config": (32, 32, 16, 1, 2), "cond": True},
+ {"config": (128, 128, 32, 3, 4), "cond": True},
+ {"config": (128, 128, 32, 4, 4), "cond": True},
+ {"config": (128, 128, 64, 3, 4), "cond": True},
+ {"config": (128, 128, 64, 3, 8), "cond": True},
+ {"config": (128, 128, 64, 5, 4), "cond": True},
+ {"config": (128, 128, 64, 5, 8), "cond": True},
]
int8_mm_kernel_configs = [
|
2.41.0
|
506e95433ef39cfa698bf3cf23669fe3e877538
|
Tue, 30 Apr 2024 10:58:38 -0700
|
[PATCH 0868/1000] [dynamo] support inactive context managers across graph breaks (#125203)
|
Fix https://github.com/pytorch/pytorch/issues/124900. When we reconstruct `ContextWrappingVariable`s, we only reconstruct the context class, not the object. Normally, contexts are active (entered via `with ctx:`) and we initialize the context object in the resume function. But for inactive contexts (contexts declared ahead of time, before the `with` block), we did not reconstruct them properly in the optimized bytecode or the resume function. This PR therefore adds initialization for inactive contexts in the resume function. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125203 Approved by: https://github.com/jansel
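The new test in this PR boils down to the following repro; before this change the compiled function could diverge from eager because `ctx` was reconstructed as the class rather than the initialized object (sketch, using the eager backend):

```python
import torch

def fn(x):
    x = x + 1
    ctx = torch.set_grad_enabled(True)  # inactive context created before the `with`
    torch._dynamo.graph_break()         # forces a resume function
    with ctx:
        x = x + 1
    return x

x = torch.zeros(10, requires_grad=False)
opt_fn = torch.compile(fn, backend="eager")
assert fn(x).requires_grad == opt_fn(x).requires_grad  # matches after this fix
```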
|
diff --git a/test/dynamo/test_ctx_manager.py b/test/dynamo/test_ctx_manager.py
index b3e286edab..cc6e39de4d 100644
--- a/test/dynamo/test_ctx_manager.py
+++ b/test/dynamo/test_ctx_manager.py
@@ -1304,6 +1304,22 @@ class GraphModule(torch.nn.Module):
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
+ def test_inactive_context_graph_break(self):
+ def fn(x):
+ x = x + 1
+ ctx = torch.set_grad_enabled(True)
+ torch._dynamo.graph_break()
+ with ctx:
+ x = x + 1
+ return x
+
+ x = torch.zeros(10, requires_grad=False)
+ cnts = torch._dynamo.testing.CompileCounter()
+ opt_fn = torch.compile(fn, backend=cnts)
+ self.assertEqual(fn(x), opt_fn(x))
+ self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
+ self.assertEqual(cnts.frame_count, 2)
+
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
diff --git a/torch/_dynamo/resume_execution.py b/torch/_dynamo/resume_execution.py
index 545bb0f5c9..969a679c9e 100644
--- a/torch/_dynamo/resume_execution.py
+++ b/torch/_dynamo/resume_execution.py
@@ -317,6 +317,17 @@ def _filter_iter(l1, l2, cond):
return res
+def _load_tuple_and_call(tup):
+ insts = []
+ if sys.version_info >= (3, 11):
+ insts.append(create_instruction("PUSH_NULL"))
+ insts.append(create_instruction("SWAP", arg=2))
+ for val in tup:
+ insts.append(create_instruction("LOAD_CONST", argval=val))
+ insts.extend(create_call_function(len(tup), False))
+ return insts
+
+
class ContinueExecutionCache:
cache = ExactWeakKeyDictionary()
generated_code_metadata = ExactWeakKeyDictionary()
@@ -341,6 +352,8 @@ class ContinueExecutionCache:
argnames: Tuple[str],
argnames_null: Tuple[str],
setup_fns: Tuple[ReenterWith],
+ stack_ctx_vars: Tuple[int, Tuple[Any]],
+ argnames_ctx_vars: Tuple[str, Tuple[Any]],
null_idxes: Tuple[int],
) -> types.CodeType:
assert offset is not None
@@ -359,6 +372,8 @@ class ContinueExecutionCache:
argnames,
argnames_null,
setup_fns,
+ stack_ctx_vars,
+ argnames_ctx_vars,
null_idxes,
)
@@ -420,6 +435,7 @@ class ContinueExecutionCache:
# map old hook targets to new targets generated by the hook
old_hook_target_remap = {}
null_idxes_i = 0
+ stack_ctx_vars_d = dict(stack_ctx_vars) # type: ignore[var-annotated,arg-type]
for i in range(nstack):
while (
null_idxes_i < len(null_idxes)
@@ -437,6 +453,12 @@ class ContinueExecutionCache:
old_hook_target = offset_to_inst[hook_target_offset]
meta.prefix_block_target_offset_remap.append(hook_target_offset)
old_hook_target_remap[old_hook_target] = exn_target
+ real_i = i + null_idxes_i
+ if real_i in stack_ctx_vars_d:
+ # current stack variable is a context var -
+ # load args for context variable and construct it
+ prefix.extend(_load_tuple_and_call(stack_ctx_vars_d[real_i]))
+
if is_py311_plus:
# reverse the mapping since targets of later/nested contexts are inserted
# into the mapping later, but show up earlier in the prefix.
@@ -446,6 +468,12 @@ class ContinueExecutionCache:
assert not hooks
+ # initialize inactive context vars in argnames
+ for name, vals in argnames_ctx_vars:
+ prefix.append(create_instruction("LOAD_FAST", argval=name))
+ prefix.extend(_load_tuple_and_call(vals))
+ prefix.append(create_instruction("STORE_FAST", argval=name))
+
# 3.12+: store NULL into variables that were NULL
if argnames_null:
assert sys.version_info >= (3, 12)
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index 5e9eb41cc0..aef6d32bac 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -2282,6 +2282,23 @@ class InstructionTranslator(InstructionTranslatorBase):
if sys.version_info < (3, 12):
assert len(argnames_null) == 0, "variables should not be NULL in < 3.12"
+ # Handle inactive context variables - inactive context variables
+ # are reconstructed to be the class, NOT the object.
+ # So the resume function needs to construct the context object
+ # from the class and the context object's target values.
+ # e.g. torch.set_grad_enabled(True) will be reconstructed as
+ # torch.set_grad_enabled
+ stack_ctx_vars = []
+ for i, var in enumerate(self.stack):
+ if type.__instancecheck__(ContextWrappingVariable, var):
+ stack_ctx_vars.append((i, tuple(var.target_values))) # type: ignore[attr-defined]
+ argnames_ctx_vars = []
+ for name in argnames:
+ if type.__instancecheck__(
+ ContextWrappingVariable, var := self.symbolic_locals[name]
+ ):
+ argnames_ctx_vars.append((name, tuple(var.target_values))) # type: ignore[attr-defined]
+
cg = PyCodegen(self)
# Python does not allow null to be an arg to a function, so
@@ -2293,12 +2310,12 @@ class InstructionTranslator(InstructionTranslatorBase):
if sys.version_info >= (3, 11):
# find indices of NullVariables
for i, var in enumerate(self.stack):
- if isinstance(var, NullVariable):
+ if type.__instancecheck__(NullVariable, var):
null_idxes.append(i)
# generate bytecode to pop the nulls
null_cnt = 0
for i, var in enumerate(reversed(self.stack)):
- if isinstance(var, NullVariable):
+ if type.__instancecheck__(NullVariable, var):
for j in range(2, i + 2 - null_cnt):
cg.append_output(create_instruction("SWAP", arg=j))
cg.extend_output(cg.pop_null())
@@ -2320,6 +2337,8 @@ class InstructionTranslator(InstructionTranslatorBase):
argnames,
argnames_null,
tuple(b.resume_fn() for b in self.block_stack),
+ tuple(stack_ctx_vars),
+ tuple(argnames_ctx_vars),
tuple(null_idxes),
)
diff --git a/torch/_dynamo/variables/lazy.py b/torch/_dynamo/variables/lazy.py
index c3dc781029..4c68c7bf78 100644
--- a/torch/_dynamo/variables/lazy.py
+++ b/torch/_dynamo/variables/lazy.py
@@ -19,9 +19,17 @@ class LazyCache:
assert self.vt is None
from ..symbolic_convert import InstructionTranslator
from .builder import VariableBuilder
+ from .ctx_manager import ContextWrappingVariable, NullContextVariable
+ from .misc import NullVariable
tx = InstructionTranslator.current_tx()
self.vt = VariableBuilder(tx, self.source)(self.value)
+
+ # we do not expect wrapping these variables in lazy VTs
+ assert not isinstance(
+ self.vt, (NullVariable, ContextWrappingVariable)
+ ) or isinstance(self.vt, NullContextVariable)
+
del self.value
del self.source
|
2.41.0
|
4e881731103e8b9729e62602dce14fdf654e4f4
|
Tue, 30 Apr 2024 13:24:09 -0700
|
[PATCH 0869/1000] [inductor] Minor fixes to various tests before enabling fx graph caching in OSS by default (#125258)
|
Summary: Discovered breakages by enabling codecache by default and doing a CI run. I'll commit these fixes first and eventually enabling caching by default will (hopefully) be a one-liner. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125258 Approved by: https://github.com/eellison
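For reference, a test that wants caching behavior can toggle the same `fx_graph_cache` flag that the logging tests below now pin off (a minimal sketch):

```python
import torch
from torch._inductor import config as inductor_config

@inductor_config.patch({"fx_graph_cache": True})
def compile_and_run(fn, *args):
    return torch.compile(fn)(*args)

# compile_and_run(lambda x: x.sin() + 1, torch.randn(8))
```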
|
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index 446cfa9fa8..2f7feb0752 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -26,9 +26,9 @@ from unittest.mock import patch
import numpy as np
import torch
-
-import torch._dynamo.test_case
import torch._dynamo.testing
+
+import torch._inductor.test_case
import torch.onnx.operators
import torch.utils._pytree as pytree
@@ -151,7 +151,7 @@ class UserDefineSetAttr:
return self.__dict__[f"pfx_{key}"]
-class MiscTests(torch._dynamo.test_case.TestCase):
+class MiscTests(torch._inductor.test_case.TestCase):
def test_get_cache_entry(self):
def f(x):
return x + 1
diff --git a/test/dynamo/test_structured_trace.py b/test/dynamo/test_structured_trace.py
index deb2a2d548..07f541edbe 100644
--- a/test/dynamo/test_structured_trace.py
+++ b/test/dynamo/test_structured_trace.py
@@ -16,10 +16,11 @@ import torch._dynamo.testing
import torch._logging.structured
import torch.distributed as dist
+from torch._inductor.test_case import TestCase
+
from torch._logging._internal import TorchLogsFormatter
from torch.nn.parallel import DistributedDataParallel as DDP
-
-from torch.testing._internal.common_utils import find_free_port, TestCase
+from torch.testing._internal.common_utils import find_free_port
from torch.testing._internal.inductor_utils import HAS_CUDA
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 31d83b3172..7578dff264 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -482,6 +482,14 @@ def _reduce_symint(s):
return (_ident, (str(s),))
+def _reduce_unsupported(s):
+ """
+ See FxGraphCachePickler. Custom reducer to handle any objects that we don't
+ support and therefore raise to bypass caching.
+ """
+ raise BypassFxGraphCache
+
+
class FxGraphCachePickler(pickle.Pickler):
"""
Custom pickler to customize the pickling of some objects (Tensors), only for the
@@ -494,6 +502,9 @@ class FxGraphCachePickler(pickle.Pickler):
dispatch_table[FakeTensor] = _reduce_fake_tensor
dispatch_table[torch.Tensor] = _reduce_tensor
dispatch_table[torch.SymInt] = _reduce_symint
+ dispatch_table[
+ torch.fx.experimental._backward_state.BackwardState
+ ] = _reduce_unsupported
@classmethod
def dumps(cls, obj) -> bytes:
@@ -893,7 +904,6 @@ class FxGraphCache:
Load a compiled graph from the cache. If a cached entry does not exist,
compile the graph and save it to the cache.
"""
-
compiled_graph = None
try:
FxGraphCache._check_can_cache(gm)
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 0630614fb3..748737a9e1 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -2385,6 +2385,7 @@ class ShapeEnv:
"source_name_to_debug_name",
"_prev_cache_key",
"_version_counter",
+ "dim_constraints",
)
# Mapping of the value of each to-be-compared field into the values that
diff --git a/torch/testing/_internal/logging_utils.py b/torch/testing/_internal/logging_utils.py
index 8bf762a657..f97d0281b1 100644
--- a/torch/testing/_internal/logging_utils.py
+++ b/torch/testing/_internal/logging_utils.py
@@ -7,6 +7,7 @@ import contextlib
import torch._logging
import torch._logging._internal
from torch._dynamo.utils import LazyString
+from torch._inductor import config as inductor_config
import logging
import io
@@ -74,6 +75,7 @@ def kwargs_to_settings(**kwargs):
# that the logs are setup correctly and capturing the correct records.
def make_logging_test(**kwargs):
def wrapper(fn):
+ @inductor_config.patch({"fx_graph_cache": False})
def test_fn(self):
torch._dynamo.reset()
|
2.41.0
|
d5f8070c44e5edc7a1d012217f7af0b4438957e
|
Mon, 22 Apr 2024 20:14:26 +0000
|
[PATCH 0872/1000] add a decomposition for select_scatter (#124426)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124426 Approved by: https://github.com/peterbell10
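The decomposition added below expresses `select_scatter` as a broadcasted mask plus `torch.where`; a small equivalence check of that formulation:

```python
import torch

x = torch.zeros(3, 4)
src = torch.arange(4.0)
dim, index = 0, 1

ref = torch.select_scatter(x, src, dim, index)  # reference op

# same result via the decomposition's mask + where formulation
mask_shape = [1] * x.ndim
mask_shape[dim] = -1
mask = torch.arange(x.shape[dim]).view(mask_shape) == index
dec = torch.where(mask, src.unsqueeze(dim).expand(x.shape), x)

assert torch.equal(ref, dec)
```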
|
diff --git a/test/expect/HasDecompTest.test_has_decomposition.expect b/test/expect/HasDecompTest.test_has_decomposition.expect
index c0390363f2..764379473b 100644
--- a/test/expect/HasDecompTest.test_has_decomposition.expect
+++ b/test/expect/HasDecompTest.test_has_decomposition.expect
@@ -1149,8 +1149,6 @@ aten::segment_reduce.out
aten::select.int
aten::select_copy.int
aten::select_copy.int_out
-aten::select_scatter
-aten::select_scatter.out
aten::set
aten::set.out
aten::set.source_Storage
diff --git a/torch/_inductor/decomposition.py b/torch/_inductor/decomposition.py
index 6541152fdb..d592cfc07f 100644
--- a/torch/_inductor/decomposition.py
+++ b/torch/_inductor/decomposition.py
@@ -86,6 +86,7 @@ decomps_to_exclude = [
aten.clamp_max,
aten.clamp_min,
aten.glu, # inductor lowers this directly
+ aten.select_scatter, # need to be in the ATen graph in order for it to work with the re-inplacing pass
aten.split.Tensor, # inductor lowers this directly
aten.squeeze, # inductor lowers this directly
aten.sum, # inductor lowers this directly
diff --git a/torch/_refs/__init__.py b/torch/_refs/__init__.py
index f9d6aafd68..f638285f69 100644
--- a/torch/_refs/__init__.py
+++ b/torch/_refs/__init__.py
@@ -6155,6 +6155,19 @@ def vdot(self, other):
return (self.conj_physical() * other).sum()
+@register_decomposition(aten.select_scatter)
+@out_wrapper()
+def select_scatter(x: TensorLikeType, src: TensorLikeType, dim: int, index: int):
+ dim = utils.canonicalize_dim(x.ndim, dim)
+ mask_shape = [1] * x.ndim
+ mask_shape[dim] = -1
+ if index < 0:
+ index = index + x.shape[dim]
+ mask = torch.arange(x.shape[dim], device=x.device).view(mask_shape) == index
+ src = torch.unsqueeze(src, dim).expand(x.shape)
+ return torch.where(mask, src, x)
+
+
# inplace
abs_ = _make_inplace(abs)
acos_ = _make_inplace(acos)
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index ab73b5baf3..53eca816e4 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -22138,6 +22138,10 @@ python_ref_db = [
torch_opinfo_name="rot90",
validate_view_consistency=False,
),
+ PythonRefInfo(
+ "_refs.select_scatter",
+ torch_opinfo_name="select_scatter",
+ ),
PythonRefInfo(
"_refs.stack",
torch_opinfo_name="stack",
|
2.41.0
|
baa7173509f451ea42a4141241f2de1894dcd4c
|
Tue, 30 Apr 2024 17:46:56 -0700
|
[PATCH 0874/1000] [FSDP2] Removed logic to save and remove pre-backward hook handles (#125269)
|
1. This PR removes the logic for saving and removing the pre-backward hook handles (which are registered via `register_multi_grad_hook(mode="any")`).
2. This PR removes the logic for _trying_ to guard against mistargeted prefetches, which relied on querying whether the engine will execute the module output tensors' `grad_fn`s. (See https://github.com/pytorch/pytorch/pull/118118 for the original motivation.)

For 1, the logic was error prone since it relied on `set_is_last_backward(False)` being set correctly, or else pre-backward hooks could be de-registered too early. We would prefer to match the hook lifetimes with that of the autograd graph. This solves a bug with a 1f1b interleaved schedule.

If we directly remove the manual saving/removing of hook handles, we get a ref cycle where the tensors' `grad_fn`s are passed to the hook function. We decided to simply remove this `grad_fn` logic since (1) it cannot perfectly prevent mistargeted prefetches and (2) it introduces undesired complexity. In the future, we may prefer a different mechanism to override prefetching for more complex/dynamic use cases.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/125269 Approved by: https://github.com/weifengpy ghstack dependencies: #125190, #125191
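For context, a toy sketch (not FSDP2 code; the tensors, losses, and hook below are made up) of the `register_multi_grad_hook(..., mode="any")` behavior that the pre-backward hooks now rely on without storing handles:

```python
import torch
from torch.autograd.graph import register_multi_grad_hook

# Toy illustration only: in "any" mode the hook fires once per backward pass,
# as soon as a gradient for any of the registered tensors is computed. The
# returned handle is deliberately not stored, mirroring the idea of tying the
# hook's lifetime to the autograd graph instead of removing it manually.
a = torch.randn(3, requires_grad=True)
h1, h2 = a * 2, a * 3            # stand-ins for two module outputs
loss1, loss2 = h1.sum(), h2.sum()

def pre_backward(*unused):
    print("pre-backward fired")

register_multi_grad_hook((h1, h2), pre_backward, mode="any")
loss1.backward(retain_graph=True)  # fires via h1's grad
loss2.backward()                   # fires again in this second backward, via h2's grad
```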
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_comm.py b/test/distributed/_composable/fsdp/test_fully_shard_comm.py
index 0bb4e54b32..33c6e61ac4 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_comm.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_comm.py
@@ -478,7 +478,7 @@ class TestFullyShardBackwardPrefetch(FSDPTest):
"""
Test a model with a linear module then a split into two linear modules,
where we run backward through one path first before the other, meaning
- that (1) onlyh one linear of the two split is used per backward and (2)
+ that (1) only one linear of the two split is used per backward and (2)
the initial shared linear is used in both backwards.
"""
dim = 8
@@ -512,8 +512,8 @@ class TestFullyShardBackwardPrefetch(FSDPTest):
loss2.sum().backward(retain_graph=True)
expected_events = [
("unshard", "1.lin2", TrainingState.PRE_BACKWARD),
- # Check that `1.lin1` is not prefetched since it is not used
- # for this backward
+ # NOTE: This `1.lin1` unshard is a mistargeted prefetch.
+ ("unshard", "1.lin1", TrainingState.PRE_BACKWARD),
("post_backward", "1.lin2", TrainingState.POST_BACKWARD),
("unshard", "0", TrainingState.PRE_BACKWARD),
("post_backward", "0", TrainingState.POST_BACKWARD),
@@ -524,10 +524,11 @@ class TestFullyShardBackwardPrefetch(FSDPTest):
model.set_is_last_backward(True)
loss1.sum().backward()
expected_events = [
- # Check that `1.lin2` is not unsharded
- ("unshard", "1.lin1", TrainingState.PRE_BACKWARD),
- ("post_backward", "1.lin1", TrainingState.POST_BACKWARD),
+ # NOTE: `1.lin1` is already unsharded from the mistargeted
+ # prefetch in the first backward.
+ # Prefetch `0`
("unshard", "0", TrainingState.PRE_BACKWARD),
+ ("post_backward", "1.lin1", TrainingState.POST_BACKWARD),
("post_backward", "0", TrainingState.POST_BACKWARD),
]
self.assertEqual(events, expected_events)
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
index 9036862905..737c2fe801 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param_group.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
@@ -5,7 +5,6 @@ from typing import Any, cast, Dict, List, NamedTuple, Optional, Set, Tuple, Unio
import torch
import torch.distributed as dist
import torch.nn as nn
-from torch.autograd.graph import Node
from torch.distributed.fsdp._common_utils import _named_parameters_with_duplicates
from torch.utils._pytree import tree_flatten, tree_unflatten
from torch.utils.hooks import RemovableHandle
@@ -123,11 +122,6 @@ class FSDPParamGroup:
self.comm_ctx = FSDPCommContext()
# Group's indices in the shared post-forward order
self._post_forward_indices: List[int] = []
- # Used to avoid mistargeted backward prefetches when the module is used
- # in forward but not in backward: for each forward, we record a tuple
- # of the output's grad fns and later query the autograd engine whether
- # any grad fn will execute in the current backward to know to prefetch.
- self.all_forward_output_grad_fns: Set[Tuple[Node, ...]] = set()
# Whether to reduce gradients at all (whether for FSDP or HSDP)
self.reduce_grads: bool = True
# Whether to all-reduce gradients for HSDP; only used if
@@ -305,13 +299,11 @@ class FSDPParamGroup:
self.comm_ctx.post_forward_order.append(self)
self._post_forward_indices.append(post_forward_index)
- def pre_backward(self, forward_grad_fns: Tuple[Any, ...], *unused: Any):
+ def pre_backward(self, *unused: Any):
with torch.profiler.record_function("FSDP::pre_backward"):
self._training_state = TrainingState.PRE_BACKWARD
self.unshard() # no-op if prefetched
self.wait_for_unshard()
- # Can be already removed if running multiple `backward`s
- self.all_forward_output_grad_fns.discard(forward_grad_fns)
self._prefetch_unshard()
def post_backward(self, *unused: Any):
@@ -370,7 +362,6 @@ class FSDPParamGroup:
fsdp_param.grad_offload_event.synchronize()
fsdp_param.grad_offload_event = None
self._post_forward_indices.clear()
- self.all_forward_output_grad_fns.clear()
def _prefetch_unshard(self):
if self._training_state == TrainingState.PRE_BACKWARD:
@@ -380,18 +371,14 @@ class FSDPParamGroup:
curr_index = self._post_forward_indices.pop()
if (target_index := curr_index - 1) < 0:
return
+ # Prefetch naively using the reverse post-forward order, which may
+ # have mistargeted prefetches if not all modules used in forward
+ # are used in this backward
target_fsdp_param_group = self.comm_ctx.post_forward_order[target_index]
- if any(
- torch._C._will_engine_execute_node(grad_fn) # type: ignore[attr-defined]
- for grad_fns in target_fsdp_param_group.all_forward_output_grad_fns
- for grad_fn in grad_fns
- ):
- with torch.profiler.record_function(
- "FSDP::backward_prefetch"
- ), target_fsdp_param_group.use_training_state(
- TrainingState.PRE_BACKWARD
- ):
- target_fsdp_param_group.unshard()
+ with torch.profiler.record_function(
+ "FSDP::backward_prefetch"
+ ), target_fsdp_param_group.use_training_state(TrainingState.PRE_BACKWARD):
+ target_fsdp_param_group.unshard()
# Utilities #
def _to_sharded(self):
diff --git a/torch/distributed/_composable/fsdp/_fsdp_state.py b/torch/distributed/_composable/fsdp/_fsdp_state.py
index 88421d1a11..3f4d407134 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_state.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_state.py
@@ -5,7 +5,7 @@ from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch.autograd import Variable
-from torch.autograd.graph import Node, register_multi_grad_hook
+from torch.autograd.graph import register_multi_grad_hook
from torch.distributed._composable_state import (
_get_module_state,
_insert_module_state,
@@ -13,7 +13,6 @@ from torch.distributed._composable_state import (
)
from torch.distributed.utils import _to_kwargs
from torch.utils._pytree import tree_flatten, tree_map
-from torch.utils.hooks import RemovableHandle
from ._fsdp_api import MixedPrecisionPolicy
from ._fsdp_common import _cast_fp_tensor, TrainingState
from ._fsdp_param import FSDPParam
@@ -55,7 +54,6 @@ class FSDPState(_State):
self._state_ctx = FSDPStateContext()
self._comm_ctx = FSDPCommContext()
self._training_state: TrainingState = TrainingState.IDLE
- self._pre_backward_hook_handles: List[RemovableHandle] = []
# Define a separate init since `__init__` is called in the contract
def init(
@@ -201,11 +199,11 @@ class FSDPState(_State):
)
return output
- def _pre_backward(self, forward_grad_fns: Tuple[Node, ...], *unused: Any) -> None:
+ def _pre_backward(self, *unused: Any) -> None:
self._training_state = TrainingState.PRE_BACKWARD
self._register_root_post_backward_final_callback()
if self._fsdp_param_group:
- self._fsdp_param_group.pre_backward(forward_grad_fns, *unused)
+ self._fsdp_param_group.pre_backward(*unused)
def _root_post_backward_final_callback(self) -> None:
with torch.profiler.record_function("FSDP::root_post_backward_callback"):
@@ -224,25 +222,18 @@ class FSDPState(_State):
self._state_ctx.post_backward_final_callback_queued = False
def _finalize_backward(self) -> None:
- for handle in self._pre_backward_hook_handles:
- handle.remove()
- self._pre_backward_hook_handles.clear()
if self._fsdp_param_group:
self._fsdp_param_group.finalize_backward()
def _register_pre_backward_hook(self, output: Any) -> Any:
if not torch.is_grad_enabled():
return output
-
flat_outputs, _ = tree_flatten(output)
- tensors = tuple(t for t in flat_outputs if (t is not None and t.requires_grad))
+ tensors = tuple(
+ t for t in flat_outputs if (torch.is_tensor(t) and t.requires_grad)
+ )
if tensors:
- grad_fns = tuple(t.grad_fn for t in tensors if t.grad_fn is not None)
- pre_backward = functools.partial(self._pre_backward, grad_fns)
- handle = register_multi_grad_hook(tensors, pre_backward, mode="any")
- self._pre_backward_hook_handles.append(handle)
- if self._fsdp_param_group:
- self._fsdp_param_group.all_forward_output_grad_fns.add(grad_fns)
+ register_multi_grad_hook(tensors, self._pre_backward, mode="any")
return output
def _register_root_post_backward_final_callback(self):
|
2.41.0
|
c905f1be34683480a33e36104b8482442d7c1fd
|
Wed, 1 May 2024 04:08:13 +0000
|
[PATCH 0875/1000] [EZ][BE] Don't import pathlib twice (#125260)
|
It was imported once as `import pathlib` and a second time as `from pathlib import Path`. Stick to the second flavor. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125260 Approved by: https://github.com/kit1980
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 7578dff264..16af022a21 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -11,7 +11,6 @@ import json
import logging
import multiprocessing
import os
-import pathlib
import pickle
import pkgutil
import platform
@@ -408,7 +407,7 @@ def write_atomic(
assert isinstance(
content, (str, bytes)
), "Only strings and byte arrays can be saved in the cache"
- path = pathlib.Path(path)
+ path = Path(path)
if make_dirs:
path.parent.mkdir(parents=True, exist_ok=True)
tmp_path = path.parent / f".{os.getpid()}.{threading.get_ident()}.tmp"
|
2.41.0
|
87afc5180815b0f25e710d1e282a93bfcc1ba47
|
Tue, 23 Apr 2024 23:29:01 -0700
|
[PATCH 0876/1000] Add LR as tensor tests (#123750)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/123750 Approved by: https://github.com/janeyx99
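A minimal usage sketch (not taken from the PR's test suite; the tiny model below is made up) of what the new optimizer inputs exercise, namely constructing an optimizer with a 0-dim tensor learning rate instead of a Python float:

```python
import torch

# Illustrative only: the new OptimizerInput entries in the diff below pass
# lr=torch.tensor(0.001) to optimizers such as SGD.
model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=torch.tensor(0.001))

loss = model(torch.randn(8, 4)).sum()
loss.backward()
opt.step()       # in eager mode this hits an internal .item() on the tensor lr
opt.zero_grad()  # (see the "handle internal item() call" comment in sgd.py below)
```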
|
diff --git a/test/inductor/test_compiled_optimizers.py b/test/inductor/test_compiled_optimizers.py
index 371ae815d5..f12f0d9783 100644
--- a/test/inductor/test_compiled_optimizers.py
+++ b/test/inductor/test_compiled_optimizers.py
@@ -85,6 +85,18 @@ KERNEL_COUNT_OVERRIDES = {
"test_adagrad_lr_decay_weight_decay_foreach_cuda": 3,
"test_adagrad_weight_decay_foreach_cuda": 3,
"test_adagrad_weight_decay_maximize_foreach_cuda": 3,
+ "test_adagrad_tensor_lr_cpu": 6,
+ "test_adagrad_tensor_lr_cuda": 6,
+ "test_adamax_tensor_lr_weight_decay_capturable_cuda": 6,
+ "test_asgd_tensor_lr_weight_decay_maximize_capturable_cuda": 8,
+ "test_asgd_tensor_lr_weight_decay_maximize_capturable_foreach_cuda": 4,
+ "test_nadam_tensor_lr_weight_decay_momentum_decay_decoupled_weight_decay_capturable_cuda": 9,
+ "test_nadam_tensor_lr_weight_decay_momentum_decay_decoupled_weight_decay_capturable_foreach_cuda": 3,
+ "test_radam_tensor_lr_capturable_weight_decay_decoupled_weight_decay_cuda": 6,
+ "test_radam_tensor_lr_capturable_weight_decay_decoupled_weight_decay_foreach_cuda": 3,
+ "test_sgd_tensor_lr_cpu": 2,
+ "test_sgd_tensor_lr_cuda": 2,
+ "test_sgd_tensor_lr_foreach_cuda": 2,
}
# also tracks currently supported optimizers
diff --git a/torch/optim/adamax.py b/torch/optim/adamax.py
index 443fbd2458..02eb3d6e1d 100644
--- a/torch/optim/adamax.py
+++ b/torch/optim/adamax.py
@@ -448,7 +448,7 @@ def _multi_tensor_adamax(
bias_corrections = [
1 - beta1 ** _get_value(step) for step in grouped_state_steps
]
- step_size = [(lr / bc) * -1 for bc in bias_corrections]
+ step_size = [(_get_value(lr) / bc) * -1 for bc in bias_corrections]
torch._foreach_addcdiv_(
grouped_params, grouped_exp_avgs, grouped_exp_infs, step_size
)
diff --git a/torch/optim/nadam.py b/torch/optim/nadam.py
index 190b1a64a0..a38cb7adc9 100644
--- a/torch/optim/nadam.py
+++ b/torch/optim/nadam.py
@@ -576,13 +576,18 @@ def _multi_tensor_nadam(
else:
step_size_grads = _stack_if_compiling(
[
- (lr * (1.0 - mu) / (1.0 - _get_value(mu_product))) * -1
+ (_get_value(lr) * (1.0 - mu) / (1.0 - _get_value(mu_product))) * -1
for mu_product, mu in zip(grouped_mu_products, mus)
]
)
step_size_expavg = _stack_if_compiling(
[
- (lr * mu_next / (1.0 - _get_value(mu_product) * mu_next)) * -1
+ (
+ _get_value(lr)
+ * mu_next
+ / (1.0 - _get_value(mu_product) * mu_next)
+ )
+ * -1
for mu_product, mu_next in zip(grouped_mu_products, mu_nexts)
]
)
diff --git a/torch/optim/optimizer.py b/torch/optim/optimizer.py
index ca4092d9d7..0fa8a8d8b7 100644
--- a/torch/optim/optimizer.py
+++ b/torch/optim/optimizer.py
@@ -99,7 +99,7 @@ def _get_value(x):
if not torch.jit.is_scripting() and is_compiling():
return x
else:
- return x.item()
+ return x.item() if isinstance(x, torch.Tensor) else x
def _stack_if_compiling(x):
diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py
index 30d9070042..b346958204 100644
--- a/torch/optim/sgd.py
+++ b/torch/optim/sgd.py
@@ -427,7 +427,12 @@ def _multi_tensor_sgd(
device_grads = bufs
if not device_has_sparse_grad:
- torch._foreach_add_(device_params, device_grads, alpha=-lr)
+ # handle internal item() call if lr is a tensor
+ if isinstance(lr, torch.Tensor) and torch._utils.is_compiling():
+ grads_x_lr = torch._foreach_mul(device_grads, -lr)
+ torch._foreach_add_(device_params, grads_x_lr)
+ else:
+ torch._foreach_add_(device_params, device_grads, alpha=-lr)
else:
# foreach APIs don't support sparse
for i in range(len(device_params)):
diff --git a/torch/testing/_internal/common_optimizers.py b/torch/testing/_internal/common_optimizers.py
index 8b50b293ff..d6bf7aa80e 100644
--- a/torch/testing/_internal/common_optimizers.py
+++ b/torch/testing/_internal/common_optimizers.py
@@ -361,6 +361,11 @@ def optim_inputs_func_adagrad(device, dtype=None):
kwargs={"lr": 0.1, "lr_decay": 0.5, "weight_decay": 0.1},
desc="lr_decay",
), # TODO: Move out to testing in param_group?
+ OptimizerInput(
+ params=None,
+ kwargs={"lr": torch.tensor(0.001)},
+ desc="Tensor lr",
+ ),
]
@@ -503,6 +508,16 @@ def optim_inputs_func_adamax(device, dtype=None):
kwargs={"weight_decay": 0.9, "maximize": False, "capturable": True},
desc="capturable, weight_decay",
),
+ OptimizerInput(
+ params=None,
+ kwargs={
+ "lr": torch.tensor(0.001),
+ "weight_decay": 0.9,
+ "maximize": False,
+ "capturable": True,
+ },
+ desc="capturable, weight_decay, tensor LR",
+ ),
]
return [
@@ -562,6 +577,16 @@ def optim_inputs_func_asgd(device, dtype=None):
kwargs={"weight_decay": 0.1, "maximize": True, "capturable": True},
desc="maximize, weight_decay, capturable",
),
+ OptimizerInput(
+ params=None,
+ kwargs={
+ "lr": torch.tensor(0.001),
+ "weight_decay": 0.1,
+ "maximize": True,
+ "capturable": True,
+ },
+ desc="maximize, weight_decay, capturable, tensor LR",
+ ),
]
return [
OptimizerInput(params=None, kwargs={}, desc="default"),
@@ -635,6 +660,17 @@ def optim_inputs_func_nadam(device, dtype=None):
},
desc="decoupled_weight_decay, capturable",
),
+ OptimizerInput(
+ params=None,
+ kwargs={
+ "lr": torch.tensor(0.001),
+ "weight_decay": 0.9,
+ "momentum_decay": 6e-3,
+ "decoupled_weight_decay": True,
+ "capturable": True,
+ },
+ desc="decoupled_weight_decay, capturable",
+ ),
]
return [
OptimizerInput(params=None, kwargs={}, desc="default"),
@@ -708,6 +744,16 @@ def optim_inputs_func_radam(device=None, dtype=None):
},
desc="capturable, weight_decay, decoupled_weight_decay",
),
+ OptimizerInput(
+ params=None,
+ kwargs={
+ "lr": torch.tensor(0.001),
+ "capturable": True,
+ "weight_decay": 0.1,
+ "decoupled_weight_decay": True,
+ },
+ desc="capturable, weight_decay, decoupled_weight_decay, tensor LR",
+ ),
]
return [
OptimizerInput(params=None, kwargs={}, desc="default"),
@@ -857,6 +903,9 @@ def optim_inputs_func_sgd(device, dtype=None):
return [
OptimizerInput(params=None, kwargs={}, desc="default"),
OptimizerInput(params=None, kwargs={"lr": 1e-2}, desc="non-default lr"),
+ OptimizerInput(
+ params=None, kwargs={"lr": torch.tensor(0.001)}, desc="tensor lr"
+ ),
OptimizerInput(params=None, kwargs={"momentum": 0.9}, desc="momentum"),
OptimizerInput(
params=None,
|
2.41.0
|