| commitId | datetime | subject | comment | diff | gitVersion |
|---|---|---|---|---|---|
5a8e9ee7776ba53a768df32a86c1b8a45049fdf
|
Tue, 30 Apr 2024 16:59:20 -0700
|
[PATCH 0877/1000] [inductor] better cache clearing in fx graph cache tests (#125280)
|
Summary: There's a shortcoming in the FX graph cache tests in that they don't fully clear all inductor in-memory caches when testing the cache-hit path: We were previously accessing the FX graph cache correctly, but when loading the source object using the PyCodeCache.load_by_key_path() method, _that_ path was serving entries out of memory. To better mimic what happens during warm start (i.e., a new process), we should clear all in-memory caches. Test Plan: updated the unit tests Pull Request resolved: https://github.com/pytorch/pytorch/pull/125280 Approved by: https://github.com/eellison
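For illustration, a minimal sketch (not part of the patch, and assuming `fx_graph_cache` is enabled in the inductor config) of the warm-start pattern this change enables, combining the new `clear_inductor_caches` helper with `torch._dynamo.reset()`:

```python
import torch
from torch._inductor.utils import clear_inductor_caches

def check_cache_hit_path(fn, args):
    # Sketch of the warm-start pattern used in these tests: compile once to
    # populate the on-disk FX graph cache, then drop Dynamo guards and every
    # registered in-memory inductor cache so the second run must be served
    # from the FX graph cache rather than from memory.
    compiled_fn = torch.compile(fn)
    compiled_fn(*args)         # first run: fxgraph_cache_miss
    torch._dynamo.reset()      # reset so in-memory guards don't prevent recompilation
    clear_inductor_caches()    # new helper: clears all registered in-memory caches
    return compiled_fn(*args)  # second run: expected fxgraph_cache_hit
```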
|
diff --git a/test/inductor/test_codecache.py b/test/inductor/test_codecache.py
index 96ed0d7022..28403bf0df 100644
--- a/test/inductor/test_codecache.py
+++ b/test/inductor/test_codecache.py
@@ -21,7 +21,7 @@ from torch._inductor.codecache import (
)
from torch._inductor.runtime.runtime_utils import cache_dir
from torch._inductor.test_case import run_tests, TestCase
-from torch._inductor.utils import fresh_inductor_cache
+from torch._inductor.utils import clear_inductor_caches, fresh_inductor_cache
from torch.testing._internal.common_cuda import SM80OrLater
from torch.testing._internal.common_device_type import largeTensorTest
from torch.testing._internal.common_utils import (
@@ -100,6 +100,10 @@ class TestFxGraphCache(TestCase):
super().setUp()
counters.clear()
+ def reset(self):
+ torch._dynamo.reset()
+ clear_inductor_caches()
+
@requires_triton()
@config.patch({"fx_graph_cache": True})
@parametrize("device", (GPU_TYPE, "cpu"))
@@ -130,10 +134,9 @@ class TestFxGraphCache(TestCase):
# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
- torch._dynamo.reset()
for m in torch._inductor.codecache.PyCodeCache.cache.values():
os.remove(m.__file__)
- torch._inductor.codecache.PyCodeCache.cache_clear()
+ self.reset()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
@@ -170,7 +173,7 @@ class TestFxGraphCache(TestCase):
# The second should see all hits. (First reset so in-memory guards
# don't prevent compilation).
counters.clear()
- torch._dynamo.reset()
+ self.reset()
grads2 = compiled_fn(mod, inp)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertGreater(counters["inductor"]["fxgraph_cache_hit"], 0)
@@ -220,7 +223,7 @@ class TestFxGraphCache(TestCase):
# A second call should hit. (Reset here to force compilation).
counters.clear()
- torch._dynamo.reset()
+ self.reset()
res2 = compiled_fn(a, b)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertGreater(counters["inductor"]["fxgraph_cache_hit"], 0)
@@ -263,7 +266,7 @@ class TestFxGraphCache(TestCase):
# A second call should hit.
counters.clear()
- torch._dynamo.reset()
+ self.reset()
res2 = compiled_fn(x)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertGreater(counters["inductor"]["fxgraph_cache_hit"], 0)
@@ -350,7 +353,7 @@ class TestFxGraphCache(TestCase):
self.assertEqual(metrics.generated_kernel_count, 1)
# Verify the "hit" case
- torch._dynamo.reset()
+ self.reset()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(metrics.generated_kernel_count, 2)
@@ -376,14 +379,14 @@ class TestFxGraphCache(TestCase):
# A second call should hit.
counters.clear()
- torch._dynamo.reset()
+ self.reset()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
# Clear the cache; now we should miss.
counters.clear()
- torch._dynamo.reset()
+ self.reset()
torch._inductor.codecache.FxGraphCache.clear()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index f54e81e1bd..27bf3614cd 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -660,6 +660,14 @@ def clear_on_fresh_inductor_cache(obj: Any):
return obj
+def clear_inductor_caches():
+ """
+ Clear all registered caches.
+ """
+ for obj in _registered_caches:
+ obj.cache_clear()
+
+
@contextlib.contextmanager
def fresh_inductor_cache(cache_entries=None):
"""
@@ -668,8 +676,7 @@ def fresh_inductor_cache(cache_entries=None):
Optionally, pass a dict as 'cache_entries' to get a list of filenames and sizes
generated with this cache instance.
"""
- for obj in _registered_caches:
- obj.cache_clear()
+ clear_inductor_caches()
inductor_cache_dir = tempfile.mkdtemp()
try:
|
2.41.0
|
4857e71c261b64cb5265c58ce8eab5b69f87af6
|
Wed, 1 May 2024 05:38:01 +0000
|
[PATCH 0878/1000] Export `torch.jit.interface` from `torch.jit` package (#125209)
|
Seems like this symbol was overlooked when other symbols were exported from `torch.jit`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125209 Approved by: https://github.com/ezyang
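For reference, a small usage sketch of the re-exported symbol (the interface class below is illustrative, not from the patch):

```python
import torch

# `interface` declares a scripting interface type; this patch merely adds it
# to `torch.jit.__all__` so `torch.jit.interface` is a documented public name.
@torch.jit.interface
class Preprocessor(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        pass
```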
|
diff --git a/torch/jit/__init__.py b/torch/jit/__init__.py
index 8ed5c0727e..a5b9f5627e 100644
--- a/torch/jit/__init__.py
+++ b/torch/jit/__init__.py
@@ -81,6 +81,7 @@ __all__ = [
"export_opnames",
"fork",
"freeze",
+ "interface",
"ignore",
"isinstance",
"load",
|
2.41.0
|
81f41a920114d766d303fc4234d05103f1d4d89
|
Wed, 1 May 2024 05:43:35 +0000
|
[PATCH 0879/1000] Use BFloat16 in distributed quantization when supported by NCCL (#125113)
|
This PR enables BFloat16 in torch/csrc/distributed/c10d/quantization/quantization_gpu.cu. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125113 Approved by: https://github.com/kwen2501
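As a side note, the new code paths are gated on `HAS_NCCL_BF16_DATATYPE`, which (to the best of my understanding) corresponds to building against NCCL 2.10 or newer. A quick runtime check of the bundled NCCL version on a CUDA build:

```python
import torch

# Prints the NCCL version PyTorch was built with, e.g. (2, 19, 3).
# BF16 support in NCCL is assumed here to require 2.10 or newer.
print(torch.cuda.nccl.version())
```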
|
diff --git a/torch/csrc/distributed/c10d/quantization/quantization_gpu.cu b/torch/csrc/distributed/c10d/quantization/quantization_gpu.cu
index c9b6185a40..48cc7cfc4f 100644
--- a/torch/csrc/distributed/c10d/quantization/quantization_gpu.cu
+++ b/torch/csrc/distributed/c10d/quantization/quantization_gpu.cu
@@ -69,15 +69,16 @@ at::Tensor _float_to_bfloat16_cuda(const at::Tensor& input) {
auto output = at::empty(
{nrows, output_columns},
- input.options().dtype(at::kHalf)); // at::kHalf
+#if HAS_NCCL_BF16_DATATYPE
+ input.options().dtype(at::kBFloat16));
+#else
+ input.options().dtype(at::kHalf));
+#endif
if (nrows == 0 || output_columns == 0) {
return output;
}
- // TODO: replace Half by BFloat16, after BFloat16 is supported by Nvidia
- // NCCL input.options().dtype(at::kBFloat16)); // at::kBFloat16
-
constexpr int threads_per_block = 256;
const int blockDim_x = std::min(output_columns, threads_per_block);
dim3 blockDim(blockDim_x, threads_per_block / blockDim_x);
@@ -93,10 +94,13 @@ at::Tensor _float_to_bfloat16_cuda(const at::Tensor& input) {
input.const_data_ptr<float>(),
nrows,
ncols,
- // TODO: replace Half by BFloat16, after BFloat16 is supported by Nvidia
- // NCCL
- reinterpret_cast<uint16_t*>(output.mutable_data_ptr<at::Half>()));
- //C10_CUDA_KERNEL_LAUNCH_CHECK();
+#if HAS_NCCL_BF16_DATATYPE
+ reinterpret_cast<uint16_t*>(output.mutable_data_ptr<at::BFloat16>())
+#else
+ reinterpret_cast<uint16_t*>(output.mutable_data_ptr<at::Half>())
+#endif
+ );
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
return output;
}
@@ -134,9 +138,11 @@ at::Tensor _bfloat16_to_float_cuda(const at::Tensor& input) {
blockDim,
0,
at::cuda::getCurrentCUDAStream()>>>(
- // TODO: replace Half by BFloat16, after BFloat16 is supported by Nvidia
- // NCCL
+#if HAS_NCCL_BF16_DATATYPE
+ reinterpret_cast<const uint16_t*>(input.const_data_ptr<at::BFloat16>()),
+#else
reinterpret_cast<const uint16_t*>(input.const_data_ptr<at::Half>()),
+#endif
nrows,
ncols,
output.mutable_data_ptr<float>());
|
2.41.0
|
3c4465f504282f61f962027adf2bb9ea196b976
|
Tue, 30 Apr 2024 16:55:24 -0700
|
[PATCH 0880/1000] Add has_guarded_code to CompilationMetrics (#125279)
|
While studying some tlparse, I noticed that CompilationMetrics was reporting that there was no error for frames that have no nodes. I'm pretty sure we don't actually install a frame in this situation. has_guarded_code will tell us if that's the case, because it says whether the GuardedCode object is None or not. Actually, while working on this, I was wondering whether we can ever trigger the "skip this frame entirely, do not trace it ever again" codepath; as best as I could tell, it's impossible for this to happen by the time we get to the compilation metrics block. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/125279 Approved by: https://github.com/yanboliang
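A simplified, self-contained sketch of the pattern behind this change (`compile_inner` below is a hypothetical stand-in, not the real Dynamo entry point):

```python
def compile_inner(code):
    # Hypothetical stand-in: return None when analysis decides not to
    # install any guarded code (e.g. a frame with no nodes).
    return None if not code else object()

# Initialize before the try so the metrics block can always tell whether a
# compiled frame was actually installed, independent of whether an error
# was raised along the way.
guarded_code = None
try:
    guarded_code = compile_inner("")  # simulate an empty frame
finally:
    metrics = {"has_guarded_code": guarded_code is not None}
print(metrics)  # {'has_guarded_code': False}
```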
|
diff --git a/torch/_dynamo/convert_frame.py b/torch/_dynamo/convert_frame.py
index f2355ca66d..574c56454d 100644
--- a/torch/_dynamo/convert_frame.py
+++ b/torch/_dynamo/convert_frame.py
@@ -696,6 +696,7 @@ def _compile(
fail_reason: Optional[str] = None
fail_user_frame_filename: Optional[str] = None
fail_user_frame_lineno: Optional[int] = None
+ guarded_code = None
try:
guarded_code = compile_inner(code, one_graph, hooks, transform)
return guarded_code
@@ -801,6 +802,7 @@ def _compile(
compliant_custom_ops,
restart_reasons,
dynamo_time_before_restart,
+ guarded_code is not None,
)
record_compilation_metrics(metrics)
torch._dynamo.callback_handler.run_end_callbacks()
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index e27f20d69e..c99bccc6b5 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -677,6 +677,10 @@ class CompilationMetrics:
compliant_custom_ops: Set[str]
restart_reasons: Set[str]
dynamo_time_before_restart_s: float
+ # Sometimes, we will finish analyzing a frame but conclude we don't want
+ # to install any guarded code. True means we actually decided to install
+ # a compiled frame
+ has_guarded_code: bool
DEFAULT_COMPILATION_METRICS_LIMIT = 64
|
2.41.0
|
ead440c62c94d0a5b9055bd34e961be4aabafdb
|
Wed, 1 May 2024 07:34:04 +0000
|
[PATCH 0882/1000] [Inductor] Further tune block size for templated attention on H100 (#125286)
|
Run a script to enumerate and get the best default block size for templated attention. A100 -> no change, check numbers at #125139. H100:

## torch.bfloat16
Before:
```
| Type    | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod     | dtype          |
|---------|---------|------------|-----------|-----------|-----------|----------|---------------|----------------|
| Average | 1.103   |            |           |           |           |          |               |                |
| Max     | 1.322   | 8          | 16        | 512       | 512       | 64       | noop          | torch.bfloat16 |
| Min     | 0.829   | 1          | 16        | 1024      | 1024      | 128      | relative_bias | torch.bfloat16 |
```
After:
```
| Type    | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod     | dtype          |
|---------|---------|------------|-----------|-----------|-----------|----------|---------------|----------------|
| Average | 1.137   |            |           |           |           |          |               |                |
| Max     | 1.442   | 1          | 16        | 512       | 512       | 128      | relative_bias | torch.bfloat16 |
| Min     | 0.913   | 1          | 16        | 1024      | 1024      | 64       | head_bias     | torch.bfloat16 |
```

## torch.float32
Before:
```
| Type    | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod     | dtype         |
|---------|---------|------------|-----------|-----------|-----------|----------|---------------|---------------|
| Average | 2.269   |            |           |           |           |          |               |               |
| Max     | 3.740   | 16         | 16        | 1024      | 1024      | 64       | noop          | torch.float32 |
| Min     | 0.761   | 1          | 16        | 512       | 512       | 128      | relative_bias | torch.float32 |
```
After:
```
| Type    | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod | dtype         |
|---------|---------|------------|-----------|-----------|-----------|----------|-----------|---------------|
| Average | 2.489   |            |           |           |           |          |           |               |
| Max     | 3.755   | 16         | 16        | 4096      | 4096      | 64       | noop      | torch.float32 |
| Min     | 1.609   | 1          | 16        | 512       | 512       | 64       | head_bias | torch.float32 |
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/125286 Approved by: https://github.com/Chillee
|
diff --git a/torch/_inductor/kernel/flex_attention.py b/torch/_inductor/kernel/flex_attention.py
index e31dfe0977..635af59f8c 100644
--- a/torch/_inductor/kernel/flex_attention.py
+++ b/torch/_inductor/kernel/flex_attention.py
@@ -3,7 +3,7 @@ import logging
from typing import Any, List
import torch
-from .. import config, utils
+from .. import config
from ..lowering import empty_strided, lowerings, register_lowering
from ..select_algorithm import autotune_select_algorithm, TritonTemplate
@@ -176,9 +176,19 @@ sdpa_template = TritonTemplate(
def _get_default_config(query):
head_dim = query.get_size()[-1]
default_config = None
- is_big_shared_mem = utils.get_gpu_shared_memory() > 128 * 1024
- if is_big_shared_mem:
+ if torch.cuda.get_device_capability() >= (9, 0): # H100
+ if query.get_dtype() == torch.float32:
+ if head_dim == 64:
+ default_config = (128, 32, 4, 3)
+ else:
+ default_config = (32, 64, 4, 3)
+ else:
+ if head_dim == 64:
+ default_config = (128, 64, 4, 3)
+ else:
+ default_config = (64, 32, 4, 3)
+ elif torch.cuda.get_device_capability() >= (8, 0): # A100
if query.get_dtype() == torch.float32:
default_config = (128, 32, 4, 3)
else:
@@ -188,7 +198,7 @@ def _get_default_config(query):
default_config = (128, 32, 4, 3)
else:
if query.get_dtype() == torch.float32:
- default_config = (32, 32, 4, 3)
+ default_config = (32, 16, 4, 3)
else:
default_config = (64, 32, 4, 3)
|
2.41.0
|
2142192d488f983bdbc6a65de77a0859e21d66d
|
Tue, 30 Apr 2024 14:32:02 -0700
|
[PATCH 0883/1000] [pipelining] Add stage backward function (#124958)
|
This is a helper function which: 1. computes the gradients for the stage inputs, and 2. accumulates gradients for the stage module's parameters. A unit test for this function is also added. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124958 Approved by: https://github.com/wconstab ghstack dependencies: #124776, #124875
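A minimal usage sketch mirroring the new unit test (assumes a build that contains the `torch.distributed.pipelining._backward` module added here):

```python
import torch
from torch.distributed.pipelining._backward import stage_backward

# Backward through a single "stage": gradients w.r.t. the stage inputs are
# returned, while gradients for the module's parameters are accumulated on
# the module as a side effect, just as with a plain loss.backward().
mod = torch.nn.Linear(8, 8)
x = torch.randn(4, 8, requires_grad=True)
loss = mod(x).pow(2).sum()
(grad_x,) = stage_backward(stage_output=loss, output_grads=None, input_values=(x,))
print(grad_x.shape, mod.weight.grad.shape)
```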
|
diff --git a/docs/source/distributed.pipelining.rst b/docs/source/distributed.pipelining.rst
new file mode 100644
index 0000000000..ec7423f261
--- /dev/null
+++ b/docs/source/distributed.pipelining.rst
@@ -0,0 +1,178 @@
+.. role:: hidden
+ :class: hidden-section
+
+Pipeline Parallelism
+####################
+
+.. note:: ``torch.distributed.pipelining`` is a package migrated from the `PiPPy <https://github.com/pytorch/PiPPy>`_ project. It is currently in alpha state and under extensive development. For examples that work with our APIs, please refer to PiPPy's `examples <https://github.com/pytorch/PiPPy/tree/main/examples>`_ directory.
+
+Why Pipeline Parallel?
+**********************
+
+One of the most important techniques for advancing the state of the art in deep learning is scaling. Common techniques for scaling neural networks include *data parallelism*, *tensor/operation parallelism*, and *pipeline parallelism* (or *pipelining*). Pipelining is a technique in which the *code* of the model is partitioned and multiple *micro-batches* execute different parts of the model code concurrently. In many cases, pipeline parallelism can be an effective technique for scaling, in particular for large-scale jobs or bandwidth-limited interconnects. To learn more about pipeline parallelism in deep learning, see `this article <https://www.deepspeed.ai/tutorials/pipeline/>`_.
+
+What is ``torch.distributed.pipelining``?
+*****************************************
+
+.. automodule:: torch.distributed.pipelining
+
+.. currentmodule:: torch.distributed.pipelining
+
+While promising for scaling, pipelining is often difficult to implement, requiring intrusive code changes to model code and difficult-to-implement runtime orchestration code. ``torch.distributed.pipelining`` aims to provide **a toolkit that does said things automatically to allow high-productivity scaling of models.** It consists of a **compiler** and a **runtime** stack for easy pipelining of PyTorch models. In particular, it provides the following features:
+
+* Splitting of model code based on your specification. The goal is for the user to provide model code as-is to the system for parallelization, without having to make heavyweight modifications to make parallelism work. The specification is also simple.
+* Support for rich pipeline scheduling paradigms, including GPipe, 1F1B, Interleaved 1F1B and Looped BFS. It will be also easy to customize your own schedule under this framework.
+* First-class support for cross-host pipeline parallelism, as this is where PP is typically used (over slower interconnects).
+* Composability with other PyTorch parallel schemes such as data parallelism (DDP, FSDP) or tensor parallelism (overall, known as "3d parallelism").
+
+Examples
+********
+
+In the `PiPPy <https://github.com/pytorch/PiPPy>`_ repo where this package is migrated from, we provide rich examples based on realistic models. In particular, we show how to apply pipelining without any model code change. You can refer to the `HuggingFace examples directory <https://github.com/pytorch/PiPPy/tree/main/examples/huggingface>`_. Popular examples include: `GPT2 <https://github.com/pytorch/PiPPy/tree/main/examples/huggingface/pippy_gpt2.py>`_, and `LLaMA <https://github.com/pytorch/PiPPy/tree/main/examples/llama>`_.
+
+Techniques Explained
+********************
+
+``torch.distributed.pipelining`` consists of two parts: a *compiler* and a *runtime*. The compiler takes your model code, splits it up, and transforms it into a ``Pipe``, which is a wrapper that describes the model at each pipeline stage and their data-flow relationship. The runtime executes the ``PipelineStage`` in parallel, handling things like micro-batch splitting, scheduling, communication, and gradient propagation, etc. We will cover the APIs for these concepts in this section.
+
+Splitting a Model with ``pipeline``
+===================================
+
+To see how we can split a model into a pipeline, let's first take an example trivial neural network:
+
+.. code-block:: python
+
+ import torch
+
+ class MyNetworkBlock(torch.nn.Module):
+ def __init__(self, in_dim, out_dim):
+ super().__init__()
+ self.lin = torch.nn.Linear(in_dim, out_dim)
+
+ def forward(self, x):
+ x = self.lin(x)
+ x = torch.relu(x)
+ return x
+
+
+ class MyNetwork(torch.nn.Module):
+ def __init__(self, in_dim, layer_dims):
+ super().__init__()
+
+ prev_dim = in_dim
+ for i, dim in enumerate(layer_dims):
+ setattr(self, f'layer{i}', MyNetworkBlock(prev_dim, dim))
+ prev_dim = dim
+
+ self.num_layers = len(layer_dims)
+ # 10 output classes
+ self.output_proj = torch.nn.Linear(layer_dims[-1], 10)
+
+ def forward(self, x):
+ for i in range(self.num_layers):
+ x = getattr(self, f'layer{i}')(x)
+
+ return self.output_proj(x)
+
+
+ in_dim = 512
+ layer_dims = [512, 1024, 256]
+ mn = MyNetwork(in_dim, layer_dims).to(device)
+
+This network is written as free-form Python code; it has not been modified for any specific parallelism technique.
+
+Let us see our usage of the ``pipeline`` interface:
+
+.. code-block:: python
+
+ from torch.distributed.pipelining import annotate_split_points, pipeline, Pipe, SplitPoint
+
+ annotate_split_points(mn, {'layer0': SplitPoint.END,
+ 'layer1': SplitPoint.END})
+
+ batch_size = 32
+ example_input = torch.randn(batch_size, in_dim, device=device)
+ chunks = 4
+
+ pipe = pipeline(mn, chunks, example_args=(example_input,))
+ print(pipe)
+
+::
+
+ ************************************* pipe *************************************
+ GraphModule(
+ (submod_0): GraphModule(
+ (layer0): InterpreterModule(
+ (lin): InterpreterModule()
+ )
+ )
+ (submod_1): GraphModule(
+ (layer1): InterpreterModule(
+ (lin): InterpreterModule()
+ )
+ )
+ (submod_2): GraphModule(
+ (layer2): InterpreterModule(
+ (lin): InterpreterModule()
+ )
+ (output_proj): InterpreterModule()
+ )
+ )
+
+ def forward(self, arg8_1):
+ submod_0 = self.submod_0(arg8_1); arg8_1 = None
+ submod_1 = self.submod_1(submod_0); submod_0 = None
+ submod_2 = self.submod_2(submod_1); submod_1 = None
+ return (submod_2,)
+
+So what's going on here? First, ``pipeline`` turns our model into a directed acyclic graph (DAG) by tracing the model. Then, it groups together the operations and parameters into *pipeline stages*. Stages are represented as ``submod_N`` submodules, where ``N`` is a natural number.
+
+We used ``annotate_split_points`` to specify that the code should be split and the end of ``layer0`` and ``layer1``. Our code has thus been split into *three* pipeline stages. Our library also provides ``SplitPoint.BEGINNING`` if a user wants to split before certain annotation point.
+
+While the ``annotate_split_points`` API gives users a way to specify the split points without modifying the model, our library also provides an API for in-model annotation: ``pipe_split()``. For details, you can read `this example <https://github.com/pytorch/PiPPy/blob/main/test/test_pipe.py>`_.
+
+This covers the basic usage of the ``Pipe`` API. For more information, please see the documentation.
+
+Using ``PipelineSchedule`` for Execution
+========================================
+
+Given the above ``Pipe`` object, we can use one of the ``PipelineStage`` classes to execute our model in a pipelined fashion. First off, let us instantiate a ``PipelineStage`` instance:
+
+.. code-block:: python
+
+ # We are using `torchrun` to run this example with multiple processes.
+ # `torchrun` defines two environment variables: `RANK` and `WORLD_SIZE`.
+ rank = int(os.environ["RANK"])
+ world_size = int(os.environ["WORLD_SIZE"])
+
+ # Initialize distributed environment
+ import torch.distributed as dist
+ dist.init_process_group(rank=rank, world_size=world_size)
+
+ # Pipeline stage is our main pipeline runtime. It takes in the pipe object,
+ # the rank of this process, and the device.
+ from torch.distributed.pipelining import PipelineStage
+ stage = PipelineStage(pipe, rank, device)
+
+We can now attach the ``PipelineStage`` to a pipeline schedule, GPipe for example, and run with data:
+
+.. code-block:: python
+
+ from torch.distributed.pipelining import ScheduleGPipe
+ schedule = ScheduleGPipe(stage, chunks)
+
+ # Input data
+ x = torch.randn(batch_size, in_dim, device=device)
+
+ # Run the pipeline with input `x`. Divide the batch into 4 micro-batches
+ # and run them in parallel on the pipeline
+ if rank == 0:
+ schedule.step(x)
+ else:
+ output = schedule.step()
+
+Note that since we split our model into three stages, we must run this script with three workers. For this example, we will use ``torchrun`` to run multiple processes within a single machine for demonstration purposes. We can collect up all of the code blocks above into a file named `example.py <https://github.com/pytorch/PiPPy/tree/main/examples/basic>`_ and then run it with ``torchrun`` like so:
+
+.. code-block:: bash
+
+ torchrun --nproc_per_node=3 example.py
diff --git a/docs/source/index.rst b/docs/source/index.rst
index a7afe60bc2..f4e4c96e11 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -78,6 +78,7 @@ Features described in this documentation are classified by release status:
torch.distributed.elastic <distributed.elastic>
torch.distributed.fsdp <fsdp>
torch.distributed.optim <distributed.optim>
+ torch.distributed.pipelining <distributed.pipelining>
torch.distributed.tensor.parallel <distributed.tensor.parallel>
torch.distributed.checkpoint <distributed.checkpoint>
torch.distributions <distributions>
diff --git a/test/distributed/pipelining/test_stage_backward.py b/test/distributed/pipelining/test_stage_backward.py
new file mode 100644
index 0000000000..358607ab91
--- /dev/null
+++ b/test/distributed/pipelining/test_stage_backward.py
@@ -0,0 +1,72 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+# Owner(s): ["oncall: distributed"]
+import copy
+
+import torch
+from torch.distributed.pipelining._backward import stage_backward
+from torch.testing._internal.common_utils import run_tests, TestCase
+
+
+d_hid = 512
+batch_size = 256
+
+
+class MLPModule(torch.nn.Module):
+ def __init__(self, d_hid):
+ super().__init__()
+ self.net1 = torch.nn.Linear(d_hid, d_hid)
+ self.relu = torch.nn.ReLU()
+ self.net2 = torch.nn.Linear(d_hid, d_hid)
+
+ def forward(self, x):
+ x = self.net1(x)
+ x = self.relu(x)
+ x = self.net2(x)
+ return x
+
+
+class StageBackwardTests(TestCase):
+ def test_stage_backward(self):
+ # MLP as a stage module
+ mod = MLPModule(d_hid)
+ x = torch.randn(batch_size, d_hid)
+ # As in a pipeline stage, the inputs to this stage requires gradients
+ x.requires_grad_(True)
+ target = torch.randn(batch_size, d_hid)
+ loss_fn = torch.nn.MSELoss(reduction="sum")
+
+ # Make a copy
+ ref_mod = copy.deepcopy(mod)
+ ref_x = x.detach().requires_grad_(x.requires_grad)
+ ref_target = target.detach()
+
+ # Forward and backward in stage manner
+ out = mod(x)
+ loss = loss_fn(out, target)
+ grad_inputs = stage_backward(
+ stage_output=loss,
+ output_grads=None,
+ input_values=(x,),
+ )
+
+ # Run reference
+ ref_out = ref_mod(ref_x)
+ ref_loss = loss_fn(ref_out, ref_target)
+ ref_loss.backward()
+
+ torch.testing.assert_close(grad_inputs[0], ref_x.grad)
+
+ # Every rank checks gradients
+ for name, p in mod.named_parameters():
+ ref_p = ref_mod.get_parameter(name)
+ try:
+ torch.testing.assert_close(p.grad, ref_p.grad)
+ except AssertionError:
+ print(f"Gradient test failed for {name}: {p.grad} vs {ref_p.grad}")
+ raise
+
+ print("Stage backward test passed")
+
+
+if __name__ == "__main__":
+ run_tests()
diff --git a/torch/distributed/pipelining/__init__.py b/torch/distributed/pipelining/__init__.py
new file mode 100644
index 0000000000..f2661b8c6f
--- /dev/null
+++ b/torch/distributed/pipelining/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
diff --git a/torch/distributed/pipelining/_backward.py b/torch/distributed/pipelining/_backward.py
new file mode 100644
index 0000000000..c3aa906050
--- /dev/null
+++ b/torch/distributed/pipelining/_backward.py
@@ -0,0 +1,117 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+from typing import List, Optional
+
+import torch
+
+from ._debug import map_debug_info
+
+
+def stage_backward(
+ stage_output,
+ output_grads,
+ input_values,
+ outputs_with_grads_idxs: Optional[List[int]] = None, # deprecated, not used
+):
+ """
+ This is a helper function to:
+ 1. compute the gradients for the stage inputs, and
+ 2. accumulate gradients for the stage module's parameters.
+
+ Given the input value(s) and the corresponding gradient for the output
+ value(s), compute and accumulate gradients for all parameter values (leaves
+ in the autograd trace) as well as return a list of the gradients for the
+ input values
+ """
+ if outputs_with_grads_idxs is not None:
+ # Deprecated, not used in runtime calls, only exists in compiler
+ stage_output = [stage_output[i] for i in outputs_with_grads_idxs]
+ output_grads = [output_grads[i] for i in outputs_with_grads_idxs]
+
+ try:
+ # stage_output may be a composite datatype like dict. Extract all individual
+ # tensor values here
+ stage_output_tensors = []
+ output_grad_tensors = []
+
+ def extract_tensors_with_grads(output_val, grad_val):
+ if isinstance(output_val, torch.Tensor):
+ if not output_val.requires_grad and output_val.grad_fn is None:
+ return
+ assert isinstance(
+ grad_val, (torch.Tensor, type(None))
+ ), f"Expected Tensor or None gradient but got {type(grad_val)}"
+ stage_output_tensors.append(output_val)
+ output_grad_tensors.append(grad_val)
+ elif isinstance(output_val, (tuple, list)):
+ if grad_val is None:
+ return
+ assert isinstance(
+ grad_val, (tuple, list)
+ ), f"grad_value expected to have type {type(output_val)} but got {type(grad_val)}"
+ assert len(output_val) == len(grad_val)
+ for ov, gv in zip(output_val, grad_val):
+ extract_tensors_with_grads(ov, gv)
+ elif isinstance(output_val, dict):
+ if grad_val is None:
+ return
+ assert isinstance(grad_val, dict)
+ assert set(output_val.keys()) == set(grad_val.keys())
+ for k in output_val.keys():
+ extract_tensors_with_grads(output_val[k], grad_val[k])
+ else:
+ # Output is a non-tensor type; just ignore it
+ pass
+
+ extract_tensors_with_grads(stage_output, output_grads)
+
+ torch.autograd.backward(
+ stage_output_tensors, grad_tensors=output_grad_tensors # type: ignore[arg-type]
+ )
+
+ # Extract gradients wrt the input values
+ grad_inputs = []
+ for val in input_values:
+ if isinstance(val, torch.Tensor):
+ grad_inputs.append(val.grad)
+ else:
+ grad_inputs.append(None)
+
+ # Alternative impl: `torch.autograd.grad`.
+ # Note that `torch.autograd.grad` will not accumulate gradients into the
+ # model's parameters.
+ """
+ inputs_with_grad = []
+ for val in input_values:
+ if isinstance(val, torch.Tensor) and val.requires_grad:
+ inputs_with_grad.append(val)
+
+ grad_inputs = torch.autograd.grad(
+ stage_output_tensors, inputs_with_grad, output_grad_tensors, # type: ignore[arg-type]
+ )
+ """
+
+ except Exception as e:
+ exc_msg = f"""
+ Failed to run stage backward:
+ Stage output: {map_debug_info(stage_output)}
+ Output gradient: {map_debug_info(output_grads)}
+ Input: {map_debug_info(input_values)}
+ """
+ raise RuntimeError(exc_msg) from e
+
+ return grad_inputs
+
+
+# TODO: handling requires_grad=False dynamically. Can we analyze this during initial
+# IR emission?
+def _null_coalesce_accumulate(lhs, rhs):
+ """
+ Coalesce two values, even if one of them is null, returning the non-null
+ value.
+ """
+ if lhs is None:
+ return rhs
+ elif rhs is None:
+ return lhs
+ else:
+ return torch.add(lhs, rhs)
|
2.41.0
|
59cce38a9b389885c545a6f1b5090aa86539250
|
Wed, 1 May 2024 10:39:13 +0000
|
[PATCH 0884/1000] [MacOS][CPUInductor] Fix includes to system Python (#125285)
|
On macOS 14.4, the system Python is configured to point to a non-existent include dir:
```
% /usr/bin/python3 -c "import sysconfig;print(sysconfig.get_path('include'))"
/Library/Python/3.9/include
```
Work around the issue by composing the path to the include folder from the `stdlib` config, which points to:
```
% /usr/bin/python3 -c "import sysconfig;print(sysconfig.get_path('stdlib'))"
/Applications/Xcode.app/Contents/Developer/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125285 Approved by: https://github.com/kit1980
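A quick diagnostic sketch (not part of the patch) to check whether a given Python is affected and what the fallback resolves to:

```python
import sysconfig
from pathlib import Path

# On affected macOS system Pythons, sysconfig reports a /Library/Python/...
# include directory that does not exist; the patch then derives the
# framework's Headers folder from the stdlib path instead.
include_dir = Path(sysconfig.get_path("include"))
print(include_dir, include_dir.exists())
fallback = Path(sysconfig.get_path("stdlib")).parent.parent / "Headers"
print(fallback, (fallback / "Python.h").exists())
```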
|
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 16af022a21..5d884ee62b 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -1456,6 +1456,19 @@ def _set_gpu_runtime_env() -> None:
os.environ["CUDA_HOME"] = os.path.dirname(build_paths.cuda())
+def _get_python_include_dirs():
+ include_dir = Path(sysconfig.get_path("include"))
+ # On Darwin Python executable from a framework can return
+ # non-existing /Library/Python/... include path, in which case
+ # one should use Headers folder from the framework
+ if not include_dir.exists() and platform.system() == "Darwin":
+ std_lib = Path(sysconfig.get_path("stdlib"))
+ include_dir = (std_lib.parent.parent / "Headers").absolute()
+ if not (include_dir / "Python.h").exists():
+ warnings.warn(f"Can't find Python.h in {str(include_dir)}")
+ return [str(include_dir)]
+
+
def get_include_and_linking_paths(
include_pytorch: bool = False,
vec_isa: VecISA = invalid_vec_isa,
@@ -1476,7 +1489,7 @@ def get_include_and_linking_paths(
# Note - We include pytorch only on linux right now. There is more work
# to do to enable OMP build on darwin where PyTorch is built with IOMP
# and we need a way to link to what PyTorch links.
- ipaths = cpp_extension.include_paths(cuda) + [sysconfig.get_path("include")]
+ ipaths = cpp_extension.include_paths(cuda) + _get_python_include_dirs()
lpaths = cpp_extension.library_paths(cuda) + [
sysconfig.get_config_var("LIBDIR")
]
@@ -1541,7 +1554,7 @@ def get_include_and_linking_paths(
# symbol not found, if those header files require a library.
# For those cases, include the lpath and libs command as we do for pytorch above.
# This approach allows us to only pay for what we use.
- ipaths = cpp_extension.include_paths(cuda) + [sysconfig.get_path("include")]
+ ipaths = cpp_extension.include_paths(cuda) + _get_python_include_dirs()
if aot_mode:
ipaths += [os.path.dirname(cpp_prefix_path())]
lpaths = []
|
2.41.0
|
7ba7a76e230c9a1355cd16d254a9516d406f95a
|
Wed, 1 May 2024 10:57:10 +0000
|
[PATCH 0885/1000] [ATen][CUDA][AMP] Fix dtype mismatch in linalg_vector_norm (#125175)
|
Fixes #125174 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125175 Approved by: https://github.com/eqy, https://github.com/lezcano
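A hedged sketch of the kind of call the expanded test now exercises (assumes a CUDA device; the exact failure mode is described in issue #125174): a length-1 reduction where the requested result `dtype` differs from the input dtype.

```python
import torch

# A length-1 input takes the fast 1-D reduction path; requesting the norm
# in float32 for a half input is the combination the fix handles by casting
# `self` to the requested dtype before that fast path.
x = torch.randn(1, device="cuda", dtype=torch.float16)
out = torch.linalg.vector_norm(x, ord=2, dtype=torch.float32)
print(out.dtype)  # torch.float32
```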
|
diff --git a/aten/src/ATen/native/LinearAlgebra.cpp b/aten/src/ATen/native/LinearAlgebra.cpp
index 1849228a63..81f461f6c9 100644
--- a/aten/src/ATen/native/LinearAlgebra.cpp
+++ b/aten/src/ATen/native/LinearAlgebra.cpp
@@ -2839,10 +2839,16 @@ TORCH_IMPL_FUNC(linalg_vector_norm_out)(const Tensor& self, const Scalar& scalar
}
if (is_reduce_over_1D_vector) {
+ Tensor self_;
+ if (opt_dtype.has_value()) {
+ self_ = self.to(*opt_dtype);
+ } else {
+ self_ = self;
+ }
if (ord != 0.0) {
- keepdim ? at::abs_outf(self, const_cast<Tensor&>(result)) : at::abs_outf(self.squeeze(reduce_dim), const_cast<Tensor&>(result));
+ keepdim ? at::abs_outf(self_, const_cast<Tensor&>(result)) : at::abs_outf(self_.squeeze(reduce_dim), const_cast<Tensor&>(result));
} else {
- keepdim ? at::ne_outf(self, 0, const_cast<Tensor&>(result)) : at::ne_outf(self.squeeze(reduce_dim), 0, const_cast<Tensor&>(result));
+ keepdim ? at::ne_outf(self_, 0, const_cast<Tensor&>(result)) : at::ne_outf(self_.squeeze(reduce_dim), 0, const_cast<Tensor&>(result));
}
return;
}
diff --git a/test/test_linalg.py b/test/test_linalg.py
index 5ddeac9aa3..8976d81c5a 100644
--- a/test/test_linalg.py
+++ b/test/test_linalg.py
@@ -1228,6 +1228,7 @@ class TestLinalg(TestCase):
# torch.linalg.norm given a flattened tensor
ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
input_sizes = [
+ (1, ),
(10, ),
(4, 5),
(3, 4, 5),
@@ -1281,15 +1282,17 @@ class TestLinalg(TestCase):
else:
raise RuntimeError("Unsupported dtype")
- for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):
- input = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
- for dim in [None, random.randint(0, len(input_size) - 1)]:
- run_test_case(
- input,
- ord,
- dim,
- keepdim,
- norm_dtype)
+ for amp in [False, True]:
+ with torch.autocast(device_type=device, enabled=amp):
+ for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):
+ input = make_tensor(input_size, dtype=dtype, device=device, low=-9, high=9)
+ for dim in [None, random.randint(0, len(input_size) - 1)]:
+ run_test_case(
+ input,
+ ord,
+ dim,
+ keepdim,
+ norm_dtype)
def test_vector_norm_dim_tuple_arg(self, device):
test_cases = [
|
2.41.0
|
fbb4dfc125943547df5d484dbb2be4a8b48c0f3
|
Wed, 1 May 2024 12:08:02 +0000
|
[PATCH 0886/1000] Fix AttributeError when doing mock patch for FileTimerServerTest.test_expired_timers (#125144)
|
Fix the patch failure, and we should patch the function where it is used, not where it is defined. Failure info: ```bash root@cambricon-PowerEdge-C4140:/workspace# python file_based_timer_test.py -k test_expired_timers /opt/conda/lib/python3.10/site-packages/torch/_custom_ops.py:253: DeprecationWarning: torch.library.impl_abstract was renamed to torch.library.register_fake. Please use that instead; we will remove torch.library.impl_abstract in a future version of PyTorch. return torch.library.impl_abstract(qualname, func, _stacklevel=2) E ====================================================================== ERROR: test_expired_timers (__main__.FileTimerServerTest) tests that a single expired timer on a process should terminate ---------------------------------------------------------------------- Traceback (most recent call last): File "/opt/conda/lib/python3.10/site-packages/torch/testing/_internal/common_utils.py", line 2757, in wrapper method(*args, **kwargs) File "/opt/conda/lib/python3.10/unittest/mock.py", line 1376, in patched with self.decoration_helper(patched, File "/opt/conda/lib/python3.10/contextlib.py", line 135, in __enter__ return next(self.gen) File "/opt/conda/lib/python3.10/unittest/mock.py", line 1358, in decoration_helper arg = exit_stack.enter_context(patching) File "/opt/conda/lib/python3.10/contextlib.py", line 492, in enter_context result = _cm_type.__enter__(cm) File "/opt/conda/lib/python3.10/unittest/mock.py", line 1447, in __enter__ original, local = self.get_original() File "/opt/conda/lib/python3.10/unittest/mock.py", line 1420, in get_original raise AttributeError( AttributeError: <module 'torch.distributed.elastic.timer' from '/opt/conda/lib/python3.10/site-packages/torch/distributed/elastic/timer/__init__.py'> does not have the attribute 'log_debug_info_for_expired_timers' To execute this test, run the following from the base repo dir: python file_based_timer_test.py -k test_expired_timers This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0 ---------------------------------------------------------------------- Ran 1 test in 0.792s FAILED (errors=1) ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/125144 Approved by: https://github.com/gag1jain
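The underlying rule, as a short sketch: `mock.patch` must target the module where the name is looked up at call time, not the module where it is defined.

```python
from unittest import mock

# The helper is imported into file_based_local_timer, so that module's
# attribute is the one the test must replace (assumes torch is installed;
# the body of the `with` block stands in for the code under test).
target = (
    "torch.distributed.elastic.timer.file_based_local_timer."
    "log_debug_info_for_expired_timers"
)
with mock.patch(target) as mock_debug_info:
    pass  # exercise the expired-timer path here
```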
|
diff --git a/test/distributed/elastic/timer/file_based_local_timer_test.py b/test/distributed/elastic/timer/file_based_local_timer_test.py
index 4616ae061b..490e4a9ce3 100644
--- a/test/distributed/elastic/timer/file_based_local_timer_test.py
+++ b/test/distributed/elastic/timer/file_based_local_timer_test.py
@@ -264,7 +264,9 @@ if not (IS_WINDOWS or IS_MACOS):
)
@mock.patch("os.kill")
- @mock.patch("torch.distributed.elastic.timer.log_debug_info_for_expired_timers")
+ @mock.patch(
+ "torch.distributed.elastic.timer.file_based_local_timer.log_debug_info_for_expired_timers"
+ )
def test_expired_timers(self, mock_debug_info, mock_os_kill):
"""
tests that a single expired timer on a process should terminate
|
2.41.0
|
2715144c332a3b3e9032b0c9ddee592a1b572da
|
Wed, 1 May 2024 14:04:46 +0000
|
[PATCH 0887/1000] Add NEON-accelerated int8mm for bfloat16 (#125290)
|
As `vshlq_u32` is apparently faster than `vcvt_f32_f16`, refactor the NEON `tinygemm_kernel` to rely on `load_as_float32x4` and `load_as_float32x4x2` and implement them for float16 (using vcvt), bfloat16 (using a left shift) and plain float32 (no conversion needed). As a result, stories110M runs at 60 tokens/sec with f16, but at 66 tokens/sec with bf16 and 75 tokens/sec with f32, though higher bandwidth demand starts to favor reduced floating-point types as model size gets bigger. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125290 Approved by: https://github.com/mikekgfb
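The bfloat16 trick works because bfloat16 is just the upper 16 bits of an IEEE-754 float32, so conversion is a 16-bit left shift of the raw bits. A small Python/NumPy sketch of that equivalence (illustration only; the kernel itself is NEON C++):

```python
import numpy as np
import torch

# Reinterpret the bfloat16 payload as raw 16-bit integers, zero-extend to
# 32 bits, shift left by 16, and view the result as float32, which is the
# same transformation the kernel performs with vmovl_u16 + vshlq_u32.
x = torch.randn(8, dtype=torch.bfloat16)
bits = x.view(torch.int16).numpy().astype(np.uint16).astype(np.uint32) << 16
y = torch.from_numpy(bits.view(np.float32))
torch.testing.assert_close(y, x.to(torch.float32))
```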
|
diff --git a/aten/src/ATen/native/cpu/int8mm_kernel.cpp b/aten/src/ATen/native/cpu/int8mm_kernel.cpp
index 4ef6cde4a8..bd266030b2 100644
--- a/aten/src/ATen/native/cpu/int8mm_kernel.cpp
+++ b/aten/src/ATen/native/cpu/int8mm_kernel.cpp
@@ -185,17 +185,50 @@ inline void tinygemm_kernel(
#if !defined(C10_MOBILE) && defined(__aarch64__)
#include <arm_neon.h>
-static inline float reduce(float32x4_t x) {
+inline float reduce(float32x4_t x) {
auto sum = vpaddq_f32(x, x);
return vgetq_lane_f32(vpaddq_f32(sum, sum), 0);
}
-template <int BLOCK_M, int BLOCK_N>
-inline void tinygemm_kernel(
- const Half* RESTRICT A,
+inline float32x4x2_t load_as_float32x4x2(const Half* ptr) {
+ float16x8_t f16_val = vld1q_f16(reinterpret_cast<const float16_t *>(ptr));
+ auto val_low = vcvt_f32_f16(vget_low_f16(f16_val));
+ auto val_high = vcvt_f32_f16(vget_high_f16(f16_val));
+ return {val_low, val_high};
+}
+
+inline float32x4_t load_as_float32x4(const Half* ptr) {
+ return vcvt_f32_f16(vld1_f16(reinterpret_cast<const float16_t *>(ptr)));
+}
+
+inline float32x4x2_t load_as_float32x4x2(const BFloat16* ptr) {
+ int32x4_t shift = vdupq_n_s32(16);
+ uint16x8_t u16_val = vld1q_u16(reinterpret_cast<const uint16_t *>(ptr));
+ uint32x4_t int_low = vmovl_u16(vget_low_u16(u16_val));
+ uint32x4_t int_high = vmovl_u16(vget_high_u16(u16_val));
+ return {vreinterpretq_f32_u32(vshlq_u32(int_low, shift)), vreinterpretq_f32_u32(vshlq_u32(int_high, shift))};
+}
+
+inline float32x4_t load_as_float32x4(const BFloat16* ptr) {
+ int32x4_t shift = vdupq_n_s32(16);
+ uint32x4_t as_int = vmovl_u16(vld1_u16(reinterpret_cast<const uint16_t *>(ptr)));
+ return vreinterpretq_f32_u32(vshlq_u32(as_int, shift));
+}
+
+inline float32x4_t load_as_float32x4(const float* ptr) {
+ return vld1q_f32(ptr);
+}
+
+inline float32x4x2_t load_as_float32x4x2(const float* ptr) {
+ return {vld1q_f32(ptr), vld1q_f32(ptr + 4)};
+}
+
+template <int BLOCK_M, int BLOCK_N, typename T>
+inline void tinygemm_kernel_(
+ const T* RESTRICT A,
const int8_t* RESTRICT B,
- const Half* RESTRICT scales,
- Half* RESTRICT C,
+ const T* RESTRICT scales,
+ T* RESTRICT C,
int lda,
int ldb,
int ldc,
@@ -207,24 +240,61 @@ inline void tinygemm_kernel(
c_val[i] = vdupq_n_f32(0.0);
});
for (int k = 0; k < K; k += 8) {
- float16x8_t a_val = vld1q_f16(reinterpret_cast<const float16_t *>(A) + m * lda + k);
- auto a_val_low = vcvt_f32_f16(vget_low_f16(a_val));
- auto a_val_high = vcvt_f32_f16(vget_high_f16(a_val));
+ auto a_val = load_as_float32x4x2(A + m * lda + k);
c10::ForcedUnroll<BLOCK_N>{}([&](auto i) {
int16x8_t b_val = vmovl_s8(vld1_s8(B + i * ldb + k));
auto b_val_low = vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_val)));
auto b_val_high = vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_val)));
- c_val[i] = vfmaq_f32(c_val[i], a_val_high, b_val_high);
- c_val[i] = vfmaq_f32(c_val[i], a_val_low, b_val_low);
+ c_val[i] = vfmaq_f32(c_val[i], a_val.val[1], b_val_high);
+ c_val[i] = vfmaq_f32(c_val[i], a_val.val[0], b_val_low);
});
}
- float32x4_t scale_val = vcvt_f32_f16(vld1_f16(reinterpret_cast<const float16_t *>(scales)));
+ float32x4_t scale_val = load_as_float32x4(scales);
c10::ForcedUnroll<BLOCK_N>{}([&](auto i) {
C[m * ldc + i] = reduce(c_val[i]) * vgetq_lane_f32(scale_val, i);
});
}
}
+
+template <int BLOCK_M, int BLOCK_N>
+inline void tinygemm_kernel(
+ const Half* RESTRICT A,
+ const int8_t* RESTRICT B,
+ const Half* RESTRICT scales,
+ Half* RESTRICT C,
+ int lda,
+ int ldb,
+ int ldc,
+ int K) {
+ tinygemm_kernel_<BLOCK_M, BLOCK_N>(A, B, scales, C, lda, ldb, ldc, K);
+}
+
+template <int BLOCK_M, int BLOCK_N>
+inline void tinygemm_kernel(
+ const BFloat16* RESTRICT A,
+ const int8_t* RESTRICT B,
+ const BFloat16* RESTRICT scales,
+ BFloat16* RESTRICT C,
+ int lda,
+ int ldb,
+ int ldc,
+ int K) {
+ tinygemm_kernel_<BLOCK_M, BLOCK_N>(A, B, scales, C, lda, ldb, ldc, K);
+}
+
+template <int BLOCK_M, int BLOCK_N>
+inline void tinygemm_kernel(
+ const float* RESTRICT A,
+ const int8_t* RESTRICT B,
+ const float* RESTRICT scales,
+ float* RESTRICT C,
+ int lda,
+ int ldb,
+ int ldc,
+ int K) {
+ tinygemm_kernel_<BLOCK_M, BLOCK_N>(A, B, scales, C, lda, ldb, ldc, K);
+}
#endif
// non-vectorized version
|
2.41.0
|
421f1b4a86ccd0305ac5b3343f8b18097c0adc8
|
Wed, 1 May 2024 14:27:37 +0000
|
[PATCH 0888/1000] docs: `torch.nn.utils.rnn`: docs improve (#123559)
|
docs: `torch.nn.utils.rnn`: docs improve Pull Request resolved: https://github.com/pytorch/pytorch/pull/123559 Approved by: https://github.com/mikaylagawarecki
|
diff --git a/torch/nn/utils/rnn.py b/torch/nn/utils/rnn.py
index 27c902e9aa..1a62254f08 100644
--- a/torch/nn/utils/rnn.py
+++ b/torch/nn/utils/rnn.py
@@ -212,10 +212,10 @@ def pack_padded_sequence(
) -> PackedSequence:
r"""Packs a Tensor containing padded sequences of variable length.
- :attr:`input` can be of size ``T x B x *`` where `T` is the length of the
- longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and
- ``*`` is any number of dimensions (including 0). If ``batch_first`` is
- ``True``, ``B x T x *`` :attr:`input` is expected.
+ :attr:`input` can be of size ``T x B x *`` where ``T`` is the length of the
+ longest sequence, ``B`` is the batch size, and ``*`` is any number of dimensions
+ (including 0). If :attr:`batch_first` is ``False``, ``T x B x *`` :attr:`input` is expected,
+ ``B x T x *`` otherwise.
For unsorted sequences, use `enforce_sorted = False`. If :attr:`enforce_sorted` is
``True``, the sequences should be sorted by length in a decreasing order, i.e.
@@ -233,7 +233,7 @@ def pack_padded_sequence(
lengths (Tensor or list(int)): list of sequence lengths of each batch
element (must be on the CPU if provided as a tensor).
batch_first (bool, optional): if ``True``, the input is expected in ``B x T x *``
- format.
+ format, ``T x B x *`` otherwise.
enforce_sorted (bool, optional): if ``True``, the input is expected to
contain sequences sorted by length in a decreasing order. If
``False``, the input will get sorted unconditionally. Default: ``True``.
@@ -275,9 +275,9 @@ def pad_packed_sequence(
It is an inverse operation to :func:`pack_padded_sequence`.
- The returned Tensor's data will be of size ``T x B x *``, where `T` is the length
- of the longest sequence and `B` is the batch size. If ``batch_first`` is True,
- the data will be transposed into ``B x T x *`` format.
+ The returned Tensor's data will be of size ``T x B x *`` (if :attr:`batch_first` is ``False``)
+ or ``B x T x *`` (if :attr:`batch_first` is ``True``) , where ``T`` is the length of the longest
+ sequence and ``B`` is the batch size.
Example:
>>> from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
@@ -305,7 +305,7 @@ def pad_packed_sequence(
Args:
sequence (PackedSequence): batch to pad
batch_first (bool, optional): if ``True``, the output will be in ``B x T x *``
- format.
+ format, ``T x B x *`` otherwise.
padding_value (float, optional): values for padded elements.
total_length (int, optional): if not ``None``, the output will be padded to
have length :attr:`total_length`. This method will throw :class:`ValueError`
@@ -344,17 +344,15 @@ def pad_sequence(
batch_first: bool = False,
padding_value: float = 0.0,
) -> Tensor:
- r"""Pad a list of variable length Tensors with ``padding_value``.
+ r"""Pad a list of variable length Tensors with :attr:`padding_value`.
- ``pad_sequence`` stacks a list of Tensors along a new dimension,
- and pads them to equal length. For example, consider a list of sequences
- with size ``L x *`` as the input. If ``batch_first`` is ``False``,
- the output is of size ``T x B x *``, and ``B x T x *`` otherwise.
-
- `B` is batch size. It is equal to the number of elements in ``sequences``.
- `T` is length of the longest sequence.
- `L` is length of the sequence.
- `*` is any number of trailing dimensions, including none.
+ ``pad_sequence`` stacks a list of Tensors along a new dimension, and pads them
+ to equal length. :attr:`sequences` can be list of sequences with size ``L x *``,
+ where `L` is length of the sequence and ``*`` is any number of dimensions
+ (including 0). If :attr:`batch_first` is ``False``, the output is of size
+ ``T x B x *``, and ``B x T x *`` otherwise, where ``B`` is the batch size
+ (the number of elements in :attr:`sequences`), ``T`` is the length of the longest
+ sequence.
Example:
>>> from torch.nn.utils.rnn import pad_sequence
@@ -371,8 +369,8 @@ def pad_sequence(
Args:
sequences (list[Tensor]): list of variable length sequences.
- batch_first (bool, optional): output will be in ``B x T x *`` if True, or in
- ``T x B x *`` otherwise. Default: False.
+ batch_first (bool, optional): if ``True``, the output will be in ``B x T x *``
+ format, ``T x B x *`` otherwise.
padding_value (float, optional): value for padded elements. Default: 0.
Returns:
|
2.41.0
|
09f98c705e4851414cd8ddf21949177af2b13aa
|
Wed, 1 May 2024 14:31:31 +0000
|
[PATCH 0889/1000] Include support for the scatter gather cuda kernels to allow for comp… (#124809)
|
Fixes #121965 This PR adds support for complex numbers in the scatter/gather related kernels. For brevity, I will only include `complex<float>` for now, as `complex<double>`, for example, will be more complicated. C++ unit tests are currently passing alongside tests in `test_scatter_gather_ops.py`, and the Python test suites also seem to be passing. Please keep the following in mind: 1) I think this is my first time using PyTorch. 2) This is my first contribution to PyTorch. Environment: 3080 & WSL 2. `nvcc` is at 12.4. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124809 Approved by: https://github.com/eqy, https://github.com/mikaylagawarecki
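For context, a small sketch of the `scatter_reduce_` API these kernels back (shown on CPU floats for portability; the patch extends the CUDA dispatch to `complex<float>` for several reductions):

```python
import torch

# scatter_reduce_ folds src values into dest at positions given by index;
# "prod" is one of the reductions whose CUDA kernel gains complex support.
dest = torch.ones(5)
index = torch.tensor([0, 1, 0, 1, 2])
src = torch.tensor([1., 2., 3., 4., 5.])
dest.scatter_reduce_(0, index, src, reduce="prod", include_self=True)
print(dest)  # tensor([3., 8., 5., 1., 1.])
```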
|
diff --git a/aten/src/ATen/cuda/Atomic.cuh b/aten/src/ATen/cuda/Atomic.cuh
index 56ee8f87e2..2fa55902f9 100644
--- a/aten/src/ATen/cuda/Atomic.cuh
+++ b/aten/src/ATen/cuda/Atomic.cuh
@@ -35,6 +35,26 @@ struct AtomicFPOp<at::Half> {
}
};
+template <>
+struct AtomicFPOp<c10::complex<float>> {
+ template <typename func_t>
+ inline __device__ c10::complex<float> operator() (c10::complex<float> *address, c10::complex<float> val, const func_t& func) {
+ unsigned long long int* addr_as_ull = (unsigned long long int*)address;
+ unsigned long long int old = *addr_as_ull;
+ unsigned long long int assumed, new_val;
+
+ c10::complex<float> csum;
+ do {
+ assumed = old;
+ csum = func(csum, val);
+ new_val = *reinterpret_cast<unsigned long long*>(&csum);
+ old = atomicCAS(addr_as_ull, assumed, new_val);
+ } while (assumed != old);
+
+ return *reinterpret_cast<c10::complex<float>*>(&addr_as_ull);
+ }
+};
+
template <>
struct AtomicFPOp<at::BFloat16> {
template <typename func_t>
@@ -348,6 +368,14 @@ GPU_ATOMIC_INTEGER(Mul, a * b, int16_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int32_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int64_t)
+inline __device__ c10::complex<float> gpuAtomicMul(c10::complex<float> *address, c10::complex<float> val){
+ return AtomicFPOp<c10::complex<float>>()(address, val,
+ [](c10::complex<float> bsum, c10::complex<float> val) {
+ bsum*=(val);
+ return bsum;
+ });
+}
+
inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) {
return AtomicFPOp<at::Half>()(address, val,
[](at::Half bsum, at::Half val) {
@@ -369,7 +397,7 @@ inline __device__ double gpuAtomicMul(double * address, double val) {
});
}
-// Dont use a templated function for this since the addition function defaults to the CUDA built-in.
+// Don't use a templated function for this since the addition function defaults to the CUDA built-in.
inline __device__ float gpuAtomicMul (float * address, float val) {
unsigned int* address_as_ull = (unsigned int*)address;
unsigned int old = *address_as_ull;
@@ -402,6 +430,29 @@ __host__ __device__ T safe_max(T a, T b) {
return max;
}
+__inline__ __device__ c10::complex<float> complex_max(c10::complex<float> a, c10::complex<float> b) {
+ if(at::_isnan(b)) {
+ return b;
+ } else {
+ // Compute the magnitude of the complex numbers and compare each to see which one is greater.
+ float a_magnitude = __fsqrt_rn(
+ (
+ __fmul_rn(a.real(), a.real()) +
+ __fmul_rn(a.imag(),a.imag())
+ )
+ );
+ float b_magnitude = __fsqrt_rn(
+ (
+ __fmul_rn(b.real(), b.real()) +
+ __fmul_rn(b.imag(),b.imag())
+ )
+ );
+ return std::max<float>(a_magnitude, b_magnitude);
+ }
+
+}
+
+
ATOMIC_INTEGER_IMPL(Max)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t)
@@ -416,6 +467,13 @@ inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) {
});
}
+inline __device__ c10::complex<float> gpuAtomicMax(c10::complex<float> * address, c10::complex<float> val) {
+ return AtomicFPOp<c10::complex<float>>()(address, val,
+ [](c10::complex<float> bsum, c10::complex<float> val) {
+ return complex_max(bsum, val);
+ });
+}
+
inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) {
return AtomicFPOp<at::BFloat16>()(address, val,
[](at::BFloat16 bsum, at::BFloat16 val) {
@@ -462,6 +520,27 @@ __host__ __device__ T safe_min(T a, T b) {
return min;
}
+__inline__ __device__ c10::complex<float> complex_min(c10::complex<float> a, c10::complex<float> b) {
+ if(at::_isnan(b)) {
+ return b;
+ } else {
+ // Compute the magnitude of the complex numbers and compare each to see which one is smaller.
+ float a_magnitude = __fsqrt_rn(
+ (
+ __fmul_rn(a.real(), a.real()) +
+ __fmul_rn(a.imag(),a.imag())
+ )
+ );
+ float b_magnitude = __fsqrt_rn(
+ (
+ __fmul_rn(b.real(), b.real()) +
+ __fmul_rn(b.imag(),b.imag())
+ )
+ );
+ return std::min<float>(a_magnitude, b_magnitude);
+ }
+}
+
ATOMIC_INTEGER_IMPL(Min)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t)
@@ -476,6 +555,13 @@ inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) {
});
}
+inline __device__ c10::complex<float> gpuAtomicMin(c10::complex<float> * address, c10::complex<float> val) {
+ return AtomicFPOp<c10::complex<float>>()(address, val,
+ [](c10::complex<float> bsum, c10::complex<float> val) {
+ return complex_min(bsum, val);
+ });
+}
+
inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) {
return AtomicFPOp<at::BFloat16>()(address, val,
[](at::BFloat16 bsum, at::BFloat16 val) {
diff --git a/aten/src/ATen/native/cuda/ScatterGatherKernel.cu b/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
index 9ef83599cd..78f5d98dfe 100644
--- a/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
+++ b/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
@@ -4,7 +4,6 @@
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
-
#include <ATen/native/ScatterGatherChecks.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
@@ -201,7 +200,6 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
-
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
@@ -259,7 +257,6 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
-
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
@@ -318,9 +315,9 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
-
- AT_DISPATCH_ALL_TYPES_AND2(
+ AT_DISPATCH_ALL_TYPES_AND3(
at::ScalarType::Half, at::ScalarType::BFloat16,
+ at::ScalarType::ComplexFloat,
iter.dtype(),
"cuda_scatter_gather_base_kernel_func", [&] {
using dtype = typename std::conditional<cast_to_opaque,
@@ -450,8 +447,9 @@ struct cuda_scatter_fill_base_kernel {
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
- AT_DISPATCH_ALL_TYPES_AND2(
+ AT_DISPATCH_ALL_TYPES_AND3(
at::ScalarType::Half, at::ScalarType::BFloat16,
+ at::ScalarType::ComplexFloat,
iter.dtype(),
"cuda_scatter_fill_base_kernel_reduce_multiply", [&] {
using dtype = typename std::conditional<cast_to_opaque,
diff --git a/test/test_scatter_gather_ops.py b/test/test_scatter_gather_ops.py
index 3351b9d257..9074d3e2a4 100644
--- a/test/test_scatter_gather_ops.py
+++ b/test/test_scatter_gather_ops.py
@@ -221,7 +221,8 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
+ include_complex=False, include_bool=False))
def test_scatter_reduce_prod(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
@@ -229,7 +230,8 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_bool=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
+ include_complex=False, include_bool=False))
def test_scatter_reduce_mean(self, device, dtype):
for include_self in (True, False):
for deterministic in [False, True]:
@@ -239,7 +241,8 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
+ include_complex=False, include_bool=False))
def test_scatter_reduce_amax(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
@@ -258,7 +261,8 @@ class TestScatterGather(TestCase):
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
+ include_complex=False, include_bool=False))
def test_scatter_reduce_amin(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
diff --git a/test/test_torch.py b/test/test_torch.py
index 21318f3b16..433ccd5d5b 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -57,8 +57,8 @@ from torch.testing._internal.common_cuda import (
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
- floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
- all_types_and, floating_types, floating_and_complex_types, integral_types_and,
+ floating_types_and, get_all_math_dtypes, all_types_and_complex_and, all_types_and, floating_types,
+ floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
@@ -3837,7 +3837,7 @@ else:
self.assertEqual(input, result, msg=f"result: {result} input: {input} method: {str(operation)}")
@onlyCUDA
- @dtypes(*complex_types())
+ @dtypes(torch.cdouble)
def test_scatter_reduce_multiply_unsupported_dtypes(self, device, dtype):
height = 2
width = 2
|
2.41.0
|
fde9a988c2b6ba5cd88ee884227563a5f669912
|
Wed, 1 May 2024 15:37:48 +0000
|
[PATCH 0891/1000] CI: Extending unit test coverage for aarch64 linux (#125255)
|
Adds core, Dynamo, and Inductor unit tests to the aarch64 Linux CI runs. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125255 Approved by: https://github.com/malfet, https://github.com/atalman
|
diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh
index 5a1e098636..c903a26998 100755
--- a/.ci/pytorch/test.sh
+++ b/.ci/pytorch/test.sh
@@ -1158,8 +1158,23 @@ test_executorch() {
}
test_linux_aarch64(){
- # TODO: extend unit tests list
- python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp --verbose
+ python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp test_torch test_dynamic_shapes \
+ test_transformers test_multiprocessing test_numpy_interop --verbose
+
+ # Dynamo tests
+ python test/run_test.py --include dynamo/test_compile dynamo/test_backends dynamo/test_comptime dynamo/test_config \
+ dynamo/test_functions dynamo/test_fx_passes_pre_grad dynamo/test_interop dynamo/test_model_output dynamo/test_modules \
+ dynamo/test_optimizers dynamo/test_recompile_ux dynamo/test_recompiles --verbose
+
+ # Inductor tests
+ python test/run_test.py --include inductor/test_torchinductor inductor/test_benchmark_fusion inductor/test_codecache \
+ inductor/test_config inductor/test_control_flow inductor/test_coordinate_descent_tuner inductor/test_fx_fusion \
+ inductor/test_group_batch_fusion inductor/test_inductor_freezing inductor/test_inductor_utils \
+ inductor/test_inplacing_pass inductor/test_kernel_benchmark inductor/test_layout_optim \
+ inductor/test_max_autotune inductor/test_memory_planning inductor/test_metrics inductor/test_multi_kernel inductor/test_pad_mm \
+ inductor/test_pattern_matcher inductor/test_perf inductor/test_profiler inductor/test_select_algorithm inductor/test_smoke \
+ inductor/test_split_cat_fx_passes inductor/test_standalone_compile inductor/test_torchinductor \
+ inductor/test_torchinductor_codegen_dynamic_shapes inductor/test_torchinductor_dynamic_shapes --verbose
}
if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-bazel-* ]]; then
diff --git a/.github/workflows/linux-aarch64.yml b/.github/workflows/linux-aarch64.yml
index 36461afb6a..acdb688497 100644
--- a/.github/workflows/linux-aarch64.yml
+++ b/.github/workflows/linux-aarch64.yml
@@ -20,7 +20,10 @@ jobs:
runner: linux.arm64.2xlarge
test-matrix: |
{ include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.arm64.2xlarge" },
+ { config: "default", shard: 1, num_shards: 4, runner: "linux.arm64.2xlarge" },
+ { config: "default", shard: 2, num_shards: 4, runner: "linux.arm64.2xlarge" },
+ { config: "default", shard: 3, num_shards: 4, runner: "linux.arm64.2xlarge" },
+ { config: "default", shard: 4, num_shards: 4, runner: "linux.arm64.2xlarge" },
]}
linux-jammy-aarch64-py3_10-test:
|
2.41.0
|
16f1ee4cc3e58dbd4755021e4b4b87a16b2aac2
|
Wed, 1 May 2024 15:48:48 +0000
|
[PATCH 0892/1000] [ez][CI] Move test_modules and test_schema_check off CI_SERIAL_LIST (#125193)
|
* Related https://github.com/pytorch/pytorch/pull/124085 As in the title, move test_modules and test_schema_check off CI_SERIAL_LIST. If things fail, they can get the serialTest decorator instead. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125193 Approved by: https://github.com/huydhn
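A minimal sketch of what opting a single test into serial execution could look like, instead of keeping the whole file on CI_SERIAL_LIST. The import path and decorator form are assumptions about PyTorch's internal test utilities, not part of this commit:
```
# Hypothetical sketch: mark one test to run serially rather than listing its
# whole file in CI_SERIAL_LIST. Import path and decorator usage are assumed.
from torch.testing._internal.common_utils import TestCase, run_tests, serialTest


class TestExample(TestCase):
    @serialTest()  # assumed usage: keep only this test out of parallel runs
    def test_memory_hungry_case(self):
        self.assertTrue(True)


if __name__ == "__main__":
    run_tests()
```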
|
diff --git a/test/run_test.py b/test/run_test.py
index 9945859987..1b95bcb465 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -227,11 +227,9 @@ CI_SERIAL_LIST = [
"nn/test_pooling",
"nn/test_convolution", # Doesn't respect set_per_process_memory_fraction, results in OOM for other tests in slow gradcheck
"distributions/test_distributions",
- "test_modules", # failed test due to mismatched elements
"functorch/test_vmap", # OOM
"test_fx", # gets SIGKILL
"test_dataloader", # frequently hangs for ROCm
- "test_schema_check", # Cause CUDA illegal memory access https://github.com/pytorch/pytorch/issues/95749
"functorch/test_memory_efficient_fusion", # Cause CUDA OOM on ROCm
"test_utils", # OOM
"test_sort_and_select", # OOM
|
2.41.0
|
d410155b241334b32872ff593d3480d10f57d5e
|
Wed, 1 May 2024 16:02:02 +0000
|
[PATCH 0893/1000] Revert "Include support for the scatter gather cuda kernels to allow for comp… (#124809)"
|
This reverts commit e09f98c705e4851414cd8ddf21949177af2b13aa. Reverted https://github.com/pytorch/pytorch/pull/124809 on behalf of https://github.com/clee2000 because the Windows build failure is real; https://github.com/pytorch/pytorch/actions/runs/8910674030/job/24470387612#step:11:11236 is the correct failure line. Ignore the statement saying the build passed: the batch script's error codes aren't propagating again ([comment](https://github.com/pytorch/pytorch/pull/124809#issuecomment-2088680371))
|
diff --git a/aten/src/ATen/cuda/Atomic.cuh b/aten/src/ATen/cuda/Atomic.cuh
index 2fa55902f9..56ee8f87e2 100644
--- a/aten/src/ATen/cuda/Atomic.cuh
+++ b/aten/src/ATen/cuda/Atomic.cuh
@@ -35,26 +35,6 @@ struct AtomicFPOp<at::Half> {
}
};
-template <>
-struct AtomicFPOp<c10::complex<float>> {
- template <typename func_t>
- inline __device__ c10::complex<float> operator() (c10::complex<float> *address, c10::complex<float> val, const func_t& func) {
- unsigned long long int* addr_as_ull = (unsigned long long int*)address;
- unsigned long long int old = *addr_as_ull;
- unsigned long long int assumed, new_val;
-
- c10::complex<float> csum;
- do {
- assumed = old;
- csum = func(csum, val);
- new_val = *reinterpret_cast<unsigned long long*>(&csum);
- old = atomicCAS(addr_as_ull, assumed, new_val);
- } while (assumed != old);
-
- return *reinterpret_cast<c10::complex<float>*>(&addr_as_ull);
- }
-};
-
template <>
struct AtomicFPOp<at::BFloat16> {
template <typename func_t>
@@ -368,14 +348,6 @@ GPU_ATOMIC_INTEGER(Mul, a * b, int16_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int32_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int64_t)
-inline __device__ c10::complex<float> gpuAtomicMul(c10::complex<float> *address, c10::complex<float> val){
- return AtomicFPOp<c10::complex<float>>()(address, val,
- [](c10::complex<float> bsum, c10::complex<float> val) {
- bsum*=(val);
- return bsum;
- });
-}
-
inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) {
return AtomicFPOp<at::Half>()(address, val,
[](at::Half bsum, at::Half val) {
@@ -397,7 +369,7 @@ inline __device__ double gpuAtomicMul(double * address, double val) {
});
}
-// Don't use a templated function for this since the addition function defaults to the CUDA built-in.
+// Dont use a templated function for this since the addition function defaults to the CUDA built-in.
inline __device__ float gpuAtomicMul (float * address, float val) {
unsigned int* address_as_ull = (unsigned int*)address;
unsigned int old = *address_as_ull;
@@ -430,29 +402,6 @@ __host__ __device__ T safe_max(T a, T b) {
return max;
}
-__inline__ __device__ c10::complex<float> complex_max(c10::complex<float> a, c10::complex<float> b) {
- if(at::_isnan(b)) {
- return b;
- } else {
- // Compute the magnitude of the complex numbers and compare each to see which one is greater.
- float a_magnitude = __fsqrt_rn(
- (
- __fmul_rn(a.real(), a.real()) +
- __fmul_rn(a.imag(),a.imag())
- )
- );
- float b_magnitude = __fsqrt_rn(
- (
- __fmul_rn(b.real(), b.real()) +
- __fmul_rn(b.imag(),b.imag())
- )
- );
- return std::max<float>(a_magnitude, b_magnitude);
- }
-
-}
-
-
ATOMIC_INTEGER_IMPL(Max)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t)
@@ -467,13 +416,6 @@ inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) {
});
}
-inline __device__ c10::complex<float> gpuAtomicMax(c10::complex<float> * address, c10::complex<float> val) {
- return AtomicFPOp<c10::complex<float>>()(address, val,
- [](c10::complex<float> bsum, c10::complex<float> val) {
- return complex_max(bsum, val);
- });
-}
-
inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) {
return AtomicFPOp<at::BFloat16>()(address, val,
[](at::BFloat16 bsum, at::BFloat16 val) {
@@ -520,27 +462,6 @@ __host__ __device__ T safe_min(T a, T b) {
return min;
}
-__inline__ __device__ c10::complex<float> complex_min(c10::complex<float> a, c10::complex<float> b) {
- if(at::_isnan(b)) {
- return b;
- } else {
- // Compute the magnitude of the complex numbers and compare each to see which one is smaller.
- float a_magnitude = __fsqrt_rn(
- (
- __fmul_rn(a.real(), a.real()) +
- __fmul_rn(a.imag(),a.imag())
- )
- );
- float b_magnitude = __fsqrt_rn(
- (
- __fmul_rn(b.real(), b.real()) +
- __fmul_rn(b.imag(),b.imag())
- )
- );
- return std::min<float>(a_magnitude, b_magnitude);
- }
-}
-
ATOMIC_INTEGER_IMPL(Min)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t)
@@ -555,13 +476,6 @@ inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) {
});
}
-inline __device__ c10::complex<float> gpuAtomicMin(c10::complex<float> * address, c10::complex<float> val) {
- return AtomicFPOp<c10::complex<float>>()(address, val,
- [](c10::complex<float> bsum, c10::complex<float> val) {
- return complex_min(bsum, val);
- });
-}
-
inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) {
return AtomicFPOp<at::BFloat16>()(address, val,
[](at::BFloat16 bsum, at::BFloat16 val) {
diff --git a/aten/src/ATen/native/cuda/ScatterGatherKernel.cu b/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
index 78f5d98dfe..9ef83599cd 100644
--- a/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
+++ b/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
@@ -4,6 +4,7 @@
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
+
#include <ATen/native/ScatterGatherChecks.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
@@ -200,6 +201,7 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
+
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
@@ -257,6 +259,7 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
+
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
@@ -315,9 +318,9 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
- AT_DISPATCH_ALL_TYPES_AND3(
+
+ AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
- at::ScalarType::ComplexFloat,
iter.dtype(),
"cuda_scatter_gather_base_kernel_func", [&] {
using dtype = typename std::conditional<cast_to_opaque,
@@ -447,9 +450,8 @@ struct cuda_scatter_fill_base_kernel {
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
- AT_DISPATCH_ALL_TYPES_AND3(
+ AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
- at::ScalarType::ComplexFloat,
iter.dtype(),
"cuda_scatter_fill_base_kernel_reduce_multiply", [&] {
using dtype = typename std::conditional<cast_to_opaque,
diff --git a/test/test_scatter_gather_ops.py b/test/test_scatter_gather_ops.py
index 9074d3e2a4..3351b9d257 100644
--- a/test/test_scatter_gather_ops.py
+++ b/test/test_scatter_gather_ops.py
@@ -221,8 +221,7 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
- include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_prod(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
@@ -230,8 +229,7 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_bool=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
- include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_mean(self, device, dtype):
for include_self in (True, False):
for deterministic in [False, True]:
@@ -241,8 +239,7 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
- include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_amax(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
@@ -261,8 +258,7 @@ class TestScatterGather(TestCase):
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
- include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_amin(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
diff --git a/test/test_torch.py b/test/test_torch.py
index 433ccd5d5b..21318f3b16 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -57,8 +57,8 @@ from torch.testing._internal.common_cuda import (
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
- floating_types_and, get_all_math_dtypes, all_types_and_complex_and, all_types_and, floating_types,
- floating_and_complex_types, integral_types_and,
+ floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
+ all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
@@ -3837,7 +3837,7 @@ else:
self.assertEqual(input, result, msg=f"result: {result} input: {input} method: {str(operation)}")
@onlyCUDA
- @dtypes(torch.cdouble)
+ @dtypes(*complex_types())
def test_scatter_reduce_multiply_unsupported_dtypes(self, device, dtype):
height = 2
width = 2
|
2.41.0
|
9eb5d4fa4a09cd462b51f460e71e16768188a44
|
Wed, 1 May 2024 16:59:35 +0000
|
[PATCH 0894/1000] Add Sanity Testing to Pytorch Profiler (#124773)
|
Summary: In recent weeks we have encountered bugs in both the normal synchronous trace and on-demand tracing. This diff adds sanity checking to make sure the profiler does not produce spans that extend past the boundaries we expect, and it verifies some basic properties of the traces so that completely broken tracings are caught. Requests/suggestions for other properties are welcome. Test Plan: Run the tests in OSS and Buck Reviewed By: aaronenyeshi Differential Revision: D56374298 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124773 Approved by: https://github.com/aaronenyeshi
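For intuition, the kind of invariant being checked looks roughly like the sketch below: profile a small workload, export a Chrome trace, and see whether any event falls outside the profiler's own span. This is a simplified stand-in for the test in the diff, not the test itself:
```
# Simplified sketch of the sanity property: trace events should start and end
# inside the profiler's own span. Reports stray events instead of asserting.
import json
import os
import tempfile

import torch
from torch.profiler import profile

x, y = torch.rand(4, 4), torch.rand(4, 4)
with profile(with_stack=True) as p:
    torch.add(x, y)

fd, fname = tempfile.mkstemp(suffix=".json")
os.close(fd)
p.export_chrome_trace(fname)
with open(fname) as f:
    events = json.load(f)["traceEvents"]
os.remove(fname)

profiler_spans = [e for e in events if e.get("name", "").startswith("PyTorch Profiler")]
if profiler_spans:
    start = profiler_spans[0]["ts"]
    end = start + profiler_spans[0]["dur"]
    stray = [
        e for e in events
        if "ts" in e and "dur" in e
        and not (start <= e["ts"] and e["ts"] + e["dur"] <= end)
    ]
    print(f"{len(stray)} events fall outside the profiler span")
```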
|
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py
index 2ec04c447f..ff6dc640dd 100644
--- a/test/profiler/test_profiler.py
+++ b/test/profiler/test_profiler.py
@@ -22,6 +22,7 @@ import re
import subprocess
import sys
import threading
+import time
import unittest
from dataclasses import dataclass, field
from typing import List, Optional
@@ -1641,6 +1642,137 @@ assert KinetoStepTracker.current_step() == initial_step + 2 * niters
event_list.table()
+ def _check_all_gpu_present(self, gpu_dict, max_gpu_count):
+ for i in range(0, max_gpu_count):
+ self.assertEqual(gpu_dict["GPU " + str(i)], 1)
+
+ # Do json sanity testing. Checks that all events are between profiler start and end
+ # also checks to see that GPU values are present in trace if cuda is used
+ def _validate_basic_json(self, traceEvents, cuda_available=False):
+ MAX_GPU_COUNT = 8
+ PROFILER_IDX = -4
+ RECORD_END = -1
+ RECORD_START = -2
+ traceEventProfiler = traceEvents[PROFILER_IDX]
+
+ self.assertTrue(traceEventProfiler["name"] == "PyTorch Profiler (0)")
+ self.assertTrue(traceEvents[RECORD_END]["name"] == "Record Window End")
+ self.assertTrue(
+ traceEvents[RECORD_START]["name"] == "Iteration Start: PyTorch Profiler"
+ )
+ # check that the profiler starts/ends within the record interval
+ self.assertGreaterEqual(
+ traceEventProfiler["ts"],
+ traceEvents[RECORD_START]["ts"],
+ "Profiler starts before record!",
+ )
+ self.assertLessEqual(
+ traceEventProfiler["ts"] + traceEventProfiler["dur"],
+ traceEvents[RECORD_END]["ts"],
+ "Profiler ends after record end!",
+ )
+
+ gpu_dict = collections.defaultdict(int)
+ for i, traceEvent in enumerate(traceEvents):
+ if (
+ i == len(traceEvents) + RECORD_END
+ or i == len(traceEvents) + RECORD_START
+ ):
+ continue
+ # make sure all valid trace events are within the bounds of the profiler
+ if "ts" in traceEvent:
+ self.assertGreaterEqual(
+ traceEvent["ts"],
+ traceEventProfiler["ts"],
+ "Trace event is out of bounds",
+ )
+ # some python events seem to go a little past record end probably because
+ # of some clock inaccuracies so just compare events ending to RECORD_END
+ if "dur" in traceEvent:
+ self.assertLessEqual(
+ traceEvent["ts"] + traceEvent["dur"],
+ traceEvents[RECORD_END]["ts"],
+ "Trace event ends too late!",
+ )
+ gpu_value = traceEvent.get("args", {}).get("labels", None)
+ if gpu_value and "GPU" in gpu_value:
+ gpu_dict[gpu_value] += 1
+ self.assertTrue(
+ traceEvents[i + 1]["args"]["sort_index"]
+ == 0x1000000 + int(gpu_value.split()[1])
+ )
+
+ # only check that gpu labels are present if cuda available
+ if cuda_available:
+ self._check_all_gpu_present(gpu_dict, MAX_GPU_COUNT)
+
+ def _test_chrome_trace_basic_helper(self, with_cuda=False):
+ if with_cuda:
+ device = "cuda"
+ else:
+ device = "cpu"
+ x, y = (torch.rand(4, 4).to(device) for _ in range(2))
+
+ with profile(with_stack=True) as p:
+ torch.add(x, y)
+ with TemporaryFileName(mode="w+") as fname:
+ p.export_chrome_trace(fname)
+ with open(fname) as f:
+ report = json.load(f)
+ self._validate_basic_json(report["traceEvents"], with_cuda)
+
+ @unittest.skipIf(not kineto_available(), "Kineto is required")
+ @skipIfTorchDynamo("profiler gets ignored if dynamo activated")
+ def test_basic_chrome_trace(self):
+ self._test_chrome_trace_basic_helper()
+ if torch.cuda.is_available():
+ self._test_chrome_trace_basic_helper(with_cuda=True)
+
+ @skipIfTorchDynamo("profiler gets ignored if dynamo activated")
+ def test_profiler_time_scale(self):
+ MARGIN_ERROR = 0.5
+ SEC_TO_US = 1000 * 1000
+ WAIT_TIME = 10
+ with profile() as p:
+ with torch.profiler.record_function("test_span"):
+ for i in range(WAIT_TIME):
+ torch.rand(4, 4)
+ time.sleep(1)
+ events = p.events()
+
+ # make sure function events are scaled appropriately
+ self.assertTrue(events[0].name == "test_span")
+ test_span = events[0]
+ self.assertGreaterEqual(
+ test_span.cpu_time / SEC_TO_US,
+ WAIT_TIME - MARGIN_ERROR,
+ "event out of range",
+ )
+ self.assertLessEqual(
+ test_span.cpu_time / SEC_TO_US,
+ WAIT_TIME + MARGIN_ERROR,
+ "event out of range",
+ )
+
+ # make sure tracing is scaled appropriately
+ with TemporaryFileName(mode="w+") as fname:
+ p.export_chrome_trace(fname)
+ with open(fname) as f:
+ report = json.load(f)
+ events = report["traceEvents"]
+ for event in events:
+ if event["name"] == "test_span":
+ self.assertGreaterEqual(
+ event["dur"] / SEC_TO_US,
+ WAIT_TIME - MARGIN_ERROR,
+ "profiling out of range",
+ )
+ self.assertLessEqual(
+ event["dur"] / SEC_TO_US,
+ WAIT_TIME + MARGIN_ERROR,
+ "profiling out of range",
+ )
+
class SimpleNet(nn.Module):
def __init__(self):
|
2.41.0
|
3627d05e789c6cd20eeccce89dd733f02a808a9
|
Wed, 1 May 2024 17:26:28 +0000
|
[PATCH 0895/1000] [CMake] Add NVPL BLAS/LAPACK option (#125268)
|
This PR adds an [NVPL](https://docs.nvidia.com/nvpl/introduction.html) BLAS/LAPACK option to CMake for `aarch64` (ARM) machines. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125268 Approved by: https://github.com/albanD
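After building with this option, one way to confirm which BLAS backend actually got linked is to inspect the build configuration string PyTorch exposes. A small sketch, assuming the `BLAS_INFO` field shows up in the config output as it does for other BLAS choices ("nvpl" would be expected for a BLAS=NVPL aarch64 build):
```
# Sketch: check which BLAS implementation a PyTorch build was configured with.
# Assumes BLAS_INFO appears in the build-settings string as for other backends.
import torch

config = torch.__config__.show()
blas_lines = [line for line in config.splitlines() if "BLAS_INFO" in line]
print(blas_lines or "BLAS_INFO not reported in this build")
```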
|
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index 5b30bef4fc..a8313f206f 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -237,6 +237,12 @@ elseif(BLAS STREQUAL "MKL")
set(CAFFE2_USE_EIGEN_FOR_BLAS ON)
set(CAFFE2_USE_MKL OFF)
endif()
+elseif(BLAS STREQUAL "NVPL")
+ find_package(NVPL_BLAS REQUIRED)
+ list(APPEND Caffe2_DEPENDENCY_LIBS nvpl::blas_lp64_omp)
+ set(BLAS_INFO "nvpl")
+ set(BLAS_FOUND 1)
+ set(BLAS_USE_CBLAS_DOT TRUE)
elseif(BLAS STREQUAL "vecLib")
find_package(vecLib REQUIRED)
include_directories(SYSTEM ${vecLib_INCLUDE_DIR})
@@ -269,7 +275,7 @@ if(NOT INTERN_BUILD_MOBILE)
set(AT_MKL_ENABLED 0)
set(AT_MKL_SEQUENTIAL 0)
set(USE_BLAS 1)
- if(NOT (ATLAS_FOUND OR BLIS_FOUND OR GENERIC_BLAS_FOUND OR MKL_FOUND OR OpenBLAS_FOUND OR VECLIB_FOUND OR FlexiBLAS_FOUND))
+ if(NOT (ATLAS_FOUND OR BLIS_FOUND OR GENERIC_BLAS_FOUND OR MKL_FOUND OR OpenBLAS_FOUND OR VECLIB_FOUND OR FlexiBLAS_FOUND OR NVPL_BLAS_FOUND))
message(WARNING "Preferred BLAS (" ${BLAS} ") cannot be found, now searching for a general BLAS library")
find_package(BLAS)
if(NOT BLAS_FOUND)
diff --git a/cmake/Modules/FindLAPACK.cmake b/cmake/Modules/FindLAPACK.cmake
index 02367ff986..fc8bf50d7d 100644
--- a/cmake/Modules/FindLAPACK.cmake
+++ b/cmake/Modules/FindLAPACK.cmake
@@ -95,6 +95,13 @@ if(BLAS_FOUND)
SET(LAPACK_INFO "mkl")
ENDIF()
+ # NVPL
+ IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "nvpl"))
+ FIND_PACKAGE(NVPL_LAPACK REQUIRED)
+ SET(LAPACK_LIBRARIES nvpl::lapack_lp64_omp)
+ SET(LAPACK_INFO "nvpl")
+ ENDIF()
+
# Accelerate
IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "accelerate"))
SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
|
2.41.0
|
8d2a55273757c90989fde7c6f05e957aba9a238
|
Wed, 1 May 2024 17:45:11 +0000
|
[PATCH 0896/1000] Intel GPU: specify the tolerance for torchbench models (#125213)
|
We encountered some model accuracy failures because the tolerance setting is critical for these checks. In general, we align with CUDA practice. This PR adjusts the tolerance for TorchBench models in training mode on Intel GPU devices to match the CUDA settings. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125213 Approved by: https://github.com/desertfire
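For reference, the 1e-3 training tolerance applied here corresponds to a comparison along these lines; this is an illustrative sketch of the check, not the benchmark harness code itself:
```
# Illustrative only: comparing eager vs. compiled training results with the
# 1e-3 tolerance this PR applies to cuda/xpu training runs.
import torch

expected = torch.randn(128, 64)
actual = expected + 1e-4  # small, bounded numerical drift

tol = 1e-3
print(torch.allclose(actual, expected, rtol=tol, atol=tol))  # True within 1e-3
```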
|
diff --git a/benchmarks/dynamo/torchbench.py b/benchmarks/dynamo/torchbench.py
index a6b4edb3a4..274d04da15 100755
--- a/benchmarks/dynamo/torchbench.py
+++ b/benchmarks/dynamo/torchbench.py
@@ -402,7 +402,7 @@ class TorchBenchmarkRunner(BenchmarkRunner):
if name in self._tolerance["higher_bf16"]:
return 1e-2, cosine
- if is_training and current_device == "cuda":
+ if is_training and (current_device == "cuda" or current_device == "xpu"):
tolerance = 1e-3
if name in self._tolerance["cosine"]:
cosine = True
|
2.41.0
|
f6acf9add0f52a483c9ea073a7adfe4ab1233dd
|
Wed, 1 May 2024 18:30:57 +0000
|
[PATCH 0897/1000] [ROCm] Add extra cuda_to_hip_mappings.py (#125108)
|
Adding extra mappings discovered when hipifying the backward CUDA kernel of the Mamba model (https://github.com/state-spaces/mamba/). Co-authored-by: Jeff Daily <jeff.daily@amd.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/125108 Approved by: https://github.com/Skylion007, https://github.com/jeffdaily
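The mappings live in an ordered dict keyed by the CUDA identifier, so a quick way to see how a given symbol will be translated is to look it up directly. A small sketch using the module touched by this diff:
```
# Sketch: inspect how the hipify mappings translate individual CUDA symbols.
# CUDA_IDENTIFIER_MAP maps a CUDA identifier to a (HIP replacement, conversion
# kind, API group) tuple, as seen in the diff below.
from torch.utils.hipify.cuda_to_hip_mappings import CUDA_IDENTIFIER_MAP

for cuda_name in ("cub::BlockScan", "cub::WarpReduce"):
    hip_name = CUDA_IDENTIFIER_MAP[cuda_name][0]
    print(f"{cuda_name} -> {hip_name}")
```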
|
diff --git a/torch/utils/hipify/cuda_to_hip_mappings.py b/torch/utils/hipify/cuda_to_hip_mappings.py
index e48c595928..d07292a22b 100644
--- a/torch/utils/hipify/cuda_to_hip_mappings.py
+++ b/torch/utils/hipify/cuda_to_hip_mappings.py
@@ -643,7 +643,11 @@ CUDA_INCLUDE_MAP = collections.OrderedDict(
("thrust/system/cuda", ("thrust/system/hip", CONV_INCLUDE, API_BLAS)),
("cub/util_allocator.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/block/block_reduce.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
+ ("cub/block/block_raking_layout.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/cub.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
+ ("cub/config.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
+ ("cub/util_ptx.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
+ ("cub/util_type.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/device/device_run_length_encode.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/block/block_load.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
("cub/block/block_store.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)),
@@ -7954,6 +7958,9 @@ CUDA_IDENTIFIER_MAP = collections.OrderedDict(
("cub::BlockScan", ("hipcub::BlockScan", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BlockLoad", ("hipcub::BlockLoad", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::BlockStore", ("hipcub::BlockStore", CONV_SPECIAL_FUNC, API_RUNTIME)),
+ ("cub::BlockRakingLayout", ("hipcub::BlockRakingLayout", CONV_SPECIAL_FUNC, API_RUNTIME)),
+ ("cub::Uninitialized", ("hipcub::Uninitialized", CONV_SPECIAL_FUNC, API_RUNTIME)),
+ ("cub::RowMajorTid", ("hipcub::RowMajorTid", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::CachingDeviceAllocator", ("hipcub::CachingDeviceAllocator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::CountingInputIterator", ("hipcub::CountingInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::DeviceRadixSort", ("hipcub::DeviceRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)),
@@ -7967,9 +7974,15 @@ CUDA_IDENTIFIER_MAP = collections.OrderedDict(
("cub::Max", ("hipcub::Max", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::Min", ("hipcub::Min", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::Sum", ("hipcub::Sum", CONV_SPECIAL_FUNC, API_RUNTIME)),
+ ("cub::Log2", ("hipcub::Log2", CONV_SPECIAL_FUNC, API_RUNTIME)),
+ ("cub::LaneId", ("hipcub::LaneId", CONV_SPECIAL_FUNC, API_RUNTIME)),
+ ("cub::WarpMask", ("hipcub::WarpMask", CONV_SPECIAL_FUNC, API_RUNTIME)),
+ ("cub::ShuffleIndex", ("hipcub::ShuffleIndex", CONV_SPECIAL_FUNC, API_RUNTIME)),
+ ("cub::ShuffleDown", ("hipcub::ShuffleDown", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::ArgIndexInputIterator", ("hipcub::ArgIndexInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::TransformInputIterator", ("hipcub::TransformInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
("cub::WarpReduce", ("hipcub::WarpReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
+ ("cub::CTA_SYNC", ("hipcub::CTA_SYNC", CONV_SPECIAL_FUNC, API_RUNTIME)),
("nvtxMark", ("roctxMark", CONV_OTHER, API_ROCTX)),
("nvtxMarkA", ("roctxMarkA", CONV_OTHER, API_ROCTX)),
("nvtxRangePushA", ("roctxRangePushA", CONV_OTHER, API_ROCTX)),
|
2.41.0
|
7422fd0b9ad21eb9e44c68172d4a85b3d03769e
|
Wed, 1 May 2024 18:35:49 +0000
|
[PATCH 0898/1000] add missing space to first cmake append (#125294)
|
the first append not having a space incorrectly merges it to any previous arguments, like `-allow-unsupported-compiler` in my case which results in a silly error: `unrecognized command-line option '-allow-unsupported-compiler-DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS'` full log: ``` python setup.py develop Building wheel torch-2.4.0a0+git75fa54a -- Building version 2.4.0a0+git75fa54a cmake3 -GNinja -DBUILD_PYTHON=True -DBUILD_TEST=True -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/code/pytorch/torch -DCMAKE_PREFIX_PATH=/code/pytorch/.venv/lib/python3.12/site-packages;/code/spack/opt/spack/linux-fedora40-zen2/gcc-14.0.1/gcc-13.2.0-noa2f4oqalxzqvsebhuntndewgt4gq4h:/code/spack/opt/spack/linux-fedora40-zen2/gcc-14.0.1/zstd-1.5.6-z3guwm4l5rmmsv4g4wvkej3ri3bppeja:/code/spack/opt/spack/linux-fedora40-zen2/gcc-14.0.1/zlib-ng-2.1.6-kwi4ljobodjgv5eetnga4bow6crdlacl:/code/spack/opt/spack/linux-fedora40-zen2/gcc-14.0.1/mpc-1.3.1-nuwa2snyzm265lsupa2dkmxxyhiqcv7e:/code/spack/opt/spack/linux-fedora40-zen2/gcc-14.0.1/mpfr-4.2.1-wepuwobwttxbtz3nguimxa2mlljjozsi:/code/spack/opt/spack/linux-fedora40-zen2/gcc-14.0.1/gmp-6.2.1-ashy6kiitonxv2f365f4q3beggzf3646:/code/spack/opt/spack/linux-fedora40-zen2/gcc-14.0.1/gcc-runtime-14.0.1-wmogkqrzn7t57dogaake2hmhjbod27gs -DNUMPY_INCLUDE_DIR=/code/pytorch/.venv/lib64/python3.12/site-packages/numpy/core/include -DPYTHON_EXECUTABLE=/code/pytorch/.venv/bin/python -DPYTHON_INCLUDE_DIR=/usr/include/python3.12 -DPYTHON_LIBRARY=/usr/lib64/libpython3.12.so.1.0 -DTORCH_BUILD_VERSION=2.4.0a0+git75fa54a -DUSE_NUMPY=True /code/pytorch -- /usr/lib64/ccache/c++ /code/pytorch/torch/abi-check.cpp -o /code/pytorch/build/abi-check -- Determined _GLIBCXX_USE_CXX11_ABI=1 -- Current compiler supports avx2 extension. Will build perfkernels. -- Current compiler supports avx512f extension. Will build fbgemm. -- The CUDA compiler identification is NVIDIA 12.4.131 -- Detecting CUDA compiler ABI info -- Detecting CUDA compiler ABI info - failed -- Check for working CUDA compiler: /usr/local/cuda-12/bin/nvcc -- Check for working CUDA compiler: /usr/local/cuda-12/bin/nvcc - broken CMake Error at /usr/share/cmake/Modules/CMakeTestCUDACompiler.cmake:59 (message): The CUDA compiler "/usr/local/cuda-12/bin/nvcc" is not able to compile a simple test program. 
It fails with the following output: Change Dir: '/code/pytorch/build/CMakeFiles/CMakeScratch/TryCompile-mSGoFl' Run Build Command(s): /code/pytorch/.venv/bin/ninja -v cmTC_ee207 [1/2] /usr/local/cuda-12/bin/nvcc -forward-unknown-to-host-compiler -allow-unsupported-compiler-DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS -D_GLIBCXX_USE_CXX11_ABI=1 -Xfatbin -compress-all "--generate-code=arch=compute_52,code=[compute_52,sm_52]" -MD -MT CMakeFiles/cmTC_ee207.dir/main.cu.o -MF CMakeFiles/cmTC_ee207.dir/main.cu.o.d -x cu -c /code/pytorch/build/CMakeFiles/CMakeScratch/TryCompile-mSGoFl/main.cu -o CMakeFiles/cmTC_ee207.dir/main.cu.o FAILED: CMakeFiles/cmTC_ee207.dir/main.cu.o /usr/local/cuda-12/bin/nvcc -forward-unknown-to-host-compiler -allow-unsupported-compiler-DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS -D_GLIBCXX_USE_CXX11_ABI=1 -Xfatbin -compress-all "--generate-code=arch=compute_52,code=[compute_52,sm_52]" -MD -MT CMakeFiles/cmTC_ee207.dir/main.cu.o -MF CMakeFiles/cmTC_ee207.dir/main.cu.o.d -x cu -c /code/pytorch/build/CMakeFiles/CMakeScratch/TryCompile-mSGoFl/main.cu -o CMakeFiles/cmTC_ee207.dir/main.cu.o gcc: error: unrecognized command-line option '-allow-unsupported-compiler-DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS' ninja: build stopped: subcommand failed. CMake will not be able to correctly generate this project. Call Stack (most recent call first): cmake/public/cuda.cmake:47 (enable_language) cmake/Dependencies.cmake:44 (include) CMakeLists.txt:758 (include) -- Configuring incomplete, errors occurred! ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/125294 Approved by: https://github.com/albanD
|
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 215ec7a81a..9ce1e06bea 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -56,7 +56,7 @@ endif()
# This define is needed to preserve behavior given anticpated changes to cccl/thrust
# https://nvidia.github.io/libcudacxx/standard_api/numerics_library/complex.html
-string(APPEND CMAKE_CUDA_FLAGS "-DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS")
+string(APPEND CMAKE_CUDA_FLAGS " -DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS")
if(LINUX)
include(cmake/CheckAbi.cmake)
|
2.41.0
|
97612c84c37eb99ef6d18f579e7fd8a52a31d28
|
Wed, 1 May 2024 19:59:51 +0000
|
[PATCH 0902/1000] ProcessGroupWrapper support custom backend (#124447)
|
Fixes #ISSUE_NUMBER In the current code, ProcessGroupWrapper works only for `GLOO, NCCL, UCC` when `TORCH_DISTRIBUTED_DEBUG=DETAIL`. Reading the ProcessGroupWrapper code shows that each communication op in ProcessGroupWrapper is just the communication op of the original backend plus runCollectiveChecks in gloo; for example, allreduce: https://github.com/pytorch/pytorch/blob/82e0153487c2cd1abc92598963be5b57ab1948d4/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp#L406-L411 `runCollectiveChecks` is used to compute a collective fingerprint of the tensors and run gloo's `monitoredBarrier`. https://github.com/pytorch/pytorch/blob/82e0153487c2cd1abc92598963be5b57ab1948d4/torch/csrc/distributed/c10d/ProcessGroupWrapper.cpp#L586-L590 I don't know why ProcessGroupWrapper doesn't work for all backends, but I think custom backends can support it. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124447 Approved by: https://github.com/kwen2501
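A rough sketch of the situation this change targets: a third-party backend registered through `Backend.register_backend` now also becomes eligible for the debug wrapping when `TORCH_DISTRIBUTED_DEBUG=DETAIL` is set. The backend name and creator function below are hypothetical placeholders and the creator body is elided; only the registration pattern is the point:
```
# Hypothetical sketch: with TORCH_DISTRIBUTED_DEBUG=DETAIL, a registered
# custom backend is now considered for ProcessGroupWrapper wrapping, not just
# gloo/nccl/ucc. "dummy" and its creator function are placeholders.
import os

import torch.distributed as dist

os.environ["TORCH_DISTRIBUTED_DEBUG"] = "DETAIL"


def _create_dummy_pg(store, rank, world_size, timeout):
    # A real plugin would construct and return its ProcessGroup here.
    raise NotImplementedError("placeholder creator for illustration only")


dist.Backend.register_backend("dummy", _create_dummy_pg)
print("DUMMY" in dist.Backend._plugins)  # plugins are keyed by upper-cased name, as the diff checks
```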
|
diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py
index 74f2ed5845..c006fbc08c 100644
--- a/torch/distributed/distributed_c10d.py
+++ b/torch/distributed/distributed_c10d.py
@@ -1602,7 +1602,7 @@ def _new_process_group_helper(
break
# Process group wrapper initialization for supported PGs when TORCH_DISTRIBUTED_DEBUG is set
- if backend_str in [Backend.GLOO, Backend.NCCL, Backend.UCC]:
+ if backend_str in [Backend.GLOO, Backend.NCCL, Backend.UCC] or backend_str.upper() in Backend._plugins:
# In debug mode and if GLOO is available, wrap in a wrapper PG that
# enables enhanced collective checking for debuggability.
if get_debug_level() == DebugLevel.DETAIL:
|
2.41.0
|
bcf123105a3f11d02f04067ca0cb377ed09e88c
|
Wed, 1 May 2024 20:59:17 +0000
|
[PATCH 0904/1000] Upgrade submodule oneDNN to v3.4 (#122472)
|
## Improvements This upgrade fixes the following issues: - https://github.com/pytorch/pytorch/issues/120982 This upgrade brings the following new features: - Introduced memory descriptor serialization API. This API is needed to support freezing on CPU in AOTInductor (https://github.com/pytorch/pytorch/issues/114450) ## Validation results on CPU No regression was found. 1. NLP models accuracy/inference/training Model Name | Mode| Precision | New | Baseline | New/Baseline -- | -- | -- | -- | -- | -- bert-large | accuracy | fp32 | 93.15325 | 93.15325 | 100.00% bert-large | accuracy | bf16 | 93.20125 | 93.20125 | 100.00% bert-large | accuracy | int8 | 92.66641 | 92.66641 | 100.00% LCM | accuracy | fp32 | 44.11152 | 44.11154 | 100.00% LCM | accuracy | bf16 | 43.57667 | 43.65096 | 100.17% ViT | accuracy | fp32 | 0.8033 | 0.8033 | 100.00% ViT | accuracy | bf16 | 0.8031 | 0.8031 | 100.00% ViT | accuracy | int8 | 0.7985 | 0.7985 | 100.00% yolov7 | accuracy | fp32 | 0.512 | 0.512 | 100.00% yolov7 | accuracy | bf16 | 0.504 | 0.504 | 100.00% yolov7 | accuracy | int8 | 0.507 | 0.507 | 100.00% bert-large | realtime | fp32 | 37.433 | 39.136 | 95.65% bert-large | realtime | bf16 | 166.592 | 160.134 | 104.03% bert-large | realtime | int8 | 230.876 | 222.594 | 103.72% ViT | realtime | fp32 | 288.19 | 282.05 | 102.18% ViT | realtime | bf16 | 755.42 | 741.1 | 101.93% ViT | realtime | int8 | 1060.94 | 1092.47 | 97.11% yolov7 | realtime | fp32 | 17.06927 | 16.47995 | 103.58% yolov7 | realtime | bf16 | 54.68561 | 54.00723 | 101.26% yolov7 | realtime | int8 | 78.38271 | 77.63214 | 100.97% bert-large | throughput | fp32 | 47.142 | 47.341 | 99.58% bert-large | throughput | bf16 | 200.365 | 200.806 | 99.78% bert-large | throughput | int8 | 144.999 | 145.295 | 99.80% LCM | throughput | fp32 | 0.54913 | 0.54897 | 100.03% LCM | throughput | bf16 | 1.062417 | 1.07772 | 98.58% stable-diffusion | throughput | fp32 | 0.03301 | 0.0331 | 99.73% stable-diffusion | throughput | bf16 | 0.08773 | 0.08849 | 99.14% stable-diffusion | throughput | int8 | 0.0491 | 0.05024 | 97.73% ViT | throughput | fp32 | 342.55 | 346.47 | 98.87% ViT | throughput | bf16 | 1263.4 | 1268.32 | 99.61% ViT | throughput | int8 | 1331.3 | 1345.32 | 98.96% yolov7 | throughput | fp32 | 115.313 | 115.612 | 99.74% yolov7 | throughput | bf16 | 323.364 | 323.747 | 99.88% yolov7 | throughput | int8 | 388.137 | 384.236 | 101.02% bert-large | train_phase1 | fp32 | 34.223 | 34.309 | 99.75% bert-large | train_phase1 | bf16 | 90.372 | 88.453 | 102.17% bert-large | train_phase2 | fp32 | 7.307 | 7.318 | 99.85% Data Type | Geomean -- | -- fp32 | 99.88% bf16 | 100.70% int8 | 99.88% all | 100.16% 2. Torchbench cpu userbenchmark inference & training Test suite | Geomean Ratio (New/baseline) -- | -- eager_throughtput_bf16_infer | 1.00x eager_throughtput_fp32_infer | 1.00x jit_llga_throughtput_amp_bf16 | 0.99x jit_llga_throughtput_fp32 | 1.01x eager_throughtput_fx_int8 | 1.00x eager_throughtput_bf16_train | 1.00x eager_throughtput_fp32_train | 1.00x 3. Inductor quantization (static & dynamic) accuracy & performance Config | Performance geomean ratio (New/baseline) | Accuracy ratio (New/baseline) -- | -- | -- Static quant PTQ | 0.99x | 1.00x Static quant PTQ_CPP_WRAPPER | 0.98x | 1.00x Static quant QAT | 0.99x | 1.00x Dynamic quant PTQ | 1.00x | 1.00x 4. 
Dynamo benchmarks Precision | Shape | Wrapper | Thread | Ratio old/new GEOMEAN | Ratio old/new GEOMEAN -- | -- | -- | -- | -- | -- | | | | Eager | Inductor Float32 | Static | Default | Multiple | 0.998776 | 1.002091 | | | Single | 1.014086 | 1.01054 Float32 | Dynamic | Default | Multiple | 1.00386 | 1.005975 | | | Single | 1.011036 | 1.008317 AMP | Static | Default | Multiple | 0.996965 | 1.005117 | | | Single | 1.00092 | 0.995666 AMP | Dynamic | Default | Multiple | 0.9959 | 0.995048 | | | Single | 1.002569 | 0.994085 --- Pull Request resolved: https://github.com/pytorch/pytorch/pull/122472 Approved by: https://github.com/jgong5, https://github.com/EikanWang, https://github.com/atalman
|
diff --git a/third_party/ideep b/third_party/ideep
index 8a6cc4e09d..420c9806c8 160000
--- a/third_party/ideep
+++ b/third_party/ideep
@@ -1 +1 @@
-Subproject commit 8a6cc4e09dc509f04f83c085e38786b1fb44e14d
+Subproject commit 420c9806c870711668a4bbc469f1d40839532fea
diff --git a/third_party/mkl-dnn.BUILD b/third_party/mkl-dnn.BUILD
index dac4f9e3e8..bde7905aa0 100644
--- a/third_party/mkl-dnn.BUILD
+++ b/third_party/mkl-dnn.BUILD
@@ -63,9 +63,9 @@ template_rule(
out = "include/oneapi/dnnl/dnnl_version.h",
substitutions = {
"@DNNL_VERSION_MAJOR@": "3",
- "@DNNL_VERSION_MINOR@": "3",
- "@DNNL_VERSION_PATCH@": "6",
- "@DNNL_VERSION_HASH@": "86e6af5974177e513fd3fee58425e1063e7f1361",
+ "@DNNL_VERSION_MINOR@": "4",
+ "@DNNL_VERSION_PATCH@": "1",
+ "@DNNL_VERSION_HASH@": "f5ff0a6de16c130053bec1a1aec3a9b826c66f78",
},
)
|
2.41.0
|
46da8755c45c331dc4c7513ff89b531f978c2cd
|
Wed, 1 May 2024 21:01:26 +0000
|
[PATCH 0905/1000] switch tests from constrain_as* to torch._check* (#125253)
|
To fix data-dependent errors we want to recommend that people use `torch._check*` APIs. The `constrain_as*` APIs should be fully subsumed by them, and in the future we should kill them entirely. Differential Revision: D56774333 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125253 Approved by: https://github.com/ezyang
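A small sketch of the replacement pattern this diff applies throughout the tests. The data-dependent sizes come from `.tolist()`/`.item()`, and the `torch._check*` calls convey the same hints the old `constrain_as*` calls did:
```
# Sketch of the migration pattern used below. In eager mode these are cheap
# runtime checks; under export/compile they give the compiler the range
# information the old constrain_as* calls provided.
import torch


def split_by_lengths(lengths, values):
    sizes = lengths.tolist()
    for s in sizes:
        torch._check_is_size(s)   # replaces torch._constrain_as_size(s, ...)
        torch._check(s >= 2)      # replaces the old min= bound
        torch._check(s <= 100)    # replaces the old max= bound
    return torch.split(values, sizes)


print([t.shape for t in split_by_lengths(torch.tensor([2, 3, 4]), torch.randn(9))])
```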
|
diff --git a/test/distributed/test_c10d_functional_native.py b/test/distributed/test_c10d_functional_native.py
index 99062d1bab..54030d1f1d 100644
--- a/test/distributed/test_c10d_functional_native.py
+++ b/test/distributed/test_c10d_functional_native.py
@@ -703,7 +703,7 @@ class CompileTest(TestCase):
def _tolist_with_constrain_as_size(tensor):
lst = tensor.tolist()
for elem in lst:
- torch._constrain_as_size(elem)
+ torch._check_is_size(elem)
return lst
def func(
diff --git a/test/distributed/test_inductor_collectives.py b/test/distributed/test_inductor_collectives.py
index 19853718b9..65802c4896 100644
--- a/test/distributed/test_inductor_collectives.py
+++ b/test/distributed/test_inductor_collectives.py
@@ -35,7 +35,7 @@ from torch.utils._triton import has_triton
def _tolist_with_constrain_as_size(tensor):
lst = tensor.tolist()
for elem in lst:
- torch._constrain_as_size(elem)
+ torch._check_is_size(elem)
return lst
diff --git a/test/dynamo/test_export.py b/test/dynamo/test_export.py
index 89d34ea4f2..1d13d68f26 100644
--- a/test/dynamo/test_export.py
+++ b/test/dynamo/test_export.py
@@ -2604,7 +2604,7 @@ def forward(self, x):
def test_export_preserve_constraints_as_metadata_scalar(self):
def f(x, y):
b = x.item()
- torch._constrain_as_size(b)
+ torch._check_is_size(b)
return torch.empty((b, y.shape[0]))
x = torch.tensor([3])
@@ -2634,7 +2634,8 @@ def forward(self, x):
def test_export_preserve_constraints_as_metadata_tensor(self):
def f(x):
b = x.nonzero()
- torch._constrain_as_value(b.shape[0], min=2, max=5)
+ torch._check(b.shape[0] >= 2)
+ torch._check(b.shape[0] <= 5)
return b
y = torch.tensor([8, 8, 6])
@@ -2652,7 +2653,7 @@ def forward(self, x):
def test_exported_graph_serialization(self):
def f(x, y):
b = x.item()
- torch._constrain_as_size(b)
+ torch._check_is_size(b)
return torch.empty((b, y.shape[0]))
x = torch.tensor([3])
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index 1d187ad9e5..f57fce9428 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -8491,7 +8491,9 @@ def ___make_guard_fn():
def f(lengths, values):
sizes = lengths.tolist()
for s in sizes:
- torch._constrain_as_size(s, min=2, max=100)
+ torch._check_is_size(s)
+ torch._check(s >= 2)
+ torch._check(s <= 100)
return torch.split(values, sizes)
f(torch.tensor([2, 3, 4]), torch.randn(9))
diff --git a/test/export/test_export.py b/test/export/test_export.py
index 5f6198ce39..c7b6f53aaf 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -150,7 +150,7 @@ class TestDynamismExpression(TestCase):
class Module(torch.nn.Module):
def forward(self, x):
b = x.item()
- torch._constrain_as_size(b)
+ torch._check_is_size(b)
return torch.full((b, 1), 1)
f = Module()
@@ -183,8 +183,9 @@ class TestDynamismExpression(TestCase):
class ConflictingConstraints(torch.nn.Module):
def forward(self, x):
b = x.item()
- torch._constrain_as_size(b)
- torch._constrain_as_value(b, min=4, max=5)
+ torch._check_is_size(b)
+ torch._check(b >= 4)
+ torch._check(b <= 5)
return torch.full((b, 1), 1)
inp = (torch.tensor([3]),)
@@ -1416,7 +1417,7 @@ class TestExport(TestCase):
valid_idxs = torch.nonzero(valid_mask).to(scores.device)
num_topk = torch.minimum(topk, torch.tensor(valid_idxs.shape[0])).item()
- torch._constrain_as_size(num_topk)
+ torch._check_is_size(num_topk)
torch._check(scores.shape[0] >= num_topk)
scores, idxs = scores.sort(descending=True)
scores = scores[:num_topk]
@@ -2152,21 +2153,6 @@ class TestExport(TestCase):
# Intentionally not wrapping `inp` in a tuple to trigger the error
_ = export(M(), inp)
- def test_constrain_value_with_no_default(self):
- class Module(torch.nn.Module):
- def forward(self, x, y):
- n = x.max().item()
- torch._constrain_as_value(n)
- return y + n
-
- fn = Module()
- ep = export(
- fn,
- (torch.randint(3, 5, (2, 2)), torch.randint(3, 5, (2, 3))),
- )
- test_inp = (torch.randint(3, 5, (2, 2)), torch.randint(3, 5, (2, 3)))
- self.assertTrue(torch.allclose(ep.module()(*test_inp), fn(*test_inp)))
-
def test_decomp_batch_norm_functional_predispatch(self):
class ConvBatchnorm(torch.nn.Module):
def __init__(self):
@@ -2227,30 +2213,11 @@ def forward(self, x):
return pytree.tree_unflatten((getitem,), self._out_spec)""",
)
- def test_constrain_value_with_symfloat(self):
- class Module(torch.nn.Module):
- def forward(self, x, y):
- n = x.max().item()
- torch._constrain_as_value(n)
- return y + n
-
- fn = Module()
- error = (
- ValueError
- if is_non_strict_test(self._testMethodName)
- else torch._dynamo.exc.TorchRuntimeError
- )
- with self.assertRaisesRegex(
- error,
- "Constraining SymFloat or Symbool is nyi",
- ):
- _ = export(fn, (torch.rand(2, 2), torch.rand(2, 3)))
-
def test_constrain_size_in_eager(self):
class Module(torch.nn.Module):
def forward(self, x, y):
n = x.max().item()
- torch._constrain_as_size(n)
+ torch._check_is_size(n)
return y + n
fn = Module()
@@ -2262,17 +2229,19 @@ def forward(self, x):
self.assertTrue(torch.allclose(ep.module()(*test_inp), fn(*test_inp)))
@testing.expectedFailureNonStrict
+ @testing.expectedFailureRetraceability
def test_constrain_size_with_constrain_value(self):
class Module(torch.nn.Module):
def forward(self, x, y):
n = x.max().item()
- torch._constrain_as_value(n, 2, 10)
- torch._constrain_as_size(n)
+ torch._check(n >= 2)
+ torch._check(n <= 10)
+ torch._check_is_size(n)
return y + n
fn = Module()
with self.assertRaisesRegex(
- RuntimeError, r"Invalid value range for 1 between \[2, 10\]."
+ RuntimeError, r"Expected cond to be True, but got False"
):
_ = fn(torch.randint(1, 2, (2, 2)), torch.randint(3, 5, (2, 3)))
@@ -2288,7 +2257,8 @@ def forward(self, x):
class Module1(torch.nn.Module):
def forward(self, x, y):
n = x.item()
- torch._constrain_as_size(n, min=0)
+ torch._check_is_size(n)
+ torch._check(n >= 0)
return y.sum() + torch.ones(n, 5).sum()
case1 = Module1()
@@ -2296,7 +2266,9 @@ def forward(self, x):
class Module2(torch.nn.Module):
def forward(self, x, y):
n = x.item()
- torch._constrain_as_size(n, min=0, max=6)
+ torch._check_is_size(n)
+ torch._check(n >= 0)
+ torch._check(n <= 6)
return y.sum() + torch.ones(n, 5).sum()
case2 = Module2()
@@ -2304,7 +2276,9 @@ def forward(self, x):
class Module3(torch.nn.Module):
def forward(self, x, y):
n = x.item()
- torch._constrain_as_size(n, min=0, max=1)
+ torch._check_is_size(n)
+ torch._check(n >= 0)
+ torch._check(n <= 1)
return y.sum() + torch.ones(n, 5).sum()
case3 = Module3()
@@ -2312,7 +2286,8 @@ def forward(self, x):
class Module4(torch.nn.Module):
def forward(self, x, y):
n = x.item()
- torch._constrain_as_size(n, min=2)
+ torch._check_is_size(n)
+ torch._check(n >= 2)
return y.sum() + torch.ones(n, 5).sum()
case4 = Module4()
@@ -2320,7 +2295,8 @@ def forward(self, x):
class Module5(torch.nn.Module):
def forward(self, x, y):
n = x.item()
- torch._constrain_as_size(n, min=1)
+ torch._check_is_size(n)
+ torch._check(n >= 1)
return y.sum() + torch.ones(n, 5).sum()
case5 = Module5()
@@ -2328,7 +2304,7 @@ def forward(self, x):
ep = export(case1, (torch.tensor(1), torch.ones(4, 5)))
with self.assertRaisesRegex(
- RuntimeError, r"Invalid value range for -1 between"
+ RuntimeError, r"Expected cond to be True, but got False"
):
_ = case1(torch.tensor(-1), torch.randn(4, 5))
@@ -2341,10 +2317,16 @@ def forward(self, x):
ep = export(case2, (torch.tensor(5), torch.randn(4, 5)))
- with self.assertRaisesRegex(RuntimeError, r"Invalid value range for 7 between"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected cond to be True, but got False",
+ ):
_ = case2(torch.tensor(7), torch.randn(4, 5))
- with self.assertRaisesRegex(RuntimeError, r"Invalid value range for 9 between"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected cond to be True, but got False",
+ ):
_ = case2(torch.tensor(9), torch.randn(4, 5))
self.assertTrue(
@@ -2354,21 +2336,20 @@ def forward(self, x):
)
)
- with self.assertRaisesRegex(
- RuntimeError,
- "Max value to constrain_range_for_size must be greater than 2. got: 1",
- ):
- _ = case3(torch.tensor(1), torch.randn(4, 5))
+ _ = case3(torch.tensor(1), torch.randn(4, 5))
with self.assertRaisesRegex(
RuntimeError,
- r"Invalid value range for 1 between \[2, 9223372036854775807\].",
+ r"Expected cond to be True, but got False",
):
_ = case4(torch.tensor(1), torch.randn(4, 5))
ep = export(case4, (torch.tensor(5), torch.randn(4, 5)))
- with self.assertRaisesRegex(RuntimeError, r"Invalid value range for 1"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected cond to be True, but got False",
+ ):
_ = case4(torch.tensor(1), torch.randn(4, 5))
self.assertTrue(
@@ -2380,7 +2361,10 @@ def forward(self, x):
ep = export(case5, (torch.tensor(5), torch.randn(4, 5)))
- with self.assertRaisesRegex(RuntimeError, r"Invalid value range for 0"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected cond to be True, but got False",
+ ):
_ = case5(torch.tensor(0), torch.randn(4, 5))
self.assertTrue(
@@ -2424,7 +2408,9 @@ def forward(self, x):
def forward(self, start_pos: torch.Tensor):
pos = start_pos.item()
- torch._constrain_as_size(pos, min=0, max=4)
+ torch._check_is_size(pos)
+ torch._check(pos >= 0)
+ torch._check(pos <= 4)
return self.freq[pos] * self.freq[pos]
ep = torch.export.export(M(), (torch.tensor(1),))
@@ -2455,11 +2441,13 @@ def forward(self, x):
@testing.expectedFailureNonStrict
@testing.expectedFailureSerDerPreDispatch # .item() becomes aten.item in predispatch IR
@testing.expectedFailurePreDispatchRunDecomp # Assert message is still using the old node name, so it shoudl fail
+ @testing.expectedFailureRetraceability # assert message mismatch
def test_export_with_inline_constraints(self):
class Module(torch.nn.Module):
def forward(self, x):
a = x.item()
- torch._constrain_as_value(a, 4, 7)
+ torch._check(a >= 4)
+ torch._check(a <= 7)
return torch.empty((a, 4))
f = Module()
@@ -2467,7 +2455,7 @@ def forward(self, x):
self.assertEqual(ep.module()(torch.tensor([6])).shape, (6, 4))
FileCheck().check_count(
- "torch.ops.aten.sym_constrain_range.default", 1, exactly=True
+ "torch.ops.aten._assert_async.msg", 2, exactly=True
).run(ep.graph_module.code)
with self.assertRaisesRegex(
@@ -2476,11 +2464,13 @@ def forward(self, x):
) as cm:
ep.module()(torch.tensor([30]))
+ @testing.expectedFailureNonStrict # assert not found
def test_export_with_inline_constraints_complex(self):
class Module(torch.nn.Module):
def forward(self, x):
a = x.item()
- torch._constrain_as_value(a, 4, 7)
+ torch._check(a >= 4)
+ torch._check(a <= 7)
empty = torch.empty((a, 4))
return torch.cat((empty.transpose(0, 1), torch.zeros(6, a)), 0)
@@ -2489,7 +2479,7 @@ def forward(self, x):
ep = export(f, (torch.tensor([6]),))
self.assertEqual(ep.module()(torch.tensor([5])).shape, (10, 5))
FileCheck().check_count(
- "torch.ops.aten.sym_constrain_range.default", 1, exactly=True
+ "torch.ops.aten._assert_async.msg", 2, exactly=True
).run(ep.graph_module.code)
def test_to_module_with_mutated_buffer(self):
@@ -2879,7 +2869,7 @@ def forward(self, x):
)
)
- def test_constrain_as_size_error(self):
+ def test_check_is_size_error(self):
class Module(torch.nn.Module):
def forward(self, x):
a = x.item()
@@ -3105,7 +3095,7 @@ def forward(self, x):
class Foo(torch.nn.Module):
def forward(self, x):
y = x.item()
- torch._constrain_as_size(y)
+ torch._check_is_size(y)
return torch.zeros(y)
f = Foo()
@@ -3897,9 +3887,10 @@ graph():
)
def test_sym_stack_trace(self):
+ # TODO(avik): update this test with torch._check*
class Foo(torch.nn.Module):
def forward(self, x, y):
- y = torch._constrain_as_size(y.item(), min=2)
+ y = torch.sym_constrain_range_for_size(y.item(), min=2)
z = x.shape[0] == 4
z = torch.sym_ite(z, x.shape[0], x.shape[1])
return z
@@ -3918,7 +3909,7 @@ graph():
][0].meta.get("stack_trace", None)
self.assertTrue(
re.search(
- r"torch/__init__.py.*in _constrain_as_size\n.*torch.sym_constrain_range_for_size",
+ r"in forward\n.*torch.sym_constrain_range_for_size",
trace_constrain_range,
)
)
@@ -4717,7 +4708,9 @@ def forward(self, q, k, v):
) -> torch.Tensor:
# x.sizes(): 1, 128, 16, 128
sp = start_pos.item()
- torch._constrain_as_size(sp, min=0, max=126)
+ torch._check_is_size(sp)
+ torch._check(sp >= 0)
+ torch._check(sp <= 126)
key = cache[:, : sp + 1, :, :] # 1, sp+1, 16, 128
value = cache[:, : sp + 1, :, :] # 1, sp+1, 16, 128
query = query.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim)
diff --git a/test/export/test_pass_infra.py b/test/export/test_pass_infra.py
index fecae44225..955b961f37 100644
--- a/test/export/test_pass_infra.py
+++ b/test/export/test_pass_infra.py
@@ -50,12 +50,14 @@ class TestPassInfra(TestCase):
def forward(self, pred, x, y):
def true_fn(x, y):
b = x.item()
- torch._constrain_as_value(b, min=2, max=5)
+ torch._check(b >= 2)
+ torch._check(b <= 5)
return x - y
def false_fn(x, y):
c = y.item()
- torch._constrain_as_value(c, min=2, max=5)
+ torch._check(c >= 2)
+ torch._check(c <= 5)
return x + y
ret = control_flow.cond(pred, true_fn, false_fn, [x, y])
diff --git a/test/export/test_passes.py b/test/export/test_passes.py
index 6057474ee1..e2724ead88 100644
--- a/test/export/test_passes.py
+++ b/test/export/test_passes.py
@@ -522,7 +522,8 @@ class TestPasses(TestCase):
def forward(self, x):
b = x.item()
- torch._constrain_as_value(b, min=2, max=5)
+ torch._check(b >= 2)
+ torch._check(b <= 5)
return b
x = torch.tensor([2])
@@ -545,7 +546,8 @@ class TestPasses(TestCase):
def forward(self, x):
b = x.nonzero()
- torch._constrain_as_value(b.shape[0], min=3, max=5)
+ torch._check(b.shape[0] >= 3)
+ torch._check(b.shape[0] <= 5)
return b
x = torch.tensor([2, 1, 2, 3, 5, 0])
@@ -586,12 +588,14 @@ class TestPasses(TestCase):
def forward(self, pred, x, y):
def true_fn(x, y):
b = x.item()
- torch._constrain_as_value(b, min=2, max=5)
+ torch._check(b >= 2)
+ torch._check(b <= 5)
return x - b
def false_fn(x, y):
c = y.item()
- torch._constrain_as_value(c, min=2, max=5)
+ torch._check(c >= 2)
+ torch._check(c <= 5)
return y - c
ret = cond(pred, true_fn, false_fn, [x, y])
@@ -611,7 +615,8 @@ class TestPasses(TestCase):
class Foo(torch.nn.Module):
def forward(self, x):
a = x.item()
- torch._constrain_as_value(a, 4, 7)
+ torch._check(a >= 4)
+ torch._check(a <= 7)
return torch.empty((a, 4))
f = Foo()
@@ -619,8 +624,8 @@ class TestPasses(TestCase):
ep = torch.export.export(f, (torch.tensor([7]),))
gm = ep.graph_module
FileCheck().check_count(
- "torch.ops.aten.sym_constrain_range.default",
- 1,
+ "torch.ops.aten._assert_async.msg",
+ 2,
exactly=True,
).run(gm.code)
@@ -638,10 +643,10 @@ class TestPasses(TestCase):
self.assertEqual(dep_token.shape, torch.Size([]))
FileCheck().check_count(
- "torch.ops.aten._functional_sym_constrain_range", 1, exactly=True
+ "torch.ops.aten._functional_assert_async.msg", 2, exactly=True
).run(gm.code)
FileCheck().check_count(
- "torch.ops.aten.sym_constrain_range.default", 0, exactly=True
+ "torch.ops.aten._assert_async.msg", 0, exactly=True
).run(gm.code)
def test_math_ops(self):
diff --git a/test/export/test_serialize.py b/test/export/test_serialize.py
index 2f8163333d..9709241e9a 100644
--- a/test/export/test_serialize.py
+++ b/test/export/test_serialize.py
@@ -773,7 +773,7 @@ class TestDeserialize(TestCase):
class Module(torch.nn.Module):
def forward(self, x, y):
n = x.item()
- torch._constrain_as_size(n, min=2)
+ torch._check_is_size(n)
return y.sum() + torch.ones(n, 5).sum()
f = Module()
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index cd3bb458a4..419d36e5bf 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -10002,7 +10002,9 @@ if HAS_GPU and RUN_GPU and not TEST_WITH_ASAN:
return a[y.to(torch.int64)]
def fn2(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
- torch._constrain_as_size(b.shape[0], 2, 100)
+ torch._check_is_size(b.shape[0])
+ torch._check(b.shape[0] >= 2)
+ torch._check(b.shape[0] <= 100)
return fn1(a, b)
fn1_opt = torch._dynamo.optimize("inductor")(fn1)
diff --git a/test/inductor/test_torchinductor_dynamic_shapes.py b/test/inductor/test_torchinductor_dynamic_shapes.py
index adacfd46fe..0e83b235ed 100644
--- a/test/inductor/test_torchinductor_dynamic_shapes.py
+++ b/test/inductor/test_torchinductor_dynamic_shapes.py
@@ -780,7 +780,9 @@ class TestInductorDynamic(TestCase):
@torch.compile(fullgraph=True, dynamic=True)
def f(x):
a = x.item()
- torch._constrain_as_size(a, min=1, max=10)
+ torch._check_is_size(a)
+ torch._check(a >= 1)
+ torch._check(a <= 10)
return torch.ones(a, a)
f(torch.tensor([5], device=device))
diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py
index 6c7454656b..115305c313 100644
--- a/test/test_proxy_tensor.py
+++ b/test/test_proxy_tensor.py
@@ -1347,7 +1347,7 @@ def forward(self, crop_camera_1, mask_1):
for s in p.shape:
guard_int(s)
x = x[mask]
- torch._constrain_as_value(x.shape[0], min=1)
+ torch._check(x.shape[0] >= 1)
for p in params.values():
p.grad = None
return torch.func.functional_call(mod, {**params, **buffers}, (x,)).sum()
@@ -1498,6 +1498,7 @@ def forward(self, x_1, y_1):
# tolist not directly supported atm
sizes = [lengths[i].item() for i in range(lengths.size(0))]
for s in sizes:
+ # TODO(avik): no assertion generated with torch._check_is_size?
torch._constrain_as_size(s)
return torch.split(values, sizes)
diff --git a/torch/__init__.py b/torch/__init__.py
index 7a3bdd9aec..2fd8005cf7 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -2057,7 +2057,7 @@ def _constrain_as_size(symbol, min: Optional[builtins.int] = None, max: Optional
GuardOnDataDependentSymNode errors upon export, since we cannot guard on unbacked SymInts.
This function has unusual semantics which distinguish it from
- constrain_as_value. Specifically, in some circumstances in framework
+ _constrain_as_value. Specifically, in some circumstances in framework
code, we will treat this int as >= 2 (when we do a size-oblivious guard).
     This makes it easier to use the unbacked int in
size contexts, as we will often attempt to guard on a size being zero/one
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index c99bccc6b5..ff9438085c 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -1819,7 +1819,7 @@ def get_fake_value(node, tx, allow_non_graph_fake=False):
"Tried to use data-dependent value in the subsequent computation. "
"This can happen when we encounter unbounded dynamic value that is unknown during tracing time. "
"You will need to explicitly give hint to the compiler. Please take a look at "
- f"constrain_as_value OR constrain_as_size APIs. {cause}",
+ f"torch._check OR torch._check_is_size APIs. {cause}",
case_name="constrain_as_size_example",
)
elif isinstance(cause, ValueRangeError):
diff --git a/torch/_dynamo/variables/tensor.py b/torch/_dynamo/variables/tensor.py
index 6ac50d5828..7f06483ab1 100644
--- a/torch/_dynamo/variables/tensor.py
+++ b/torch/_dynamo/variables/tensor.py
@@ -997,7 +997,7 @@ class SymNodeVariable(VariableTracker):
except GuardOnDataDependentSymNode as e:
raise UserError( # noqa: TRY200
UserErrorType.ANTI_PATTERN,
- f"Consider annotating your code using torch._constrain_as_*(). {str(e)}",
+ f"Consider annotating your code using torch._check*(). {str(e)}",
case_name="constrain_as_size_example",
)
diff --git a/torch/_export/db/examples/constrain_as_size_example.py b/torch/_export/db/examples/constrain_as_size_example.py
index 0adbffd96c..16d6462524 100644
--- a/torch/_export/db/examples/constrain_as_size_example.py
+++ b/torch/_export/db/examples/constrain_as_size_example.py
@@ -13,8 +13,8 @@ from torch._export.db.case import export_case
class ConstrainAsSizeExample(torch.nn.Module):
"""
If the value is not known at tracing time, you can provide hint so that we
- can trace further. Please look at constrain_as_value and constrain_as_size APIs
- constrain_as_size is used for values that NEED to be used for constructing
+ can trace further. Please look at torch._check and torch._check_is_size APIs.
+ torch._check_is_size is used for values that NEED to be used for constructing
tensor.
"""
@@ -23,5 +23,6 @@ class ConstrainAsSizeExample(torch.nn.Module):
def forward(self, x):
a = x.item()
- torch._constrain_as_size(a, min=0, max=5)
+ torch._check_is_size(a)
+ torch._check(a <= 5)
return torch.zeros((a, 5))
diff --git a/torch/_export/db/examples/constrain_as_value_example.py b/torch/_export/db/examples/constrain_as_value_example.py
index 3844c7227a..1de266c689 100644
--- a/torch/_export/db/examples/constrain_as_value_example.py
+++ b/torch/_export/db/examples/constrain_as_value_example.py
@@ -13,8 +13,8 @@ from torch._export.db.case import export_case
class ConstrainAsValueExample(torch.nn.Module):
"""
If the value is not known at tracing time, you can provide hint so that we
- can trace further. Please look at constrain_as_value and constrain_as_size APIs.
- constrain_as_value is used for values that don't need to be used for constructing
+ can trace further. Please look at torch._check and torch._check_is_size APIs.
+ torch._check is used for values that don't need to be used for constructing
tensor.
"""
@@ -23,7 +23,8 @@ class ConstrainAsValueExample(torch.nn.Module):
def forward(self, x, y):
a = x.item()
- torch._constrain_as_value(a, min=0, max=5)
+ torch._check(a >= 0)
+ torch._check(a <= 5)
if a < 6:
return y.sin()
diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py
index fdea8a3441..28e376de5c 100644
--- a/torch/_subclasses/fake_tensor.py
+++ b/torch/_subclasses/fake_tensor.py
@@ -703,7 +703,8 @@ class FakeTensor(torch.Tensor):
for _ in range(self.shape[0]):
s = shape_env.create_unbacked_symint()
# max value?
- torch._constrain_as_size(s, min=2)
+ torch._check_is_size(s)
+ torch._check(s >= 2)
out.append(s)
return out
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 906e34e31a..ddfc6392ba 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -444,7 +444,7 @@ def _export_to_torch_ir(
except GuardOnDataDependentSymNode as e:
raise UserError( # noqa: TRY200
UserErrorType.ANTI_PATTERN,
- f"Consider annotating your code using torch._constrain_as_*(). {str(e)}",
+ f"Consider annotating your code using torch._check*(). {str(e)}",
case_name="constrain_as_size_example",
)
|
2.41.0
|
d794bcb8a9e7dec578b1e8587dc6513cd24c2e7
|
Wed, 1 May 2024 13:00:06 -0400
|
[PATCH 0906/1000] Delete NegateSource handling, I think it's dead (#125311)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/125311 Approved by: https://github.com/Skylion007
|
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 748737a9e1..fdaeb3a060 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -3398,7 +3398,7 @@ class ShapeEnv:
# TODO: Make this more efficient by binding all the size/stride/offsets
# to locals before performing tests on them.
- from torch._dynamo.source import TensorPropertySource, TensorProperty, NegateSource
+ from torch._dynamo.source import TensorPropertySource, TensorProperty
# Actual codegen must be delayed as we don't necessarily know what
# the symbol mapping is
@@ -3491,8 +3491,6 @@ class ShapeEnv:
symbol_to_source[s].append(source)
if constraint is not None:
symbol_to_constraints[s].add(constraint)
- elif isinstance(-s, sympy.Symbol):
- symbol_to_source[-s].append(NegateSource(source))
else:
constraint_violated = False
if isinstance(constraint, StrictMinMaxConstraint):
|
2.41.0
|
058563078e707b848b061a222d0195e7472be7d
|
Wed, 1 May 2024 09:51:21 -0700
|
[PATCH 0907/1000] support as_python_constant on PlacementClassVariable (#124398)
|
Fixes an error for torchtitan + internal. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124398 Approved by: https://github.com/ezyang, https://github.com/wanchaol, https://github.com/yoyoyocmu
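As a rough, hedged sketch (not the internal repro, which needs torchtitan) of what treating a Placement class as a plain Python constant enables, assuming a distributed-enabled build:
```
import torch
from torch.distributed._tensor.placement_types import Shard

def fn(x, placement_cls):
    # The Placement class object is consumed as a constant inside the
    # compiled region (identity check / constructor argument).
    if placement_cls is Shard:
        return x * 2
    return x

out = torch.compile(fn, backend="eager")(torch.ones(3), Shard)
```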
|
diff --git a/torch/_dynamo/variables/distributed.py b/torch/_dynamo/variables/distributed.py
index 2f4a2eb91e..6816ea9b1e 100644
--- a/torch/_dynamo/variables/distributed.py
+++ b/torch/_dynamo/variables/distributed.py
@@ -107,6 +107,9 @@ class PlacementClassVariable(DistributedVariable):
return type(value) is type and issubclass(value, Placement)
+ def as_python_constant(self):
+ return self.value
+
def call_function(
self, tx, args: "List[VariableTracker]", kwargs: "Dict[str, VariableTracker]"
) -> "VariableTracker":
|
2.41.0
|
173cbe260e8cfc9173da005a7b272c31511b05a
|
Wed, 1 May 2024 09:51:21 -0700
|
[PATCH 0908/1000] fix FakeTensor creation on noncontiguous subclasses (#124399)
|
Fixes https://github.com/pytorch/pytorch/issues/125287. Fixes https://github.com/pytorch/pytorch/issues/124090 (context is on the issue). Pull Request resolved: https://github.com/pytorch/pytorch/pull/124399 Approved by: https://github.com/soulitzer ghstack dependencies: #124398
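A minimal, hedged sketch of the invariant the new test exercises (shown here on a plain non-leaf, noncontiguous tensor; the actual regression only reproduced for wrapper subclasses such as DTensor whose inner _local_tensor is noncontiguous): fakeifying a tensor should not change its metadata.
```
import torch
from torch._subclasses import FakeTensorMode

x = torch.randn(4, 8, requires_grad=True)
y = x.t()  # autograd non-leaf view, noncontiguous: stride (1, 8)

fake_mode = FakeTensorMode()
y_fake = fake_mode.from_tensor(y)

# Fakeification should preserve shape and strides.
assert y_fake.shape == y.shape
assert y_fake.stride() == y.stride()
```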
|
diff --git a/test/distributed/_tensor/test_dtensor_compile.py b/test/distributed/_tensor/test_dtensor_compile.py
index f9ad0278d7..21ca8ae8f0 100644
--- a/test/distributed/_tensor/test_dtensor_compile.py
+++ b/test/distributed/_tensor/test_dtensor_compile.py
@@ -252,6 +252,43 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
self.assertEqual(res, ref)
self.assertEqual(cnt.frame_count, 2)
+ def test_dtensor_partial_placement_redistribute_unbalanced_correct_strides(self):
+ # Partial -> Shard on an unbalanced tensor results in:
+ # - A contiguous DTensor
+ # - where the inner _local_tensor is noncontiguous
+ placement = Shard(1)
+
+ def fn(x):
+ out = x.redistribute(mesh, [placement])
+ return out
+
+ # Temporarily ignore setUp(), and use rank3 graphs during tracing
+ dist.destroy_process_group()
+ fake_store = FakeStore()
+ dist.init_process_group("fake", store=fake_store, rank=3, world_size=2)
+ mesh = DeviceMesh(self.device_type, [1, 3])
+
+ x = torch.randn(10, 257, 160, requires_grad=True)
+ x_dt = DTensor.from_local(
+ x,
+ mesh,
+ [_Partial()],
+ run_check=False,
+ shape=(10, 257, 160),
+ stride=(41120, 160, 1),
+ )
+
+ # tmp_dt has an inner, non-contiguous tensor, and is an autograd non-leaf
+ tmp_dt = fn(x_dt)
+ fake_mode = torch._subclasses.FakeTensorMode()
+ tmp_dt_fake = fake_mode.from_tensor(tmp_dt)
+ self.assertEqual(tmp_dt.shape, tmp_dt_fake.shape)
+ self.assertEqual(tmp_dt.stride(), tmp_dt_fake.stride())
+ self.assertEqual(tmp_dt._local_tensor.shape, tmp_dt_fake._local_tensor.shape)
+ self.assertEqual(
+ tmp_dt._local_tensor.stride(), tmp_dt_fake._local_tensor.stride()
+ )
+
def test_dynamo_to_local_kwargs(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
diff --git a/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_noncontiguous b/test/dynamo_expected_failures/TestAOTAutograd.test_input_mutation_noncontiguous
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/test/dynamo_expected_failures/TestAutogradLogging.test_logging b/test/dynamo_expected_failures/TestAutogradLogging.test_logging
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py
index 2a940eb600..ab6f52c7fb 100644
--- a/torch/_dynamo/trace_rules.py
+++ b/torch/_dynamo/trace_rules.py
@@ -189,6 +189,7 @@ manual_torch_name_rule_map = {
"torch._C.autocast_decrement_nesting": SkipFunctionVariable,
"torch._C.autocast_increment_nesting": SkipFunctionVariable,
"torch.autograd.grad": SkipFunctionVariable,
+ "torch.autograd.backward": SkipFunctionVariable,
"torch._C.clear_autocast_cache": SkipFunctionVariable,
"torch.distributions.constraints.is_dependent": SkipFunctionVariable,
"torch.jit.isinstance": SkipFunctionVariable,
@@ -2337,7 +2338,6 @@ torch_non_c_binding_in_graph_functions = dict.fromkeys(
"torch.autograd._make_grads",
"torch.autograd._register_py_tensor_class_for_device",
"torch.autograd._tensor_or_tensors_to_tuple",
- "torch.autograd.backward",
"torch.autograd.forward_ad._maybe_load_decompositions",
"torch.autograd.function._iter_filter",
"torch.autograd.function._iter_jit_values",
diff --git a/torch/_subclasses/meta_utils.py b/torch/_subclasses/meta_utils.py
index b8947877a9..7c259186f8 100644
--- a/torch/_subclasses/meta_utils.py
+++ b/torch/_subclasses/meta_utils.py
@@ -900,8 +900,12 @@ class MetaConverter:
if t.requires_grad:
r.requires_grad = True
if t.requires_grad and not is_leaf:
+ # This should probably use DelayedError,
+ # but clone is fine for now for sparse tensors.
+ # (DelayedError does not work for sparse because it causes
+ # the Fake sparse tensor to "lose" its fakeness)
+ r = r.clone()
with torch.enable_grad():
- r = r.clone()
r._coalesced_(t.is_coalesced)
elif is_sparse_compressed_layout(t.layout):
is_leaf = t.is_leaf
@@ -937,8 +941,10 @@ class MetaConverter:
if t.requires_grad:
r.requires_grad = True
if t.requires_grad and not is_leaf:
- with torch.enable_grad():
- r = r.clone()
+ r = torch._C._functions.DelayedError(
+ "Internal error: Tried to backward() through example input",
+ 1,
+ )(r)
elif t.is_nested and not t.is_traceable_wrapper_subclass:
# TODO: Handle this better in Dynamo?
# There are checks there now, but this can still be triggered by a dense
@@ -962,8 +968,10 @@ class MetaConverter:
if t.requires_grad:
r.requires_grad = True
if t.requires_grad and not is_leaf:
- with torch.enable_grad():
- r = r.clone()
+ r = torch._C._functions.DelayedError(
+ "Internal error: Tried to backward() through example input",
+ 1,
+ )(r)
elif t.is_functorch_wrapped:
if t.is_view:
from torch._dynamo.exc import unimplemented
@@ -1011,8 +1019,12 @@ class MetaConverter:
if t.requires_grad and safe_is_leaf(r):
r.requires_grad = True
elif t.requires_grad and not is_leaf:
- with torch.enable_grad():
- r = r.clone()
+ r = torch._C._functions.DelayedError( # type: ignore[assignment]
+ "Internal error: Tried to backward() through example input",
+ 1,
+ )(
+ r # type: ignore[arg-type]
+ )
elif t.is_functional:
assert t.unwrapped is not None
assert t.current_level is not None
@@ -1203,11 +1215,18 @@ class MetaConverter:
r.requires_grad = t.requires_grad
if not is_leaf:
# Fake up some autograd history.
- with torch.enable_grad():
- # preserve_format is the default, but we want to
- # emphasize how important it is to preserve
- # format here
- r = r.clone(memory_format=torch.preserve_format)
+ # Note: we *used* to call .clone() here to mock up some autograd history.
+ # This is bad for subclasses.
+ # Consider the case where you have a wrapper subclass that is contiguous,
+ # but its inner tensor is noncontiguous().
+ # .clone() (or other ops) will have the side effect of changing
+ # the metadata of the inner tensor.
+ # So instead, we now have a dedicated fn to set autograd history,
+ # without inadvertently changing other metadata.
+ r = torch._C._functions.DelayedError(
+ "Internal error: Tried to backward() through example input",
+ 1,
+ )(r)
# Graph-Break for wrapped tensors
if (
diff --git a/torch/csrc/autograd/variable.cpp b/torch/csrc/autograd/variable.cpp
index 3399520a8a..07e37463cb 100644
--- a/torch/csrc/autograd/variable.cpp
+++ b/torch/csrc/autograd/variable.cpp
@@ -7,6 +7,7 @@
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/functions/accumulate_grad.h>
#include <torch/csrc/autograd/functions/tensor.h>
+#include <torch/csrc/autograd/functions/utils.h>
#include <torch/csrc/autograd/generated/Functions.h>
#include <torch/csrc/autograd/generated/ViewFuncs.h>
#include <torch/csrc/autograd/utils/error_messages.h>
|
2.41.0
|
e9ba61fde8f97fc05ceaee6f4ccbf23fe6bf9e8
|
Wed, 1 May 2024 09:51:22 -0700
|
[PATCH 0909/1000] AOTAutograd: force tangents to be contiguous when subclass inner tensor is noncontiguous (#124400)
|
Fixes https://github.com/pytorch/pytorch/issues/124397 Pull Request resolved: https://github.com/pytorch/pytorch/pull/124400 Approved by: https://github.com/ezyang, https://github.com/yoyoyocmu ghstack dependencies: #124398, #124399
|
diff --git a/test/distributed/_tensor/test_dtensor_compile.py b/test/distributed/_tensor/test_dtensor_compile.py
index 21ca8ae8f0..f726e35153 100644
--- a/test/distributed/_tensor/test_dtensor_compile.py
+++ b/test/distributed/_tensor/test_dtensor_compile.py
@@ -289,6 +289,41 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
tmp_dt._local_tensor.stride(), tmp_dt_fake._local_tensor.stride()
)
+ @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch")
+ def test_dtensor_contiguous_dtensor_noncontiguous_local_as_tangent(self):
+ # Partial -> Shard on an unbalanced tensor results in:
+ # - A contiguous DTensor
+ # - where the inner _local_tensor is noncontiguous
+ # When this tensor is a fwd graph output,
+ # AOTAutograd needs to make sure we trace the backward
+ # with a contiguous tangent
+ placement = Shard(1)
+
+ def fn(x):
+ out = x.redistribute(mesh, [placement])
+ return out
+
+ # Temporarily ignore setUp(), and use rank3 graphs during tracing
+ dist.destroy_process_group()
+ fake_store = FakeStore()
+ dist.init_process_group("fake", store=fake_store, rank=3, world_size=2)
+ mesh = DeviceMesh(self.device_type, [1, 3])
+
+ x = torch.randn(10, 257, 160, requires_grad=True)
+ x_dt = DTensor.from_local(
+ x,
+ mesh,
+ [_Partial()],
+ run_check=False,
+ shape=(10, 257, 160),
+ stride=(41120, 160, 1),
+ )
+
+ out_dt = torch.compile(fn)(x_dt)
+ # If we don't properly contiguify our traced tangents,
+ # this fails with an inductor stride assert
+ out_dt.to_local().sum().backward()
+
def test_dynamo_to_local_kwargs(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
diff --git a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
index 4a0aae0484..57d68d6995 100644
--- a/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
+++ b/torch/_functorch/_aot_autograd/collect_metadata_analysis.py
@@ -84,7 +84,16 @@ def coerce_tangent(x):
if is_traceable_wrapper_subclass(out) and hasattr(
out, "__coerce_tangent_metadata__"
):
- return out.__coerce_tangent_metadata__()
+ out = out.__coerce_tangent_metadata__()
+ # It's possible to have a subclass that advertises as contiguous,
+ # but has noncontiguous inner tensors.
+        # Force these to be contiguous too
+ if is_traceable_wrapper_subclass(out):
+ for attr in out.__tensor_flatten__()[0]: # type: ignore[attr-defined]
+ elem = getattr(out, attr)
+ if not elem.is_contiguous():
+ elem_contig = elem.contiguous()
+ setattr(out, attr, elem_contig)
return out
diff --git a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
index 5aed487080..b1b311fd1e 100644
--- a/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
+++ b/torch/_functorch/_aot_autograd/jit_compile_runtime_wrappers.py
@@ -75,6 +75,19 @@ aot_graphs_log = getArtifactLogger(__name__, "aot_graphs")
aten = torch.ops.aten
+def _force_contiguous(x):
+ if not isinstance(x, torch.Tensor):
+ return x
+ x = x.contiguous()
+ if not is_traceable_wrapper_subclass(x):
+ return x
+ for attr in x.__tensor_flatten__()[0]: # type: ignore[attr-defined]
+ elem = getattr(x, attr)
+ if not elem.is_contiguous():
+ setattr(x, attr, elem.contiguous())
+ return x
+
+
def _compute_output_meta_with_inductor_strides(fw_module, fwd_output_strides):
out = [n.meta["val"] for n in (list(fw_module.graph.nodes)[-1].args[0])]
# will only be set for inductor
@@ -895,11 +908,8 @@ Got grad_output types: {str(grad_output_types)}"""
# Make the tangents contiguous. Note that we must do this after subclass desugaring
# because inputs to inductor have to be contiguous
all_args = [
- t.contiguous()
- if (
- (tangents_start_idx <= i < tangents_end_idx)
- and (not t.is_contiguous())
- )
+ _force_contiguous(t)
+ if (tangents_start_idx <= i < tangents_end_idx)
else t
for i, t in enumerate(all_args)
]
|
2.41.0
|
99a2e25f132b6ad7cc060d1dad4a4f374eb100d
|
Wed, 1 May 2024 09:56:32 -0700
|
[PATCH 0910/1000] Reland "make sure dynamo doesn't inline DTensor __new__ or __torch_dispatch__ (#123347)" (#125288)
|
Re-land of https://github.com/pytorch/pytorch/pull/123347. The original PR broke internal because of a circular import due to importing dynamo in the DTensor code. The new version uses `torch._disable_dynamo` to work around it. This reverts commit 9d88339b535f57cd0e2926c9ac4c2542e4490aac. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125288 Approved by: https://github.com/ezyang, https://github.com/yanboliang, https://github.com/yoyoyocmu, https://github.com/anijain2305, https://github.com/fegin ghstack dependencies: #124398, #124399, #124400
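For reference, a small hedged sketch of the `torch._dynamo.disable(recursive=False)` behavior the new test exercises: the decorated body runs eagerly (so the print is harmless), even when the function is passed to torch.compile.
```
import torch

@torch._dynamo.disable(recursive=False)
def make(x):
    print("not traced")  # would otherwise be a graph break
    return x.sin() + 1

ref = make(torch.ones(4))
out = torch.compile(make, backend="eager")(torch.ones(4))
assert torch.equal(ref, out)
```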
|
diff --git a/test/distributed/_tensor/test_dtensor_compile.py b/test/distributed/_tensor/test_dtensor_compile.py
index f726e35153..b26c8900dd 100644
--- a/test/distributed/_tensor/test_dtensor_compile.py
+++ b/test/distributed/_tensor/test_dtensor_compile.py
@@ -191,6 +191,47 @@ class TestDTensorCompile(torch._dynamo.test_case.TestCase):
res = opt_fn(x)
self.assertEqual(res, ref)
+ def test_dtensor_constructor_w_graph_break(self):
+ mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
+
+ # test passing in DTensor as inputs/outputs and run some tensor computation
+ def fn(x):
+ print("graph break!")
+ return DTensor(
+ x,
+ mesh,
+ (Replicate(), Shard(0)),
+ shape=[128, 32],
+ dtype=x.dtype,
+ requires_grad=x.requires_grad,
+ stride=[32, 1],
+ )
+
+ x = torch.randn(64, 32, requires_grad=True)
+ out = fn(x)
+ out2 = torch.compile(fn, backend="eager")(x)
+
+ def test_dtensor_constructor_w_dynamo_disable(self):
+ mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
+
+ @torch._dynamo.disable(recursive=False)
+ def fn(x):
+ print("foo")
+ return DTensor(
+ x,
+ mesh,
+ (Replicate(),),
+ shape=torch.Size([32]),
+ dtype=x.dtype,
+ requires_grad=x.requires_grad,
+ stride=(1,),
+ )
+
+ x = torch.randn(32, requires_grad=True)
+ out = fn(x)
+ out2 = torch.compile(fn, backend="eager")(x)
+ self.assertEqual(out, out2)
+
def test_dtensor_noncontiguous_output(self):
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size))
diff --git a/torch/distributed/_tensor/api.py b/torch/distributed/_tensor/api.py
index 00079ef946..16cd41adc8 100644
--- a/torch/distributed/_tensor/api.py
+++ b/torch/distributed/_tensor/api.py
@@ -198,6 +198,7 @@ class DTensor(torch.Tensor): # pyre-ignore[13]: pyre is bad at __new__
_op_dispatcher: op_dispatch.OpDispatcher = op_dispatch.OpDispatcher()
@staticmethod
+ @torch._disable_dynamo
def __new__(
cls,
local_tensor: torch.Tensor,
@@ -288,6 +289,7 @@ class DTensor(torch.Tensor): # pyre-ignore[13]: pyre is bad at __new__
)
@classmethod
+ @torch._disable_dynamo
# pyre-fixme[3]: Return type must be annotated.
# pyre-fixme[2]: Parameter must be annotated.
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
|
2.41.0
|
06eda538b85c9fec7a35a4c8684cdef05fc5e9d
|
Wed, 1 May 2024 22:06:47 +0000
|
[PATCH 0911/1000] Fix windows build error not propagating (#125306)
|
* Fixes https://github.com/pytorch/pytorch/issues/124886 * Kind of similar to https://github.com/pytorch/pytorch/pull/109393 I think what happens is `exit` and `exit /b` propagate the errorlevel correctly, but `exit /b` only exists the currently running batch script and not the entire cmd.exe (or whatever program is running the batch script), so `exit /b` exits with errorlevel 1, but the the parent cmd exits with 0, and bash sees cmd's 0 I think `goto fail` and `exit` are the same thing when the batch script is run from a bash script so either would work in this case? But the `goto fail` method might be better if someone happens to run the script on cmdline I assumed that anywhere anyone was exiting after checking the error code, they did want to exit completely, and I'm pretty sure that being inside a parenthesis counts as being a different script, so I changed everything to goto fail just in case, this might be too aggressive? Logs after this change for a build failure on cuda: https://github.com/pytorch/pytorch/actions/runs/8912185834/job/24475087535?pr=125306 ``` 2 errors detected in the compilation of "C:/actions-runner/_work/pytorch/pytorch/aten/src/ATen/native/cuda/AdaptiveMaxPooling3d.cu". AdaptiveMaxPooling3d.cu [7599/8420] Linking CXX shared library bin\torch_cpu.dll ninja: build stopped: subcommand failed. -- Building version 2.4.0a0+git3171c11 cmake -GNinja -DBUILD_ENVIRONMENT=win-vs2019-cuda11.8-py3 -DBUILD_PYTHON=True -DBUILD_TEST=True -DBUILD_TYPE=release -DBUILD_WHEEL=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CUDA_COMPILER=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.8/bin/nvcc.exe -DCMAKE_CUDA_COMPILER_LAUNCHER=C:/actions-runner/_work/pytorch/pytorch/build/win_tmp/bin/randomtemp.exe;C:/actions-runner/_work/pytorch/pytorch/build/win_tmp\bin\sccache.exe -DCMAKE_CXX_COMPILER_LAUNCHER=sccache -DCMAKE_C_COMPILER_LAUNCHER=sccache -DCMAKE_GENERATOR=Ninja -DCMAKE_INSTALL_PREFIX=C:\actions-runner\_work\pytorch\pytorch\torch -DCMAKE_PREFIX_PATH=C:\Jenkins\Miniconda3\Lib\site-packages -DCUDA_NVCC_EXECUTABLE=C:/actions-runner/_work/pytorch/pytorch/build/win_tmp/bin/nvcc.bat -DCUDNN_LIBRARY=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\lib\x64 -DNUMPY_INCLUDE_DIR=C:\Jenkins\Miniconda3\lib\site-packages\numpy\core\include -DPYTHON_EXECUTABLE=C:\Jenkins\Miniconda3\python.exe -DPYTHON_INCLUDE_DIR=C:\Jenkins\Miniconda3\Include -DPYTHON_LIBRARY=C:\Jenkins\Miniconda3/libs/python39.lib -DTORCH_BUILD_VERSION=2.4.0a0+git3171c11 -DTORCH_CUDA_ARCH_LIST=8.6 -DUSE_CUDA=1 -DUSE_NUMPY=True C:\actions-runner\_work\pytorch\pytorch cmake --build . --target install --config Release -- -j 8 (base) C:\actions-runner\_work\pytorch\pytorch>if errorlevel 1 goto fail (base) C:\actions-runner\_work\pytorch\pytorch>exit /b 1 Error: Process completed with exit code 1. ``` vs original https://github.com/pytorch/pytorch/actions/runs/8910674030/job/24470387612 ``` 2 errors detected in the compilation of "C:/actions-runner/_work/pytorch/pytorch/aten/src/ATen/native/cuda/AdaptiveMaxPooling3d.cu". AdaptiveMaxPooling3d.cu [7604/8420] Linking CXX shared library bin\torch_cpu.dll ninja: build stopped: subcommand failed. 
-- Building version 2.4.0a0+gite09f98c cmake -GNinja -DBUILD_ENVIRONMENT=win-vs2019-cuda11.8-py3 -DBUILD_PYTHON=True -DBUILD_TEST=True -DBUILD_TYPE=release -DBUILD_WHEEL=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_CUDA_COMPILER=C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.8/bin/nvcc.exe -DCMAKE_CUDA_COMPILER_LAUNCHER=C:/actions-runner/_work/pytorch/pytorch/build/win_tmp/bin/randomtemp.exe;C:/actions-runner/_work/pytorch/pytorch/build/win_tmp\bin\sccache.exe -DCMAKE_CXX_COMPILER_LAUNCHER=sccache -DCMAKE_C_COMPILER_LAUNCHER=sccache -DCMAKE_GENERATOR=Ninja -DCMAKE_INSTALL_PREFIX=C:\actions-runner\_work\pytorch\pytorch\torch -DCMAKE_PREFIX_PATH=C:\Jenkins\Miniconda3\Lib\site-packages -DCUDA_NVCC_EXECUTABLE=C:/actions-runner/_work/pytorch/pytorch/build/win_tmp/bin/nvcc.bat -DCUDNN_LIBRARY=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.8\lib\x64 -DNUMPY_INCLUDE_DIR=C:\Jenkins\Miniconda3\lib\site-packages\numpy\core\include -DPYTHON_EXECUTABLE=C:\Jenkins\Miniconda3\python.exe -DPYTHON_INCLUDE_DIR=C:\Jenkins\Miniconda3\Include -DPYTHON_LIBRARY=C:\Jenkins\Miniconda3/libs/python39.lib -DTORCH_BUILD_VERSION=2.4.0a0+gite09f98c -DTORCH_CUDA_ARCH_LIST=8.6 -DUSE_CUDA=1 -DUSE_NUMPY=True C:\actions-runner\_work\pytorch\pytorch cmake --build . --target install --config Release -- -j 8 (base) C:\actions-runner\_work\pytorch\pytorch>if errorlevel 1 exit /b + assert_git_not_dirty + [[ win-vs2019-cuda11.8-py3 != *rocm* ]] + [[ win-vs2019-cuda11.8-py3 != *xla* ]] ++ git status --porcelain ++ grep -v '?? third_party' ++ true + git_status= + [[ -n '' ]] + echo 'BUILD PASSED' BUILD PASSED ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/125306 Approved by: https://github.com/ZainRizvi, https://github.com/huydhn, https://github.com/atalman
|
diff --git a/.ci/pytorch/win-test-helpers/build_pytorch.bat b/.ci/pytorch/win-test-helpers/build_pytorch.bat
index 4b7bbad744..28bd083f98 100644
--- a/.ci/pytorch/win-test-helpers/build_pytorch.bat
+++ b/.ci/pytorch/win-test-helpers/build_pytorch.bat
@@ -17,22 +17,22 @@ set PATH=C:\Program Files\CMake\bin;C:\Program Files\7-Zip;C:\ProgramData\chocol
set INSTALLER_DIR=%SCRIPT_HELPERS_DIR%\installation-helpers
call %INSTALLER_DIR%\install_magma.bat
-if errorlevel 1 exit /b
-if not errorlevel 0 exit /b
+if errorlevel 1 goto fail
+if not errorlevel 0 goto fail
call %INSTALLER_DIR%\install_sccache.bat
-if errorlevel 1 exit /b
-if not errorlevel 0 exit /b
+if errorlevel 1 goto fail
+if not errorlevel 0 goto fail
:: Miniconda has been installed as part of the Windows AMI with all the dependencies.
:: We just need to activate it here
call %INSTALLER_DIR%\activate_miniconda3.bat
-if errorlevel 1 exit /b
-if not errorlevel 0 exit /b
+if errorlevel 1 goto fail
+if not errorlevel 0 goto fail
call pip install mkl-include==2021.4.0 mkl-devel==2021.4.0
-if errorlevel 1 exit /b
-if not errorlevel 0 exit /b
+if errorlevel 1 goto fail
+if not errorlevel 0 goto fail
:: Override VS env here
pushd .
@@ -41,8 +41,8 @@ if "%VC_VERSION%" == "" (
) else (
call "C:\Program Files (x86)\Microsoft Visual Studio\%VC_YEAR%\%VC_PRODUCT%\VC\Auxiliary\Build\vcvarsall.bat" x64 -vcvars_ver=%VC_VERSION%
)
-if errorlevel 1 exit /b
-if not errorlevel 0 exit /b
+if errorlevel 1 goto fail
+if not errorlevel 0 goto fail
@echo on
popd
@@ -52,12 +52,12 @@ set CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION%
if x%CUDA_VERSION:.=%==x%CUDA_VERSION% (
echo CUDA version %CUDA_VERSION% format isn't correct, which doesn't contain '.'
- exit /b 1
+ goto fail
)
rem version transformer, for example 10.1 to 10_1.
if x%CUDA_VERSION:.=%==x%CUDA_VERSION% (
echo CUDA version %CUDA_VERSION% format isn't correct, which doesn't contain '.'
- exit /b 1
+ goto fail
)
set VERSION_SUFFIX=%CUDA_VERSION:.=_%
set CUDA_PATH_V%VERSION_SUFFIX%=%CUDA_PATH%
@@ -101,8 +101,8 @@ if "%USE_CUDA%"=="1" (
:: CMake requires a single command as CUDA_NVCC_EXECUTABLE, so we push the wrappers
:: randomtemp.exe and sccache.exe into a batch file which CMake invokes.
curl -kL https://github.com/peterjc123/randomtemp-rust/releases/download/v0.4/randomtemp.exe --output %TMP_DIR_WIN%\bin\randomtemp.exe
- if errorlevel 1 exit /b
- if not errorlevel 0 exit /b
+ if errorlevel 1 goto fail
+ if not errorlevel 0 goto fail
echo @"%TMP_DIR_WIN%\bin\randomtemp.exe" "%TMP_DIR_WIN%\bin\sccache.exe" "%CUDA_PATH%\bin\nvcc.exe" %%* > "%TMP_DIR%/bin/nvcc.bat"
cat %TMP_DIR%/bin/nvcc.bat
set CUDA_NVCC_EXECUTABLE=%TMP_DIR%/bin/nvcc.bat
@@ -114,8 +114,8 @@ if "%USE_CUDA%"=="1" (
set
python setup.py bdist_wheel
-if errorlevel 1 exit /b
-if not errorlevel 0 exit /b
+if errorlevel 1 goto fail
+if not errorlevel 0 goto fail
sccache --show-stats
python -c "import os, glob; os.system('python -mpip install --no-index --no-deps ' + glob.glob('dist/*.whl')[0])"
(
@@ -135,3 +135,8 @@ python -c "import os, glob; os.system('python -mpip install --no-index --no-deps
sccache --show-stats --stats-format json | jq .stats > sccache-stats-%BUILD_ENVIRONMENT%-%OUR_GITHUB_JOB_ID%.json
sccache --stop-server
+
+exit /b 0
+
+:fail
+exit /b 1
|
2.41.0
|
451d108da6b903e5a4d21d55be7988080cac684
|
Wed, 1 May 2024 23:14:05 +0000
|
[PATCH 0912/1000] Implemented isin_Tensor_Tensor_out for MPS backend (#124896)
|
Addresses issue #124518, adds isin_Tensor_Tensor_out. Tests added to test_mps.py. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124896 Approved by: https://github.com/malfet, https://github.com/kulinseth
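Roughly, the user-facing behavior this enables (only on machines with an MPS device and, per the TORCH_CHECK below, macOS 14 or newer):
```
import torch

if torch.backends.mps.is_available():
    a = torch.tensor([1, 2, 3, 4], device="mps")
    b = torch.tensor([2, 4], device="mps")
    print(torch.isin(a, b))               # [False, True, False, True]
    print(torch.isin(a, b, invert=True))  # [True, False, True, False]
```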
|
diff --git a/aten/src/ATen/native/mps/operations/TensorCompare.mm b/aten/src/ATen/native/mps/operations/TensorCompare.mm
index 78fa67e57d..f378af1326 100644
--- a/aten/src/ATen/native/mps/operations/TensorCompare.mm
+++ b/aten/src/ATen/native/mps/operations/TensorCompare.mm
@@ -12,7 +12,9 @@
#include <ATen/ops/clamp_max_native.h>
#include <ATen/ops/clamp_min_native.h>
#include <ATen/ops/clamp_native.h>
+#include <ATen/ops/isin_native.h>
#include <ATen/ops/nan_to_num_native.h>
+#include <ATen/ops/ones_like_native.h>
#include <ATen/ops/where_native.h>
#endif
@@ -268,6 +270,68 @@ static void clamp_scalar_out_mps(const Tensor& input_t,
}
}
+static void isin_Tensor_Tensor_out_mps(const Tensor& elements,
+ const Tensor& test_elements,
+ bool assume_unique,
+ bool invert,
+ const Tensor& out,
+ string op_name) {
+ TORCH_CHECK(is_macos_13_or_newer(MacOSVersion::MACOS_VER_14_0_PLUS),
+ "isin_Tensor_Tensor_out supported on MPS from MacOs_14_0 onwards");
+ if (elements.numel() == 0) {
+ return;
+ }
+
+ if (test_elements.numel() == 0) {
+ if (invert) {
+ auto ones = ones_like(out);
+ out.copy_(ones);
+ } else {
+ auto zeros = zeros_like(out);
+ out.copy_(zeros);
+ }
+ return;
+ }
+
+ TORCH_CHECK(elements.is_mps() && test_elements.is_mps());
+ TORCH_CHECK(elements.dtype() == test_elements.dtype());
+
+ @autoreleasepool {
+ string key =
+ op_name + getTensorsStringKey({elements}) + getTensorsStringKey({test_elements}) + std::to_string(invert);
+
+ auto cachedGraph = LookUpOrCreateCachedGraph<MPSBinaryCachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
+ MPSGraphTensor* inputTensor = mpsGraphUnrankedPlaceHolder(mpsGraph, getMPSDataType(elements.scalar_type()));
+ MPSGraphTensor* otherTensor = mpsGraphUnrankedPlaceHolder(mpsGraph, getMPSDataType(test_elements.scalar_type()));
+
+ newCachedGraph->inputTensor_ = inputTensor;
+ newCachedGraph->otherTensor_ = otherTensor;
+
+ MPSShape* outputShape = getMPSShape(out);
+
+ MPSGraphTensor* input_flattened = [mpsGraph reshapeTensor:inputTensor withShape:@[ @-1, @1 ] name:nil];
+ MPSGraphTensor* other_flattened = [mpsGraph reshapeTensor:otherTensor withShape:@[ @1, @-1 ] name:nil];
+ MPSGraphTensor* isInTensor = [mpsGraph equalWithPrimaryTensor:input_flattened
+ secondaryTensor:other_flattened
+ name:nil];
+ MPSGraphTensor* output = [mpsGraph reductionOrWithTensor:isInTensor axis:1 name:nil];
+ output = [mpsGraph reshapeTensor:output withShape:outputShape name:nil];
+
+ if (invert) {
+ output = [mpsGraph notWithTensor:output name:nil];
+ }
+ newCachedGraph->outputTensor_ = output;
+ });
+
+ auto inputPlaceholder = Placeholder(cachedGraph->inputTensor_, elements);
+ auto otherPlaceholder = Placeholder(cachedGraph->otherTensor_, test_elements);
+ auto outputPlaceholder = Placeholder(cachedGraph->outputTensor_, out);
+
+ auto feeds = dictionaryFromPlaceholders(inputPlaceholder, otherPlaceholder);
+ runMPSGraph(getCurrentMPSStream(), cachedGraph->graph(), feeds, outputPlaceholder);
+ }
+}
+
} // namespace mps
// APIs exposed to at::native scope
@@ -301,6 +365,11 @@ TORCH_IMPL_FUNC(clamp_max_out_mps)
mps::clamp_scalar_out_mps(input_t, at::OptionalScalarRef(), max, output_t, __func__);
}
+TORCH_IMPL_FUNC(isin_Tensor_Tensor_out_mps)
+(const Tensor& elements, const Tensor& test_elements, bool assume_unique, bool invert, const Tensor& out) {
+ mps::isin_Tensor_Tensor_out_mps(elements, test_elements, assume_unique, invert, out, __func__);
+}
+
static void where_kernel_mps(TensorIterator& iter) {
const auto& condition = iter.input(0);
const auto& self = iter.input(1);
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 7b48d2116f..fc0e21c12e 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -3127,6 +3127,7 @@
structured: True
dispatch:
CPU, CUDA: isin_Tensor_Tensor_out
+ MPS: isin_Tensor_Tensor_out_mps
- func: isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor
variants: function
diff --git a/test/test_mps.py b/test/test_mps.py
index 27ff47e702..95828315d5 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -673,7 +673,6 @@ def mps_ops_modifier(ops):
'index_reducemean': None,
'index_reduceamax': None,
'index_reduceamin': None,
- 'isin': None,
'isneginf': None,
'isposinf': None,
'kthvalue': None,
@@ -899,6 +898,9 @@ def mps_ops_modifier(ops):
'fft.rfft2': None,
'fft.rfftn': None,
'stft': None,
+ # Error in TestConsistencyCPU.test_output_match_isin_cpu_int32,
+ # not reproducible in later OS. Added assert to op if used in < 14.0
+ 'isin': None,
})
UNDEFINED_XFAILLIST = {
@@ -8167,6 +8169,46 @@ class TestLogical(TestCaseMPS):
[helper(dtype) for dtype in [torch.float32, torch.float16, torch.int32, torch.int16, torch.uint8, torch.int8, torch.bool]]
+ @unittest.skipIf(product_version < 14.0, "Skipped on MacOS < 14.0")
+ def test_isin(self):
+ def helper(dtype):
+ shapes = [([2, 5], [3, 5, 2]), ([10, 3, 5], [20, 1, 3]),
+ ([5], [10]), ([0], [5]), ([5], [0])]
+ for shape_tuple in shapes:
+ for inverted in [True, False]:
+ if dtype.is_floating_point:
+ # Half is not supported for CPU isin. Compute reference in FP32
+ A = torch.randn(size=shape_tuple[0], device='cpu', dtype=torch.float32)
+ B = torch.randn(size=shape_tuple[1], device='cpu', dtype=torch.float32)
+ else:
+ A = torch.randint(0, 100, size=shape_tuple[0], device='cpu', dtype=dtype)
+ B = torch.randint(0, 100, size=shape_tuple[1], device='cpu', dtype=dtype)
+
+ A_mps = A.clone().detach().to('mps')
+ B_mps = B.clone().detach().to('mps')
+
+ cpu_ref = torch.isin(A, B, invert=inverted)
+ if dtype is torch.float16:
+ cpu_ref.type(dtype)
+
+ mps_out = torch.isin(A_mps, B_mps, invert=inverted)
+ self.assertEqual(mps_out, cpu_ref)
+
+ [helper(dtype) for dtype in [torch.float32, torch.float16, torch.int32, torch.int16, torch.uint8, torch.int8]]
+
+ @unittest.skipIf(product_version < 14.0, "Skipped on MacOS < 14.0")
+ def test_isin_asserts(self):
+ A = torch.randn(size=[1, 4], device='mps', dtype=torch.float32)
+ B = torch.randn(size=[1, 4], device='mps', dtype=torch.float16)
+ with self.assertRaisesRegex(RuntimeError, 'Expected elements.dtype()*'):
+ out = torch.isin(A, B)
+
+
+ C = torch.randn(size=[1, 4], device='mps', dtype=torch.float32)
+ D = torch.randn(size=[1, 4], device='cpu', dtype=torch.float32)
+ with self.assertRaisesRegex(RuntimeError, 'Expected elements.is_mps()*'):
+ out = torch.isin(C, D)
+
class TestSmoothL1Loss(TestCaseMPS):
def _smooth_l1_loss_helper(self, reduction="mean", requires_grad=False):
|
2.41.0
|
cfb55dd5dfcffc06e039c9af2342f455dfd1007
|
Wed, 1 May 2024 23:19:07 +0000
|
[PATCH 0913/1000] Add a variable for some testcases. (#124708)
|
Some testcases can use 'TEST_PRIVATEUSE1_DEVICE_TYPE' to make adapting these testcases to other devices more convenient. Fixes #ISSUE_NUMBER Pull Request resolved: https://github.com/pytorch/pytorch/pull/124708 Approved by: https://github.com/albanD
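A short sketch of how a device-generic test might consume the new variable (by default `torch._C._get_privateuse1_backend_name()` returns "privateuseone" unless a custom backend has renamed it):
```
from torch.testing._internal.common_utils import TEST_PRIVATEUSE1_DEVICE_TYPE

def treat_like_cuda(device_type: str) -> bool:
    # Custom PrivateUse1 backends take the same code path as CUDA here.
    return device_type in ("cuda", TEST_PRIVATEUSE1_DEVICE_TYPE)
```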
|
diff --git a/test/test_shape_ops.py b/test/test_shape_ops.py
index 189187b582..47acfff9c6 100644
--- a/test/test_shape_ops.py
+++ b/test/test_shape_ops.py
@@ -12,7 +12,7 @@ import unittest
from torch import nan
from torch.testing import make_tensor
from torch.testing._internal.common_utils import (
- TestCase, run_tests, skipIfTorchDynamo, torch_to_numpy_dtype_dict, IS_JETSON)
+ TestCase, run_tests, skipIfTorchDynamo, torch_to_numpy_dtype_dict, IS_JETSON, TEST_PRIVATEUSE1_DEVICE_TYPE)
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests, onlyCPU, onlyCUDA, dtypes, onlyNativeDeviceTypes,
dtypesIfCUDA, largeTensorTest)
@@ -631,7 +631,7 @@ class TestShapeOps(TestCase):
"scalar type Long",
lambda: torch.nonzero(tensor, out=torch.empty([], dtype=torch.float, device=device))
)
- if self.device_type == 'cuda':
+ if self.device_type == 'cuda' or self.device_type == TEST_PRIVATEUSE1_DEVICE_TYPE:
self.assertRaisesRegex(
RuntimeError,
"on the same device",
diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py
index 2f6c0f8e2b..46e4f817d2 100644
--- a/torch/testing/_internal/common_utils.py
+++ b/torch/testing/_internal/common_utils.py
@@ -1235,7 +1235,9 @@ TEST_MPS = torch.backends.mps.is_available()
TEST_XPU = torch.xpu.is_available()
TEST_CUDA = torch.cuda.is_available()
custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name(), None)
-TEST_PRIVATEUSE1 = True if (hasattr(custom_device_mod, "is_available") and custom_device_mod.is_available()) else False
+custom_device_is_available = hasattr(custom_device_mod, "is_available") and custom_device_mod.is_available()
+TEST_PRIVATEUSE1 = True if custom_device_is_available else False
+TEST_PRIVATEUSE1_DEVICE_TYPE = torch._C._get_privateuse1_backend_name()
TEST_NUMBA = _check_module_exists('numba')
TEST_TRANSFORMERS = _check_module_exists('transformers')
TEST_DILL = _check_module_exists('dill')
|
2.41.0
|
f5f405b057c7de0f5fce0b1432cb74468f96f95
|
Wed, 1 May 2024 23:29:55 +0000
|
[PATCH 0914/1000] [ncclx] Rename NCCL-EXP to NCCLX (#125238)
|
Reviewed By: kryanchun Differential Revision: D56534548 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125238 Approved by: https://github.com/kwen2501
|
diff --git a/test/cpp/c10d/ProcessGroupNCCLTest.cpp b/test/cpp/c10d/ProcessGroupNCCLTest.cpp
index d2dc02d323..d1c2380274 100644
--- a/test/cpp/c10d/ProcessGroupNCCLTest.cpp
+++ b/test/cpp/c10d/ProcessGroupNCCLTest.cpp
@@ -840,7 +840,7 @@ TEST_F(ProcessGroupNCCLTest, testSplittingCommunicator) {
multiThreadRun(testSplittingCommunicator);
}
-#ifdef IS_NCCL_EXP
+#ifdef IS_NCCLX
TEST_F(ProcessGroupNCCLTest, testSparseAllreduce) {
if (skipTest()) {
return;
diff --git a/torch/csrc/distributed/c10d/NCCLUtils.hpp b/torch/csrc/distributed/c10d/NCCLUtils.hpp
index ee771b66f8..a4b96a2a40 100644
--- a/torch/csrc/distributed/c10d/NCCLUtils.hpp
+++ b/torch/csrc/distributed/c10d/NCCLUtils.hpp
@@ -303,7 +303,7 @@ class NCCLComm {
}
#endif
-#if defined(IS_NCCL_EXP) && defined(NCCL_COMM_DUMP)
+#if defined(IS_NCCLX) && defined(NCCL_COMM_DUMP)
std::unordered_map<std::string, std::string> ncclCommDump() {
std::unordered_map<std::string, std::string> dump;
if (isAborted()) {
diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
index 573779ae39..46b2923651 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
@@ -323,7 +323,7 @@ void cacheAllocatorDeregisterHook(
}
}
-#if defined(IS_NCCL_EXP) && defined(NCCL_COMM_DUMP)
+#if defined(IS_NCCLX) && defined(NCCL_COMM_DUMP)
std::string dump_nccl_trace() {
std::unordered_map<
std::string /* ncclUniqueID */,
@@ -2999,7 +2999,7 @@ c10::intrusive_ptr<Work> ProcessGroupNCCL::allreduce_sparse(
const AllreduceOptions& opts) {
TORCH_CHECK(tensors.size() == 1, MULTI_DEVICE_ERROR_MSG);
auto tensor = tensors.back();
-#ifdef IS_NCCL_EXP
+#ifdef IS_NCCLX
tensor = tensor.coalesce();
at::Tensor outputTensor =
torch::zeros(tensor.sizes(), tensor.options().layout(torch::kStrided));
diff --git a/torch/csrc/distributed/c10d/default_comm_hooks.cpp b/torch/csrc/distributed/c10d/default_comm_hooks.cpp
index 124bacd2b2..9dd780e199 100644
--- a/torch/csrc/distributed/c10d/default_comm_hooks.cpp
+++ b/torch/csrc/distributed/c10d/default_comm_hooks.cpp
@@ -46,7 +46,7 @@ c10::intrusive_ptr<c10::ivalue::Future> FP16CompressCommHook::runHook(
c10::intrusive_ptr<c10::ivalue::Future> _AllReduceBySumCommHook::runHook(
GradBucket& bucket) {
std::vector<at::Tensor> tensors = {bucket.getBufferRef()};
-#ifdef IS_NCCL_EXP
+#ifdef IS_NCCLX
// case with sparse_metadata_ set and using indices from there
if (bucket.getSparseGradIndices().has_value()) {
AllreduceOptions opts = AllreduceOptions();
|
2.41.0
|
6f326eff56250d97e0df1f5e75943a72ebaa5c2
|
Wed, 1 May 2024 11:38:49 -0700
|
[PATCH 0915/1000] explicitly reset stderr/stdout in precompilation (#125289)
|
I was seeing a weird bug where, after running max-autotune, my stdout would be misdirected. Other people have not been able to repro this. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125289 Approved by: https://github.com/shunting314, https://github.com/mlazos
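A compact, hedged sketch of the mechanism (the helper mirrors the `restore_stdout_stderr` context manager added to torch/_inductor/utils.py; the worker here deliberately misbehaves to show the streams get put back):
```
import contextlib
import io
import sys
from concurrent.futures import ThreadPoolExecutor

@contextlib.contextmanager
def restore_stdout_stderr(initial_stdout, initial_stderr):
    try:
        yield
    finally:
        sys.stdout = initial_stdout
        sys.stderr = initial_stderr

initial_stdout, initial_stderr = sys.stdout, sys.stderr

def worker(i):
    with restore_stdout_stderr(initial_stdout, initial_stderr):
        sys.stdout = io.StringIO()  # simulate a worker redirecting the global stream
        return i * i

with ThreadPoolExecutor(max_workers=4) as ex:
    results = list(ex.map(worker, range(8)))

assert sys.stdout is initial_stdout  # restored despite the workers
```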
|
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index 577a1c318b..8fcb441ed8 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -40,6 +40,7 @@ from .runtime.runtime_utils import do_bench
from .utils import (
get_dtype_size,
Placeholder,
+ restore_stdout_stderr,
sympy_dot,
sympy_index_symbol,
sympy_product,
@@ -1007,15 +1008,28 @@ class AlgorithmSelectorCache(PersistentCache):
num_workers,
)
+ # In rare circumstances, because python threads inherit global state,
+ # thread pool executor can race and leave stdout/stderr in a state
+ # different than the original values. we explicitly restore the state
+ # here to avoid this issue.
+
+ initial_stdout = sys.stdout
+ initial_stderr = sys.stderr
+
+ def precompile_with_captured_stdout(choice):
+ with restore_stdout_stderr(initial_stdout, initial_stderr):
+ return choice.precompile()
+
executor = ThreadPoolExecutor(max_workers=num_workers)
futures = executor.map(
- lambda c: c.precompile(),
+ lambda c: precompile_with_captured_stdout(c),
[c for c in choices if hasattr(c, "precompile")],
timeout=precompilation_timeout_seconds,
)
from triton.runtime.autotuner import OutOfResources
@functools.lru_cache(None)
+ @restore_stdout_stderr(initial_stdout, initial_stderr)
def wait_on_futures():
counters["inductor"]["select_algorithm_precompile"] += 1
try:
diff --git a/torch/_inductor/utils.py b/torch/_inductor/utils.py
index 27bf3614cd..d4063b5833 100644
--- a/torch/_inductor/utils.py
+++ b/torch/_inductor/utils.py
@@ -848,6 +848,15 @@ class IndentedBuffer:
return res
+@contextlib.contextmanager
+def restore_stdout_stderr(initial_stdout, initial_stderr):
+ try:
+ yield
+ finally:
+ sys.stdout = initial_stdout
+ sys.stderr = initial_stderr
+
+
class DeferredLineBase:
"""A line that can be 'unwritten' at a later time"""
|
2.41.0
|
043ccafdf133002ce6c6993429c419054ee9ad7
|
Tue, 30 Apr 2024 22:55:30 +0300
|
[PATCH 0916/1000] Require nnz==0 in sparse meta tensors (#125221)
|
As in the title and per discussion starting at https://github.com/pytorch/pytorch/pull/117907#issuecomment-2082426468 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125221 Approved by: https://github.com/amjames, https://github.com/ezyang
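A minimal sketch of the behavior the updated tests expect: converting a sparse tensor to the meta device now produces an all-zero sparse meta tensor (nnz == 0) instead of carrying over the original nnz.
```
import torch

i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3.0, 4.0, 5.0])
s = torch.sparse_coo_tensor(i, v, (2, 3)).coalesce()

m = s.to(device="meta")
print(m.is_meta, m.layout)  # True torch.sparse_coo
print(m._nnz())             # 0 (previously this mirrored s._nnz())
```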
|
diff --git a/aten/src/ATen/native/TensorConversions.cpp b/aten/src/ATen/native/TensorConversions.cpp
index a6c1118c4e..c70da8334a 100644
--- a/aten/src/ATen/native/TensorConversions.cpp
+++ b/aten/src/ATen/native/TensorConversions.cpp
@@ -259,6 +259,9 @@ Tensor _to_copy(
memory_format == MemoryFormat::Preserve,
"to(options): COO only supports memory format Preserve, but got ", memory_format,
" instead.");
+ if (options.device().is_meta()) {
+ return zeros_like(self, options);
+ }
auto indices = self._indices();
const auto new_indices = at::native::to(
indices,
@@ -291,6 +294,10 @@ Tensor _to_copy(
" only supports memory format Preserve, but got ", memory_format,
" instead.");
+ if (options.device().is_meta()) {
+ return zeros_like(self, options);
+ }
+
auto [compressed_indices, plain_indices] = at::sparse_csr::getCompressedPlainIndices(self);
const auto new_values = at::native::to(
diff --git a/aten/src/ATen/native/TensorShape.cpp b/aten/src/ATen/native/TensorShape.cpp
index f5479925fc..a99e6e3a50 100644
--- a/aten/src/ATen/native/TensorShape.cpp
+++ b/aten/src/ATen/native/TensorShape.cpp
@@ -4082,11 +4082,13 @@ void unbind_copy_int_out(const at::Tensor & self, int64_t dim, at::TensorList o
}
}
-int64_t sparse_dim_strided(const at::Tensor& self) {
+int64_t sparse_dim_default(const Tensor& self) {
+ TORCH_CHECK(self.layout() == kStrided, "sparse_dim expected sparse or strided tensor layout but got ", self.layout());
return 0;
}
-int64_t dense_dim_strided(const at::Tensor& self) {
+int64_t dense_dim_default(const Tensor& self) {
+ TORCH_CHECK(self.layout() == kStrided, "dense_dim expected sparse or strided tensor layout but got ", self.layout());
return self.dim();
}
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index fc0e21c12e..8c1173683b 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -7210,9 +7210,9 @@
- func: sparse_dim(Tensor self) -> int
variants: method
dispatch:
- CPU, CUDA: sparse_dim_strided
SparseCPU, SparseCUDA, SparseMeta: sparse_dim_sparse
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: sparse_dim_sparse_csr
+ CompositeExplicitAutograd: sparse_dim_default
device_check: NoCheck
device_guard: False
@@ -7227,9 +7227,9 @@
- func: dense_dim(Tensor self) -> int
variants: method
dispatch:
- CPU, CUDA: dense_dim_strided
SparseCPU, SparseCUDA, SparseMeta: dense_dim_sparse
SparseCsrCPU, SparseCsrCUDA, SparseCsrMeta: dense_dim_sparse_csr
+ CompositeExplicitAutograd: dense_dim_default
device_check: NoCheck
device_guard: False
diff --git a/aten/src/ATen/native/sparse/SparseCsrTensor.cpp b/aten/src/ATen/native/sparse/SparseCsrTensor.cpp
index 6c0039e8e1..d1973c43e9 100644
--- a/aten/src/ATen/native/sparse/SparseCsrTensor.cpp
+++ b/aten/src/ATen/native/sparse/SparseCsrTensor.cpp
@@ -258,7 +258,9 @@ static void _validate_sparse_compressed_tensor_args_worker(const Tensor& compres
compressed_indices_name, " and ", plain_indices_name, " dtype must be Int or Long, but got ",
compressed_indices_type);
- if (!compressed_indices.is_meta()) {
+ if (compressed_indices.is_meta()) {
+ TORCH_CHECK(values_nnz == 0, "expected nnz to be 0 for sparse ", layout_name, " meta tensor but got ", values_nnz);
+ } else {
// Indices invariants
at::_validate_compressed_sparse_indices(
/*is_crow = */layout == kSparseCsr || layout == kSparseBsr,
diff --git a/test/test_sparse.py b/test/test_sparse.py
index 64a5ff0b23..07a2d3495e 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -4236,7 +4236,6 @@ class TestSparseMaskedReductions(TestCase):
class TestSparseMeta(TestCase):
exact_dtype = True
- @skipIfTorchDynamo("changing sparse tensor dimensionality confuses dynamo")
def _test_meta_sparse_coo(self, dtype):
r = torch.empty(4, 4, layout=torch.sparse_coo, device='meta', dtype=dtype)
self.assertTrue(r.is_meta)
@@ -4423,7 +4422,7 @@ class TestSparseMeta(TestCase):
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
m = t.to(device="meta")
self.assertEqual(m.device.type, "meta")
- self.assertEqualMeta(m, t, t._nnz())
+ self.assertEqualMeta(m, t, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
@@ -4461,6 +4460,7 @@ class TestSparseMeta(TestCase):
with no_dispatch():
result = torch.zeros_like(f, device=f.fake_device)
self.assertEqual(result, expected)
+ self.assertEqualMeta(result, expected, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
@@ -4470,9 +4470,9 @@ class TestSparseMeta(TestCase):
for t in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype):
m = t.to(device='meta')
r = torch.sum(m)
- self.assertEqual(r.layout, torch.strided)
+ expected = torch.sum(t).to(device="meta")
self.assertTrue(r.is_meta)
- self.assertEqual(r.shape, ())
+ self.assertEqualMeta(r, expected, 0)
@all_sparse_layouts('layout', include_strided=False)
@parametrize("dtype", [torch.float64])
@@ -4483,7 +4483,7 @@ class TestSparseMeta(TestCase):
expected = torch.add(t, t).to(device='meta')
m = t.to(device='meta')
r = torch.add(m, m)
- self.assertEqualMeta(r, expected, 0 if layout is torch.sparse_coo else expected._nnz())
+ self.assertEqualMeta(r, expected, 0)
class _SparseDataset(torch.utils.data.Dataset):
|
2.41.0
|
281d3a0cb435792c88f5015fafd33af40e2004e
|
Wed, 1 May 2024 23:44:53 +0000
|
[PATCH 0917/1000] Enable UFMT on test_indexing&test_view_ops (#125112)
|
Part of https://github.com/pytorch/pytorch/issues/123062 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125112 Approved by: https://github.com/ezyang
|
diff --git a/.lintrunner.toml b/.lintrunner.toml
index a945814b23..21dfdf5b3f 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1071,7 +1071,6 @@ exclude_patterns = [
'test/test_fx_reinplace_pass.py',
'test/test_hub.py',
'test/test_import_stats.py',
- 'test/test_indexing.py',
'test/test_itt.py',
'test/test_jit.py',
'test/test_jit_autocast.py',
@@ -1157,7 +1156,6 @@ exclude_patterns = [
'test/test_type_promotion.py',
'test/test_unary_ufuncs.py',
'test/test_utils.py',
- 'test/test_view_ops.py',
'test/test_vulkan.py',
'test/test_xnnpack_integration.py',
'test/torch_np/numpy_test/**/*.py',
diff --git a/test/test_indexing.py b/test/test_indexing.py
index 195a3144c3..f34fa4c566 100644
--- a/test/test_indexing.py
+++ b/test/test_indexing.py
@@ -1,27 +1,38 @@
# Owner(s): ["module: tests"]
-import torch
-from torch import tensor
+import operator
+import random
import unittest
import warnings
-import random
from functools import reduce
import numpy as np
+import torch
+from torch import tensor
from torch.testing import make_tensor
-from torch.testing._internal.common_utils import (
- TestCase, run_tests, skipIfTorchDynamo, DeterministicGuard, serialTest, TEST_CUDA)
from torch.testing._internal.common_device_type import (
- instantiate_device_type_tests, onlyCUDA, dtypes, dtypesIfCPU, dtypesIfCUDA,
- onlyNativeDeviceTypes, skipXLA)
-import operator
+ dtypes,
+ dtypesIfCPU,
+ dtypesIfCUDA,
+ instantiate_device_type_tests,
+ onlyCUDA,
+ onlyNativeDeviceTypes,
+ skipXLA,
+)
+from torch.testing._internal.common_utils import (
+ DeterministicGuard,
+ run_tests,
+ serialTest,
+ skipIfTorchDynamo,
+ TEST_CUDA,
+ TestCase,
+)
class TestIndexing(TestCase):
def test_index(self, device):
-
def consec(size, start=1):
sequence = torch.ones(torch.tensor(size).prod(0)).cumsum(0)
sequence.add_(start - 1)
@@ -30,7 +41,9 @@ class TestIndexing(TestCase):
reference = consec((3, 3, 3)).to(device)
# empty tensor indexing
- self.assertEqual(reference[torch.LongTensor().to(device)], reference.new(0, 3, 3))
+ self.assertEqual(
+ reference[torch.LongTensor().to(device)], reference.new(0, 3, 3)
+ )
self.assertEqual(reference[0], consec((3, 3)), atol=0, rtol=0)
self.assertEqual(reference[1], consec((3, 3), 10), atol=0, rtol=0)
@@ -41,10 +54,15 @@ class TestIndexing(TestCase):
self.assertEqual(reference[:], consec((3, 3, 3)), atol=0, rtol=0)
# indexing with Ellipsis
- self.assertEqual(reference[..., 2], torch.tensor([[3., 6., 9.],
- [12., 15., 18.],
- [21., 24., 27.]]), atol=0, rtol=0)
- self.assertEqual(reference[0, ..., 2], torch.tensor([3., 6., 9.]), atol=0, rtol=0)
+ self.assertEqual(
+ reference[..., 2],
+ torch.tensor([[3.0, 6.0, 9.0], [12.0, 15.0, 18.0], [21.0, 24.0, 27.0]]),
+ atol=0,
+ rtol=0,
+ )
+ self.assertEqual(
+ reference[0, ..., 2], torch.tensor([3.0, 6.0, 9.0]), atol=0, rtol=0
+ )
self.assertEqual(reference[..., 2], reference[:, :, 2], atol=0, rtol=0)
self.assertEqual(reference[0, ..., 2], reference[0, :, 2], atol=0, rtol=0)
self.assertEqual(reference[0, 2, ...], reference[0, 2], atol=0, rtol=0)
@@ -55,9 +73,15 @@ class TestIndexing(TestCase):
self.assertEqual(reference[...], reference, atol=0, rtol=0)
reference_5d = consec((3, 3, 3, 3, 3)).to(device)
- self.assertEqual(reference_5d[..., 1, 0], reference_5d[:, :, :, 1, 0], atol=0, rtol=0)
- self.assertEqual(reference_5d[2, ..., 1, 0], reference_5d[2, :, :, 1, 0], atol=0, rtol=0)
- self.assertEqual(reference_5d[2, 1, 0, ..., 1], reference_5d[2, 1, 0, :, 1], atol=0, rtol=0)
+ self.assertEqual(
+ reference_5d[..., 1, 0], reference_5d[:, :, :, 1, 0], atol=0, rtol=0
+ )
+ self.assertEqual(
+ reference_5d[2, ..., 1, 0], reference_5d[2, :, :, 1, 0], atol=0, rtol=0
+ )
+ self.assertEqual(
+ reference_5d[2, 1, 0, ..., 1], reference_5d[2, 1, 0, :, 1], atol=0, rtol=0
+ )
self.assertEqual(reference_5d[...], reference_5d, atol=0, rtol=0)
# LongTensor indexing
@@ -70,10 +94,18 @@ class TestIndexing(TestCase):
# None indexing
self.assertEqual(reference[2, None], reference[2].unsqueeze(0))
- self.assertEqual(reference[2, None, None], reference[2].unsqueeze(0).unsqueeze(0))
+ self.assertEqual(
+ reference[2, None, None], reference[2].unsqueeze(0).unsqueeze(0)
+ )
self.assertEqual(reference[2:4, None], reference[2:4].unsqueeze(1))
- self.assertEqual(reference[None, 2, None, None], reference.unsqueeze(0)[:, 2].unsqueeze(0).unsqueeze(0))
- self.assertEqual(reference[None, 2:5, None, None], reference.unsqueeze(0)[:, 2:5].unsqueeze(2).unsqueeze(2))
+ self.assertEqual(
+ reference[None, 2, None, None],
+ reference.unsqueeze(0)[:, 2].unsqueeze(0).unsqueeze(0),
+ )
+ self.assertEqual(
+ reference[None, 2:5, None, None],
+ reference.unsqueeze(0)[:, 2:5].unsqueeze(2).unsqueeze(2),
+ )
# indexing 0-length slice
self.assertEqual(torch.empty(0, 5, 5), reference[slice(0)])
@@ -84,13 +116,28 @@ class TestIndexing(TestCase):
# indexing with step
reference = consec((10, 10, 10)).to(device)
self.assertEqual(reference[1:5:2], torch.stack([reference[1], reference[3]], 0))
- self.assertEqual(reference[1:6:2], torch.stack([reference[1], reference[3], reference[5]], 0))
+ self.assertEqual(
+ reference[1:6:2], torch.stack([reference[1], reference[3], reference[5]], 0)
+ )
self.assertEqual(reference[1:9:4], torch.stack([reference[1], reference[5]], 0))
- self.assertEqual(reference[2:4, 1:5:2], torch.stack([reference[2:4, 1], reference[2:4, 3]], 1))
- self.assertEqual(reference[3, 1:6:2], torch.stack([reference[3, 1], reference[3, 3], reference[3, 5]], 0))
- self.assertEqual(reference[None, 2, 1:9:4], torch.stack([reference[2, 1], reference[2, 5]], 0).unsqueeze(0))
- self.assertEqual(reference[:, 2, 1:6:2],
- torch.stack([reference[:, 2, 1], reference[:, 2, 3], reference[:, 2, 5]], 1))
+ self.assertEqual(
+ reference[2:4, 1:5:2],
+ torch.stack([reference[2:4, 1], reference[2:4, 3]], 1),
+ )
+ self.assertEqual(
+ reference[3, 1:6:2],
+ torch.stack([reference[3, 1], reference[3, 3], reference[3, 5]], 0),
+ )
+ self.assertEqual(
+ reference[None, 2, 1:9:4],
+ torch.stack([reference[2, 1], reference[2, 5]], 0).unsqueeze(0),
+ )
+ self.assertEqual(
+ reference[:, 2, 1:6:2],
+ torch.stack(
+ [reference[:, 2, 1], reference[:, 2, 3], reference[:, 2, 5]], 1
+ ),
+ )
lst = [list(range(i, i + 10)) for i in range(0, 100, 10)]
tensor = torch.DoubleTensor(lst).to(device)
@@ -156,23 +203,33 @@ class TestIndexing(TestCase):
def validate_indexing(x):
self.assertEqual(x[[0]], consec((1,)))
- self.assertEqual(x[ri([0]), ], consec((1,)))
- self.assertEqual(x[ri([3]), ], consec((1,), 4))
+ self.assertEqual(x[ri([0]),], consec((1,)))
+ self.assertEqual(x[ri([3]),], consec((1,), 4))
self.assertEqual(x[[2, 3, 4]], consec((3,), 3))
- self.assertEqual(x[ri([2, 3, 4]), ], consec((3,), 3))
- self.assertEqual(x[ri([0, 2, 4]), ], torch.tensor([1, 3, 5], dtype=dtype, device=device))
+ self.assertEqual(x[ri([2, 3, 4]),], consec((3,), 3))
+ self.assertEqual(
+ x[ri([0, 2, 4]),], torch.tensor([1, 3, 5], dtype=dtype, device=device)
+ )
def validate_setting(x):
x[[0]] = -2
self.assertEqual(x[[0]], torch.tensor([-2], dtype=dtype, device=device))
x[[0]] = -1
- self.assertEqual(x[ri([0]), ], torch.tensor([-1], dtype=dtype, device=device))
+ self.assertEqual(
+ x[ri([0]),], torch.tensor([-1], dtype=dtype, device=device)
+ )
x[[2, 3, 4]] = 4
- self.assertEqual(x[[2, 3, 4]], torch.tensor([4, 4, 4], dtype=dtype, device=device))
- x[ri([2, 3, 4]), ] = 3
- self.assertEqual(x[ri([2, 3, 4]), ], torch.tensor([3, 3, 3], dtype=dtype, device=device))
- x[ri([0, 2, 4]), ] = torch.tensor([5, 4, 3], dtype=dtype, device=device)
- self.assertEqual(x[ri([0, 2, 4]), ], torch.tensor([5, 4, 3], dtype=dtype, device=device))
+ self.assertEqual(
+ x[[2, 3, 4]], torch.tensor([4, 4, 4], dtype=dtype, device=device)
+ )
+ x[ri([2, 3, 4]),] = 3
+ self.assertEqual(
+ x[ri([2, 3, 4]),], torch.tensor([3, 3, 3], dtype=dtype, device=device)
+ )
+ x[ri([0, 2, 4]),] = torch.tensor([5, 4, 3], dtype=dtype, device=device)
+ self.assertEqual(
+ x[ri([0, 2, 4]),], torch.tensor([5, 4, 3], dtype=dtype, device=device)
+ )
# Only validates indexing and setting for halfs
if dtype == torch.half:
@@ -192,208 +249,300 @@ class TestIndexing(TestCase):
# strided is [1, 3, 5, 7]
reference = consec((10,))
strided = torch.tensor((), dtype=dtype, device=device)
- strided.set_(reference.storage(), storage_offset=0,
- size=torch.Size([4]), stride=[2])
+ strided.set_(
+ reference.storage(), storage_offset=0, size=torch.Size([4]), stride=[2]
+ )
self.assertEqual(strided[[0]], torch.tensor([1], dtype=dtype, device=device))
- self.assertEqual(strided[ri([0]), ], torch.tensor([1], dtype=dtype, device=device))
- self.assertEqual(strided[ri([3]), ], torch.tensor([7], dtype=dtype, device=device))
- self.assertEqual(strided[[1, 2]], torch.tensor([3, 5], dtype=dtype, device=device))
- self.assertEqual(strided[ri([1, 2]), ], torch.tensor([3, 5], dtype=dtype, device=device))
- self.assertEqual(strided[ri([[2, 1], [0, 3]]), ],
- torch.tensor([[5, 3], [1, 7]], dtype=dtype, device=device))
+ self.assertEqual(
+ strided[ri([0]),], torch.tensor([1], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ strided[ri([3]),], torch.tensor([7], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ strided[[1, 2]], torch.tensor([3, 5], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ strided[ri([1, 2]),], torch.tensor([3, 5], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ strided[ri([[2, 1], [0, 3]]),],
+ torch.tensor([[5, 3], [1, 7]], dtype=dtype, device=device),
+ )
# stride is [4, 8]
strided = torch.tensor((), dtype=dtype, device=device)
- strided.set_(reference.storage(), storage_offset=4,
- size=torch.Size([2]), stride=[4])
+ strided.set_(
+ reference.storage(), storage_offset=4, size=torch.Size([2]), stride=[4]
+ )
self.assertEqual(strided[[0]], torch.tensor([5], dtype=dtype, device=device))
- self.assertEqual(strided[ri([0]), ], torch.tensor([5], dtype=dtype, device=device))
- self.assertEqual(strided[ri([1]), ], torch.tensor([9], dtype=dtype, device=device))
- self.assertEqual(strided[[0, 1]], torch.tensor([5, 9], dtype=dtype, device=device))
- self.assertEqual(strided[ri([0, 1]), ], torch.tensor([5, 9], dtype=dtype, device=device))
- self.assertEqual(strided[ri([[0, 1], [1, 0]]), ],
- torch.tensor([[5, 9], [9, 5]], dtype=dtype, device=device))
+ self.assertEqual(
+ strided[ri([0]),], torch.tensor([5], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ strided[ri([1]),], torch.tensor([9], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ strided[[0, 1]], torch.tensor([5, 9], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ strided[ri([0, 1]),], torch.tensor([5, 9], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ strided[ri([[0, 1], [1, 0]]),],
+ torch.tensor([[5, 9], [9, 5]], dtype=dtype, device=device),
+ )
# reference is 1 2
# 3 4
# 5 6
reference = consec((3, 2))
- self.assertEqual(reference[ri([0, 1, 2]), ri([0])], torch.tensor([1, 3, 5], dtype=dtype, device=device))
- self.assertEqual(reference[ri([0, 1, 2]), ri([1])], torch.tensor([2, 4, 6], dtype=dtype, device=device))
+ self.assertEqual(
+ reference[ri([0, 1, 2]), ri([0])],
+ torch.tensor([1, 3, 5], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ reference[ri([0, 1, 2]), ri([1])],
+ torch.tensor([2, 4, 6], dtype=dtype, device=device),
+ )
self.assertEqual(reference[ri([0]), ri([0])], consec((1,)))
self.assertEqual(reference[ri([2]), ri([1])], consec((1,), 6))
- self.assertEqual(reference[[ri([0, 0]), ri([0, 1])]], torch.tensor([1, 2], dtype=dtype, device=device))
- self.assertEqual(reference[[ri([0, 1, 1, 0, 2]), ri([1])]],
- torch.tensor([2, 4, 4, 2, 6], dtype=dtype, device=device))
- self.assertEqual(reference[[ri([0, 0, 1, 1]), ri([0, 1, 0, 0])]],
- torch.tensor([1, 2, 3, 3], dtype=dtype, device=device))
-
- rows = ri([[0, 0],
- [1, 2]])
- columns = [0],
- self.assertEqual(reference[rows, columns], torch.tensor([[1, 1],
- [3, 5]], dtype=dtype, device=device))
-
- rows = ri([[0, 0],
- [1, 2]])
+ self.assertEqual(
+ reference[[ri([0, 0]), ri([0, 1])]],
+ torch.tensor([1, 2], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ reference[[ri([0, 1, 1, 0, 2]), ri([1])]],
+ torch.tensor([2, 4, 4, 2, 6], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ reference[[ri([0, 0, 1, 1]), ri([0, 1, 0, 0])]],
+ torch.tensor([1, 2, 3, 3], dtype=dtype, device=device),
+ )
+
+ rows = ri([[0, 0], [1, 2]])
+ columns = ([0],)
+ self.assertEqual(
+ reference[rows, columns],
+ torch.tensor([[1, 1], [3, 5]], dtype=dtype, device=device),
+ )
+
+ rows = ri([[0, 0], [1, 2]])
columns = ri([1, 0])
- self.assertEqual(reference[rows, columns], torch.tensor([[2, 1],
- [4, 5]], dtype=dtype, device=device))
- rows = ri([[0, 0],
- [1, 2]])
- columns = ri([[0, 1],
- [1, 0]])
- self.assertEqual(reference[rows, columns], torch.tensor([[1, 2],
- [4, 5]], dtype=dtype, device=device))
+ self.assertEqual(
+ reference[rows, columns],
+ torch.tensor([[2, 1], [4, 5]], dtype=dtype, device=device),
+ )
+ rows = ri([[0, 0], [1, 2]])
+ columns = ri([[0, 1], [1, 0]])
+ self.assertEqual(
+ reference[rows, columns],
+ torch.tensor([[1, 2], [4, 5]], dtype=dtype, device=device),
+ )
# setting values
reference[ri([0]), ri([1])] = -1
- self.assertEqual(reference[ri([0]), ri([1])], torch.tensor([-1], dtype=dtype, device=device))
- reference[ri([0, 1, 2]), ri([0])] = torch.tensor([-1, 2, -4], dtype=dtype, device=device)
- self.assertEqual(reference[ri([0, 1, 2]), ri([0])],
- torch.tensor([-1, 2, -4], dtype=dtype, device=device))
- reference[rows, columns] = torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device)
- self.assertEqual(reference[rows, columns],
- torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device))
+ self.assertEqual(
+ reference[ri([0]), ri([1])], torch.tensor([-1], dtype=dtype, device=device)
+ )
+ reference[ri([0, 1, 2]), ri([0])] = torch.tensor(
+ [-1, 2, -4], dtype=dtype, device=device
+ )
+ self.assertEqual(
+ reference[ri([0, 1, 2]), ri([0])],
+ torch.tensor([-1, 2, -4], dtype=dtype, device=device),
+ )
+ reference[rows, columns] = torch.tensor(
+ [[4, 6], [2, 3]], dtype=dtype, device=device
+ )
+ self.assertEqual(
+ reference[rows, columns],
+ torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device),
+ )
# Verify still works with Transposed (i.e. non-contiguous) Tensors
- reference = torch.tensor([[0, 1, 2, 3],
- [4, 5, 6, 7],
- [8, 9, 10, 11]], dtype=dtype, device=device).t_()
+ reference = torch.tensor(
+ [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], dtype=dtype, device=device
+ ).t_()
# Transposed: [[0, 4, 8],
# [1, 5, 9],
# [2, 6, 10],
# [3, 7, 11]]
- self.assertEqual(reference[ri([0, 1, 2]), ri([0])],
- torch.tensor([0, 1, 2], dtype=dtype, device=device))
- self.assertEqual(reference[ri([0, 1, 2]), ri([1])],
- torch.tensor([4, 5, 6], dtype=dtype, device=device))
- self.assertEqual(reference[ri([0]), ri([0])],
- torch.tensor([0], dtype=dtype, device=device))
- self.assertEqual(reference[ri([2]), ri([1])],
- torch.tensor([6], dtype=dtype, device=device))
- self.assertEqual(reference[[ri([0, 0]), ri([0, 1])]],
- torch.tensor([0, 4], dtype=dtype, device=device))
- self.assertEqual(reference[[ri([0, 1, 1, 0, 3]), ri([1])]],
- torch.tensor([4, 5, 5, 4, 7], dtype=dtype, device=device))
- self.assertEqual(reference[[ri([0, 0, 1, 1]), ri([0, 1, 0, 0])]],
- torch.tensor([0, 4, 1, 1], dtype=dtype, device=device))
-
- rows = ri([[0, 0],
- [1, 2]])
- columns = [0],
- self.assertEqual(reference[rows, columns],
- torch.tensor([[0, 0], [1, 2]], dtype=dtype, device=device))
-
- rows = ri([[0, 0],
- [1, 2]])
+ self.assertEqual(
+ reference[ri([0, 1, 2]), ri([0])],
+ torch.tensor([0, 1, 2], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ reference[ri([0, 1, 2]), ri([1])],
+ torch.tensor([4, 5, 6], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ reference[ri([0]), ri([0])], torch.tensor([0], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ reference[ri([2]), ri([1])], torch.tensor([6], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ reference[[ri([0, 0]), ri([0, 1])]],
+ torch.tensor([0, 4], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ reference[[ri([0, 1, 1, 0, 3]), ri([1])]],
+ torch.tensor([4, 5, 5, 4, 7], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ reference[[ri([0, 0, 1, 1]), ri([0, 1, 0, 0])]],
+ torch.tensor([0, 4, 1, 1], dtype=dtype, device=device),
+ )
+
+ rows = ri([[0, 0], [1, 2]])
+ columns = ([0],)
+ self.assertEqual(
+ reference[rows, columns],
+ torch.tensor([[0, 0], [1, 2]], dtype=dtype, device=device),
+ )
+
+ rows = ri([[0, 0], [1, 2]])
columns = ri([1, 0])
- self.assertEqual(reference[rows, columns],
- torch.tensor([[4, 0], [5, 2]], dtype=dtype, device=device))
- rows = ri([[0, 0],
- [1, 3]])
- columns = ri([[0, 1],
- [1, 2]])
- self.assertEqual(reference[rows, columns],
- torch.tensor([[0, 4], [5, 11]], dtype=dtype, device=device))
+ self.assertEqual(
+ reference[rows, columns],
+ torch.tensor([[4, 0], [5, 2]], dtype=dtype, device=device),
+ )
+ rows = ri([[0, 0], [1, 3]])
+ columns = ri([[0, 1], [1, 2]])
+ self.assertEqual(
+ reference[rows, columns],
+ torch.tensor([[0, 4], [5, 11]], dtype=dtype, device=device),
+ )
# setting values
reference[ri([0]), ri([1])] = -1
- self.assertEqual(reference[ri([0]), ri([1])],
- torch.tensor([-1], dtype=dtype, device=device))
- reference[ri([0, 1, 2]), ri([0])] = torch.tensor([-1, 2, -4], dtype=dtype, device=device)
- self.assertEqual(reference[ri([0, 1, 2]), ri([0])],
- torch.tensor([-1, 2, -4], dtype=dtype, device=device))
- reference[rows, columns] = torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device)
- self.assertEqual(reference[rows, columns],
- torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device))
+ self.assertEqual(
+ reference[ri([0]), ri([1])], torch.tensor([-1], dtype=dtype, device=device)
+ )
+ reference[ri([0, 1, 2]), ri([0])] = torch.tensor(
+ [-1, 2, -4], dtype=dtype, device=device
+ )
+ self.assertEqual(
+ reference[ri([0, 1, 2]), ri([0])],
+ torch.tensor([-1, 2, -4], dtype=dtype, device=device),
+ )
+ reference[rows, columns] = torch.tensor(
+ [[4, 6], [2, 3]], dtype=dtype, device=device
+ )
+ self.assertEqual(
+ reference[rows, columns],
+ torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device),
+ )
# stride != 1
# strided is [[1 3 5 7],
# [9 11 13 15]]
- reference = torch.arange(0., 24, dtype=dtype, device=device).view(3, 8)
+ reference = torch.arange(0.0, 24, dtype=dtype, device=device).view(3, 8)
strided = torch.tensor((), dtype=dtype, device=device)
- strided.set_(reference.storage(), 1, size=torch.Size([2, 4]),
- stride=[8, 2])
-
- self.assertEqual(strided[ri([0, 1]), ri([0])],
- torch.tensor([1, 9], dtype=dtype, device=device))
- self.assertEqual(strided[ri([0, 1]), ri([1])],
- torch.tensor([3, 11], dtype=dtype, device=device))
- self.assertEqual(strided[ri([0]), ri([0])],
- torch.tensor([1], dtype=dtype, device=device))
- self.assertEqual(strided[ri([1]), ri([3])],
- torch.tensor([15], dtype=dtype, device=device))
- self.assertEqual(strided[[ri([0, 0]), ri([0, 3])]],
- torch.tensor([1, 7], dtype=dtype, device=device))
- self.assertEqual(strided[[ri([1]), ri([0, 1, 1, 0, 3])]],
- torch.tensor([9, 11, 11, 9, 15], dtype=dtype, device=device))
- self.assertEqual(strided[[ri([0, 0, 1, 1]), ri([0, 1, 0, 0])]],
- torch.tensor([1, 3, 9, 9], dtype=dtype, device=device))
-
- rows = ri([[0, 0],
- [1, 1]])
- columns = [0],
- self.assertEqual(strided[rows, columns],
- torch.tensor([[1, 1], [9, 9]], dtype=dtype, device=device))
-
- rows = ri([[0, 1],
- [1, 0]])
+ strided.set_(reference.storage(), 1, size=torch.Size([2, 4]), stride=[8, 2])
+
+ self.assertEqual(
+ strided[ri([0, 1]), ri([0])],
+ torch.tensor([1, 9], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ strided[ri([0, 1]), ri([1])],
+ torch.tensor([3, 11], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ strided[ri([0]), ri([0])], torch.tensor([1], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ strided[ri([1]), ri([3])], torch.tensor([15], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ strided[[ri([0, 0]), ri([0, 3])]],
+ torch.tensor([1, 7], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ strided[[ri([1]), ri([0, 1, 1, 0, 3])]],
+ torch.tensor([9, 11, 11, 9, 15], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ strided[[ri([0, 0, 1, 1]), ri([0, 1, 0, 0])]],
+ torch.tensor([1, 3, 9, 9], dtype=dtype, device=device),
+ )
+
+ rows = ri([[0, 0], [1, 1]])
+ columns = ([0],)
+ self.assertEqual(
+ strided[rows, columns],
+ torch.tensor([[1, 1], [9, 9]], dtype=dtype, device=device),
+ )
+
+ rows = ri([[0, 1], [1, 0]])
columns = ri([1, 2])
- self.assertEqual(strided[rows, columns],
- torch.tensor([[3, 13], [11, 5]], dtype=dtype, device=device))
- rows = ri([[0, 0],
- [1, 1]])
- columns = ri([[0, 1],
- [1, 2]])
- self.assertEqual(strided[rows, columns],
- torch.tensor([[1, 3], [11, 13]], dtype=dtype, device=device))
+ self.assertEqual(
+ strided[rows, columns],
+ torch.tensor([[3, 13], [11, 5]], dtype=dtype, device=device),
+ )
+ rows = ri([[0, 0], [1, 1]])
+ columns = ri([[0, 1], [1, 2]])
+ self.assertEqual(
+ strided[rows, columns],
+ torch.tensor([[1, 3], [11, 13]], dtype=dtype, device=device),
+ )
# setting values
# strided is [[10, 11],
# [17, 18]]
- reference = torch.arange(0., 24, dtype=dtype, device=device).view(3, 8)
+ reference = torch.arange(0.0, 24, dtype=dtype, device=device).view(3, 8)
strided = torch.tensor((), dtype=dtype, device=device)
- strided.set_(reference.storage(), 10, size=torch.Size([2, 2]),
- stride=[7, 1])
- self.assertEqual(strided[ri([0]), ri([1])],
- torch.tensor([11], dtype=dtype, device=device))
+ strided.set_(reference.storage(), 10, size=torch.Size([2, 2]), stride=[7, 1])
+ self.assertEqual(
+ strided[ri([0]), ri([1])], torch.tensor([11], dtype=dtype, device=device)
+ )
strided[ri([0]), ri([1])] = -1
- self.assertEqual(strided[ri([0]), ri([1])],
- torch.tensor([-1], dtype=dtype, device=device))
+ self.assertEqual(
+ strided[ri([0]), ri([1])], torch.tensor([-1], dtype=dtype, device=device)
+ )
- reference = torch.arange(0., 24, dtype=dtype, device=device).view(3, 8)
+ reference = torch.arange(0.0, 24, dtype=dtype, device=device).view(3, 8)
strided = torch.tensor((), dtype=dtype, device=device)
- strided.set_(reference.storage(), 10, size=torch.Size([2, 2]),
- stride=[7, 1])
- self.assertEqual(strided[ri([0, 1]), ri([1, 0])],
- torch.tensor([11, 17], dtype=dtype, device=device))
- strided[ri([0, 1]), ri([1, 0])] = torch.tensor([-1, 2], dtype=dtype, device=device)
- self.assertEqual(strided[ri([0, 1]), ri([1, 0])],
- torch.tensor([-1, 2], dtype=dtype, device=device))
-
- reference = torch.arange(0., 24, dtype=dtype, device=device).view(3, 8)
+ strided.set_(reference.storage(), 10, size=torch.Size([2, 2]), stride=[7, 1])
+ self.assertEqual(
+ strided[ri([0, 1]), ri([1, 0])],
+ torch.tensor([11, 17], dtype=dtype, device=device),
+ )
+ strided[ri([0, 1]), ri([1, 0])] = torch.tensor(
+ [-1, 2], dtype=dtype, device=device
+ )
+ self.assertEqual(
+ strided[ri([0, 1]), ri([1, 0])],
+ torch.tensor([-1, 2], dtype=dtype, device=device),
+ )
+
+ reference = torch.arange(0.0, 24, dtype=dtype, device=device).view(3, 8)
strided = torch.tensor((), dtype=dtype, device=device)
- strided.set_(reference.storage(), 10, size=torch.Size([2, 2]),
- stride=[7, 1])
-
- rows = ri([[0],
- [1]])
- columns = ri([[0, 1],
- [0, 1]])
- self.assertEqual(strided[rows, columns],
- torch.tensor([[10, 11], [17, 18]], dtype=dtype, device=device))
- strided[rows, columns] = torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device)
- self.assertEqual(strided[rows, columns],
- torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device))
+ strided.set_(reference.storage(), 10, size=torch.Size([2, 2]), stride=[7, 1])
+
+ rows = ri([[0], [1]])
+ columns = ri([[0, 1], [0, 1]])
+ self.assertEqual(
+ strided[rows, columns],
+ torch.tensor([[10, 11], [17, 18]], dtype=dtype, device=device),
+ )
+ strided[rows, columns] = torch.tensor(
+ [[4, 6], [2, 3]], dtype=dtype, device=device
+ )
+ self.assertEqual(
+ strided[rows, columns],
+ torch.tensor([[4, 6], [2, 3]], dtype=dtype, device=device),
+ )
# Tests using less than the number of dims, and ellipsis
@@ -401,12 +550,17 @@ class TestIndexing(TestCase):
# 3 4
# 5 6
reference = consec((3, 2))
- self.assertEqual(reference[ri([0, 2]), ],
- torch.tensor([[1, 2], [5, 6]], dtype=dtype, device=device))
- self.assertEqual(reference[ri([1]), ...],
- torch.tensor([[3, 4]], dtype=dtype, device=device))
- self.assertEqual(reference[..., ri([1])],
- torch.tensor([[2], [4], [6]], dtype=dtype, device=device))
+ self.assertEqual(
+ reference[ri([0, 2]),],
+ torch.tensor([[1, 2], [5, 6]], dtype=dtype, device=device),
+ )
+ self.assertEqual(
+ reference[ri([1]), ...], torch.tensor([[3, 4]], dtype=dtype, device=device)
+ )
+ self.assertEqual(
+ reference[..., ri([1])],
+ torch.tensor([[2], [4], [6]], dtype=dtype, device=device),
+ )
# verify too many indices fails
with self.assertRaises(IndexError):
@@ -417,21 +571,22 @@ class TestIndexing(TestCase):
# can't test cuda because it is a device assert
if not reference.is_cuda:
for err_idx in (10, -11):
- with self.assertRaisesRegex(IndexError, r'out of'):
+ with self.assertRaisesRegex(IndexError, r"out of"):
reference[err_idx]
- with self.assertRaisesRegex(IndexError, r'out of'):
+ with self.assertRaisesRegex(IndexError, r"out of"):
reference[torch.LongTensor([err_idx]).to(device)]
- with self.assertRaisesRegex(IndexError, r'out of'):
+ with self.assertRaisesRegex(IndexError, r"out of"):
reference[[err_idx]]
def tensor_indices_to_np(tensor, indices):
# convert the Torch Tensor to a numpy array
- tensor = tensor.to(device='cpu')
+ tensor = tensor.to(device="cpu")
npt = tensor.numpy()
# convert indices
- idxs = tuple(i.tolist() if isinstance(i, torch.LongTensor) else
- i for i in indices)
+ idxs = tuple(
+ i.tolist() if isinstance(i, torch.LongTensor) else i for i in indices
+ )
return npt, idxs
@@ -443,7 +598,7 @@ class TestIndexing(TestCase):
def set_numpy(tensor, indices, value):
if not isinstance(value, int):
- if self.device_type != 'cpu':
+ if self.device_type != "cpu":
value = value.cpu()
value = value.numpy()
@@ -458,7 +613,9 @@ class TestIndexing(TestCase):
pyt = tensor.clone()
numt = tensor.clone()
pyt[indexer] = val
- numt = torch.tensor(set_numpy(numt, indexer, val), dtype=dtype, device=device)
+ numt = torch.tensor(
+ set_numpy(numt, indexer, val), dtype=dtype, device=device
+ )
self.assertEqual(pyt, numt)
def assert_backward_eq(tensor, indexer):
@@ -481,18 +638,15 @@ class TestIndexing(TestCase):
# 5 6 7 8 9
# 10 11 12 13 14
# 15 16 17 18 19
- reference = torch.arange(0., 20, dtype=dtype, device=device).view(4, 5)
+ reference = torch.arange(0.0, 20, dtype=dtype, device=device).view(4, 5)
indices_to_test = [
# grab the second, fourth columns
[slice(None), [1, 3]],
-
# first, third rows,
[[0, 2], slice(None)],
-
# weird shape
- [slice(None), [[0, 1],
- [2, 3]]],
+ [slice(None), [[0, 1], [2, 3]]],
# negatives
[[-1], [0]],
[[0, 2], [-1]],
@@ -504,16 +658,14 @@ class TestIndexing(TestCase):
for indexer in get_indices_to_test:
assert_get_eq(reference, indexer)
- if self.device_type != 'cpu':
+ if self.device_type != "cpu":
assert_backward_eq(reference, indexer)
for indexer in indices_to_test:
assert_set_eq(reference, indexer, 44)
- assert_set_eq(reference,
- indexer,
- get_set_tensor(reference, indexer))
+ assert_set_eq(reference, indexer, get_set_tensor(reference, indexer))
- reference = torch.arange(0., 160, dtype=dtype, device=device).view(4, 8, 5)
+ reference = torch.arange(0.0, 160, dtype=dtype, device=device).view(4, 8, 5)
indices_to_test = [
[slice(None), slice(None), [0, 3, 4]],
@@ -537,7 +689,9 @@ class TestIndexing(TestCase):
[[0, 2, 3], slice(None), [1, 3, 4]],
# [...]
# less dim, ellipsis
- [[0, 2], ],
+ [
+ [0, 2],
+ ],
[[0, 2], slice(None)],
[[0, 2], Ellipsis],
[[0, 2], slice(None), Ellipsis],
@@ -548,7 +702,6 @@ class TestIndexing(TestCase):
[Ellipsis, [2, 3, 4]],
[Ellipsis, slice(None), [2, 3, 4]],
[slice(None), Ellipsis, [2, 3, 4]],
-
# ellipsis counts for nothing
[Ellipsis, slice(None), slice(None), [0, 3, 4]],
[slice(None), Ellipsis, slice(None), [0, 3, 4]],
@@ -566,7 +719,7 @@ class TestIndexing(TestCase):
if torch.cuda.is_available():
assert_backward_eq(reference, indexer)
- reference = torch.arange(0., 1296, dtype=dtype, device=device).view(3, 9, 8, 6)
+ reference = torch.arange(0.0, 1296, dtype=dtype, device=device).view(3, 9, 8, 6)
indices_to_test = [
[slice(None), slice(None), slice(None), [0, 3, 4]],
@@ -610,7 +763,6 @@ class TestIndexing(TestCase):
[[0], [4], [1, 3, 4], slice(None)],
[[1], [0, 2, 3], [1], slice(None)],
[[[1, 2], [1, 2]], [[0, 1], [2, 3]], [[2, 3], [3, 5]], slice(None)],
-
# less dim, ellipsis
[Ellipsis, [0, 3, 4]],
[Ellipsis, slice(None), [0, 3, 4]],
@@ -624,7 +776,9 @@ class TestIndexing(TestCase):
[[0], [1, 2, 4], slice(None)],
[[0], [1, 2, 4], Ellipsis],
[[0], [1, 2, 4], Ellipsis, slice(None)],
- [[1], ],
+ [
+ [1],
+ ],
[[0, 2, 1], [3], [4]],
[[0, 2, 1], [3], [4], slice(None)],
[[0, 2, 1], [3], [4], Ellipsis],
@@ -642,14 +796,16 @@ class TestIndexing(TestCase):
for indexer in indices_to_test:
assert_get_eq(reference, indexer)
assert_set_eq(reference, indexer, 1333)
- if self.device_type != 'cpu':
+ if self.device_type != "cpu":
assert_backward_eq(reference, indexer)
def test_advancedindex_big(self, device):
reference = torch.arange(0, 123344, dtype=torch.int, device=device)
- self.assertEqual(reference[[0, 123, 44488, 68807, 123343], ],
- torch.tensor([0, 123, 44488, 68807, 123343], dtype=torch.int))
+ self.assertEqual(
+ reference[[0, 123, 44488, 68807, 123343],],
+ torch.tensor([0, 123, 44488, 68807, 123343], dtype=torch.int),
+ )
def test_set_item_to_scalar_tensor(self, device):
m = random.randint(1, 10)
@@ -687,31 +843,37 @@ class TestIndexing(TestCase):
def test_step_assignment(self, device):
v = torch.zeros(4, 4, device=device)
- v[0, 1::2] = torch.tensor([3., 4.], device=device)
+ v[0, 1::2] = torch.tensor([3.0, 4.0], device=device)
self.assertEqual(v[0].tolist(), [0, 3, 0, 4])
self.assertEqual(v[1:].sum(), 0)
def test_bool_indices(self, device):
v = torch.randn(5, 7, 3, device=device)
- boolIndices = torch.tensor([True, False, True, True, False], dtype=torch.bool, device=device)
+ boolIndices = torch.tensor(
+ [True, False, True, True, False], dtype=torch.bool, device=device
+ )
self.assertEqual(v[boolIndices].shape, (3, 7, 3))
self.assertEqual(v[boolIndices], torch.stack([v[0], v[2], v[3]]))
v = torch.tensor([True, False, True], dtype=torch.bool, device=device)
- boolIndices = torch.tensor([True, False, False], dtype=torch.bool, device=device)
+ boolIndices = torch.tensor(
+ [True, False, False], dtype=torch.bool, device=device
+ )
uint8Indices = torch.tensor([1, 0, 0], dtype=torch.uint8, device=device)
with warnings.catch_warnings(record=True) as w:
v1 = v[boolIndices]
v2 = v[uint8Indices]
self.assertEqual(v1.shape, v2.shape)
self.assertEqual(v1, v2)
- self.assertEqual(v[boolIndices], tensor([True], dtype=torch.bool, device=device))
+ self.assertEqual(
+ v[boolIndices], tensor([True], dtype=torch.bool, device=device)
+ )
self.assertEqual(len(w), 1)
def test_bool_indices_accumulate(self, device):
- mask = torch.zeros(size=(10, ), dtype=torch.bool, device=device)
+ mask = torch.zeros(size=(10,), dtype=torch.bool, device=device)
y = torch.ones(size=(10, 10), device=device)
- y.index_put_((mask, ), y[mask], accumulate=True)
+ y.index_put_((mask,), y[mask], accumulate=True)
self.assertEqual(y, torch.ones(size=(10, 10), device=device))
def test_multiple_bool_indices(self, device):
@@ -730,29 +892,33 @@ class TestIndexing(TestCase):
self.assertEqual(res, torch.stack([v[0], v[2], v[3]]))
self.assertEqual(len(w), 1)
- v = torch.tensor([1.], device=device)
+ v = torch.tensor([1.0], device=device)
self.assertEqual(v[v == 0], torch.tensor([], device=device))
def test_byte_mask_accumulate(self, device):
- mask = torch.zeros(size=(10, ), dtype=torch.uint8, device=device)
+ mask = torch.zeros(size=(10,), dtype=torch.uint8, device=device)
y = torch.ones(size=(10, 10), device=device)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
- y.index_put_((mask, ), y[mask], accumulate=True)
+ y.index_put_((mask,), y[mask], accumulate=True)
self.assertEqual(y, torch.ones(size=(10, 10), device=device))
self.assertEqual(len(w), 2)
- @skipIfTorchDynamo("This test causes SIGKILL when running with dynamo, https://github.com/pytorch/pytorch/issues/88472")
+ @skipIfTorchDynamo(
+ "This test causes SIGKILL when running with dynamo, https://github.com/pytorch/pytorch/issues/88472"
+ )
@serialTest(TEST_CUDA)
def test_index_put_accumulate_large_tensor(self, device):
# This test is for tensors with number of elements >= INT_MAX (2^31 - 1).
N = (1 << 31) + 5
dt = torch.int8
a = torch.ones(N, dtype=dt, device=device)
- indices = torch.tensor([-2, 0, -2, -1, 0, -1, 1], device=device, dtype=torch.long)
+ indices = torch.tensor(
+ [-2, 0, -2, -1, 0, -1, 1], device=device, dtype=torch.long
+ )
values = torch.tensor([6, 5, 6, 6, 5, 7, 11], dtype=dt, device=device)
- a.index_put_((indices, ), values, accumulate=True)
+ a.index_put_((indices,), values, accumulate=True)
self.assertEqual(a[0], 11)
self.assertEqual(a[1], 12)
@@ -787,11 +953,19 @@ class TestIndexing(TestCase):
t_dev = t.to(device)
indices = [
torch.tensor([0, 1, 2, 3]),
- torch.tensor([1, ]),
+ torch.tensor(
+ [
+ 1,
+ ]
+ ),
]
indices_dev = [i.to(device) for i in indices]
values0d = torch.tensor(1.0)
- values1d = torch.tensor([1.0, ])
+ values1d = torch.tensor(
+ [
+ 1.0,
+ ]
+ )
out_cuda = t_dev.index_put_(indices_dev, values0d.to(device), accumulate=True)
out_cpu = t.index_put_(indices, values0d, accumulate=True)
@@ -805,13 +979,21 @@ class TestIndexing(TestCase):
t_dev = t.to(device)
indices = [
- torch.tensor([0, ]),
+ torch.tensor(
+ [
+ 0,
+ ]
+ ),
torch.arange(3)[:, None],
torch.arange(2)[None, :],
]
indices_dev = [i.to(device) for i in indices]
values1d = torch.tensor([-1.0, -2.0])
- values2d = torch.tensor([[-1.0, -2.0], ])
+ values2d = torch.tensor(
+ [
+ [-1.0, -2.0],
+ ]
+ )
out_cuda = t_dev.index_put_(indices_dev, values1d.to(device), accumulate=True)
out_cpu = t.index_put_(indices, values1d, accumulate=True)
@@ -830,7 +1012,9 @@ class TestIndexing(TestCase):
self.assertTrue(not t1.is_contiguous())
self.assertTrue(not t2.is_contiguous())
- indices = [torch.tensor([0, 1]), ]
+ indices = [
+ torch.tensor([0, 1]),
+ ]
indices_dev = [i.to(device) for i in indices]
value = torch.randn(2, 2)
out_cuda = t1.index_put_(indices_dev, value.to(device), accumulate=True)
@@ -952,7 +1136,11 @@ class TestIndexing(TestCase):
scripted_fn2 = torch.jit.script(fn2)
data = torch.arange(100, device=device, dtype=torch.float)
out = scripted_fn1(data.detach().clone())
- ref = torch.tensor(np.concatenate((np.ones(50), np.arange(50, 100))), device=device, dtype=torch.float)
+ ref = torch.tensor(
+ np.concatenate((np.ones(50), np.arange(50, 100))),
+ device=device,
+ dtype=torch.float,
+ )
self.assertEqual(out, ref)
out = scripted_fn2(data.detach().clone())
self.assertEqual(out, ref)
@@ -963,9 +1151,15 @@ class TestIndexing(TestCase):
self.assertEqual(v[:, [0, 4, 2]].shape, (5, 3, 3))
self.assertEqual(v[:, [[0, 1], [4, 3]]].shape, (5, 2, 2, 3))
- @dtypes(torch.cfloat, torch.cdouble, torch.float, torch.bfloat16, torch.long, torch.bool)
- @dtypesIfCPU(torch.cfloat, torch.cdouble, torch.float, torch.long, torch.bool, torch.bfloat16)
- @dtypesIfCUDA(torch.cfloat, torch.cdouble, torch.half, torch.long, torch.bool, torch.bfloat16)
+ @dtypes(
+ torch.cfloat, torch.cdouble, torch.float, torch.bfloat16, torch.long, torch.bool
+ )
+ @dtypesIfCPU(
+ torch.cfloat, torch.cdouble, torch.float, torch.long, torch.bool, torch.bfloat16
+ )
+ @dtypesIfCUDA(
+ torch.cfloat, torch.cdouble, torch.half, torch.long, torch.bool, torch.bfloat16
+ )
def test_index_put_src_datatype(self, device, dtype):
src = torch.ones(3, 2, 4, device=device, dtype=dtype)
vals = torch.ones(3, 2, 4, device=device, dtype=dtype)
@@ -1016,21 +1210,28 @@ class TestIndexing(TestCase):
def test_empty_ndim_index(self, device):
x = torch.randn(5, device=device)
- self.assertEqual(torch.empty(0, 2, device=device), x[torch.empty(0, 2, dtype=torch.int64, device=device)])
+ self.assertEqual(
+ torch.empty(0, 2, device=device),
+ x[torch.empty(0, 2, dtype=torch.int64, device=device)],
+ )
x = torch.randn(2, 3, 4, 5, device=device)
- self.assertEqual(torch.empty(2, 0, 6, 4, 5, device=device),
- x[:, torch.empty(0, 6, dtype=torch.int64, device=device)])
+ self.assertEqual(
+ torch.empty(2, 0, 6, 4, 5, device=device),
+ x[:, torch.empty(0, 6, dtype=torch.int64, device=device)],
+ )
x = torch.empty(10, 0, device=device)
self.assertEqual(x[[1, 2]].shape, (2, 0))
self.assertEqual(x[[], []].shape, (0,))
- with self.assertRaisesRegex(IndexError, 'for dimension with size 0'):
+ with self.assertRaisesRegex(IndexError, "for dimension with size 0"):
x[:, [0, 1]]
def test_empty_ndim_index_bool(self, device):
x = torch.randn(5, device=device)
- self.assertRaises(IndexError, lambda: x[torch.empty(0, 2, dtype=torch.uint8, device=device)])
+ self.assertRaises(
+ IndexError, lambda: x[torch.empty(0, 2, dtype=torch.uint8, device=device)]
+ )
def test_empty_slice(self, device):
x = torch.randn(2, 3, 4, 5, device=device)
@@ -1045,7 +1246,7 @@ class TestIndexing(TestCase):
true = torch.tensor(1, dtype=torch.uint8, device=device)
false = torch.tensor(0, dtype=torch.uint8, device=device)
- tensors = [torch.randn(2, 3, device=device), torch.tensor(3., device=device)]
+ tensors = [torch.randn(2, 3, device=device), torch.tensor(3.0, device=device)]
for a in tensors:
self.assertNotEqual(a.data_ptr(), a[True].data_ptr())
@@ -1178,18 +1379,18 @@ class TestIndexing(TestCase):
self.assertEqual(x.tolist(), [[0, 1], [5, 6]])
def test_byte_tensor_assignment(self, device):
- x = torch.arange(0., 16, device=device).view(4, 4)
+ x = torch.arange(0.0, 16, device=device).view(4, 4)
b = torch.ByteTensor([True, False, True, False]).to(device)
- value = torch.tensor([3., 4., 5., 6.], device=device)
+ value = torch.tensor([3.0, 4.0, 5.0, 6.0], device=device)
with warnings.catch_warnings(record=True) as w:
x[b] = value
self.assertEqual(len(w), 1)
self.assertEqual(x[0], value)
- self.assertEqual(x[1], torch.arange(4., 8, device=device))
+ self.assertEqual(x[1], torch.arange(4.0, 8, device=device))
self.assertEqual(x[2], value)
- self.assertEqual(x[3], torch.arange(12., 16, device=device))
+ self.assertEqual(x[3], torch.arange(12.0, 16, device=device))
def test_variable_slicing(self, device):
x = torch.arange(0, 16, device=device).view(4, 4)
@@ -1200,50 +1401,62 @@ class TestIndexing(TestCase):
def test_ellipsis_tensor(self, device):
x = torch.arange(0, 9, device=device).view(3, 3)
idx = torch.tensor([0, 2], device=device)
- self.assertEqual(x[..., idx].tolist(), [[0, 2],
- [3, 5],
- [6, 8]])
- self.assertEqual(x[idx, ...].tolist(), [[0, 1, 2],
- [6, 7, 8]])
+ self.assertEqual(x[..., idx].tolist(), [[0, 2], [3, 5], [6, 8]])
+ self.assertEqual(x[idx, ...].tolist(), [[0, 1, 2], [6, 7, 8]])
def test_unravel_index_errors(self, device):
with self.assertRaisesRegex(TypeError, r"expected 'indices' to be integer"):
- torch.unravel_index(
- torch.tensor(0.5, device=device),
- (2, 2))
+ torch.unravel_index(torch.tensor(0.5, device=device), (2, 2))
with self.assertRaisesRegex(TypeError, r"expected 'indices' to be integer"):
- torch.unravel_index(
- torch.tensor([], device=device),
- (10, 3, 5))
+ torch.unravel_index(torch.tensor([], device=device), (10, 3, 5))
- with self.assertRaisesRegex(TypeError, r"expected 'shape' to be int or sequence"):
+ with self.assertRaisesRegex(
+ TypeError, r"expected 'shape' to be int or sequence"
+ ):
torch.unravel_index(
torch.tensor([1], device=device, dtype=torch.int64),
- torch.tensor([1, 2, 3]))
+ torch.tensor([1, 2, 3]),
+ )
- with self.assertRaisesRegex(TypeError, r"expected 'shape' sequence to only contain ints"):
+ with self.assertRaisesRegex(
+ TypeError, r"expected 'shape' sequence to only contain ints"
+ ):
torch.unravel_index(
- torch.tensor([1], device=device, dtype=torch.int64),
- (1, 2, 2.0))
+ torch.tensor([1], device=device, dtype=torch.int64), (1, 2, 2.0)
+ )
- with self.assertRaisesRegex(ValueError, r"'shape' cannot have negative values, but got \(2, -3\)"):
- torch.unravel_index(
- torch.tensor(0, device=device),
- (2, -3))
+ with self.assertRaisesRegex(
+ ValueError, r"'shape' cannot have negative values, but got \(2, -3\)"
+ ):
+ torch.unravel_index(torch.tensor(0, device=device), (2, -3))
def test_invalid_index(self, device):
x = torch.arange(0, 16, device=device).view(4, 4)
- self.assertRaisesRegex(TypeError, 'slice indices', lambda: x["0":"1"])
+ self.assertRaisesRegex(TypeError, "slice indices", lambda: x["0":"1"])
def test_out_of_bound_index(self, device):
x = torch.arange(0, 100, device=device).view(2, 5, 10)
- self.assertRaisesRegex(IndexError, 'index 5 is out of bounds for dimension 1 with size 5', lambda: x[0, 5])
- self.assertRaisesRegex(IndexError, 'index 4 is out of bounds for dimension 0 with size 2', lambda: x[4, 5])
- self.assertRaisesRegex(IndexError, 'index 15 is out of bounds for dimension 2 with size 10',
- lambda: x[0, 1, 15])
- self.assertRaisesRegex(IndexError, 'index 12 is out of bounds for dimension 2 with size 10',
- lambda: x[:, :, 12])
+ self.assertRaisesRegex(
+ IndexError,
+ "index 5 is out of bounds for dimension 1 with size 5",
+ lambda: x[0, 5],
+ )
+ self.assertRaisesRegex(
+ IndexError,
+ "index 4 is out of bounds for dimension 0 with size 2",
+ lambda: x[4, 5],
+ )
+ self.assertRaisesRegex(
+ IndexError,
+ "index 15 is out of bounds for dimension 2 with size 10",
+ lambda: x[0, 1, 15],
+ )
+ self.assertRaisesRegex(
+ IndexError,
+ "index 12 is out of bounds for dimension 2 with size 10",
+ lambda: x[:, :, 12],
+ )
def test_zero_dim_index(self, device):
x = torch.tensor(10, device=device)
@@ -1253,16 +1466,19 @@ class TestIndexing(TestCase):
print(x[0])
return x[0]
- self.assertRaisesRegex(IndexError, 'invalid index', runner)
+ self.assertRaisesRegex(IndexError, "invalid index", runner)
@onlyCUDA
def test_invalid_device(self, device):
idx = torch.tensor([0, 1])
b = torch.zeros(5, device=device)
- c = torch.tensor([1., 2.], device="cpu")
+ c = torch.tensor([1.0, 2.0], device="cpu")
for accumulate in [True, False]:
- self.assertRaises(RuntimeError, lambda: torch.index_put_(b, (idx,), c, accumulate=accumulate))
+ self.assertRaises(
+ RuntimeError,
+ lambda: torch.index_put_(b, (idx,), c, accumulate=accumulate),
+ )
@onlyCUDA
def test_cpu_indices(self, device):
@@ -1287,7 +1503,9 @@ class TestIndexing(TestCase):
for shape in [(3, 2), (2, 3, 5), (2, 4, 0), (2, 3, 1, 4)]:
for noncontiguous in [True, False]:
- t = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontiguous)
+ t = make_tensor(
+ shape, device=device, dtype=dtype, noncontiguous=noncontiguous
+ )
for dim in list(range(t.ndim)) + [None]:
if dim is None:
indices = torch.argsort(t.view(-1))
@@ -1316,8 +1534,9 @@ class TestIndexing(TestCase):
indices = torch.argsort(t, dim=dim)
# dim of `t` and `indices` does not match
- with self.assertRaisesRegex(RuntimeError,
- "input and indices should have the same number of dimensions"):
+ with self.assertRaisesRegex(
+ RuntimeError, "input and indices should have the same number of dimensions"
+ ):
torch.take_along_dim(t, indices[0], dim=0)
# invalid `indices` dtype
@@ -1345,18 +1564,26 @@ class TestIndexing(TestCase):
t = make_tensor(shape, device=device, dtype=dtype)
indices = torch.argsort(t, dim=dim)
- with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Expected all tensors to be on the same device"
+ ):
torch.gather(t, 0, indices.cpu())
- with self.assertRaisesRegex(RuntimeError,
- r"Expected tensor to have .* but got tensor with .* torch.take_along_dim()"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected tensor to have .* but got tensor with .* torch.take_along_dim()",
+ ):
torch.take_along_dim(t, indices.cpu(), dim=0)
- with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Expected all tensors to be on the same device"
+ ):
torch.gather(t.cpu(), 0, indices)
- with self.assertRaisesRegex(RuntimeError,
- r"Expected tensor to have .* but got tensor with .* torch.take_along_dim()"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"Expected tensor to have .* but got tensor with .* torch.take_along_dim()",
+ ):
torch.take_along_dim(t.cpu(), indices, dim=0)
@onlyCUDA
@@ -1409,7 +1636,6 @@ class TestIndexing(TestCase):
self.assertRaises(IndexError, lambda: t[idx_max])
-
# The tests below are from NumPy test_indexing.py with some modifications to
# make them compatible with PyTorch. It's licensed under the BSD license below:
#
@@ -1444,9 +1670,10 @@ class TestIndexing(TestCase):
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
class NumpyTests(TestCase):
def test_index_no_floats(self, device):
- a = torch.tensor([[[5.]]], device=device)
+ a = torch.tensor([[[5.0]]], device=device)
self.assertRaises(IndexError, lambda: a[0.0])
self.assertRaises(IndexError, lambda: a[0, 0.0])
@@ -1494,9 +1721,7 @@ class NumpyTests(TestCase):
self.assertRaises(IndexError, lambda: a[b])
def test_ellipsis_index(self, device):
- a = tensor([[1, 2, 3],
- [4, 5, 6],
- [7, 8, 9]], device=device)
+ a = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device)
self.assertIsNot(a[...], a)
self.assertEqual(a[...], a)
# `a[...]` was `a` in numpy <1.9.
@@ -1519,9 +1744,7 @@ class NumpyTests(TestCase):
def test_single_int_index(self, device):
# Single integer index selects one row
- a = tensor([[1, 2, 3],
- [4, 5, 6],
- [7, 8, 9]], device=device)
+ a = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device)
self.assertEqual(a[0], [1, 2, 3])
self.assertEqual(a[-1], [7, 8, 9])
@@ -1533,9 +1756,7 @@ class NumpyTests(TestCase):
def test_single_bool_index(self, device):
# Single boolean index
- a = tensor([[1, 2, 3],
- [4, 5, 6],
- [7, 8, 9]], device=device)
+ a = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device)
self.assertEqual(a[True], a[None])
self.assertEqual(a[False], a[None][0:0])
@@ -1544,24 +1765,24 @@ class NumpyTests(TestCase):
arr = torch.ones((5, 4, 3), device=device)
index = tensor([True], device=device)
- self.assertRaisesRegex(IndexError, 'mask', lambda: arr[index])
+ self.assertRaisesRegex(IndexError, "mask", lambda: arr[index])
index = tensor([False] * 6, device=device)
- self.assertRaisesRegex(IndexError, 'mask', lambda: arr[index])
+ self.assertRaisesRegex(IndexError, "mask", lambda: arr[index])
index = torch.ByteTensor(4, 4).to(device).zero_()
- self.assertRaisesRegex(IndexError, 'mask', lambda: arr[index])
- self.assertRaisesRegex(IndexError, 'mask', lambda: arr[(slice(None), index)])
+ self.assertRaisesRegex(IndexError, "mask", lambda: arr[index])
+ self.assertRaisesRegex(IndexError, "mask", lambda: arr[(slice(None), index)])
def test_boolean_indexing_onedim(self, device):
# Indexing a 2-dimensional array with
# boolean array of length one
- a = tensor([[0., 0., 0.]], device=device)
+ a = tensor([[0.0, 0.0, 0.0]], device=device)
b = tensor([True], device=device)
self.assertEqual(a[b], a)
# boolean assignment
- a[b] = 1.
- self.assertEqual(a, tensor([[1., 1., 1.]], device=device))
+ a[b] = 1.0
+ self.assertEqual(a, tensor([[1.0, 1.0, 1.0]], device=device))
def test_boolean_assignment_value_mismatch(self, device):
# A boolean assignment should fail when the shape of the values
@@ -1571,34 +1792,33 @@ class NumpyTests(TestCase):
def f(a, v):
a[a > -1] = tensor(v).to(device)
- self.assertRaisesRegex(Exception, 'shape mismatch', f, a, [])
- self.assertRaisesRegex(Exception, 'shape mismatch', f, a, [1, 2, 3])
- self.assertRaisesRegex(Exception, 'shape mismatch', f, a[:1], [1, 2, 3])
+ self.assertRaisesRegex(Exception, "shape mismatch", f, a, [])
+ self.assertRaisesRegex(Exception, "shape mismatch", f, a, [1, 2, 3])
+ self.assertRaisesRegex(Exception, "shape mismatch", f, a[:1], [1, 2, 3])
def test_boolean_indexing_twodim(self, device):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
- a = tensor([[1, 2, 3],
- [4, 5, 6],
- [7, 8, 9]], device=device)
- b = tensor([[True, False, True],
- [False, True, False],
- [True, False, True]], device=device)
+ a = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device)
+ b = tensor(
+ [[True, False, True], [False, True, False], [True, False, True]],
+ device=device,
+ )
self.assertEqual(a[b], tensor([1, 3, 5, 7, 9], device=device))
self.assertEqual(a[b[1]], tensor([[4, 5, 6]], device=device))
self.assertEqual(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
- self.assertEqual(a, tensor([[0, 2, 0],
- [4, 0, 6],
- [0, 8, 0]], device=device))
+ self.assertEqual(a, tensor([[0, 2, 0], [4, 0, 6], [0, 8, 0]], device=device))
def test_boolean_indexing_weirdness(self, device):
# Weird boolean indexing things
a = torch.ones((2, 3, 4), device=device)
self.assertEqual((0, 2, 3, 4), a[False, True, ...].shape)
- self.assertEqual(torch.ones(1, 2, device=device), a[True, [0, 1], True, True, [1], [[2]]])
+ self.assertEqual(
+ torch.ones(1, 2, device=device), a[True, [0, 1], True, True, [1], [[2]]]
+ )
self.assertRaises(IndexError, lambda: a[False, [0, 1], ...])
def test_boolean_indexing_weirdness_tensors(self, device):
@@ -1607,7 +1827,9 @@ class NumpyTests(TestCase):
true = torch.tensor(True, device=device)
a = torch.ones((2, 3, 4), device=device)
self.assertEqual((0, 2, 3, 4), a[False, True, ...].shape)
- self.assertEqual(torch.ones(1, 2, device=device), a[true, [0, 1], true, true, [1], [[2]]])
+ self.assertEqual(
+ torch.ones(1, 2, device=device), a[true, [0, 1], true, true, [1], [[2]]]
+ )
self.assertRaises(IndexError, lambda: a[false, [0, 1], ...])
def test_boolean_indexing_alldims(self, device):
@@ -1619,9 +1841,7 @@ class NumpyTests(TestCase):
def test_boolean_list_indexing(self, device):
# Indexing a 2-dimensional array with
# boolean lists
- a = tensor([[1, 2, 3],
- [4, 5, 6],
- [7, 8, 9]], device=device)
+ a = tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device)
b = [True, False, False]
c = [True, True, False]
self.assertEqual(a[b], tensor([[1, 2, 3]], device=device))
@@ -1639,14 +1859,18 @@ class NumpyTests(TestCase):
def test_broaderrors_indexing(self, device):
a = torch.zeros(5, 5, device=device)
- self.assertRaisesRegex(IndexError, 'shape mismatch', a.__getitem__, ([0, 1], [0, 1, 2]))
- self.assertRaisesRegex(IndexError, 'shape mismatch', a.__setitem__, ([0, 1], [0, 1, 2]), 0)
+ self.assertRaisesRegex(
+ IndexError, "shape mismatch", a.__getitem__, ([0, 1], [0, 1, 2])
+ )
+ self.assertRaisesRegex(
+ IndexError, "shape mismatch", a.__setitem__, ([0, 1], [0, 1, 2]), 0
+ )
def test_trivial_fancy_out_of_bounds(self, device):
a = torch.zeros(5, device=device)
ind = torch.ones(20, dtype=torch.int64, device=device)
if a.is_cuda:
- raise unittest.SkipTest('CUDA asserts instead of raising an exception')
+ raise unittest.SkipTest("CUDA asserts instead of raising an exception")
ind[-1] = 10
self.assertRaises(IndexError, a.__getitem__, ind)
self.assertRaises(IndexError, a.__setitem__, ind, 0)
@@ -1658,13 +1882,13 @@ class NumpyTests(TestCase):
def test_index_is_larger(self, device):
# Simple case of fancy index broadcasting of the index.
a = torch.zeros((5, 5), device=device)
- a[[[0], [1], [2]], [0, 1, 2]] = tensor([2., 3., 4.], device=device)
+ a[[[0], [1], [2]], [0, 1, 2]] = tensor([2.0, 3.0, 4.0], device=device)
- self.assertTrue((a[:3, :3] == tensor([2., 3., 4.], device=device)).all())
+ self.assertTrue((a[:3, :3] == tensor([2.0, 3.0, 4.0], device=device)).all())
def test_broadcast_subspace(self, device):
a = torch.zeros((100, 100), device=device)
- v = torch.arange(0., 100, device=device)[:, None]
+ v = torch.arange(0.0, 100, device=device)[:, None]
b = torch.arange(99, -1, -1, device=device).long()
a[b] = v
expected = b.float().unsqueeze(1).expand(100, 100)
@@ -1679,8 +1903,9 @@ class NumpyTests(TestCase):
torch.diagonal(kernel2).copy_(torch.square(col_max.view(4)))
self.assertEqual(kernel, kernel2)
-instantiate_device_type_tests(TestIndexing, globals(), except_for='meta')
-instantiate_device_type_tests(NumpyTests, globals(), except_for='meta')
-if __name__ == '__main__':
+instantiate_device_type_tests(TestIndexing, globals(), except_for="meta")
+instantiate_device_type_tests(NumpyTests, globals(), except_for="meta")
+
+if __name__ == "__main__":
run_tests()
diff --git a/test/test_view_ops.py b/test/test_view_ops.py
index 153b65b203..cb019057ae 100644
--- a/test/test_view_ops.py
+++ b/test/test_view_ops.py
@@ -1,23 +1,41 @@
# Owner(s): ["module: tests"]
-import torch
-import numpy as np
+import random
import unittest
-from itertools import product, permutations, combinations
from functools import partial
-import random
+from itertools import combinations, permutations, product
+
+import numpy as np
+import torch
from torch.testing import make_tensor
-from torch.testing._internal.common_utils import (
- IS_FBCODE, TestCase, run_tests, suppress_warnings, gradcheck, gradgradcheck,
- numpy_to_torch_dtype_dict, skipIfTorchDynamo
+from torch.testing._internal.common_device_type import (
+ dtypes,
+ instantiate_device_type_tests,
+ onlyCPU,
+ onlyNativeDeviceTypes,
+ skipLazy,
+ skipMeta,
+ skipXLA,
)
-from torch.testing._internal.common_device_type import \
- (instantiate_device_type_tests, onlyCPU, dtypes, onlyNativeDeviceTypes, skipLazy, skipMeta, skipXLA)
from torch.testing._internal.common_dtype import (
- all_types_and_complex_and, complex_types, all_types_and, floating_and_complex_types_and,
+ all_types_and,
+ all_types_and_complex_and,
+ complex_types,
+ floating_and_complex_types_and,
+)
+from torch.testing._internal.common_utils import (
+ gradcheck,
+ gradgradcheck,
+ IS_FBCODE,
+ numpy_to_torch_dtype_dict,
+ run_tests,
+ skipIfTorchDynamo,
+ suppress_warnings,
+ TestCase,
)
+
# TODO: replace this with make_tensor() in common_utils.py
def _generate_input(shape, dtype, device, with_extremal):
if shape == ():
@@ -29,17 +47,19 @@ def _generate_input(shape, dtype, device, with_extremal):
x = torch.randn(*shape, device=device) * random.randint(30, 100)
x = x.to(torch.bfloat16)
else:
- x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)
+ x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(
+ 30, 100
+ )
x[torch.randn(*shape) > 0.5] = 0
if with_extremal and dtype.is_floating_point:
# Use extremal values
- x[torch.randn(*shape) > 0.5] = float('nan')
- x[torch.randn(*shape) > 0.5] = float('inf')
- x[torch.randn(*shape) > 0.5] = float('-inf')
+ x[torch.randn(*shape) > 0.5] = float("nan")
+ x[torch.randn(*shape) > 0.5] = float("inf")
+ x[torch.randn(*shape) > 0.5] = float("-inf")
elif with_extremal and dtype.is_complex:
- x[torch.randn(*shape) > 0.5] = complex('nan')
- x[torch.randn(*shape) > 0.5] = complex('inf')
- x[torch.randn(*shape) > 0.5] = complex('-inf')
+ x[torch.randn(*shape) > 0.5] = complex("nan")
+ x[torch.randn(*shape) > 0.5] = complex("inf")
+ x[torch.randn(*shape) > 0.5] = complex("-inf")
elif dtype == torch.bool:
x = torch.zeros(shape, dtype=dtype, device=device)
x[torch.randn(*shape) > 0.5] = True
@@ -48,6 +68,7 @@ def _generate_input(shape, dtype, device, with_extremal):
return x
+
# TODO: replace this with make_tensor() in common_utils.py
def _rand_shape(dim, min_size, max_size):
shape = []
@@ -55,13 +76,15 @@ def _rand_shape(dim, min_size, max_size):
shape.append(random.randint(min_size, max_size))
return tuple(shape)
+
# TODO: refactor tests to avoid this function
# Converts half/bfloat16 dtype to float when device is cpu
def _convert_t(dtype, device):
- if device == 'cpu' and dtype in {torch.half, torch.bfloat16}:
+ if device == "cpu" and dtype in {torch.half, torch.bfloat16}:
return torch.float
return dtype
+
# TODO: replace this with make_tensor() in common_utils.py
# Returns a tensor of the requested shape, dtype, and device
# Requesting a half CPU tensor returns a float CPU tensor with
@@ -80,28 +103,31 @@ def _make_tensor(shape, dtype, device, fill_ones=False) -> torch.Tensor:
return t.to(_convert_t(dtype, device))
# Populates the CPU tensor with floats representable as half/bfloat16
- if dtype == torch.half and device == 'cpu':
+ if dtype == torch.half and device == "cpu":
return torch.randn(*shape, dtype=torch.float, device=device).half().float()
- if dtype == torch.bfloat16 and device == 'cpu':
+ if dtype == torch.bfloat16 and device == "cpu":
return torch.randn(*shape, dtype=torch.float, device=device).bfloat16().float()
# Default: returns a tensor with random float values
return torch.randn(shape, dtype=dtype, device=device).to(dtype=dtype)
+
# Tests ops and indexing to ensure they return views (and new tensors) as
# appropriate.
class TestViewOps(TestCase):
exact_dtype = True
def is_view_of(self, base, other):
- if (not other._is_view() or
- other is base or
- other._base is not base or
- base.device != other.device):
+ if (
+ not other._is_view()
+ or other is base
+ or other._base is not base
+ or base.device != other.device
+ ):
return False
# Note: only validates storage on native device types
# because some accelerators, like XLA, do not expose storage
- if base.device.type == 'cpu' or base.device.type == 'cuda':
+ if base.device.type == "cpu" or base.device.type == "cuda":
if base.untyped_storage().data_ptr() != other.untyped_storage().data_ptr():
return False
@@ -109,7 +135,7 @@ class TestViewOps(TestCase):
# Returns true if v1 and v2 are views of the same base
def is_view_of_same_base(self, v1, v2):
- if (not v1._is_view() or v1 is v2):
+ if not v1._is_view() or v1 is v2:
return False
return self.is_view_of(v1._base, v2)
@@ -130,15 +156,23 @@ class TestViewOps(TestCase):
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half, torch.bool))
def test_view_dtype_new(self, device, dtype):
- dtypes = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
+ dtypes = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}
del dtypes[torch.bool]
def generate_inputs():
yield make_tensor((4, 4, 64), dtype=dtype, device=device, low=-5, high=5)
- yield make_tensor((4, 4, 64), dtype=dtype, device=device, low=-5, high=5).permute(1, 0, 2)
- yield make_tensor((4, 64, 4), dtype=dtype, device=device, low=-5, high=5).permute(2, 0, 1)
- yield make_tensor((1, 5, 1), dtype=dtype, device=device, low=-5, high=5).expand(5, 5, 64)
- yield make_tensor((2, 5, 256), dtype=dtype, device=device, low=-5, high=5)[1::2, 1:, ::2]
+ yield make_tensor(
+ (4, 4, 64), dtype=dtype, device=device, low=-5, high=5
+ ).permute(1, 0, 2)
+ yield make_tensor(
+ (4, 64, 4), dtype=dtype, device=device, low=-5, high=5
+ ).permute(2, 0, 1)
+ yield make_tensor(
+ (1, 5, 1), dtype=dtype, device=device, low=-5, high=5
+ ).expand(5, 5, 64)
+ yield make_tensor((2, 5, 256), dtype=dtype, device=device, low=-5, high=5)[
+ 1::2, 1:, ::2
+ ]
yield make_tensor((0, 5, 64), dtype=dtype, device=device, low=-5, high=5)
yield make_tensor((), dtype=dtype, device=device, low=-5, high=5)
@@ -174,15 +208,21 @@ class TestViewOps(TestCase):
a_np_contiguous = a.cpu().contiguous().numpy()
for view_dtype, np_view_dtype in dtypes.items():
- equal_element_size = torch._utils._element_size(dtype) == torch._utils._element_size(view_dtype)
+ equal_element_size = torch._utils._element_size(
+ dtype
+ ) == torch._utils._element_size(view_dtype)
if not equal_element_size and a.dim() == 0:
- with self.assertRaisesRegex(RuntimeError, r"self.dim\(\) cannot be 0"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"self.dim\(\) cannot be 0"
+ ):
a.view(view_dtype)
continue
if not equal_element_size and a.stride(-1) != 1:
- with self.assertRaisesRegex(RuntimeError, r"self.stride\(-1\) must be 1"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"self.stride\(-1\) must be 1"
+ ):
a.view(view_dtype)
continue
@@ -190,7 +230,9 @@ class TestViewOps(TestCase):
self.assertEqual(a_view.dtype, view_dtype)
self.assertEqual(a.data_ptr(), a_view.data_ptr())
- expected_size, expected_stride = calc_expected_size_and_stride(a, view_dtype)
+ expected_size, expected_stride = calc_expected_size_and_stride(
+ a, view_dtype
+ )
self.assertEqual(a_view.size(), expected_size)
self.assertEqual(a_view.stride(), expected_stride)
@@ -210,8 +252,17 @@ class TestViewOps(TestCase):
# because view(dtype) does not support backward yet
# TODO: Remove this when autograd support is added
if dtype.is_floating_point or dtype.is_complex:
- for view_dtype in floating_and_complex_types_and(torch.half, torch.bfloat16):
- t = make_tensor((5, 5, 64), dtype=dtype, device=device, low=-5, high=5, requires_grad=True)
+ for view_dtype in floating_and_complex_types_and(
+ torch.half, torch.bfloat16
+ ):
+ t = make_tensor(
+ (5, 5, 64),
+ dtype=dtype,
+ device=device,
+ low=-5,
+ high=5,
+ requires_grad=True,
+ )
self.assertFalse(t.view(view_dtype).requires_grad)
# Test the extra error checks that happen when the view dtype
@@ -221,28 +272,35 @@ class TestViewOps(TestCase):
def test_view_dtype_upsize_errors(self, device, dtype):
dtype_size = torch._utils._element_size(dtype)
- for view_dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
+ for view_dtype in all_types_and_complex_and(
+ torch.half, torch.bfloat16, torch.bool
+ ):
view_dtype_size = torch._utils._element_size(view_dtype)
if view_dtype_size <= dtype_size:
continue
size_ratio = view_dtype_size // dtype_size
- a = make_tensor((4, 4, size_ratio + 1), dtype=dtype, device=device, low=-5, high=5)
+ a = make_tensor(
+ (4, 4, size_ratio + 1), dtype=dtype, device=device, low=-5, high=5
+ )
with self.assertRaisesRegex(
- RuntimeError,
- rf"self.size\(-1\) must be divisible by {size_ratio}"):
+ RuntimeError, rf"self.size\(-1\) must be divisible by {size_ratio}"
+ ):
a.view(view_dtype)
with self.assertRaisesRegex(
- RuntimeError,
- rf"self.storage_offset\(\) must be divisible by {size_ratio}"):
+ RuntimeError,
+ rf"self.storage_offset\(\) must be divisible by {size_ratio}",
+ ):
a[:, :, 1:].view(view_dtype)
- a = make_tensor((4, 4, size_ratio), dtype=dtype, device=device, low=-5, high=5)
+ a = make_tensor(
+ (4, 4, size_ratio), dtype=dtype, device=device, low=-5, high=5
+ )
a = a.as_strided((4, 4, size_ratio), (size_ratio, 1, 1))
with self.assertRaisesRegex(
- RuntimeError,
- rf"self.stride\(1\) must be divisible by {size_ratio}"):
+ RuntimeError, rf"self.stride\(1\) must be divisible by {size_ratio}"
+ ):
a.view(view_dtype)
@onlyNativeDeviceTypes
@@ -255,14 +313,18 @@ class TestViewOps(TestCase):
if input.size()[-1] != 2:
self.assertRaisesRegex(
- RuntimeError, "Tensor must have a last dimension of size 2",
- lambda: torch.view_as_complex(input))
+ RuntimeError,
+ "Tensor must have a last dimension of size 2",
+ lambda: torch.view_as_complex(input),
+ )
return
if input.stride()[-1] != 1:
self.assertRaisesRegex(
- RuntimeError, "Tensor must have a last dimension with stride 1",
- lambda: torch.view_as_complex(input))
+ RuntimeError,
+ "Tensor must have a last dimension with stride 1",
+ lambda: torch.view_as_complex(input),
+ )
return
res = torch.view_as_complex(input)
@@ -276,25 +338,30 @@ class TestViewOps(TestCase):
# RuntimeError since in this case the last dim of input would not have stride 1
fn(contiguous_input=False, dim0=1, dim1=2)
-
# RuntimeError since in this case the stride of non-last dim of input would not be of size 2
x = torch.randn(3, 3, device=device)
t = torch.as_strided(x, (2, 2), (1, 1))
self.assertRaisesRegex(
- RuntimeError, "Tensor must have a stride divisible by 2 for all but last dimension",
- lambda: torch.view_as_complex(t))
+ RuntimeError,
+ "Tensor must have a stride divisible by 2 for all but last dimension",
+ lambda: torch.view_as_complex(t),
+ )
# tensor with zero elements
x = torch.tensor([], device=device) # torch.Size([0])
self.assertRaisesRegex(
- RuntimeError, "Tensor must have a last dimension of size 2",
- lambda: torch.view_as_complex(x))
+ RuntimeError,
+ "Tensor must have a last dimension of size 2",
+ lambda: torch.view_as_complex(x),
+ )
# zero dimension tensor
z = torch.tensor(2.0)
self.assertRaisesRegex(
- RuntimeError, "Input tensor must have one or more dimensions",
- lambda: torch.view_as_complex(z))
+ RuntimeError,
+ "Input tensor must have one or more dimensions",
+ lambda: torch.view_as_complex(z),
+ )
y = x.reshape(0, 2) # torch.Size([0, 2])
res = torch.view_as_complex(y)
@@ -410,13 +477,20 @@ class TestViewOps(TestCase):
@onlyNativeDeviceTypes
@dtypes(*complex_types())
def test_conj_imag_view(self, device, dtype) -> None:
- t = _make_tensor((4, 5,), dtype, device)
+ t = _make_tensor(
+ (
+ 4,
+ 5,
+ ),
+ dtype,
+ device,
+ )
t_numpy_conj = torch.from_numpy(t.cpu().numpy().conj()).to(device=device)
v = t.conj()
self.assertTrue(self.is_view_of(t, v))
self.assertEqual(v, t_numpy_conj)
- if (t.is_complex()):
+ if t.is_complex():
v_imag = v.imag
self.assertTrue(self.is_view_of(t, v_imag))
self.assertEqual(v_imag, t_numpy_conj.imag)
@@ -424,7 +498,14 @@ class TestViewOps(TestCase):
@onlyNativeDeviceTypes
def test_conj_view_with_shared_memory(self, device) -> None:
- a = _make_tensor((4, 5,), torch.cfloat, device)
+ a = _make_tensor(
+ (
+ 4,
+ 5,
+ ),
+ torch.cfloat,
+ device,
+ )
b = a.conj()
c = a.conj()
@@ -433,7 +514,12 @@ class TestViewOps(TestCase):
self.assertEqual(torch.add(b, c), b.add_(c))
@onlyNativeDeviceTypes
- @dtypes(*product(complex_types(), all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
+ @dtypes(
+ *product(
+ complex_types(),
+ all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
+ )
+ )
@suppress_warnings
def test_set_real_imag(self, device, dtypes):
x = torch.randn(10, dtype=dtypes[0], device=device)
@@ -499,9 +585,10 @@ class TestViewOps(TestCase):
stacked = torch.randn(3, 10, 10, requires_grad=True)
outs = stacked.unbind()
gi = grad.unbind()[i]
- g, = torch.autograd.grad(outs[i], stacked, gi)
- g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
- for j in range(3)], dim=0)
+ (g,) = torch.autograd.grad(outs[i], stacked, gi)
+ g_expected = torch.stack(
+ [gi if j == i else torch.zeros_like(gi) for j in range(3)], dim=0
+ )
self.assertEqual(g, g_expected)
# Check with gradcheck
stacked = torch.randn(3, 10, 10, dtype=torch.double, requires_grad=True)
@@ -789,8 +876,9 @@ class TestViewOps(TestCase):
self.assertTrue(self.is_view_of_same_base(t, v))
# stride[i] = stride[i + 1] * size[i + 1] is satisfied for 3 groups:
- t = torch.ones(720, device=device) \
- .as_strided((2, 3, 2, 3, 5, 4), (6, 2, 15, 5, 1, 0))
+ t = torch.ones(720, device=device).as_strided(
+ (2, 3, 2, 3, 5, 4), (6, 2, 15, 5, 1, 0)
+ )
# [--1--|---2---|-3-] [--1--|----2---|-3-]
v1 = t.flatten(0, 1)
v2 = v1.flatten(1, 3)
@@ -811,6 +899,7 @@ class TestViewOps(TestCase):
nv[idx_nv] = 0
if device != "meta":
self.assertNotEqual(t[idx_t], nv[idx_nv])
+
t = torch.ones(2, 3, 2, 3, device=device).transpose(2, 3)
nv = t.flatten(1, 3)
assert_is_nonview(t, nv)
@@ -858,7 +947,9 @@ class TestViewOps(TestCase):
nv[1, 1] = 0
self.assertNotEqual(t[2, 2], nv[1, 1])
- @unittest.skipIf(IS_FBCODE, "TorchScript backend not yet supported in FBCODE/OVRSOURCE builds")
+ @unittest.skipIf(
+ IS_FBCODE, "TorchScript backend not yet supported in FBCODE/OVRSOURCE builds"
+ )
def test_advanced_indexing_assignment(self, device):
t = torch.ones(3, 3, device=device)
rows = torch.tensor([[0, 0], [2, 2]], device=device)
@@ -953,9 +1044,9 @@ class TestViewOps(TestCase):
self.assertEqual(expected1, out1)
self.assertEqual(expected2, out2)
+
class TestOldViewOps(TestCase):
def test_ravel(self, device):
-
def _test_ravel(tensors, size, nc=False):
for src in tensors:
# Continuous Tensor -> View
@@ -996,34 +1087,32 @@ class TestOldViewOps(TestCase):
self.assertTrue(flat2.is_contiguous())
# Test both float tensor and quantized tensor
- tensors = [torch.randn(5, 5, 5, 5, device=device),
- torch._empty_affine_quantized([5, 5, 5, 5],
- scale=2,
- zero_point=3,
- dtype=torch.quint8,
- device=device)]
+ tensors = [
+ torch.randn(5, 5, 5, 5, device=device),
+ torch._empty_affine_quantized(
+ [5, 5, 5, 5], scale=2, zero_point=3, dtype=torch.quint8, device=device
+ ),
+ ]
_test_ravel(tensors, 625)
- tensors = [torch.randn(0, 2, 3, device=device),
- torch.randn(3, 0, 2, device=device),
- torch._empty_affine_quantized([0, 2, 3],
- scale=2,
- zero_point=3,
- dtype=torch.quint8,
- device=device),
- torch._empty_affine_quantized([3, 0, 2],
- scale=2,
- zero_point=3,
- dtype=torch.quint8,
- device=device)]
+ tensors = [
+ torch.randn(0, 2, 3, device=device),
+ torch.randn(3, 0, 2, device=device),
+ torch._empty_affine_quantized(
+ [0, 2, 3], scale=2, zero_point=3, dtype=torch.quint8, device=device
+ ),
+ torch._empty_affine_quantized(
+ [3, 0, 2], scale=2, zero_point=3, dtype=torch.quint8, device=device
+ ),
+ ]
_test_ravel(tensors, 0)
- tensors = [torch.randn(5, 5, device=device),
- torch._empty_affine_quantized([5, 5],
- scale=2,
- zero_point=3,
- dtype=torch.quint8,
- device=device)]
+ tensors = [
+ torch.randn(5, 5, device=device),
+ torch._empty_affine_quantized(
+ [5, 5], scale=2, zero_point=3, dtype=torch.quint8, device=device
+ ),
+ ]
_test_ravel(tensors, 25, True)
# TODO: this should be refactored into the view ops test suite
@@ -1055,7 +1144,9 @@ class TestOldViewOps(TestCase):
# test non-contiguous
noncontig = torch.randn(5, 2, 1, 3, device=device)[:, 0]
self.assertFalse(noncontig.is_contiguous())
- self.assertEqual(noncontig.expand(2, 5, 4, 3), noncontig.contiguous().repeat(2, 1, 4, 1))
+ self.assertEqual(
+ noncontig.expand(2, 5, 4, 3), noncontig.contiguous().repeat(2, 1, 4, 1)
+ )
# make sure it's compatible with unsqueeze
expanded = tensor2.expand(1, 1, 5)
@@ -1068,7 +1159,9 @@ class TestOldViewOps(TestCase):
self.assertRaises(RuntimeError, lambda: tensor2.expand(-1, -1))
# test expanding empty to empty
- self.assertEqual(torch.zeros(0, device=device).expand((0,)), torch.zeros(0, device=device))
+ self.assertEqual(
+ torch.zeros(0, device=device).expand((0,)), torch.zeros(0, device=device)
+ )
# TODO: this should be refactored into the view ops test suite
def test_view_empty(self, device):
@@ -1107,7 +1200,9 @@ class TestOldViewOps(TestCase):
x = torch.randn(3, 3, device=device)
self.assertEqual(x.data_ptr(), x.reshape_as(torch.rand(9)).data_ptr())
self.assertEqual(x.data_ptr(), x.reshape_as(torch.rand(1, 9, 1)).data_ptr())
- self.assertRaises(RuntimeError, lambda: x.reshape_as(torch.rand(10, device=device)))
+ self.assertRaises(
+ RuntimeError, lambda: x.reshape_as(torch.rand(10, device=device))
+ )
def test_flatten(self, device):
# Test that flatten returns 1-dim tensor when given a 0-dim tensor
@@ -1125,12 +1220,12 @@ class TestOldViewOps(TestCase):
self.assertEqual(flat0.shape, flat1.shape)
# Test both float tensor and quantized tensor
- tensors = [torch.randn(5, 5, 5, 5, device=device),
- torch._empty_affine_quantized([5, 5, 5, 5],
- scale=2,
- zero_point=3,
- dtype=torch.quint8,
- device=device)]
+ tensors = [
+ torch.randn(5, 5, 5, 5, device=device),
+ torch._empty_affine_quantized(
+ [5, 5, 5, 5], scale=2, zero_point=3, dtype=torch.quint8, device=device
+ ),
+ ]
for src in tensors:
flat = src.flatten(0, -1)
self.assertEqual(flat.shape, torch.Size([625]))
@@ -1160,11 +1255,13 @@ class TestOldViewOps(TestCase):
self.assertEqual(flat, src)
# out of bounds index
- with self.assertRaisesRegex(IndexError, 'Dimension out of range'):
+ with self.assertRaisesRegex(IndexError, "Dimension out of range"):
src.flatten(5, 10)
# invalid start and end
- with self.assertRaisesRegex(RuntimeError, 'start_dim cannot come after end_dim'):
+ with self.assertRaisesRegex(
+ RuntimeError, "start_dim cannot come after end_dim"
+ ):
src.flatten(2, 0)
# TODO: update to work on CUDA, too
@@ -1176,7 +1273,9 @@ class TestOldViewOps(TestCase):
self.assertEqual(x.narrow(0, 1, 1), torch.tensor([[3, 4, 5]]))
self.assertEqual(x.narrow(0, -1, 1), torch.tensor([[6, 7, 8]]))
self.assertEqual(x.narrow(0, -2, 2), torch.tensor([[3, 4, 5], [6, 7, 8]]))
- self.assertEqual(x.narrow(0, -3, 3), torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
+ self.assertEqual(
+ x.narrow(0, -3, 3), torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
+ )
self.assertEqual(x.narrow(-1, -1, 1), torch.tensor([[2], [5], [8]]))
self.assertEqual(x.narrow(-2, -1, 1), torch.tensor([[6, 7, 8]]))
@@ -1186,7 +1285,7 @@ class TestOldViewOps(TestCase):
x = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
self.assertEqual(x.narrow(0, torch.tensor(0), 1), torch.tensor([[0, 1, 2]]))
with self.assertRaises(Exception):
- x.narrow(0, torch.tensor(0.), 1)
+ x.narrow(0, torch.tensor(0.0), 1)
with self.assertRaises(Exception):
x.narrow(0, torch.tensor([0]), 1)
with self.assertRaises(Exception):
@@ -1215,10 +1314,14 @@ class TestOldViewOps(TestCase):
# Test 3D tensor
x = torch.rand((2, 2, 2))
- with self.assertRaisesRegex(RuntimeError, 'expects a tensor with <= 2 dimensions, but self is 3D'):
+ with self.assertRaisesRegex(
+ RuntimeError, "expects a tensor with <= 2 dimensions, but self is 3D"
+ ):
x.t()
x = x.to_sparse()
- with self.assertRaisesRegex(RuntimeError, 'expects a tensor with <= 2 sparse and 0 dense dimensions'):
+ with self.assertRaisesRegex(
+ RuntimeError, "expects a tensor with <= 2 sparse and 0 dense dimensions"
+ ):
x.t()
@onlyCPU
@@ -1231,19 +1334,23 @@ class TestOldViewOps(TestCase):
start = 0
for target_size, split in zip(target_sizes, splits):
self.assertEqual(split.size(), target_size)
- self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0)
+ self.assertEqual(
+ tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0
+ )
start = start + target_size[dim]
# Variable sections split
tensor = torch.randn(20, 10)
dim = 0
split_sizes = [5, 5, 10]
- target_sizes = ([[5, 10], [5, 10], [10, 10]])
+ target_sizes = [[5, 10], [5, 10], [10, 10]]
splits = tensor.split(split_sizes, dim)
start = 0
for target_size, split in zip(target_sizes, splits):
self.assertEqual(split.size(), target_size)
- self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0)
+ self.assertEqual(
+ tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0
+ )
start = start + target_size[dim]
split_sizes = [2, 2, 6]
@@ -1253,7 +1360,9 @@ class TestOldViewOps(TestCase):
start = 0
for target_size, split in zip(target_sizes, splits):
self.assertEqual(split.size(), target_size)
- self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0)
+ self.assertEqual(
+ tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0
+ )
start = start + target_size[dim]
@onlyCPU
@@ -1266,12 +1375,13 @@ class TestOldViewOps(TestCase):
start = 0
for target_size, split in zip(target_sizes, splits):
self.assertEqual(split.size(), target_size)
- self.assertEqual(tensor.narrow(dim, start, target_size[dim]), split,
- atol=0, rtol=0)
+ self.assertEqual(
+ tensor.narrow(dim, start, target_size[dim]), split, atol=0, rtol=0
+ )
start = start + target_size[dim]
# Invalid chunk sizes
- error_regex = 'chunk expects.*greater than 0'
+ error_regex = "chunk expects.*greater than 0"
with self.assertRaisesRegex(RuntimeError, error_regex):
tensor.chunk(0)
with self.assertRaisesRegex(RuntimeError, error_regex):
@@ -1312,7 +1422,9 @@ class TestOldViewOps(TestCase):
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
def test_transposes(self, device, dtype):
for op in ("T", "H", "mT", "mH", "adjoint"):
- shapes = ((2, 3), (2, 3, 4)) if op[0] == "m" or op == "adjoint" else ((2, 3),)
+ shapes = (
+ ((2, 3), (2, 3, 4)) if op[0] == "m" or op == "adjoint" else ((2, 3),)
+ )
for shape in shapes:
a = make_tensor(shape, device=device, dtype=dtype)
t1 = getattr(a, op)
@@ -1357,7 +1469,9 @@ class TestOldViewOps(TestCase):
def test_memory_format_resize_as(self, device):
def test_helper(shape, memory_format, device):
- xc = torch.randn(shape, device=device).contiguous(memory_format=memory_format)
+ xc = torch.randn(shape, device=device).contiguous(
+ memory_format=memory_format
+ )
flat = torch.randn(xc.numel(), device=device)
flat.resize_as_(xc, memory_format=torch.preserve_format)
self.assertTrue(flat.is_contiguous(memory_format=memory_format))
@@ -1372,7 +1486,9 @@ class TestOldViewOps(TestCase):
self.assertTrue(flat.is_contiguous(memory_format=memory_format))
test_helper((10, 3, 32, 32), 10 * 3 * 32 * 32, torch.channels_last, device)
- test_helper((3, 10, 3, 32, 32), 3 * 10 * 3 * 32 * 32, torch.channels_last_3d, device)
+ test_helper(
+ (3, 10, 3, 32, 32), 3 * 10 * 3 * 32 * 32, torch.channels_last_3d, device
+ )
@onlyNativeDeviceTypes
@dtypes(torch.int64, torch.float, torch.complex128)
@@ -1407,14 +1523,22 @@ class TestOldViewOps(TestCase):
dst_dim = dst_dim - nd
partial_map = {
- torch.swapdims: partial(torch.swapdims, dim0=src_dim, dim1=dst_dim),
- torch.swapaxes: partial(torch.swapaxes, axis0=src_dim, axis1=dst_dim),
- torch.transpose: partial(torch.transpose, dim0=src_dim, dim1=dst_dim),
+ torch.swapdims: partial(
+ torch.swapdims, dim0=src_dim, dim1=dst_dim
+ ),
+ torch.swapaxes: partial(
+ torch.swapaxes, axis0=src_dim, axis1=dst_dim
+ ),
+ torch.transpose: partial(
+ torch.transpose, dim0=src_dim, dim1=dst_dim
+ ),
}
torch_fn = partial_map[fn]
np_fn = partial(np.swapaxes, axis1=src_dim, axis2=dst_dim)
- self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
+ self.compare_with_numpy(
+ torch_fn, np_fn, x, device=None, dtype=None
+ )
# Move dim to same position
x = torch.randn(2, 3, 5, 7, 11)
@@ -1437,11 +1561,15 @@ class TestOldViewOps(TestCase):
x = _generate_input(shape, dtype, device, with_extremal)
if contiguous:
x = x.T
- self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
+ self.compare_with_numpy(
+ torch_fn, np_fn, x, device=None, dtype=None
+ )
# Compare sequence input
torch_sequence_x = (x,) * random.randint(3, 10)
- np_sequence_x = tuple(np.array(x.detach().cpu().numpy()) for x in torch_sequence_x)
+ np_sequence_x = tuple(
+ np.array(x.detach().cpu().numpy()) for x in torch_sequence_x
+ )
torch_res = torch_fn(*torch_sequence_x)
np_res = np_fn(*np_sequence_x)
@@ -1484,7 +1612,6 @@ class TestOldViewOps(TestCase):
self._test_atleast(device, torch.atleast_2d)
self._test_atleast(device, torch.atleast_3d)
-
@onlyCPU
@dtypes(torch.float)
def test_broadcast_tensors(self, device, dtype):
@@ -1498,7 +1625,6 @@ class TestOldViewOps(TestCase):
self.assertTrue(y1.size() == expected_size)
self.assertTrue(y2.size() == expected_size)
-
@onlyCPU
def test_broadcast_shapes(self, device):
examples = [(), (1,), (2,), (1, 1), (3, 1), (3, 2), (4, 1, 1), (4, 3, 2)]
@@ -1520,30 +1646,48 @@ class TestOldViewOps(TestCase):
res2 = torch.broadcast_tensors(*map(torch.empty, integral_inputs))[0].shape
self.assertEqual(res1, res2)
- inputs_with_neg_vals = [[1, 1, -12], [-1, 1], [-11, ]]
+ inputs_with_neg_vals = [
+ [1, 1, -12],
+ [-1, 1],
+ [
+ -11,
+ ],
+ ]
for integral_inputs_with_neg_vals in inputs_with_neg_vals:
- with self.assertRaisesRegex(RuntimeError, "Trying to create tensor with negative dimension"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Trying to create tensor with negative dimension"
+ ):
torch.broadcast_shapes(*integral_inputs_with_neg_vals)
integral_inputs_error_case = [(3, 5), (2, 4, 1)]
for error_input in integral_inputs_error_case:
- with self.assertRaisesRegex(RuntimeError, "Shape mismatch: objects cannot be broadcast to a single shape"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Shape mismatch: objects cannot be broadcast to a single shape",
+ ):
torch.broadcast_shapes(*error_input)
negative_inputs = [(-1,), (1, -12), (4, -11), (-4, 1), (1, 1, -2)]
for s0 in negative_inputs:
- with self.assertRaisesRegex(RuntimeError, "Trying to create tensor with negative dimension"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Trying to create tensor with negative dimension"
+ ):
torch.broadcast_shapes(s0)
for s1 in negative_inputs:
- with self.assertRaisesRegex(RuntimeError, "Trying to create tensor with negative dimension"):
+ with self.assertRaisesRegex(
+ RuntimeError, "Trying to create tensor with negative dimension"
+ ):
torch.broadcast_shapes(s0, s1)
float_inputs_error_case = [(1.1, 2.0), (1.1, 1.0)]
for error_case in float_inputs_error_case:
for float_input in error_case:
- with self.assertRaisesRegex(RuntimeError, "Input shapes "
- "should be of type ints, a tuple of ints, or a list of ints"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ "Input shapes "
+ "should be of type ints, a tuple of ints, or a list of ints",
+ ):
torch.broadcast_shapes(float_input)
diff_input_types = [(1, (5,)), (3, (1,)), (1, (3, 4))]
@@ -1564,9 +1708,7 @@ class TestOldViewOps(TestCase):
return False
return True
- sizes = (
- (), (1,), (2,), (1, 1), (3, 1), (3, 2), (4, 1, 1), (4, 3, 2)
- )
+ sizes = ((), (1,), (2,), (1, 1), (3, 1), (3, 2), (4, 1, 1), (4, 3, 2))
for s0, s1 in combinations(sizes, r=2):
t = make_tensor(s0, dtype=dtype, device=device, low=-9, high=9)
t_np = t.cpu().numpy()
@@ -1576,9 +1718,11 @@ class TestOldViewOps(TestCase):
np_res = np.broadcast_to(t_np, s1)
self.assertEqual(res, np_res)
else:
- with self.assertRaisesRegex(RuntimeError,
- r"The expanded size of the tensor \(\d\) "
- r"must match the existing size \(\d\)"):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"The expanded size of the tensor \(\d\) "
+ r"must match the existing size \(\d\)",
+ ):
torch.broadcast_to(t, s1)
def test_view(self, device):
@@ -1602,10 +1746,14 @@ class TestOldViewOps(TestCase):
self.assertEqual(empty.view(-1).size(), torch.Size([0]))
self.assertEqual(empty.view(10, 3, -1).size(), torch.Size([10, 3, 0]))
- with self.assertRaisesRegex(RuntimeError, r"because the unspecified dimension size -1 can be any value"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"because the unspecified dimension size -1 can be any value"
+ ):
empty.view(-1, 0)
- with self.assertRaisesRegex(RuntimeError, r"because the unspecified dimension size -1 can be any value"):
+ with self.assertRaisesRegex(
+ RuntimeError, r"because the unspecified dimension size -1 can be any value"
+ ):
empty.view(3, 0, -1, 0)
self.assertRaises(RuntimeError, lambda: tensor.view(15, 0))
@@ -1614,7 +1762,11 @@ class TestOldViewOps(TestCase):
# test view when tensor is not contiguous in every dimension, but only
# contiguous dimensions are touched.
- tensor = torch.rand(4, 2, 5, 1, 6, 2, 9, 3, device=device).transpose(-1, 2).transpose(-2, 3)
+ tensor = (
+ torch.rand(4, 2, 5, 1, 6, 2, 9, 3, device=device)
+ .transpose(-1, 2)
+ .transpose(-2, 3)
+ )
# size: [ 4, 2, 3, 9, 6, 2, 1, 5]
# stride: [3840, 1620, 1, 3, 54, 27, 324, 324]
# contiguous dim chunks: [__________, ____, ____, __________, ____, ____]
@@ -1648,7 +1800,9 @@ class TestOldViewOps(TestCase):
self.assertRaises(RuntimeError, lambda: tensor.view(8, 3, 54, 2, 1, 5))
# view with stride 0 dims
- tensor = torch.empty(1, 1, device=device).expand(3, 4) # all dims are contiguous
+ tensor = torch.empty(1, 1, device=device).expand(
+ 3, 4
+ ) # all dims are contiguous
contig_tensor = tensor.clone()
self.assertEqual(tensor.view(-1), contig_tensor.view(-1))
self.assertEqual(tensor.view(1, -1, 1), contig_tensor.view(1, -1, 1))
@@ -1670,7 +1824,9 @@ class TestOldViewOps(TestCase):
# the copy).
copy_tensor = tensor.transpose(0, 1).reshape(target)
self.assertEqual(copy_tensor.size(), target)
- self.assertNotEqual(tensor.storage().data_ptr(), copy_tensor.storage().data_ptr())
+ self.assertNotEqual(
+ tensor.storage().data_ptr(), copy_tensor.storage().data_ptr()
+ )
def test_contiguous(self, device):
x = torch.randn(1, 16, 5, 5, device=device)
@@ -1700,9 +1856,11 @@ class TestOldViewOps(TestCase):
a_n = a.cpu().numpy()
for dim in range(-a.dim(), a.dim()):
for sections in range(1, 2 * a.size(dim)):
- msg = f'input_size {input_size}, sections {sections}, dim {dim}'
+ msg = f"input_size {input_size}, sections {sections}, dim {dim}"
result1 = torch.tensor_split(a, sections, dim)
- result2 = torch.tensor_split(a, torch.tensor(sections, dtype=torch.int64), dim)
+ result2 = torch.tensor_split(
+ a, torch.tensor(sections, dtype=torch.int64), dim
+ )
for r1, r2 in zip(result1, result2):
self.assertEqual(r1.device, torch.device(device), msg=msg)
self.assertEqual(r1.dtype, dtype, msg=msg)
@@ -1744,9 +1902,11 @@ class TestOldViewOps(TestCase):
for dim in range(-a.dim(), a.dim()):
for indices in indices_args:
result_1 = torch.tensor_split(a, indices, dim)
- result_2 = torch.tensor_split(a, torch.tensor(indices, dtype=torch.int64), dim)
+ result_2 = torch.tensor_split(
+ a, torch.tensor(indices, dtype=torch.int64), dim
+ )
- msg = f'input_size {input_size}, indices {indices}, dim {dim}'
+ msg = f"input_size {input_size}, indices {indices}, dim {dim}"
for r1, r2 in zip(result_1, result_2):
self.assertEqual(r1.device, torch.device(device), msg=msg)
self.assertEqual(r1.dtype, dtype, msg=msg)
@@ -1762,18 +1922,46 @@ class TestOldViewOps(TestCase):
S = 10
test_cases = [
# input size, sections or indices, dim, error type, error message, numpy error type
- [(S,), 10, 1, IndexError, r'Dimension out of range', IndexError],
- [(), 10, 0, RuntimeError, r'tensor_split expected at least a 1-dimensional tensor, '
- + 'but got a tensor with 0 dims', IndexError],
- [(S,), (10,), 1, IndexError, r'Dimension out of range', IndexError],
- [(), (10,), 0, RuntimeError, r'tensor_split expected at least a 1-dimensional tensor, '
- + 'but got a tensor with 0 dims', IndexError],
- [(S,), 0, 0, RuntimeError, r'number of sections must be larger than 0, got 0', ValueError],
- [(S,), -1, 0, RuntimeError, r'number of sections must be larger than 0, got -1', ValueError],
+ [(S,), 10, 1, IndexError, r"Dimension out of range", IndexError],
+ [
+ (),
+ 10,
+ 0,
+ RuntimeError,
+ r"tensor_split expected at least a 1-dimensional tensor, "
+ + "but got a tensor with 0 dims",
+ IndexError,
+ ],
+ [(S,), (10,), 1, IndexError, r"Dimension out of range", IndexError],
+ [
+ (),
+ (10,),
+ 0,
+ RuntimeError,
+ r"tensor_split expected at least a 1-dimensional tensor, "
+ + "but got a tensor with 0 dims",
+ IndexError,
+ ],
+ [
+ (S,),
+ 0,
+ 0,
+ RuntimeError,
+ r"number of sections must be larger than 0, got 0",
+ ValueError,
+ ],
+ [
+ (S,),
+ -1,
+ 0,
+ RuntimeError,
+ r"number of sections must be larger than 0, got -1",
+ ValueError,
+ ],
]
for input_size, sections_or_indices, dim, err, err_msg, numpy_err in test_cases:
a = torch.randn(input_size, device=device)
- msg = f'input_size {input_size}, sections_or_indices {sections_or_indices}, dim {dim}'
+ msg = f"input_size {input_size}, sections_or_indices {sections_or_indices}, dim {dim}"
with self.assertRaisesRegex(err, err_msg, msg=msg):
torch.tensor_split(a, sections_or_indices, dim)
with self.assertRaisesRegex(err, err_msg, msg=msg):
@@ -1782,13 +1970,17 @@ class TestOldViewOps(TestCase):
np.array_split(a.cpu().numpy(), sections_or_indices, dim)
# addtional tests for tensor_split with tensor_indices_or_sections
- with self.assertRaisesRegex(RuntimeError,
- r'tensor_split expected tensor_indices_or_sections to have dtype of long, but got Float'):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"tensor_split expected tensor_indices_or_sections to have dtype of long, but got Float",
+ ):
torch.tensor_split(a, torch.tensor(1.1), dim)
- with self.assertRaisesRegex(RuntimeError,
- r'tensor_split expected tensor_indices_or_sections to be a'
- + ' zero-dimensional or one-dimensional tensor, but got a tensor with 2 dims'):
+ with self.assertRaisesRegex(
+ RuntimeError,
+ r"tensor_split expected tensor_indices_or_sections to be a"
+ + " zero-dimensional or one-dimensional tensor, but got a tensor with 2 dims",
+ ):
torch.tensor_split(torch.rand(S, device=device), torch.tensor(((1,),)), 0)
def test_resize_all_dtypes_and_devices(self, device):
@@ -1808,11 +2000,13 @@ class TestOldViewOps(TestCase):
@onlyNativeDeviceTypes
def test_resize_overflow(self, device):
x = torch.empty((), dtype=torch.float64)
- with self.assertRaisesRegex(RuntimeError, 'Storage size calculation overflowed'):
+ with self.assertRaisesRegex(
+ RuntimeError, "Storage size calculation overflowed"
+ ):
x.resize_([2, 4, 2**29, 2**29])
- with self.assertRaisesRegex(RuntimeError, 'overflow'):
+ with self.assertRaisesRegex(RuntimeError, "overflow"):
x.resize_([8, 8, 2**29, 2**29])
- with self.assertRaisesRegex(RuntimeError, 'Stride calculation overflowed'):
+ with self.assertRaisesRegex(RuntimeError, "Stride calculation overflowed"):
x.resize_([0, 4, 2305843009213693952])
def test_view_all_dtypes_and_devices(self, device):
@@ -1823,12 +2017,26 @@ class TestOldViewOps(TestCase):
@skipIfTorchDynamo("conj bit not implemented in TensorVariable yet")
@onlyCPU
def test_conj_neg_view_numpy_error(self, device):
- self.assertRaisesRegex(RuntimeError, "has conjugate bit set", lambda: torch.tensor([1 + 2j]).conj().numpy())
- self.assertRaisesRegex(RuntimeError, "has negative bit set", lambda: torch.tensor([1 + 2j]).conj().imag.numpy())
- self.assertRaisesRegex(RuntimeError, "not supported for conjugate view tensors",
- lambda: torch.tensor([1 + 2j]).conj().view(torch.float64))
- self.assertRaisesRegex(RuntimeError, "not supported for tensors with negative bit set",
- lambda: torch.tensor([1 + 2j]).conj().imag.view(torch.int32))
+ self.assertRaisesRegex(
+ RuntimeError,
+ "has conjugate bit set",
+ lambda: torch.tensor([1 + 2j]).conj().numpy(),
+ )
+ self.assertRaisesRegex(
+ RuntimeError,
+ "has negative bit set",
+ lambda: torch.tensor([1 + 2j]).conj().imag.numpy(),
+ )
+ self.assertRaisesRegex(
+ RuntimeError,
+ "not supported for conjugate view tensors",
+ lambda: torch.tensor([1 + 2j]).conj().view(torch.float64),
+ )
+ self.assertRaisesRegex(
+ RuntimeError,
+ "not supported for tensors with negative bit set",
+ lambda: torch.tensor([1 + 2j]).conj().imag.view(torch.int32),
+ )
@onlyCPU
def test_crow_col_indices(self, device):
@@ -1842,8 +2050,9 @@ class TestOldViewOps(TestCase):
t.crow_indices()
t.col_indices()
+
instantiate_device_type_tests(TestViewOps, globals(), include_lazy=True)
instantiate_device_type_tests(TestOldViewOps, globals())
-if __name__ == '__main__':
+if __name__ == "__main__":
run_tests()
|
2.41.0
|
022f131b54065ffbfd8e17b4be73a5228f1139a
|
Sun, 28 Apr 2024 12:27:29 -0700
|
[PATCH 0918/1000] [inductor] switch assume_aligned_inputs to False (#124336)
|
In #123319, we guard some behavior behind the `assume_aligned_inputs` config option. If we set this to `False`, then the behavior added in #123319 becomes the default behavior. See the referenced PR for more details about the affected behavior.

Side effects:
* It's possible that this will hurt performance in some scenarios. For example, if an unaligned input is used in a matmul, it might be better to perform the clone to align it first.
* This will occasionally cause recompiles. Specifically, the check we perform (`(storage_offset * get_dtype_size(dtype)) % ALIGNMENT == 0`) can be guarded on if the storage_offset becomes dynamic. storage_offset becomes dynamic during automatic_dynamic_shapes after a shape or stride changes. Previously, this was increasing graph breaks in cpu inductor torchbench tests (but is fixed by more carefully guarding checks on alignment, so that we don't run them and generate guards unless actually needed). A minimal sketch of this check follows below.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124336 Approved by: https://github.com/eellison
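The alignment check quoted above can be illustrated with a short sketch. This is not inductor's implementation: `ALIGNMENT = 16` is an assumed value chosen for illustration, the helper `is_aligned` is hypothetical, and `Tensor.element_size()` stands in for `get_dtype_size(dtype)`. It only shows how an input's storage offset decides whether it counts as aligned.

```python
# Minimal sketch of the alignment check described in this commit message.
# Assumptions: ALIGNMENT = 16 bytes and the is_aligned helper are illustrative
# only; inductor's real constant and utilities may differ.
import torch

ALIGNMENT = 16  # assumed byte alignment for illustration


def is_aligned(t: torch.Tensor) -> bool:
    # The first element lives at storage_base + storage_offset * itemsize bytes;
    # the input counts as aligned when that byte offset is a multiple of ALIGNMENT.
    return (t.storage_offset() * t.element_size()) % ALIGNMENT == 0


base = torch.randn(64)  # storage_offset 0 -> aligned
view = base[1:]         # storage_offset 1 (4 bytes for float32) -> unaligned
print(is_aligned(base), is_aligned(view))  # True False
```

Under such a check, a view that skips the first element of a contiguous float32 buffer is classified as unaligned; with `assume_aligned_inputs=False` the generated code handles that case without cloning, which is consistent with the cudagraph-trees tests in the diff below now expecting zero clones.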
|
diff --git a/test/inductor/test_cudagraph_trees.py b/test/inductor/test_cudagraph_trees.py
index 33ee5247bd..c8877d4a8e 100644
--- a/test/inductor/test_cudagraph_trees.py
+++ b/test/inductor/test_cudagraph_trees.py
@@ -634,7 +634,7 @@ if HAS_CUDA and not TEST_WITH_ASAN:
new_id = self.get_manager().new_graph_id().id
self.assertEqual(new_id, 3)
- def _test_unaligned_static_input_impl(self):
+ def _test_unaligned_static_input_impl(self, expected_clones):
def fn(x, y):
return (x + y,)
@@ -665,21 +665,21 @@ if HAS_CUDA and not TEST_WITH_ASAN:
for _ in range(3):
with CloneCounterMode() as m:
compiled_f(get_unaligned_inputs())
- self.assertEqual(m.count, 2)
+ self.assertEqual(m.count, expected_clones)
compiled_f(get_aligned_inputs())
- self.assertEqual(m.count, 2)
+ self.assertEqual(m.count, expected_clones)
def test_unaligned_static_input_trees(self):
- self._test_unaligned_static_input_impl()
+ self._test_unaligned_static_input_impl(expected_clones=0)
@torch._inductor.config.patch("triton.cudagraph_trees", False)
def test_unaligned_static_input_non_trees(self):
- self._test_unaligned_static_input_impl()
+ self._test_unaligned_static_input_impl(expected_clones=0)
@torch._inductor.config.patch("triton.cudagraphs", False)
def test_unaligned_static_input_no_cudagraphs(self):
- self._test_unaligned_static_input_impl()
+ self._test_unaligned_static_input_impl(expected_clones=0)
def test_sparsity(self):
def foo(view_6, buf31):
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index ce69e81656..f8953b8747 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -510,7 +510,7 @@ decompose_mem_bound_mm: bool = False
# assume_aligned_inputs means that we assume that inputs will be aligned; we generate
# code using this assumption, and clone tensors before use if they aren't aligned.
# In the common case, most inputs will be aligned.
-assume_aligned_inputs: bool = True
+assume_aligned_inputs: bool = False
# config specific to codegen/cpp.py
|
2.41.0
|
1f142c44f81384afbdba5e451fc15744868bf26
|
Wed, 1 May 2024 23:56:00 +0000
|
[PATCH 0919/1000] Revert "Fakify script object inputs and attributes for non-strict export (#124239)"
|
This reverts commit ecc2e034f7e55bf9ff7f4e5df4e9086a5c92caaa. Reverted https://github.com/pytorch/pytorch/pull/124239 on behalf of https://github.com/kit1980 due to breaking internal builds ([comment](https://github.com/pytorch/pytorch/pull/124239#issuecomment-2089305447))
|
diff --git a/test/export/test_passes.py b/test/export/test_passes.py
index e2724ead88..41597a6030 100644
--- a/test/export/test_passes.py
+++ b/test/export/test_passes.py
@@ -13,10 +13,6 @@ from typing import List, Set
import torch
from functorch.experimental.control_flow import cond
from torch._dynamo.eval_frame import is_dynamo_supported
-from torch._export.non_strict_utils import (
- _fakify_script_objects,
- _gather_constant_attrs,
-)
from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse
from torch._export.passes.functionalize_side_effectful_ops_pass import (
_FunctionalizeSideEffectfulOpsPass,
@@ -38,24 +34,26 @@ from torch._export.utils import (
sequential_split,
)
from torch._higher_order_ops.auto_functionalize import auto_functionalized
-from torch._subclasses.fake_tensor import FakeTensorMode
+from torch._higher_order_ops.torchbind import enable_torchbind_tracing
from torch.export import export
from torch.export._remove_auto_functionalized_pass import (
unsafe_remove_auto_functionalized_pass,
)
from torch.export._remove_effect_tokens_pass import _remove_effect_tokens
-from torch.fx.experimental.symbolic_shapes import ShapeEnv
from torch.fx.passes.infra.partitioner import Partition
from torch.fx.passes.operator_support import OperatorSupport
from torch.library import _scoped_library, impl
from torch.testing import FileCheck
from torch.testing._internal.common_utils import (
+ find_library_location,
+ IS_FBCODE,
+ IS_MACOS,
+ IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TestCase,
)
-from torch.testing._internal.torchbind_impls import init_torchbind_implementations
from torch.utils import _pytree as pytree
@@ -89,53 +87,6 @@ def _get_output_names(gm: torch.fx.GraphModule) -> List[str]:
return [str(arg) for arg in args]
-class ModelsWithScriptObjectAttr:
- class Simple(torch.nn.Module):
- def __init__(self):
- super().__init__()
- self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
-
- class SimpleWithAttrInContainer(torch.nn.Module):
- def __init__(self):
- super().__init__()
- self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
- self.pytree_attr2 = [
- torch.classes._TorchScriptTesting._Foo(1, 2),
- {
- torch.classes._TorchScriptTesting._Foo(3, 4),
- },
- {"foo": torch.classes._TorchScriptTesting._Foo(5, 6)},
- ]
-
- class NestedWithAttrInContainer(torch.nn.Module):
- def __init__(self):
- super().__init__()
- self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
- self.pytree_attr2 = [
- torch.classes._TorchScriptTesting._Foo(1, 2),
- {
- torch.classes._TorchScriptTesting._Foo(3, 4),
- },
- {"foo": torch.classes._TorchScriptTesting._Foo(5, 6)},
- ]
- self.sub_mod = ModelsWithScriptObjectAttr.Simple()
- self.sub_mod2 = ModelsWithScriptObjectAttr.SimpleWithAttrInContainer()
-
- class MoreNestedWithAttrInContainer(torch.nn.Module):
- def __init__(self):
- super().__init__()
- self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
- self.pytree_attr2 = [
- torch.classes._TorchScriptTesting._Foo(1, 2),
- {
- torch.classes._TorchScriptTesting._Foo(3, 4),
- },
- {"foo": torch.classes._TorchScriptTesting._Foo(5, 6)},
- ]
- self.sub_mod = ModelsWithScriptObjectAttr.Simple()
- self.sub_mod2 = ModelsWithScriptObjectAttr.NestedWithAttrInContainer()
-
-
def _set_grad_enabled_tests():
from torch.export._trace import _export
@@ -262,7 +213,17 @@ class TestPasses(TestCase):
self.SEQUENTIAL_SPLIT_INLINE_TESTS = _sequential_split_inline_tests()
self.SET_GRAD_ENABLED_TESTS = _set_grad_enabled_tests()
- init_torchbind_implementations()
+ if IS_SANDCASTLE or IS_FBCODE:
+ torch.ops.load_library(
+ "//caffe2/test/cpp/jit:test_custom_class_registrations"
+ )
+ elif IS_MACOS:
+ raise unittest.SkipTest("non-portable load_library call used in test")
+ else:
+ lib_file_path = find_library_location("libtorchbind_test.so")
+ if IS_WINDOWS:
+ lib_file_path = find_library_location("torchbind_test.dll")
+ torch.ops.load_library(str(lib_file_path))
def tearDown(self):
self.SEQUENTIAL_SPLIT_INLINE_TESTS.clear()
@@ -460,7 +421,8 @@ class TestPasses(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- ep = torch.export.export(m, inputs, strict=False)
+ with enable_torchbind_tracing():
+ ep = torch.export.export(m, inputs, strict=False)
inp = torch.randn(2, 3)
orig_res = m(inp)
@@ -473,48 +435,6 @@ class TestPasses(TestCase):
self.assertTrue(torch.allclose(orig_res, ep_res))
self.assertTrue(torch.allclose(orig_res, without_token_res))
- def test_fakify_script_objects(self):
- for m in [
- ModelsWithScriptObjectAttr.Simple(),
- ModelsWithScriptObjectAttr.SimpleWithAttrInContainer(),
- ModelsWithScriptObjectAttr.NestedWithAttrInContainer(),
- ModelsWithScriptObjectAttr.MoreNestedWithAttrInContainer(),
- ]:
- constant_attrs = _gather_constant_attrs(m)
- fake_mode = FakeTensorMode(
- shape_env=ShapeEnv(tracked_fakes=[]),
- allow_non_fake_inputs=True,
- )
- with _fakify_script_objects(m, tuple(), {}, fake_mode) as (
- patched_mod,
- _,
- _,
- fake_constant_attrs,
- fake_to_real,
- ):
- self.assertEqual(len(fake_constant_attrs), len(constant_attrs))
- for fake_obj, fqn in fake_constant_attrs.items():
- self.assertEqual(constant_attrs[fake_to_real[fake_obj]], fqn)
-
- # TODO: _gather_constants doesn't recursively look into the pytree containers.
- @unittest.expectedFailure
- def test_fakify_script_objects_properly_handle_containers(self):
- m = ModelsWithScriptObjectAttr.SimpleWithAttrInContainer()
- constant_attrs = _gather_constant_attrs(m)
- fake_mode = FakeTensorMode(
- shape_env=ShapeEnv(tracked_fakes=[]),
- allow_non_fake_inputs=True,
- )
- with _fakify_script_objects(m, tuple(), {}, fake_mode) as (
- patched_mod,
- _,
- _,
- fake_constant_attrs,
- fake_to_real,
- ):
- self.assertTrue("attr" in fake_constant_attrs.values())
- self.assertTrue("pytree_attr2" in fake_constant_attrs.values())
-
def test_runtime_assert_inline_constraints_for_item(self) -> None:
class M(torch.nn.Module):
def __init__(self):
diff --git a/test/export/test_serialize.py b/test/export/test_serialize.py
index 9709241e9a..27681d48c2 100644
--- a/test/export/test_serialize.py
+++ b/test/export/test_serialize.py
@@ -3,7 +3,6 @@ PYTEST_DONT_REWRITE (prevents pytest from rewriting assertions, which interferes
with test_sym_bool)
"""
-
# Owner(s): ["oncall: export"]
import copy
import io
@@ -31,7 +30,11 @@ from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch.export import Dim, export, load, save
from torch.fx.experimental.symbolic_shapes import is_concrete_int, ValueRanges
from torch.testing._internal.common_utils import (
+ find_library_location,
instantiate_parametrized_tests,
+ IS_FBCODE,
+ IS_MACOS,
+ IS_SANDCASTLE,
IS_WINDOWS,
parametrize,
run_tests,
@@ -39,8 +42,6 @@ from torch.testing._internal.common_utils import (
TestCase,
)
-from torch.testing._internal.torchbind_impls import init_torchbind_implementations
-
def get_filtered_export_db_tests():
return [
@@ -346,8 +347,17 @@ class TestSerialize(TestCase):
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo doesn't support")
class TestDeserialize(TestCase):
def setUp(self):
- super().setUp()
- init_torchbind_implementations()
+ if IS_SANDCASTLE or IS_FBCODE:
+ torch.ops.load_library(
+ "//caffe2/test/cpp/jit:test_custom_class_registrations"
+ )
+ elif IS_MACOS:
+ raise unittest.SkipTest("non-portable load_library call used in test")
+ else:
+ lib_file_path = find_library_location("libtorchbind_test.so")
+ if IS_WINDOWS:
+ lib_file_path = find_library_location("torchbind_test.dll")
+ torch.ops.load_library(str(lib_file_path))
def _check_graph_nodes(self, gm1, gm2, _check_meta=True):
# TODO: The _check_meta flag bypasses checking for
@@ -827,7 +837,8 @@ class TestDeserialize(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- self.check_graph(m, inputs, strict=False)
+ with enable_torchbind_tracing():
+ self.check_graph(m, inputs, strict=False)
def test_custom_obj(self):
class MyModule(torch.nn.Module):
@@ -842,7 +853,8 @@ class TestDeserialize(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- self.check_graph(m, inputs, strict=False)
+ with enable_torchbind_tracing():
+ self.check_graph(m, inputs, strict=False)
def test_custom_obj_list_out(self):
class MyModule(torch.nn.Module):
@@ -858,7 +870,8 @@ class TestDeserialize(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- self.check_graph(m, inputs, strict=False)
+ with enable_torchbind_tracing():
+ self.check_graph(m, inputs, strict=False)
instantiate_parametrized_tests(TestDeserialize)
@@ -1048,8 +1061,17 @@ class TestSaveLoad(TestCase):
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo doesn't support")
class TestSerializeCustomClass(TestCase):
def setUp(self):
- super().setUp()
- init_torchbind_implementations()
+ if IS_SANDCASTLE or IS_FBCODE:
+ torch.ops.load_library(
+ "//caffe2/test/cpp/jit:test_custom_class_registrations"
+ )
+ elif IS_MACOS:
+ raise unittest.SkipTest("non-portable load_library call used in test")
+ else:
+ lib_file_path = find_library_location("libtorchbind_test.so")
+ if IS_WINDOWS:
+ lib_file_path = find_library_location("torchbind_test.dll")
+ torch.ops.load_library(str(lib_file_path))
def test_custom_class(self):
custom_obj = torch.classes._TorchScriptTesting._PickleTester([3, 4])
diff --git a/test/export/test_torchbind.py b/test/export/test_torchbind.py
index b60fa71459..872c713571 100644
--- a/test/export/test_torchbind.py
+++ b/test/export/test_torchbind.py
@@ -1,5 +1,6 @@
# Owner(s): ["oncall: export"]
+import unittest
import torch
import torch.utils._pytree as pytree
@@ -10,25 +11,38 @@ from torch.export import export
from torch.export._trace import _export
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
+ find_library_location,
instantiate_parametrized_tests,
+ IS_FBCODE,
+ IS_MACOS,
+ IS_SANDCASTLE,
+ IS_WINDOWS,
parametrize,
run_tests,
skipIfTorchDynamo,
TestCase,
)
-from torch.testing._internal.torchbind_impls import init_torchbind_implementations
+from torch.testing._internal.torchbind_impls import register_fake_operators
+
+
+def load_torchbind_test_lib():
+ if IS_SANDCASTLE or IS_FBCODE:
+ torch.ops.load_library("//caffe2/test/cpp/jit:test_custom_class_registrations")
+ elif IS_MACOS:
+ raise unittest.SkipTest("non-portable load_library call used in test")
+ else:
+ lib_file_path = find_library_location("libtorchbind_test.so")
+ if IS_WINDOWS:
+ lib_file_path = find_library_location("torchbind_test.dll")
+ torch.ops.load_library(str(lib_file_path))
+
+ register_fake_operators()
@skipIfTorchDynamo("torchbind not supported with dynamo yet")
class TestExportTorchbind(TestCase):
def setUp(self):
- init_torchbind_implementations()
-
- test = self
- test.tq_push_counter = 0
- test.tq_pop_counter = 0
- test.tq_size_counter = 0
- test.foo_add_tensor_counter = 0
+ load_torchbind_test_lib()
@torch._library.register_fake_class("_TorchScriptTesting::_Foo")
class FakeFoo:
@@ -42,9 +56,13 @@ class TestExportTorchbind(TestCase):
return cls(x, y)
def add_tensor(self, z):
- test.foo_add_tensor_counter += 1
return (self.x + self.y) * z
+ test = self
+ test.tq_push_counter = 0
+ test.tq_pop_counter = 0
+ test.tq_size_counter = 0
+
@torch._library.register_fake_class("_TorchScriptTesting::_TensorQueue")
class FakeTensorQueue:
def __init__(self, q):
@@ -259,10 +277,6 @@ def forward(self, x, cc):
add = torch.ops.aten.add.Tensor(x, call_torchbind); x = call_torchbind = None
return (add,)""",
)
- # aot_export_function runs the program twice
- # in run_functionalized_fw_and_collect_metadata and create_aot_dispatcher_function
- # We also have a re-tracing test, which doubles the count.
- self.assertEqual(self.foo_add_tensor_counter, 4)
@parametrize("pre_dispatch", [True, False])
def test_input_as_custom_op_argument(self, pre_dispatch):
@@ -274,31 +288,9 @@ def forward(self, x, cc):
return x + torch.ops._TorchScriptTesting.takes_foo(cc, x)
cc = torch.classes._TorchScriptTesting._Foo(10, 20)
-
- del torch.ops._TorchScriptTesting.takes_foo.default.py_kernels[
- torch._C.DispatchKey.Meta
- ]
- torch.ops._TorchScriptTesting.takes_foo.default._dispatch_cache.clear()
- # Even though a C++ implementation for takes_foo.default is registered,
- # we still need the python implementation for takes_foo.default to trace with FakeFoo.
- with self.assertRaisesRegex(RuntimeError, "no python implementation is found"):
- self._test_export_same_as_eager(
- MyModule(),
- (torch.ones(2, 3), cc),
- strict=False,
- pre_dispatch=pre_dispatch,
- )
-
- torch.ops._TorchScriptTesting.takes_foo.default.py_impl(
- torch._C.DispatchKey.Meta
- )(lambda cc, x: cc.add_tensor(x))
ep = self._test_export_same_as_eager(
- MyModule(),
- (torch.ones(2, 3), cc),
- strict=False,
- pre_dispatch=pre_dispatch,
+ MyModule(), (torch.ones(2, 3), cc), strict=False, pre_dispatch=pre_dispatch
)
-
self.assertExpectedInline(
ep.module().code.strip(),
"""\
@@ -813,7 +805,7 @@ def forward(self, arg0_1, arg1_1, arg2_1):
@skipIfTorchDynamo("torchbind not supported with dynamo yet")
class TestRegisterFakeClass(TestCase):
def setUp(self):
- init_torchbind_implementations()
+ load_torchbind_test_lib()
def tearDown(self):
torch._library.fake_class_registry.global_fake_class_registry.clear()
diff --git a/test/export/test_unflatten.py b/test/export/test_unflatten.py
index 35fbf2e1d6..8dfca01112 100644
--- a/test/export/test_unflatten.py
+++ b/test/export/test_unflatten.py
@@ -41,8 +41,6 @@ from torch.testing._internal.common_utils import (
skipIfTorchDynamo,
TestCase,
)
-
-from torch.testing._internal.torchbind_impls import init_torchbind_implementations
from torch.utils._pytree import (
LeafSpec,
tree_flatten,
@@ -564,21 +562,18 @@ class TestUnflatten(TestCase):
@skipIfTorchDynamo("custom objects not supported in dynamo yet")
def test_unflatten_constant_obj(self):
- init_torchbind_implementations()
-
- @torch._library.register_fake_class("_TorchScriptTesting::_Foo")
- class FakeFoo:
- def __init__(self, x: int, y: int):
- self.x = x
- self.y = y
-
- @classmethod
- def from_real(cls, foo):
- (x, y), _ = foo.__getstate__()
- return cls(x, y)
-
- def add_tensor(self, z):
- return (self.x + self.y) * z
+ if IS_MACOS:
+ raise unittest.SkipTest("non-portable load_library call used in test")
+ elif IS_SANDCASTLE or IS_FBCODE:
+ torch.ops.load_library(
+ "//caffe2/test/cpp/jit:test_custom_class_registrations"
+ )
+ elif IS_WINDOWS:
+ lib_file_path = find_library_location("torchbind_test.dll")
+ torch.ops.load_library(str(lib_file_path))
+ else:
+ lib_file_path = find_library_location("libtorchbind_test.so")
+ torch.ops.load_library(str(lib_file_path))
class SubMod(torch.nn.Module):
def __init__(self):
diff --git a/torch/_export/non_strict_utils.py b/torch/_export/non_strict_utils.py
index cd20618e4b..f102d1bfb0 100644
--- a/torch/_export/non_strict_utils.py
+++ b/torch/_export/non_strict_utils.py
@@ -1,10 +1,8 @@
-import contextlib
import inspect
from collections import defaultdict
from typing import Any, Callable, Dict, List, Tuple, Union
import torch
-import torch.utils._pytree as pytree
from torch._dynamo.source import (
AttrSource,
GetItemSource,
@@ -14,9 +12,7 @@ from torch._dynamo.source import (
)
from torch._dynamo.variables.builder import TrackedFake
from torch._export.passes.add_runtime_assertions_for_constraints_pass import InputDim
-from torch._export.passes.lift_constants_pass import ConstantAttrMap
from torch._guards import Source
-from torch._library.fake_class_registry import FakeScriptObject
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch.export import Constraint
from torch.export.dynamic_shapes import _Dim
@@ -71,7 +67,6 @@ def fakify(
source = key_path_to_source(kp)
if _is_constant_argument(t) or isinstance(t, torch.ScriptObject):
return t
-
if not isinstance(t, torch.Tensor):
raise ValueError(f"Unsupported input type {type(t)}")
n_dims = len(t.shape)
@@ -324,115 +319,3 @@ def make_constraints(
range_constraints[symbol] = shape_env.var_to_range[symbol]
return range_constraints
-
-
-def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap:
- """Search the module hierarchy, gathering up all tensor and ScriptObject constants.
-
- Returns a dictionary mapping hash(value) to the name of the constant. We
- have to abuse `hash` here unfortunately, see: [ScriptObject hash].
- """
- constants = ConstantAttrMap()
- buffers_parameters = set(m.buffers())
- buffers_parameters.update(m.parameters())
-
- def inner(m: torch.nn.Module, prefix_atoms: List[str], constants):
- for k, v in m.__dict__.items():
- if isinstance(
- v,
- (
- torch.Tensor,
- torch.ScriptObject,
- FakeScriptObject,
- ),
- ):
- if v in buffers_parameters:
- # filter out buffers and parameters, leaving only constants
- continue
-
- fqn = ".".join(prefix_atoms + [k])
- if v in constants:
- raise ValueError(
- f"Duplicate reference to constant attribute found: '{constants[v]}' and '{fqn}'."
- )
-
- constants[v] = fqn
- for k, v in m.named_children():
- inner(v, prefix_atoms + [k], constants)
-
- inner(m, [], constants)
- return constants
-
-
-@contextlib.contextmanager
-def _fakify_script_objects(
- mod: torch.nn.Module,
- args: Tuple[Any],
- kwargs: Dict[Any, Any],
- fake_mode: torch._subclasses.fake_tensor.FakeTensorMode,
-):
- # This context manager is used to fakify script objects into FakeScriptObject.
- # Inputs:
- # mod: the module to be exported, it (and its recursive submodules)'s script object attrs haven't been fakified.
- # args, kwargs: the args and kwargs inputs for mod, script object inputs haven't been fakified.
- # fake_mode: the fake mode to be used for fakifying script objects. It's the same mode that fakify input tensors.
- #
- # Returns:
- # mod: the patched module, its (and its recursive submodules) script object attrs have been fakified.
- # fake_args, fake_kwargs: new fakified args and kwargs.
- # Script object inputs have been fakified. Don't touch the tensors.
- # fake_constant_attrs: a new map from FakeScriptObject to the fqn of the original script object.
- # fake_to_real: a mapping between FakeScriptObject and the original script object in order to un-do the patching.
-
- constant_attrs: ConstantAttrMap = _gather_constant_attrs(mod)
- assert not any(
- isinstance(obj, FakeScriptObject) for obj in constant_attrs.values()
- ), "Mod shouldn't contain any FakeScriptObject."
- assert not pytree.tree_any(
- lambda obj: isinstance(obj, FakeScriptObject), (args, kwargs)
- ), "args and kwargs shouldn't contain any FakeScriptObject."
-
- patched_attr = {}
- fake_constant_attrs = ConstantAttrMap()
- fake_to_real = {}
-
- def _fakify_obj(obj):
- fake_obj = torch._library.fake_class_registry.to_fake_obj(fake_mode, obj)
- fake_to_real[fake_obj] = obj
- return fake_obj
-
- def _leaf_mod_and_attr(
- mod: torch.nn.Module, attr_fqn: str
- ) -> Tuple[torch.nn.Module, str]:
- *prefix_attr, last_attr = attr_fqn.split(".")
- cur_mod = mod
- for attr in prefix_attr:
- cur_mod = getattr(cur_mod, attr)
- return cur_mod, last_attr
-
- try:
- for obj, fqn in constant_attrs.items():
- if isinstance(obj, torch.ScriptObject):
- cur_mod, attr = _leaf_mod_and_attr(mod, fqn)
- assert obj is getattr(cur_mod, attr)
- fake_script_obj = _fakify_obj(obj)
- setattr(cur_mod, attr, fake_script_obj)
- fake_constant_attrs[fake_script_obj] = fqn
- patched_attr[fqn] = obj
- else:
- fake_constant_attrs[obj] = fqn
-
- fake_args, fake_kwargs = pytree.tree_map_only(
- torch.ScriptObject, _fakify_obj, (args, kwargs)
- )
- assert not any(
- isinstance(obj, torch.ScriptObject) for obj in fake_constant_attrs.values()
- ), "Patched mod shouldn't contain any torch.ScriptObject."
- assert not pytree.tree_any(
- lambda obj: isinstance(obj, torch.ScriptObject), (fake_args, fake_kwargs)
- ), "Fakfied args and kwargs shouldn't contain any torch.ScriptObject."
- yield (mod, fake_args, fake_kwargs, fake_constant_attrs, fake_to_real)
- finally:
- for fqn, orig_obj in patched_attr.items():
- cur_mod, attr = _leaf_mod_and_attr(mod, fqn)
- setattr(cur_mod, attr, orig_obj)
diff --git a/torch/_export/passes/lift_constants_pass.py b/torch/_export/passes/lift_constants_pass.py
index e4bda7a194..fc13403a3f 100644
--- a/torch/_export/passes/lift_constants_pass.py
+++ b/torch/_export/passes/lift_constants_pass.py
@@ -4,8 +4,6 @@ from typing import Any, Dict, Union
import torch
from torch._export.verifier import SpecViolationError
from torch._guards import detect_fake_mode
-
-from torch._library.fake_class_registry import FakeScriptObject
from torch.export.exported_program import (
ArgumentSpec,
CustomObjArgument,
@@ -17,35 +15,33 @@ from torch.export.exported_program import (
class ConstantAttrMap(collections.abc.MutableMapping):
- """A mapping class that understands how to use module constants (tensors,
- ScriptObjects, FakeScriptObjects) as keys. We store tensors and FakeScriptObjects normally,
- but ScriptObjects are stored by hash, because different torch.ScriptObjects can point to
- the same underlying value (but we guarantee that they will `hash()` to the same value
+ """A mapping class that understands how to use module constants (tensors and
+ ScriptObjects) as keys. We store tensors normally, but ScriptObjects are
+ stored by hash, because different torch.ScriptObjects can point to the same
+ underlying value (but we guarantee that they will `hash()` to the same value
if that's the case).
"""
def __init__(self):
# Underlying dict that we use to implement this mapping.
- self._constant_attrs: Dict[Union[int, torch.Tensor, FakeScriptObject], Any] = {}
+ self._constant_attrs: Dict[Union[int, torch.Tensor], Any] = {}
# Map from the hash(ScriptObject) to the ScriptObject itself. Used for
# APIs like `__iter__` that should look like they're returning the
# original ScriptObjects.
self._script_object_map: Dict[int, torch.ScriptObject] = {}
- def __getitem__(
- self, key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]
- ) -> Any:
+ def __getitem__(self, key: Union[torch.Tensor, torch.ScriptObject]) -> Any:
real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
- assert isinstance(real_key, (int, torch.Tensor, FakeScriptObject))
+ assert isinstance(real_key, (int, torch.Tensor))
return self._constant_attrs[real_key]
def __setitem__(
- self, key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject], value: Any
+ self, key: Union[torch.Tensor, torch.ScriptObject], value: Any
) -> None:
if isinstance(key, torch.ScriptObject):
self._constant_attrs[hash(key)] = value
self._script_object_map[hash(key)] = key
- elif isinstance(key, (torch.Tensor, FakeScriptObject)):
+ elif isinstance(key, torch.Tensor):
self._constant_attrs[key] = value
else:
raise TypeError(
@@ -87,7 +83,7 @@ def lift_constants_pass(
gm: torch.fx.GraphModule,
graph_signature: ExportGraphSignature,
constant_attrs: ConstantAttrMap,
-) -> Dict[str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]]:
+) -> Dict[str, Union[torch.Tensor, torch._C.ScriptObject]]:
"""
Takes a graph module, graph signature, and modifies them implace to lift any
constants (tensors or custom classes) as inputs to the graph. Returns a
@@ -105,9 +101,7 @@ def lift_constants_pass(
Returns:
A dictionary of fqn => constant value.
"""
- all_constants: Dict[
- str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]
- ] = {}
+ all_constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {}
inputs = graph_signature.input_specs
num_custom_obj = sum(
@@ -141,7 +135,7 @@ def lift_constants_pass(
gm.graph.erase_node(node)
continue
- # For ScriptObject, Tensor and FakeScriptObject constants:
+ # For ScriptObject and Tensor constants:
# First check if the constant was an attribute on some module by
# consulting `constant_attrs` map. If it is, use the fqn that keeps
# its location consistent with the eager module.
@@ -150,7 +144,7 @@ def lift_constants_pass(
# constant (e.g. x + torch.tensor(0)), and thus did not have a
# specific location in the eager module. In that case, just generate
# some name and attach it to the module in which it was used.
- if isinstance(constant_val, (torch.ScriptObject, FakeScriptObject)):
+ if isinstance(constant_val, torch.ScriptObject):
constant_kind = InputKind.CUSTOM_OBJ
constant_fqn = constant_attrs.get(constant_val)
if constant_fqn is not None:
@@ -209,14 +203,6 @@ def lift_constants_pass(
input_spec_arg = CustomObjArgument(
name=const_placeholder_node.name, class_fqn=class_fqn
)
- elif isinstance(constant_val, FakeScriptObject):
- class_fqn = constant_val.script_class_name
- const_placeholder_node.meta["val"] = CustomObjArgument(
- constant_fqn, class_fqn
- )
- input_spec_arg = CustomObjArgument(
- name=const_placeholder_node.name, class_fqn=class_fqn
- )
else:
raise SpecViolationError(
f"tried to lift unsupported type {type(constant_val)} from node {node.format_node()}"
@@ -243,35 +229,24 @@ def lift_constants_pass(
def rewrite_script_object_meta(
gm: torch.fx.GraphModule,
-) -> Dict[str, Union[torch.Tensor, FakeScriptObject],]:
- """When tracing, we produce a graph with FakeScriptObject in the
- meta["val"].
+) -> Dict[str, Union[torch.Tensor, torch.ScriptObject]]:
+ """When tracing, we produce a graph with an actual ScriptObject in the
+ meta["val"]. Eventually we want to change this behavior, when FakeMode infra
+ for ScriptObjects lands.
For now, we rewrie meta["val"] to be a placeholder CustomObjArgument
"""
- constants: Dict[
- str,
- Union[
- torch.Tensor,
- FakeScriptObject,
- ],
- ] = {}
+ constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {}
for node in gm.graph.nodes:
- if "val" not in node.meta:
- continue
-
- assert not isinstance(
+ if "val" not in node.meta or not isinstance(
node.meta["val"], torch.ScriptObject
- ), "ScriptObject should already be fakified in to FakeScriptObject."
-
- if isinstance(
- node.meta["val"],
- FakeScriptObject,
):
- old_meta = node.meta["val"]
- class_fqn = old_meta.script_class_name # type: ignore[attr-defined]
- new_meta = CustomObjArgument(node.name, class_fqn)
- constants[node.name] = old_meta
- node.meta["val"] = new_meta
+ continue
+
+ old_meta = node.meta["val"]
+ class_fqn = old_meta._type().qualified_name() # type: ignore[attr-defined]
+ new_meta = CustomObjArgument(node.name, class_fqn)
+ constants[node.name] = old_meta
+ node.meta["val"] = new_meta
return constants
diff --git a/torch/_library/fake_class_registry.py b/torch/_library/fake_class_registry.py
index 47b157b884..7eff756284 100644
--- a/torch/_library/fake_class_registry.py
+++ b/torch/_library/fake_class_registry.py
@@ -10,12 +10,9 @@ log = logging.getLogger(__name__)
class FakeScriptObject:
- def __init__(self, wrapped_obj: Any, script_class_name: str):
+ def __init__(self, wrapped_obj):
self.wrapped_obj = wrapped_obj
- # The fully qualified name of the class of original script object
- self.script_class_name = script_class_name
-
class HasStaticMethodFromReal(Protocol):
@classmethod
@@ -74,13 +71,12 @@ def to_fake_obj(fake_mode, x: torch.ScriptObject) -> FakeScriptObject:
return wrapped
- fake_x_wrapped = FakeScriptObject(fake_x, x._type().qualified_name()) # type: ignore[attr-defined]
+ fake_x_wrapped = FakeScriptObject(fake_x)
for name in x._method_names(): # type: ignore[attr-defined]
attr = getattr(fake_x, name, None)
if attr:
if not callable(attr):
raise RuntimeError(f"Expect {name} to be a callable but got {attr}.")
-
setattr(
fake_x_wrapped,
name,
diff --git a/torch/_ops.py b/torch/_ops.py
index f5d7313591..6e2119f16a 100644
--- a/torch/_ops.py
+++ b/torch/_ops.py
@@ -807,7 +807,6 @@ class TorchBindOpOverload(OpOverload):
DispatchKey.AutogradCPU,
DispatchKey.AutogradCUDA,
DispatchKey.ADInplaceOrView,
- DispatchKey.BackendSelect,
DispatchKey.PythonTLSSnapshot,
DispatchKey.PythonDispatcher,
]
@@ -890,13 +889,8 @@ class TorchBindOpOverload(OpOverload):
)
raise RuntimeError(
- f"Torchbind op {self} received a FakeScriptObject input when dispatching {handler}."
- f" but no python implementation is found."
- f" Please file an issue on this when you encounter this error."
- f" This error can happen when you export or compile the model."
- f" It can still happpen even if a C++ implementation for {dispatch_key}. "
- f" has been registered. That's because FakeScriptObject purely lives in python and cannot work "
- f" with a C++ implementation."
+ f"Cannot handle FakeScriptObject with python dispatcher with dispatch key {handler}."
+ f"Please implement it by annotating a python callable with py_impl({handler})."
)
assert isinstance(handler, Callable) # type: ignore[arg-type]
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index ddfc6392ba..96570a1a9b 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -15,8 +15,6 @@ import torch.fx
import torch.utils._pytree as pytree
from torch._dynamo.exc import UserError, UserErrorType
from torch._export.non_strict_utils import (
- _fakify_script_objects,
- _gather_constant_attrs,
make_constraints,
make_fake_inputs,
make_fake_params_buffers,
@@ -36,8 +34,6 @@ from torch._export.verifier import SpecViolationError
from torch._export.wrappers import _wrap_submodules
from torch._functorch.aot_autograd import aot_export_module
from torch._guards import detect_fake_mode
-
-from torch._library.fake_class_registry import FakeScriptObject
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._utils_internal import log_export_usage
from torch.export.exported_program import OutputKind
@@ -73,6 +69,7 @@ from .graph_signature import (
TokenArgument,
)
+
log = logging.getLogger(__name__)
@@ -456,6 +453,37 @@ def _export_to_torch_ir(
return gm_torch_level
+def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap:
+ """Search the module hierarchy, gathering up all tensor and ScriptObject constants.
+
+ Returns a dictionary mapping hash(value) to the name of the constant. We
+ have to abuse `hash` here unfortunately, see: [ScriptObject hash].
+ """
+ constants = ConstantAttrMap()
+ buffers_parameters = set(m.buffers())
+ buffers_parameters.update(m.parameters())
+
+ def inner(m: torch.nn.Module, prefix_atoms: List[str], constants):
+ for k, v in m.__dict__.items():
+ if isinstance(v, (torch.Tensor, torch.ScriptObject)):
+ if v in buffers_parameters:
+ # filter out buffers and parameters, leaving only constants
+ continue
+
+ fqn = ".".join(prefix_atoms + [k])
+ if v in constants:
+ raise ValueError(
+ f"Duplicate reference to constant attribute found: '{constants[v]}' and '{fqn}'."
+ )
+
+ constants[v] = fqn
+ for k, v in m.named_children():
+ inner(v, prefix_atoms + [k], constants)
+
+ inner(m, [], constants)
+ return constants
+
+
def _export_non_strict(
mod: torch.nn.Module,
fake_args,
@@ -466,9 +494,6 @@ def _export_non_strict(
transform=lambda x: x, # TODO(zhxchen17) Revisit if this is needed later.
pre_dispatch=False,
):
- assert not any(
- isinstance(obj, torch.ScriptObject) for obj in constant_attrs
- ), "We expect all script objects have been replaced by FakeScriptObjects."
# [NOTE] If the user is exporting under training mode, we want to detect if there is any
# state change in the autograd global state and error. If the user is exporting under inference
# mode, we don't care. At predispatch level, we don't care about the state change.
@@ -560,8 +585,10 @@ def _export_non_strict(
return TensorArgument(name=node.name)
elif isinstance(val, torch.SymInt):
return SymIntArgument(name=node.name)
- elif isinstance(val, FakeScriptObject):
- return CustomObjArgument(name=node.name, class_fqn=val.script_class_name)
+ elif isinstance(val, torch.ScriptObject):
+ return CustomObjArgument(
+ name=node.name, class_fqn=val._type().qualified_name() # type: ignore[attr-defined]
+ )
elif isinstance(val, (int, bool, str, float, type(None))):
return ConstantArgument(name=node.name, value=val)
else:
@@ -599,14 +626,7 @@ def _export_non_strict(
)
constants = rewrite_script_object_meta(gm)
- attr_constants = lift_constants_pass(gm, export_graph_signature, constant_attrs)
- assert not any(
- isinstance(obj, torch.ScriptObject) for obj in attr_constants.values()
- ), "We expect all script objects have been replaced by FakeScriptObjects."
- constants.update(attr_constants) # type: ignore[arg-type]
- assert not any(
- isinstance(obj, torch.ScriptObject) for obj in constants.values()
- ), "We expect all script objects have been replaced by FakeScriptObjects."
+ constants.update(lift_constants_pass(gm, export_graph_signature, constant_attrs))
# prettify names for placeholder nodes
placeholder_naming_pass(
@@ -623,13 +643,7 @@ def _export_non_strict(
class _ExportedProgramNonStrict:
gm: torch.fx.GraphModule
sig: ExportGraphSignature
- constants: Dict[
- str,
- Union[
- torch.Tensor,
- FakeScriptObject,
- ],
- ]
+ constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]]
return _ExportedProgramNonStrict(
gm,
@@ -927,6 +941,8 @@ def _export(
if isinstance(dynamic_shapes, torch.export.ShapesCollection):
dynamic_shapes = dynamic_shapes.dynamic_shapes(mod, args, kwargs)
+ constant_attrs = _gather_constant_attrs(mod)
+
flat_args, orig_in_spec = pytree.tree_flatten((args, kwargs))
original_state_dict = mod.state_dict(keep_vars=True)
forward_arg_names = _get_forward_arg_names(mod, args, kwargs)
@@ -1013,32 +1029,16 @@ def _export(
fake_params_buffers = make_fake_params_buffers(
fake_mode, _get_params_buffers(mod)
)
-
with fake_mode:
- with _fakify_script_objects(mod, fake_args, fake_kwargs, fake_mode) as (
- patched_mod,
- new_fake_args,
- new_fake_kwargs,
- new_fake_constant_attrs,
- map_fake_to_real,
- ):
- ep_non_strict = _export_non_strict(
- patched_mod,
- new_fake_args,
- new_fake_kwargs,
- fake_params_buffers,
- new_fake_constant_attrs,
- pre_dispatch=pre_dispatch,
- transform=_tuplify_outputs,
- )
- # ep_non_strict.constants contains only fake script objects, we need to map them back
- ep_non_strict.constants = {
- fqn: map_fake_to_real[obj]
- if isinstance(obj, FakeScriptObject)
- else obj
- for fqn, obj in ep_non_strict.constants.items()
- }
-
+ ep_non_strict = _export_non_strict(
+ mod,
+ fake_args,
+ fake_kwargs,
+ fake_params_buffers,
+ constant_attrs,
+ pre_dispatch=pre_dispatch,
+ transform=_tuplify_outputs,
+ )
ep_non_strict.gm.meta["inline_constraints"] = {
k: v
for k, v in fake_mode.shape_env.var_to_range.items()
@@ -1217,7 +1217,6 @@ def _export(
_normalize_nn_module_stack(gm_torch_level, type(mod))
# NOTE: graph module expects only positional args
- constant_attrs = _gather_constant_attrs(mod)
ep_non_strict = _export_non_strict(
gm_torch_level,
_convert_to_positional_args(orig_arg_names, fake_args, fake_kwargs),
diff --git a/torch/export/unflatten.py b/torch/export/unflatten.py
index 891ed24047..8b8d3132cd 100644
--- a/torch/export/unflatten.py
+++ b/torch/export/unflatten.py
@@ -10,7 +10,6 @@ from typing import Any, cast, Dict, List, Optional, Tuple, Union
import torch
import torch.fx._pytree as fx_pytree
import torch.utils._pytree as pytree
-from torch._library.fake_class_registry import FakeScriptObject
from torch.export._tree_utils import reorder_kwargs
from torch.export.exported_program import (
ConstantArgument,
@@ -57,16 +56,7 @@ def _assign_attr(
assert isinstance(from_obj, torch.Tensor)
to_module.register_buffer(field, from_obj, persistent=persistent)
elif attr_kind == _AttrKind.CONSTANT:
- assert not isinstance(
- from_obj, FakeScriptObject
- ), "FakeScriptObject should only exist during tracing."
- assert isinstance(
- from_obj,
- (
- torch.Tensor,
- torch.ScriptObject,
- ),
- )
+ assert isinstance(from_obj, (torch.Tensor, torch.ScriptObject))
setattr(to_module, field, from_obj)
diff --git a/torch/fx/_symbolic_trace.py b/torch/fx/_symbolic_trace.py
index 24b1428b83..b3524dbde4 100644
--- a/torch/fx/_symbolic_trace.py
+++ b/torch/fx/_symbolic_trace.py
@@ -24,7 +24,6 @@ from typing import (
import torch
import torch.utils._pytree as pytree
from torch._C import ScriptObject # type: ignore[attr-defined]
-from torch._library.fake_class_registry import FakeScriptObject
from ._compatibility import compatibility
from .graph import _PyTreeCodeGen, _PyTreeInfo, Graph
@@ -367,7 +366,7 @@ class Tracer(TracerBase):
# a get_attr to retrieve that tensor. Otherwise, we'll store away the
# tensor value into a special attribute on the Module s.t. we can
# retrieve it with a get_attr.
- if isinstance(a, (torch.Tensor, ScriptObject, FakeScriptObject)):
+ if isinstance(a, (torch.Tensor, ScriptObject)):
qualname: Optional[str] = self.tensor_attrs.get(a)
# Tensor was not found in the Module hierarchy, stow it away in a
@@ -730,17 +729,11 @@ class Tracer(TracerBase):
# is some other attribute on the model. Construct a dict mapping Tensor
# values to the qualified name here for efficiency. This is used downstream
# in create_arg
- self.tensor_attrs: Dict[
- Union[
- torch.Tensor,
- ScriptObject,
- FakeScriptObject
- ], str
- ] = {}
+ self.tensor_attrs: Dict[Union[torch.Tensor, ScriptObject], str] = {}
def collect_tensor_attrs(m: torch.nn.Module, prefix_atoms: List[str]):
for k, v in m.__dict__.items():
- if isinstance(v, (torch.Tensor, ScriptObject, FakeScriptObject)):
+ if isinstance(v, (torch.Tensor, ScriptObject)):
self.tensor_attrs[v] = ".".join(prefix_atoms + [k])
for k, v in m.named_children():
collect_tensor_attrs(v, prefix_atoms + [k])
diff --git a/torch/testing/_internal/torchbind_impls.py b/torch/testing/_internal/torchbind_impls.py
index 7babba0530..f66388d2ed 100644
--- a/torch/testing/_internal/torchbind_impls.py
+++ b/torch/testing/_internal/torchbind_impls.py
@@ -1,120 +1,32 @@
-import contextlib
-
import torch
-_TORCHBIND_IMPLS_INITIALIZED = False
-
+def register_if_not(qualname):
+ entry = torch._library.simple_registry.singleton.find(qualname)
+ if entry.abstract_impl.kernel is None:
+ return torch.library.impl_abstract(qualname)
+ else:
-def init_torchbind_implementations():
- global _TORCHBIND_IMPLS_INITIALIZED
- if _TORCHBIND_IMPLS_INITIALIZED:
- return
+ def dummy_wrapper(fn):
+ return fn
- load_torchbind_test_lib()
- register_fake_operators()
- register_fake_classes()
- _TORCHBIND_IMPLS_INITIALIZED = True
+ return dummy_wrapper
# put these under a function because the corresponding library might not be loaded yet.
def register_fake_operators():
- @torch.library.register_fake("_TorchScriptTesting::takes_foo_python_meta")
+ @register_if_not("_TorchScriptTesting::takes_foo_python_meta")
def fake_takes_foo(foo, z):
return foo.add_tensor(z)
- @torch.library.register_fake("_TorchScriptTesting::queue_pop")
+ @register_if_not("_TorchScriptTesting::queue_pop")
def fake_queue_pop(tq):
return tq.pop()
- @torch.library.register_fake("_TorchScriptTesting::queue_push")
+ @register_if_not("_TorchScriptTesting::queue_push")
def fake_queue_push(tq, x):
return tq.push(x)
- @torch.library.register_fake("_TorchScriptTesting::queue_size")
+ @register_if_not("_TorchScriptTesting::queue_size")
def fake_queue_size(tq):
return tq.size()
-
- def meta_takes_foo_list_return(foo, x):
- a = foo.add_tensor(x)
- b = foo.add_tensor(a)
- c = foo.add_tensor(b)
- return [a, b, c]
-
- def meta_takes_foo_tuple_return(foo, x):
- a = foo.add_tensor(x)
- b = foo.add_tensor(a)
- return (a, b)
-
- torch.ops._TorchScriptTesting.takes_foo_list_return.default.py_impl(
- torch._C.DispatchKey.Meta
- )(meta_takes_foo_list_return)
-
- torch.ops._TorchScriptTesting.takes_foo_tuple_return.default.py_impl(
- torch._C.DispatchKey.Meta
- )(meta_takes_foo_tuple_return)
-
- torch.ops._TorchScriptTesting.takes_foo.default.py_impl(torch._C.DispatchKey.Meta)(
- lambda cc, x: cc.add_tensor(x)
- )
-
-
-def register_fake_classes():
- @torch._library.register_fake_class("_TorchScriptTesting::_Foo")
- class FakeFoo:
- def __init__(self, x: int, y: int):
- self.x = x
- self.y = y
-
- @classmethod
- def from_real(cls, foo):
- (x, y), _ = foo.__getstate__()
- return cls(x, y)
-
- def add_tensor(self, z):
- return (self.x + self.y) * z
-
- @torch._library.register_fake_class("_TorchScriptTesting::_ContainsTensor")
- class FakeContainsTensor:
- def __init__(self, x: torch.Tensor):
- self.x = x
-
- @classmethod
- def from_real(cls, foo):
- ctx = torch.library.get_ctx()
- return cls(ctx.to_fake_tensor(foo.get()))
-
- def get(self):
- return self.x
-
-
-def load_torchbind_test_lib():
- import unittest
-
- from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
- find_library_location,
- IS_FBCODE,
- IS_MACOS,
- IS_SANDCASTLE,
- IS_WINDOWS,
- )
-
- if IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library("//caffe2/test/cpp/jit:test_custom_class_registrations")
- elif IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- if IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
-
-
-@contextlib.contextmanager
-def _register_py_impl_temporarily(op_overload, key, fn):
- try:
- op_overload.py_impl(key)(fn)
- yield
- finally:
- del op_overload.py_kernels[key]
- op_overload._dispatch_cache.clear()
|
2.41.0
|
e24c263f998819f849bb8293323213101e9aefc
|
Wed, 1 May 2024 23:58:33 +0000
|
[PATCH 0920/1000] Include support for the scatter gather cuda kernels to allow for comp… (#124809)
|
Fixes #121965. This PR hopes to add support for complex numbers in the scatter/gather related kernels. For brevity, I will only include `complex<float>` for now, as `complex<double>`, for example, will be more complicated. C++ unit tests are currently passing alongside tests in `test_scatter_gather_ops.py`. Python test suites also seem to be passing. Please keep the following in mind: 1) I think this is my first time using PyTorch. 2) This is my first contribution to PyTorch. Environment: 3080 & WSL 2. `nvcc` is at 12.4. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124809 Approved by: https://github.com/mikaylagawarecki
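For illustration only (not part of the PR), a minimal Python sketch of the kind of call the new complex dispatch entries are meant to cover, assuming a build that includes this change; the tensor values and the `reduce="prod"` choice are arbitrary:

```python
import torch

# Hedged sketch: exercise scatter_reduce_ with a complex dtype, the case the
# added AT_DISPATCH_...ComplexFloat entries are meant to cover on CUDA.
device = "cuda" if torch.cuda.is_available() else "cpu"
self_t = torch.ones(4, dtype=torch.complex64, device=device)
src = torch.tensor([1 + 1j, 2 + 0j, 3j, 1 - 1j], dtype=torch.complex64, device=device)
index = torch.tensor([0, 1, 1, 3], device=device)
# Multiply the src values into self_t at the given indices.
out = self_t.scatter_reduce_(0, index, src, reduce="prod", include_self=True)
print(out)
```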
|
diff --git a/aten/src/ATen/NumericUtils.h b/aten/src/ATen/NumericUtils.h
index 788da64b4e..421fe0efab 100644
--- a/aten/src/ATen/NumericUtils.h
+++ b/aten/src/ATen/NumericUtils.h
@@ -38,7 +38,11 @@ inline C10_HOST_DEVICE bool _isnan(T val) {
template <typename T, std::enable_if_t<c10::is_complex<T>::value, int> = 0>
inline C10_HOST_DEVICE bool _isnan(T val) {
+#if defined(__CUDACC__) || defined(__HIPCC__)
+ return ::isnan(val.real()) || ::isnan(val.imag());
+#else
return std::isnan(val.real()) || std::isnan(val.imag());
+#endif
}
template <typename T, std::enable_if_t<std::is_same_v<T, at::Half>, int> = 0>
diff --git a/aten/src/ATen/cuda/Atomic.cuh b/aten/src/ATen/cuda/Atomic.cuh
index 56ee8f87e2..2fa55902f9 100644
--- a/aten/src/ATen/cuda/Atomic.cuh
+++ b/aten/src/ATen/cuda/Atomic.cuh
@@ -35,6 +35,26 @@ struct AtomicFPOp<at::Half> {
}
};
+template <>
+struct AtomicFPOp<c10::complex<float>> {
+ template <typename func_t>
+ inline __device__ c10::complex<float> operator() (c10::complex<float> *address, c10::complex<float> val, const func_t& func) {
+ unsigned long long int* addr_as_ull = (unsigned long long int*)address;
+ unsigned long long int old = *addr_as_ull;
+ unsigned long long int assumed, new_val;
+
+ c10::complex<float> csum;
+ do {
+ assumed = old;
+ csum = func(csum, val);
+ new_val = *reinterpret_cast<unsigned long long*>(&csum);
+ old = atomicCAS(addr_as_ull, assumed, new_val);
+ } while (assumed != old);
+
+ return *reinterpret_cast<c10::complex<float>*>(&addr_as_ull);
+ }
+};
+
template <>
struct AtomicFPOp<at::BFloat16> {
template <typename func_t>
@@ -348,6 +368,14 @@ GPU_ATOMIC_INTEGER(Mul, a * b, int16_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int32_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int64_t)
+inline __device__ c10::complex<float> gpuAtomicMul(c10::complex<float> *address, c10::complex<float> val){
+ return AtomicFPOp<c10::complex<float>>()(address, val,
+ [](c10::complex<float> bsum, c10::complex<float> val) {
+ bsum*=(val);
+ return bsum;
+ });
+}
+
inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) {
return AtomicFPOp<at::Half>()(address, val,
[](at::Half bsum, at::Half val) {
@@ -369,7 +397,7 @@ inline __device__ double gpuAtomicMul(double * address, double val) {
});
}
-// Dont use a templated function for this since the addition function defaults to the CUDA built-in.
+// Don't use a templated function for this since the addition function defaults to the CUDA built-in.
inline __device__ float gpuAtomicMul (float * address, float val) {
unsigned int* address_as_ull = (unsigned int*)address;
unsigned int old = *address_as_ull;
@@ -402,6 +430,29 @@ __host__ __device__ T safe_max(T a, T b) {
return max;
}
+__inline__ __device__ c10::complex<float> complex_max(c10::complex<float> a, c10::complex<float> b) {
+ if(at::_isnan(b)) {
+ return b;
+ } else {
+ // Compute the magnitude of the complex numbers and compare each to see which one is greater.
+ float a_magnitude = __fsqrt_rn(
+ (
+ __fmul_rn(a.real(), a.real()) +
+ __fmul_rn(a.imag(),a.imag())
+ )
+ );
+ float b_magnitude = __fsqrt_rn(
+ (
+ __fmul_rn(b.real(), b.real()) +
+ __fmul_rn(b.imag(),b.imag())
+ )
+ );
+ return std::max<float>(a_magnitude, b_magnitude);
+ }
+
+}
+
+
ATOMIC_INTEGER_IMPL(Max)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t)
@@ -416,6 +467,13 @@ inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) {
});
}
+inline __device__ c10::complex<float> gpuAtomicMax(c10::complex<float> * address, c10::complex<float> val) {
+ return AtomicFPOp<c10::complex<float>>()(address, val,
+ [](c10::complex<float> bsum, c10::complex<float> val) {
+ return complex_max(bsum, val);
+ });
+}
+
inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) {
return AtomicFPOp<at::BFloat16>()(address, val,
[](at::BFloat16 bsum, at::BFloat16 val) {
@@ -462,6 +520,27 @@ __host__ __device__ T safe_min(T a, T b) {
return min;
}
+__inline__ __device__ c10::complex<float> complex_min(c10::complex<float> a, c10::complex<float> b) {
+ if(at::_isnan(b)) {
+ return b;
+ } else {
+ // Compute the magnitude of the complex numbers and compare each to see which one is smaller.
+ float a_magnitude = __fsqrt_rn(
+ (
+ __fmul_rn(a.real(), a.real()) +
+ __fmul_rn(a.imag(),a.imag())
+ )
+ );
+ float b_magnitude = __fsqrt_rn(
+ (
+ __fmul_rn(b.real(), b.real()) +
+ __fmul_rn(b.imag(),b.imag())
+ )
+ );
+ return std::min<float>(a_magnitude, b_magnitude);
+ }
+}
+
ATOMIC_INTEGER_IMPL(Min)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t)
@@ -476,6 +555,13 @@ inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) {
});
}
+inline __device__ c10::complex<float> gpuAtomicMin(c10::complex<float> * address, c10::complex<float> val) {
+ return AtomicFPOp<c10::complex<float>>()(address, val,
+ [](c10::complex<float> bsum, c10::complex<float> val) {
+ return complex_min(bsum, val);
+ });
+}
+
inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) {
return AtomicFPOp<at::BFloat16>()(address, val,
[](at::BFloat16 bsum, at::BFloat16 val) {
diff --git a/aten/src/ATen/native/cuda/ScatterGatherKernel.cu b/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
index 9ef83599cd..78f5d98dfe 100644
--- a/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
+++ b/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
@@ -4,7 +4,6 @@
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
-
#include <ATen/native/ScatterGatherChecks.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
@@ -201,7 +200,6 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
-
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
@@ -259,7 +257,6 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
-
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
@@ -318,9 +315,9 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
-
- AT_DISPATCH_ALL_TYPES_AND2(
+ AT_DISPATCH_ALL_TYPES_AND3(
at::ScalarType::Half, at::ScalarType::BFloat16,
+ at::ScalarType::ComplexFloat,
iter.dtype(),
"cuda_scatter_gather_base_kernel_func", [&] {
using dtype = typename std::conditional<cast_to_opaque,
@@ -450,8 +447,9 @@ struct cuda_scatter_fill_base_kernel {
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
- AT_DISPATCH_ALL_TYPES_AND2(
+ AT_DISPATCH_ALL_TYPES_AND3(
at::ScalarType::Half, at::ScalarType::BFloat16,
+ at::ScalarType::ComplexFloat,
iter.dtype(),
"cuda_scatter_fill_base_kernel_reduce_multiply", [&] {
using dtype = typename std::conditional<cast_to_opaque,
diff --git a/test/test_scatter_gather_ops.py b/test/test_scatter_gather_ops.py
index 3351b9d257..9074d3e2a4 100644
--- a/test/test_scatter_gather_ops.py
+++ b/test/test_scatter_gather_ops.py
@@ -221,7 +221,8 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
+ include_complex=False, include_bool=False))
def test_scatter_reduce_prod(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
@@ -229,7 +230,8 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_bool=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
+ include_complex=False, include_bool=False))
def test_scatter_reduce_mean(self, device, dtype):
for include_self in (True, False):
for deterministic in [False, True]:
@@ -239,7 +241,8 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
+ include_complex=False, include_bool=False))
def test_scatter_reduce_amax(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
@@ -258,7 +261,8 @@ class TestScatterGather(TestCase):
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
+ include_complex=False, include_bool=False))
def test_scatter_reduce_amin(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
diff --git a/test/test_torch.py b/test/test_torch.py
index 21318f3b16..433ccd5d5b 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -57,8 +57,8 @@ from torch.testing._internal.common_cuda import (
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
- floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
- all_types_and, floating_types, floating_and_complex_types, integral_types_and,
+ floating_types_and, get_all_math_dtypes, all_types_and_complex_and, all_types_and, floating_types,
+ floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
@@ -3837,7 +3837,7 @@ else:
self.assertEqual(input, result, msg=f"result: {result} input: {input} method: {str(operation)}")
@onlyCUDA
- @dtypes(*complex_types())
+ @dtypes(torch.cdouble)
def test_scatter_reduce_multiply_unsupported_dtypes(self, device, dtype):
height = 2
width = 2
|
2.41.0
|
731130ea8a83a33b14e5b566130b2f6888c2e6b
|
Tue, 30 Apr 2024 20:09:36 -0700
|
[PATCH 0923/1000] Add a code comment about torch._check_is_size in tensor_split (#125292)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/125292 Approved by: https://github.com/albanD
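To make the documented caveat concrete, a small hedged example (not part of the patch) showing that `tensor_split` accepts negative entries in a tensor of indices, which is why the decomposition cannot call `torch._check_is_size` on each item:

```python
import torch

# Hedged illustration of the caveat documented by the new code comment:
# negative indices are valid for tensor_split, so they are not "sizes".
x = torch.arange(10)
parts = torch.tensor_split(x, torch.tensor([-5, 5]))
print([p.numel() for p in parts])  # [5, 0, 5]: -5 resolves to index 5
```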
|
diff --git a/torch/_decomp/decompositions.py b/torch/_decomp/decompositions.py
index 124ed8fb72..6cfccbab0d 100644
--- a/torch/_decomp/decompositions.py
+++ b/torch/_decomp/decompositions.py
@@ -1413,6 +1413,15 @@ def tensor_split_tensor_indices_or_sections_py_impl(
return self.tensor_split(sections, dim)
else:
indices = [i.item() for i in tensor_indices_or_sections]
+ # WARNING: Tempted to torch._check_is_size on the indices here? You
+ # can't: tensor_split works with negative values in indices:
+ #
+ # >>> torch.tensor_split(torch.randn(10), torch.tensor([-5, 5]))
+ # (tensor([ 0.3540, 2.1074, -0.8507, 1.1639, 0.3055]), tensor([]),
+ # tensor([-0.4285, 1.0692, -0.1776, 0.9362, 1.6143]))
+ #
+ # Sorry, I don't make the rules. Explicitly do the item call in user
+ # code if you KNOW that they are non-negative.
return self.tensor_split(indices, dim)
|
2.41.0
|
119e1bcc2aa7e7d25bbad46cdcdfa06e8d3cc20
|
Thu, 2 May 2024 02:34:30 +0000
|
[PATCH 0924/1000] Fix refcount handling for dtype, layout and memory format (#125271)
|
Finish fixing https://github.com/pytorch/pytorch/issues/124868 by re-using our wrap() utils as much as possible and Py_NewRef in other places. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125271 Approved by: https://github.com/colesbury
|
diff --git a/tools/autograd/templates/python_nn_functions.cpp b/tools/autograd/templates/python_nn_functions.cpp
index f311cfebe4..4877df6584 100644
--- a/tools/autograd/templates/python_nn_functions.cpp
+++ b/tools/autograd/templates/python_nn_functions.cpp
@@ -60,14 +60,14 @@ static PyObject * THPVariable__parse_to(PyObject* module, PyObject* args, PyObje
PyTuple_SET_ITEM(tuple.get(), 0, Py_None);
}
if (scalarType) {
- PyTuple_SET_ITEM(tuple.get(), 1, torch::autograd::utils::wrap(torch::getTHPDtype(*scalarType)));
+ PyTuple_SET_ITEM(tuple.get(), 1, Py_NewRef(torch::getTHPDtype(*scalarType)));
} else {
Py_INCREF(Py_None);
PyTuple_SET_ITEM(tuple.get(), 1, Py_None);
}
PyTuple_SET_ITEM(tuple.get(), 2, torch::autograd::utils::wrap(non_blocking));
if (opt_memory_format.has_value()) {
- PyTuple_SET_ITEM(tuple.get(), 3, torch::utils::getTHPMemoryFormat(opt_memory_format.value()));
+ PyTuple_SET_ITEM(tuple.get(), 3, Py_NewRef(torch::utils::getTHPMemoryFormat(opt_memory_format.value())));
} else {
Py_INCREF(Py_None);
PyTuple_SET_ITEM(tuple.get(), 3, Py_None);
diff --git a/torch/csrc/DynamicTypes.h b/torch/csrc/DynamicTypes.h
index 1fd0a9d418..2dd3590aee 100644
--- a/torch/csrc/DynamicTypes.h
+++ b/torch/csrc/DynamicTypes.h
@@ -31,6 +31,7 @@ std::tuple<at::Storage, at::ScalarType, bool> createStorageGetType(
PyObject* obj);
bool isStorage(PyObject* obj);
+// Both methods below return a borrowed reference!
TORCH_PYTHON_API THPDtype* getTHPDtype(at::ScalarType scalarType);
THPLayout* getTHPLayout(at::Layout layout);
} // namespace torch
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index 7e1c1c69ec..e04d853198 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -536,7 +536,7 @@ static PyObject* get_autocast_dtype(
auto r = parser.parse(args, kwargs, parsed_args);
auto device_type = at::Device(r.string(0)).type();
at::ScalarType current_dtype = at::autocast::get_autocast_dtype(device_type);
- return Py_NewRef(torch::getTHPDtype(current_dtype));
+ return utils::wrap(current_dtype);
END_HANDLE_TH_ERRORS
}
@@ -733,7 +733,7 @@ static PyObject* get_autocast_gpu_dtype(PyObject* _unused, PyObject* arg) {
TORCH_WARN_DEPRECATION(
"torch.get_autocast_gpu_dtype() is deprecated. Please use torch.get_autocast_dtype('cuda') instead.")
at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kCUDA);
- return Py_NewRef(torch::getTHPDtype(current_dtype));
+ return utils::wrap(current_dtype);
END_HANDLE_TH_ERRORS
}
@@ -742,7 +742,7 @@ static PyObject* get_autocast_cpu_dtype(PyObject* _unused, PyObject* arg) {
TORCH_WARN_DEPRECATION(
"torch.get_autocast_cpu_dtype() is deprecated. Please use torch.get_autocast_dtype('cpu') instead.")
at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kCPU);
- return Py_NewRef(torch::getTHPDtype(current_dtype));
+ return utils::wrap(current_dtype);
END_HANDLE_TH_ERRORS
}
@@ -751,7 +751,7 @@ static PyObject* get_autocast_ipu_dtype(PyObject* _unused, PyObject* arg) {
TORCH_WARN_DEPRECATION(
"torch.get_autocast_ipu_dtype() is deprecated. Please use torch.get_autocast_dtype('ipu') instead.")
at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kIPU);
- return Py_NewRef(torch::getTHPDtype(current_dtype));
+ return utils::wrap(current_dtype);
END_HANDLE_TH_ERRORS
}
@@ -760,7 +760,7 @@ static PyObject* get_autocast_xla_dtype(PyObject* _unused, PyObject* arg) {
TORCH_WARN_DEPRECATION(
"torch.get_autocast_xla_dtype() is deprecated. Please use torch.get_autocast_dtype('xla') instead.")
at::ScalarType current_dtype = at::autocast::get_autocast_dtype(at::kXLA);
- return Py_NewRef(torch::getTHPDtype(current_dtype));
+ return utils::wrap(current_dtype);
END_HANDLE_TH_ERRORS
}
diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp
index 3705ac5e42..07f14c6921 100644
--- a/torch/csrc/autograd/python_variable.cpp
+++ b/torch/csrc/autograd/python_variable.cpp
@@ -1527,7 +1527,7 @@ static PyObject* THPVariable_dtype(THPVariable* self, void* unused) {
return handle_torch_function_getter(self, "dtype");
}
auto& self_ = THPVariable_Unpack(self);
- return torch::autograd::utils::wrap(torch::getTHPDtype(self_.scalar_type()));
+ return torch::autograd::utils::wrap(self_.scalar_type());
END_HANDLE_TH_ERRORS
}
@@ -1537,7 +1537,7 @@ static PyObject* THPVariable_layout(THPVariable* self, void* unused) {
return handle_torch_function_getter(self, "layout");
}
auto& self_ = THPVariable_Unpack(self);
- return torch::autograd::utils::wrap(torch::getTHPLayout(self_.layout()));
+ return torch::autograd::utils::wrap(self_.layout());
END_HANDLE_TH_ERRORS
}
diff --git a/torch/csrc/autograd/utils/wrap_outputs.h b/torch/csrc/autograd/utils/wrap_outputs.h
index ac79ac27ef..c5eae5c82e 100644
--- a/torch/csrc/autograd/utils/wrap_outputs.h
+++ b/torch/csrc/autograd/utils/wrap_outputs.h
@@ -53,21 +53,19 @@ inline PyObject* wrap(void* value) {
}
inline PyObject* wrap(THPDtype* dtype) {
- Py_INCREF(dtype);
- return (PyObject*)dtype;
+ return Py_NewRef(dtype);
}
inline PyObject* wrap(at::ScalarType scalarType) {
- return wrap(getTHPDtype(scalarType));
+ return Py_NewRef(getTHPDtype(scalarType));
}
inline PyObject* wrap(THPLayout* layout) {
- Py_INCREF(layout);
- return (PyObject*)layout;
+ return Py_NewRef(layout);
}
inline PyObject* wrap(at::Layout layout) {
- return wrap(getTHPLayout(layout));
+ return Py_NewRef(getTHPLayout(layout));
}
inline PyObject* wrap(at::Tensor tensor) {
diff --git a/torch/csrc/profiler/python/init.cpp b/torch/csrc/profiler/python/init.cpp
index dc1c4580a1..966bf68d3e 100644
--- a/torch/csrc/profiler/python/init.cpp
+++ b/torch/csrc/profiler/python/init.cpp
@@ -440,8 +440,7 @@ void initPythonBindings(PyObject* module) {
"dtype",
[](const TensorMetadata& metadata) {
return py::reinterpret_borrow<py::object>(
- torch::autograd::utils::wrap(
- torch::getTHPDtype(metadata.dtype_)));
+ torch::autograd::utils::wrap(metadata.dtype_));
})
.def_readonly("dim", &TensorMetadata::dim_)
.def_readonly("sizes", &TensorMetadata::sizes_)
diff --git a/torch/csrc/tensor/python_tensor.cpp b/torch/csrc/tensor/python_tensor.cpp
index 89181656fe..4ea523cedc 100644
--- a/torch/csrc/tensor/python_tensor.cpp
+++ b/torch/csrc/tensor/python_tensor.cpp
@@ -242,8 +242,9 @@ static void set_type(
// This field is lazily initialized from backend and scalar_type
type_obj.backend = static_cast<int>(backend);
type_obj.scalar_type = static_cast<int>(scalarType);
- type_obj.layout = torch::getTHPLayout(layout_from_backend(backend));
- type_obj.dtype = torch::getTHPDtype(scalarType);
+ type_obj.layout =
+ (THPLayout*)Py_NewRef(torch::getTHPLayout(layout_from_backend(backend)));
+ type_obj.dtype = (THPDtype*)Py_NewRef(torch::getTHPDtype(scalarType));
type_obj.is_cuda =
(backend == at::Backend::CUDA || backend == at::Backend::SparseCUDA);
type_obj.is_xpu =
diff --git a/torch/csrc/utils/pybind.h b/torch/csrc/utils/pybind.h
index 1a4e7bb26f..553738b899 100644
--- a/torch/csrc/utils/pybind.h
+++ b/torch/csrc/utils/pybind.h
@@ -1,6 +1,7 @@
#pragma once
#include <torch/csrc/python_headers.h>
+#include <torch/csrc/utils/pythoncapi_compat.h>
#include <ATen/core/Tensor.h>
#include <ATen/core/jit_type_base.h>
@@ -155,7 +156,7 @@ struct type_caster<at::MemoryFormat> {
at::MemoryFormat src,
return_value_policy /* policy */,
handle /* parent */) {
- return handle(torch::utils::getTHPMemoryFormat(src));
+ return handle(Py_NewRef(torch::utils::getTHPMemoryFormat(src)));
}
};
diff --git a/torch/csrc/utils/tensor_memoryformats.cpp b/torch/csrc/utils/tensor_memoryformats.cpp
index aabe2ad407..63dafaf5f5 100644
--- a/torch/csrc/utils/tensor_memoryformats.cpp
+++ b/torch/csrc/utils/tensor_memoryformats.cpp
@@ -18,10 +18,12 @@ std::array<PyObject*, static_cast<int>(at::MemoryFormat::NumOptions)>
} // anonymous namespace
PyObject* getTHPMemoryFormat(at::MemoryFormat memory_format) {
- return py::reinterpret_borrow<py::object>(
- memory_format_registry[static_cast<size_t>(memory_format)])
- .release()
- .ptr();
+ auto py_memory_format =
+ memory_format_registry[static_cast<int>(memory_format)];
+ if (!py_memory_format) {
+ throw std::invalid_argument("unsupported memory_format");
+ }
+ return py_memory_format;
}
void initializeMemoryFormats() {
diff --git a/torch/csrc/utils/tensor_memoryformats.h b/torch/csrc/utils/tensor_memoryformats.h
index 6b820bd028..b9268070e3 100644
--- a/torch/csrc/utils/tensor_memoryformats.h
+++ b/torch/csrc/utils/tensor_memoryformats.h
@@ -7,6 +7,8 @@
namespace torch::utils {
void initializeMemoryFormats();
+
+// This methods returns a borrowed reference!
TORCH_PYTHON_API PyObject* getTHPMemoryFormat(c10::MemoryFormat);
} // namespace torch::utils
|
2.41.0
|
ea54839c90896d9ba1fcf1f26472f8b477954f0
|
Wed, 1 May 2024 07:02:48 -0700
|
[PATCH 0925/1000] Make min(stride, strides[idx]) in collapse_view_helper size oblivious (#125301)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/125301 Approved by: https://github.com/albanD
|
diff --git a/torch/_prims/__init__.py b/torch/_prims/__init__.py
index 43ba508175..116671703e 100644
--- a/torch/_prims/__init__.py
+++ b/torch/_prims/__init__.py
@@ -1366,7 +1366,10 @@ def _collapse_view_helper(
continue
length = length * shape[idx]
- stride = min(stride, strides[idx])
+ if guard_size_oblivious(stride < strides[idx]):
+ stride = stride
+ else:
+ stride = strides[idx]
if (
guard_size_oblivious(a.numel() > 0)
|
2.41.0
|
ff7a31800296669d0dc880282934efa45d4c339
|
Thu, 2 May 2024 03:38:32 +0000
|
[PATCH 0927/1000] fix torchdeploy issue on sharddim_alltoall op (#125344)
|
Summary: fix torchdeploy issues when registering the distributed op, similar to what the functional collectives did. Differential Revision: D56850434 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125344 Approved by: https://github.com/XilunWu, https://github.com/fegin
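The pattern being applied is, roughly, the following hedged sketch: only perform the Python-side fake registration when not running under torch::deploy. The op name `mylib::double_it` is made up for illustration and is not part of the PR.

```python
import torch

if not torch._running_with_deploy():
    # Define a toy custom op (the name "mylib::double_it" is hypothetical) and
    # register a Python fake/meta implementation for it. Under torch::deploy
    # the Python registration path is unavailable, so it is skipped entirely.
    torch.library.define("mylib::double_it", "(Tensor x) -> Tensor")

    @torch.library.register_fake("mylib::double_it")
    def _double_it_fake(x):
        return torch.empty_like(x)

else:
    import warnings

    warnings.warn("mylib::double_it gets no Python fake impl under torch::deploy.")
```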
|
diff --git a/torch/distributed/_tensor/_collective_utils.py b/torch/distributed/_tensor/_collective_utils.py
index ce4809d996..fe712f3cb2 100644
--- a/torch/distributed/_tensor/_collective_utils.py
+++ b/torch/distributed/_tensor/_collective_utils.py
@@ -23,11 +23,20 @@ from torch.distributed.distributed_c10d import (
logger = logging.getLogger(__name__)
-@torch.library.register_fake("_dtensor::shard_dim_alltoall")
-def _shard_dim_alltoall_meta(input, gather_dim, shard_dim, group_name):
- group_size = _get_group_size_by_name(group_name)
- stacked_list = [torch.empty_like(input) for _ in range(group_size)]
- return torch.cat(stacked_list, dim=gather_dim).chunk(group_size, dim=shard_dim)
+if not torch._running_with_deploy():
+
+ @torch.library.register_fake("_dtensor::shard_dim_alltoall")
+ def _shard_dim_alltoall_meta(input, gather_dim, shard_dim, group_name):
+ group_size = _get_group_size_by_name(group_name)
+ stacked_list = [torch.empty_like(input) for _ in range(group_size)]
+ return torch.cat(stacked_list, dim=gather_dim).chunk(group_size, dim=shard_dim)
+
+else:
+ import warnings
+
+ warnings.warn(
+ "PyTorch Distributed functional collectives do not work with torch::deploy."
+ )
def shard_dim_alltoall(input, gather_dim, shard_dim, mesh, mesh_dim):
|
2.41.0
|
a5d2d9b3ed8c61fb463226854ba92cd0fb682bd
|
Wed, 1 May 2024 16:41:09 -0700
|
[PATCH 0928/1000] Hotfix: restore CPP guard string in structured trace (#125303)
|
Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/125303 Approved by: https://github.com/albanD
|
diff --git a/test/dynamo/test_structured_trace.py b/test/dynamo/test_structured_trace.py
index 07f541edbe..1213a48ffc 100644
--- a/test/dynamo/test_structured_trace.py
+++ b/test/dynamo/test_structured_trace.py
@@ -141,6 +141,7 @@ class StructuredTraceTest(TestCase):
{"inductor_post_grad_graph": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"inductor_output_code": {"filename": "FILENAME"}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 0, "frame_compile_id": 0, "attempt": 0}
""", # noqa: B950
)
@@ -160,6 +161,7 @@ class StructuredTraceTest(TestCase):
{"inductor_post_grad_graph": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"inductor_output_code": {"filename": "FILENAME"}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 0, "frame_compile_id": 0, "attempt": 0}
""", # noqa: B950
)
@@ -183,6 +185,7 @@ class StructuredTraceTest(TestCase):
{"inductor_post_grad_graph": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"inductor_output_code": {"filename": "FILENAME"}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 0, "frame_compile_id": 0, "attempt": 0}
{"dynamo_start": {"stack": "STACK"}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0}
{"dynamo_output_graph": {"sizes": {"l_x_": [1000, 1000], "add": [1000, 1000]}}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
@@ -190,6 +193,7 @@ class StructuredTraceTest(TestCase):
{"inductor_post_grad_graph": {}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
{"inductor_output_code": {"filename": "FILENAME"}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 0, "frame_compile_id": 1, "attempt": 0}
""", # noqa: B950
)
@@ -208,6 +212,7 @@ class StructuredTraceTest(TestCase):
{"inductor_post_grad_graph": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"inductor_output_code": {"filename": "FILENAME"}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 0, "frame_compile_id": 0, "attempt": 0}
""", # noqa: B950
)
@@ -296,6 +301,7 @@ class StructuredTraceTest(TestCase):
"""\
{"dynamo_start": {"stack": "STACK"}, "rank": 0, "frame_id": 0, "frame_compile_id": 0, "attempt": 0}
{"dynamo_guards": {}, "rank": 0, "frame_id": 0, "frame_compile_id": 0, "attempt": 1, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "rank": 0, "frame_id": 0, "frame_compile_id": 0, "attempt": 1, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "rank": 0, "frame_id": 0, "frame_compile_id": 0, "attempt": 1}
{"dynamo_start": {"stack": "STACK"}, "rank": 0, "frame_id": 1, "frame_compile_id": 0, "attempt": 0}
{"dynamo_output_graph": {"sizes": {"l_x_": [1024, 1024], "l__self___layers_0": [1024, 1024], "l__self___layers_1": [1024, 1024]}}, "rank": 0, "frame_id": 1, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
@@ -313,6 +319,7 @@ class StructuredTraceTest(TestCase):
{"inductor_post_grad_graph": {}, "rank": 0, "frame_id": 1, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"inductor_output_code": {"filename": "FILENAME"}, "rank": 0, "frame_id": 1, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "rank": 0, "frame_id": 1, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "rank": 0, "frame_id": 1, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "rank": 0, "frame_id": 1, "frame_compile_id": 0, "attempt": 0}
""", # noqa: B950
)
@@ -332,6 +339,7 @@ class StructuredTraceTest(TestCase):
"""\
{"dynamo_start": {"stack": "STACK"}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0}
{"dynamo_guards": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 1, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 1, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 0, "frame_compile_id": 0, "attempt": 1}
{"dynamo_start": {"stack": "STACK"}, "frame_id": 1, "frame_compile_id": 0, "attempt": 0}
{"dynamo_output_graph": {"sizes": {"l_x_": [1], "add": [1]}}, "frame_id": 1, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
@@ -339,6 +347,7 @@ class StructuredTraceTest(TestCase):
{"inductor_post_grad_graph": {}, "frame_id": 1, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"inductor_output_code": {"filename": "FILENAME"}, "frame_id": 1, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "frame_id": 1, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 1, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 1, "frame_compile_id": 0, "attempt": 0}
""", # noqa: B950
)
@@ -363,10 +372,12 @@ class StructuredTraceTest(TestCase):
{"dynamo_start": {"stack": "STACK"}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0}
{"dynamo_output_graph": {"sizes": {"l_a_": [10, 20], "l_b_": [20, 30], "matmul": [10, 30]}}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 0, "frame_compile_id": 0, "attempt": 0}
{"dynamo_start": {"stack": "STACK"}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0}
{"dynamo_output_graph": {"sizes": {"l_a_": ["s0", "s1"], "l_b_": ["s1", "s3"], "matmul": ["s0", "s3"]}}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 0, "frame_compile_id": 1, "attempt": 0}
""", # noqa: B950
)
@@ -396,10 +407,12 @@ class StructuredTraceTest(TestCase):
{"dynamo_start": {"stack": "STACK"}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0}
{"dynamo_output_graph": {"sizes": {"l_x_": [1], "x": [1]}}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 0, "frame_compile_id": 0, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 0, "frame_compile_id": 0, "attempt": 0}
{"dynamo_start": {"stack": "STACK"}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0}
{"dynamo_output_graph": {"sizes": {"l_x_": [1], "x": [1]}}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
{"dynamo_guards": {}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
+{"dynamo_cpp_guards_str": {}, "frame_id": 0, "frame_compile_id": 1, "attempt": 0, "has_payload": "HASH"}
{"compilation_metrics": "METRICS", "frame_id": 0, "frame_compile_id": 1, "attempt": 0}
""", # noqa: B950
)
diff --git a/torch/_dynamo/guards.py b/torch/_dynamo/guards.py
index 974e551975..9d6728c0c5 100644
--- a/torch/_dynamo/guards.py
+++ b/torch/_dynamo/guards.py
@@ -1858,10 +1858,11 @@ class CheckFunctionManager:
self.check_fn.id_matched_objs = builder.id_matched_objs
if config.enable_cpp_guard_manager:
- if guards_log.isEnabledFor(logging.DEBUG):
- guards_log.debug("%s", self.guard_manager)
- # print(self.guard_manager)
- # breakpoint()
+ # TODO: don't do the string rep, do something more structured here
+ torch._logging.trace_structured(
+ "dynamo_cpp_guards_str", payload_fn=lambda: str(self.guard_manager)
+ )
+ guards_log.debug("%s", self.guard_manager)
assert self.guard_manager # to make mypy happy
self.guard_manager.id_matched_objs = builder.id_matched_objs
self.check_fn = self.guard_manager
|
2.41.0
|
b70026d3b549782ee194c85a0a0f6805cceaed7
|
Wed, 1 May 2024 16:55:30 -0700
|
[PATCH 0930/1000] Do not pass none to has_pending_mutation (#125359)
|
Fixes https://github.com/pytorch/pytorch/issues/125315. Several failures when inlining of nn modules is enabled are due to passing None to has_pending_mutation. From the previous code, it appears the variable is expected to be None when it is not found; in that case we should skip it and not call has_pending_mutation. This is tested in https://github.com/pytorch/pytorch/pull/125354. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125359 Approved by: https://github.com/mlazos
|
diff --git a/torch/_dynamo/variables/optimizer.py b/torch/_dynamo/variables/optimizer.py
index e183f7a5e5..f7d2056aa2 100644
--- a/torch/_dynamo/variables/optimizer.py
+++ b/torch/_dynamo/variables/optimizer.py
@@ -112,9 +112,8 @@ class OptimizerVariable(UserDefinedObjectVariable):
for g in self.value.param_groups:
for p in g["params"]:
side_effects = tx.output.side_effects
- if side_effects.has_pending_mutation(
- side_effects.id_to_variable.get(id(p), None)
- ):
+ variable = side_effects.id_to_variable.get(id(p), None)
+ if variable and side_effects.has_pending_mutation(variable):
from ..exc import Unsupported
raise Unsupported("Pending mutation on parameter")
|
2.41.0
|
13a0a247906941438717e74b7990c26b2246e0c
|
Thu, 2 May 2024 00:22:14 -0700
|
[PATCH 0931/1000] [dynamo][easy] Simple fixes to prepare for nn module guards (#125316)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125316 Approved by: https://github.com/williamwen42 ghstack dependencies: #125275
|
diff --git a/test/dynamo/test_modules.py b/test/dynamo/test_modules.py
index de0e66e59f..b46844f219 100644
--- a/test/dynamo/test_modules.py
+++ b/test/dynamo/test_modules.py
@@ -1961,6 +1961,7 @@ class OptimizedModuleTest(torch._dynamo.test_case.TestCase):
self.assertEqual(compiled_func(inp).item(), 16)
self.assertRegex(failure_reason, r"^___check_obj_id\(L\['m'\]._forward_hooks")
+ @patch.object(torch._dynamo.config, "guard_nn_modules", False)
@patch.object(torch._dynamo.config, "skip_nnmodule_hook_guards", True)
def test_hooks_skip_guards(self):
class TestModule(torch.nn.Module):
diff --git a/torch/_dynamo/guards.py b/torch/_dynamo/guards.py
index 9d6728c0c5..fb4cb7a039 100644
--- a/torch/_dynamo/guards.py
+++ b/torch/_dynamo/guards.py
@@ -1475,7 +1475,9 @@ class GuardBuilder(GuardBuilderBase):
self._produce_guard_code(guard, [shape_guard], shape_env=True)
def TENSOR_MATCH(self, guard: Guard, value=None):
- if guard.is_nn_module() or match_on_id_for_tensor(guard):
+ if (
+ not torch._dynamo.config.guard_nn_modules and guard.is_nn_module()
+ ) or match_on_id_for_tensor(guard):
self.ID_MATCH(guard)
else:
if isinstance(value, TensorWeakRef):
|
2.41.0
|
b1bfe115693d4dd3cc2be68a2ea62717bc7f18e
|
Thu, 2 May 2024 15:17:10 +0000
|
[PATCH 0933/1000] Get cutlass_library import working under fbcode (#125257)
|
Differential Revision: D56764089 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125257 Approved by: https://github.com/chenyang78
|
diff --git a/torch/_inductor/codegen/cuda/cutlass_utils.py b/torch/_inductor/codegen/cuda/cutlass_utils.py
index ff60525548..62465b0883 100644
--- a/torch/_inductor/codegen/cuda/cutlass_utils.py
+++ b/torch/_inductor/codegen/cuda/cutlass_utils.py
@@ -10,6 +10,7 @@ from typing import Any, List, Optional
import sympy
import torch
+from ... import config
from ...config import cuda as inductor_cuda_config
from ...ir import Layout
@@ -48,6 +49,9 @@ def _gen_cutlass_file(
@functools.lru_cache(None)
def try_import_cutlass() -> bool:
+ if config.is_fbcode():
+ return True
+
# Copy CUTLASS python scripts to a temp dir and add the temp dir to Python search path.
# This is a temporary hack to avoid CUTLASS module naming conflicts.
# TODO(ipiszy): remove this hack when CUTLASS solves Python scripts packaging structure issues.
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index f8953b8747..f005bc1c67 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -9,6 +9,9 @@ def is_fbcode():
return not hasattr(torch.version, "git_version")
+if is_fbcode():
+ from triton.fb import build_paths
+
# add some debug printouts
debug = False
@@ -753,7 +756,9 @@ class cuda:
# The default path only works under PyTorch local development environment.
cutlass_dir = os.environ.get(
"TORCHINDUCTOR_CUTLASS_DIR",
- os.path.abspath(
+ build_paths.cutlass()
+ if is_fbcode()
+ else os.path.abspath(
os.path.join(os.path.dirname(torch.__file__), "../third_party/cutlass/")
),
)
|
2.41.0
|
93b57a57031b893139a6b334cf64b6201dde8c4
|
Tue, 30 Apr 2024 17:08:35 -0700
|
[PATCH 0934/1000] Add propagate_real_tensors mode for unbacked (#125115)
|
A common complaint when working with data-dependent code in PyTorch is that it's hard to tell how far you are from the finish line: every time a GuardOnDataDependentSymNode error is hit, you have to somehow fix or workaround it to see the next one. This PR adds a new mode `torch._functorch.config.fake_tensor_propagate_real_tensors` which modifies fake tensors to also propagate real tensors. This means that when we try to guard on a data-dependent SymNode, we can actually produce a real result. We also produce a warning which you should consult to figure out what the crux points are. I ran this on vision_maskrcnn. In the baseline (without this mode), the model has 27 graph breaks, resulting in 40 graphs. With this mode on, the model has only 11 graph breaks, resulting in 15 graphs (the remaining graph breaks are due to missing functionality for item() on float tensor and some other Dynamo missing features.) You get a list of things that would have errored like this: ``` WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u1) < 2) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u1), 1)) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u1), 1)) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Ne(Max(1, u1), 1)) -> False WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u0) < 2) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u0), 1)) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u0), 1)) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Ne(Max(1, u0), 1)) -> False WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u1) < 2) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u1), 1)) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u1), 1)) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Ne(Max(1, u1), 1)) -> False WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u0) < 2) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u0), 1)) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u0), 1)) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Ne(Max(1, u0), 1)) -> False WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u1) < 2) -> False WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u1), 1)) -> False WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Ne(Max(1, u1), 1)) -> True WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Max(1, u0) < 2) -> False WARNING:torch.fx.experimental.symbolic_shapes:propagate_real_tensors evaluate_expr(Eq(Max(1, u0), 1)) -> False ``` Potential later follow ups: * Improve the warning messages (in particular, should provide user frames) * GC real tensors when they are no longer needed by tracing. 
Right now, this will use A LOT of memory, as if your GC were broken and every intermediate tensor were kept live. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/125115 Approved by: https://github.com/IvanKobzarev
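For orientation, a minimal sketch of turning the mode on by hand, mirroring the three flags the new --propagate-real-tensors benchmark option sets below; the toy function, backend choice, and input values are illustrative assumptions rather than anything prescribed by this patch:
```
import torch
import torch._dynamo.config
import torch._functorch.config

# The same three flags the --propagate-real-tensors benchmark option toggles.
torch._dynamo.config.capture_scalar_outputs = True
torch._dynamo.config.capture_dynamic_output_shape_ops = True
torch._functorch.config.fake_tensor_propagate_real_tensors = True

@torch.compile(backend="eager")
def f(x, n):
    # n.item() produces an unbacked SymInt; because the real value now travels
    # alongside the fake tensor, the data-dependent branch below is answered
    # (with a propagate_real_tensors warning) instead of raising
    # GuardOnDataDependentSymNode.
    k = n.item()
    if k > 2:
        return x + k
    return x - k

f(torch.randn(8), torch.tensor(4))
```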
|
diff --git a/benchmarks/dynamo/common.py b/benchmarks/dynamo/common.py
index 2ecadef60e..48ea0496da 100644
--- a/benchmarks/dynamo/common.py
+++ b/benchmarks/dynamo/common.py
@@ -75,6 +75,7 @@ except ImportError:
graph_break_reasons,
maybe_enable_compiled_autograd,
)
+import torch._functorch.config
from torch._functorch.aot_autograd import set_model_name
from torch._inductor import config as inductor_config, metrics
from torch._subclasses.fake_tensor import FakeTensorMode
@@ -3155,6 +3156,11 @@ def parse_args(args=None):
action="store_true",
help="Runs a dynamic shapes version of the benchmark, if available.",
)
+ parser.add_argument(
+ "--propagate-real-tensors",
+ action="store_true",
+        help="Capture as much data-dependent compute as you can by unsoundly propagating real tensors",
+ )
parser.add_argument(
"--dynamic-batch-only",
action="store_true",
@@ -3603,6 +3609,11 @@ def run(runner, args, original_dir=None):
if args.dynamic_shapes:
if not args.dynamic_batch_only:
torch._dynamo.config.assume_static_by_default = False
+ if args.propagate_real_tensors:
+ # TODO: Separate flag for data dependent
+ torch._dynamo.config.capture_scalar_outputs = True
+ torch._dynamo.config.capture_dynamic_output_shape_ops = True
+ torch._functorch.config.fake_tensor_propagate_real_tensors = True
if args.specialize_int:
torch._dynamo.config.specialize_int = True
if args.ci:
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index f57fce9428..33f1035c1c 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -10525,6 +10525,23 @@ fn
with unittest.mock.patch("torch._dynamo.config.error_on_recompile", True):
fn(torch.randn(4), d)
+ @unittest.skipIf(not TEST_CUDA, "requires cuda")
+ @torch._dynamo.config.patch(
+ capture_scalar_outputs=True, capture_dynamic_output_shape_ops=True
+ )
+ @torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True)
+ def test_interpolate_propagate_real_tensors(self):
+ @torch.compile(backend="eager", fullgraph=True)
+ def f(mask, box):
+ # u0, u1 = mask.tolist()
+ mask = torch.randn(1, 1, 30, 30, device="cuda")
+ h, w = box.tolist()
+ return torch.nn.functional.interpolate(
+ mask, (h, w), mode="bilinear", align_corners=False
+ )
+
+ f(torch.tensor([30, 30], device="cuda"), torch.tensor([68, 32], device="cuda"))
+
def test_custom_iter_dict(self):
class ReversedDict(dict):
def __iter__(self):
diff --git a/test/test_dynamic_shapes.py b/test/test_dynamic_shapes.py
index 284cf85d01..64f4a14ee1 100644
--- a/test/test_dynamic_shapes.py
+++ b/test/test_dynamic_shapes.py
@@ -512,6 +512,12 @@ def forward(self, x_1):
s0 = shape_env.create_unbacked_symint()
self.assertRaises(GuardOnDataDependentSymNode, lambda: bool(s0 == 0))
+ def test_data_dependent_guard_propagate_real_tensors(self):
+ shape_env = ShapeEnv()
+ s0 = shape_env.create_unbacked_symint()
+ shape_env.set_unbacked_var_to_val(s0.node.expr, 0)
+ self.assertEqual(bool(s0 == 0), True)
+
def test_expect_true_basic(self):
shape_env = ShapeEnv()
i0 = shape_env.create_unbacked_symint()
diff --git a/test/test_fake_tensor.py b/test/test_fake_tensor.py
index 41dd80299d..40075eb24e 100644
--- a/test/test_fake_tensor.py
+++ b/test/test_fake_tensor.py
@@ -6,6 +6,7 @@ from torch.testing._internal.common_utils import (
instantiate_parametrized_tests, TemporaryFileName)
import torch
import torch._dynamo
+from torch._dynamo.testing import make_test_cls_with_patches
import itertools
import numpy as np
from torch.testing._internal.jit_utils import RUN_CUDA
@@ -53,6 +54,10 @@ aten = torch.ops.aten
torch._dynamo.config.fake_tensor_cache_enabled = True
torch._dynamo.config.fake_tensor_cache_crosscheck_enabled = True
+def expectedFailurePropagateRealTensors(fn):
+ fn._expected_failure_propagate_real_tensors = True
+ return fn
+
class FakeTensorTest(TestCase):
def checkType(self, t, device_str, size):
self.assertTrue(isinstance(t, FakeTensor))
@@ -83,18 +88,22 @@ class FakeTensorTest(TestCase):
def test_custom_op_fallback(self):
from torch.library import Library, impl
- test_lib = Library("my_test_op", "DEF") # noqa: TOR901
- test_lib.define('foo(Tensor self) -> Tensor')
+ try:
+ test_lib = Library("my_test_op", "DEF") # noqa: TOR901
+ test_lib.define('foo(Tensor self) -> Tensor')
- @impl(test_lib, 'foo', 'CPU')
- def foo_impl(self):
- return self.cos()
+ @impl(test_lib, 'foo', 'CPU')
+ def foo_impl(self):
+ return self.cos()
- x = torch.empty(2, 2, device="cpu")
- with self.assertRaisesRegex(UnsupportedOperatorException, "my_test_op.foo.default"):
- with FakeTensorMode(allow_fallback_kernels=True) as mode:
- x = mode.from_tensor(x)
- torch.ops.my_test_op.foo(x)
+ x = torch.empty(2, 2, device="cpu")
+ with self.assertRaisesRegex(UnsupportedOperatorException, "my_test_op.foo.default"):
+ with FakeTensorMode(allow_fallback_kernels=True) as mode:
+ x = mode.from_tensor(x)
+ torch.ops.my_test_op.foo(x)
+
+ finally:
+ test_lib._destroy()
def test_parameter_instantiation(self):
with FakeTensorMode():
@@ -207,6 +216,8 @@ class FakeTensorTest(TestCase):
FileCheck().check("CPU").check("AutocastCPU").run(torch._C._dispatch_key_set(y))
FileCheck().check_not("ADInplaceOrView").check_not("Autograd").run(torch._C._dispatch_key_set(y))
+ # TODO: functorch support for propagate real tensors
+ @expectedFailurePropagateRealTensors
def test_batch_tensor(self):
x = torch.rand((3, 4, 5))
b = _add_batch_dim(x, 0, 0)
@@ -392,10 +403,10 @@ class FakeTensorTest(TestCase):
x = torch.rand([4])
y = torch.rand([4], device="cuda")
- with self.assertRaisesRegex(Exception, "found two different devices"):
+ with self.assertRaisesRegex(Exception, "found.+two.+devices"):
torch.sin(x, out=y)
- with self.assertRaisesRegex(Exception, "found two different devices"):
+ with self.assertRaisesRegex(Exception, "found.+two.+devices"):
x.add_(y)
@@ -559,6 +570,8 @@ class FakeTensorTest(TestCase):
x = torch.rand([10])
x.tolist()
+ # Propagate real tensors doesn't work with fake-on-fake
+ @expectedFailurePropagateRealTensors
def test_same_shape_env_preserved(self):
shape_env = ShapeEnv()
mode1 = FakeTensorMode(shape_env=shape_env)
@@ -578,6 +591,9 @@ class FakeTensorTest(TestCase):
self.assertIs(t2.size(0).node.shape_env, t1.size(0).node.shape_env)
self.assertEqual(str(t2.size(0)), str(t1.size(0)))
+ # TODO: Support NJT. There's also some funny business with dynamic shapes
+ # which would need to be dealt with as well
+ @expectedFailurePropagateRealTensors
def test_jagged_fake_to_fake_preserved(self):
from torch.nested._internal.nested_tensor import jagged_from_list
@@ -736,7 +752,9 @@ class FakeTensorTest(TestCase):
x2 = torch.rand(4, 4, device="cuda")
i1 = torch.tensor([0, 1], device="cuda")
i2 = torch.tensor([0, 1], device="cpu")
- r1 = torch.ops.aten.index(x1, i1)
+ # NB: This one does not work: cuda indices not allowed on cpu
+ # tensor
+ # r1 = torch.ops.aten.index(x1, i1)
r2 = torch.ops.aten.index(x2, i2)
y1 = torch.rand(4, device="cpu")
@@ -745,7 +763,7 @@ class FakeTensorTest(TestCase):
j2 = torch.tensor([2], device="cpu")
r3 = torch.ops.aten.index_put.default(x1, j1, y1)
r4 = torch.ops.aten.index_put.default(x2, j2, y2)
- self.checkType(r1, "cpu", ())
+ # self.checkType(r1, "cpu", ())
self.checkType(r2, "cuda", ())
self.checkType(r3, "cpu", (4, 4))
self.checkType(r4, "cuda", (4, 4))
@@ -774,6 +792,9 @@ class FakeTensorTest(TestCase):
grad_in = torch.ops.aten._adaptive_avg_pool2d_backward(grad_out, inp)
self.assertTrue(torch._prims_common.suggest_memory_format(grad_in) == torch.channels_last)
+ # Propagate real tensors doesn't work when original input arguments are
+ # fake
+ @expectedFailurePropagateRealTensors
def test_export_numpy(self):
class MyNumpyModel(torch.nn.Module):
def forward(self, input):
@@ -801,6 +822,26 @@ class FakeTensorTest(TestCase):
self.assertEqual(r.size(), [3])
+instantiate_parametrized_tests(FakeTensorTest)
+
+
+def make_propagate_real_tensors_cls(cls):
+ cls = make_test_cls_with_patches(
+ cls,
+ "PropagateRealTensors",
+ "_propagate_real_tensors",
+ (torch._functorch.config, "fake_tensor_propagate_real_tensors", True),
+ xfail_prop="_expected_failure_propagate_real_tensors",
+ decorator=skipIfTorchDynamo("propagate_real_tensors affects Dynamo"),
+ )
+ cls.__file__ = __file__
+ cls.__module__ = __name__
+ globals()[cls.__name__] = cls
+
+
+make_propagate_real_tensors_cls(FakeTensorTest)
+
+
class FakeTensorConstHandling(TestCase):
def assertConst(self, *args):
for arg in args:
@@ -891,6 +932,10 @@ class FakeTensorConstHandling(TestCase):
y = torch.div(4, 4, rounding_mode='trunc')
self.assertConst(y)
+
+make_propagate_real_tensors_cls(FakeTensorConstHandling)
+
+
def contains_type(type: torch._C.Type, maybe_contained_type: torch._C.Type):
return maybe_contained_type.isSubtypeOf(type) or any(
contains_type(e, maybe_contained_type) for e in type.containedTypes()
@@ -907,6 +952,11 @@ class FakeTensorOpInfoTest(TestCase):
optests.fake_check(op, args, kwargs)
+make_propagate_real_tensors_cls(FakeTensorOpInfoTest)
+instantiate_device_type_tests(FakeTensorOpInfoTest, globals(), only_for=("cpu", "cuda"))
+instantiate_device_type_tests(PropagateRealTensorsFakeTensorOpInfoTest, globals(), only_for=("cpu",)) # noqa: F821
+
+
class FakeTensorConverterTest(TestCase):
def test_memoized_conversion_to_meta(self):
x = torch.rand(2, 2, 2)
@@ -1018,16 +1068,17 @@ class FakeTensorConverterTest(TestCase):
assert y_weak() is None
+make_propagate_real_tensors_cls(FakeTensorConverterTest)
+
+
class FakeTensorOperatorInvariants(TestCase):
- @staticmethod
- def get_aten_op(schema):
+ def get_aten_op(self, schema):
namespace, name = schema.name.split("::")
overload = schema.overload_name if schema.overload_name else "default"
assert namespace == "aten"
return getattr(getattr(torch.ops.aten, name), overload)
- @staticmethod
- def get_all_aten_schemas():
+ def get_all_aten_schemas(self):
for schema in torch._C._jit_get_all_schemas():
namespace = schema.name.split("::")[0]
if namespace != "aten":
@@ -1178,6 +1229,10 @@ class FakeTensorOperatorInvariants(TestCase):
# IMPORTANT!!! Always run even if CUDA is not available
def test_fake_cuda_no_init(self):
+        # Skip this test: real-tensor propagation will try to actually run the
+        # CUDA operations, which clearly will not work on a CPU runner
+ if torch._functorch.config.fake_tensor_propagate_real_tensors:
+ return
with FakeTensorMode():
torch.empty(10, device='cuda')
torch.ones(10, device='cuda')
@@ -1236,6 +1291,9 @@ class FakeTensorOperatorInvariants(TestCase):
self.assertEqual(mode.count, 0)
+make_propagate_real_tensors_cls(FakeTensorOperatorInvariants)
+
+
class FakeTensorPropTest(TestCase):
def test_fake_tensor_prop_on_nn_module(self):
class ToyNnModuleWithParameters(torch.nn.Module):
@@ -1294,6 +1352,7 @@ class FakeTensorPropTest(TestCase):
self.assertTrue(failed)
+ @expectedFailurePropagateRealTensors # Propagate real tensors doesn't work with fake-on-fake
def test_fake_tensor_prop_on_nn_module_with_optional_args(self):
class OptionalArgumentInBetween(torch.nn.Module):
def __init__(self):
@@ -1321,9 +1380,11 @@ class FakeTensorPropTest(TestCase):
FakeTensorProp(graph_model, fake_mode).propagate(value, None, another_optional_value)
+ @expectedFailurePropagateRealTensors # TODO: not sure about this one, kinda strange
def test_unbacked_shape_realloc(self):
def f(x):
return x.nonzero()
+
shape_env = ShapeEnv()
fake_mode = FakeTensorMode(shape_env=shape_env)
with fake_mode:
@@ -1368,6 +1429,9 @@ class FakeTensorPropTest(TestCase):
torch.load(state_dict_file, map_location="cpu") # scenario 2
+make_propagate_real_tensors_cls(FakeTensorPropTest)
+
+
class FakeTensorSerialization(TestCase):
def test_serialization(self):
x = torch.tensor([0], device="cpu")
@@ -1706,11 +1770,5 @@ class FakeTensorDispatchCache(TestCase):
extract_tensor_metadata(res4),
)
-
-instantiate_parametrized_tests(FakeTensorTest)
-
-only_for = ("cpu", "cuda")
-instantiate_device_type_tests(FakeTensorOpInfoTest, globals(), only_for=only_for)
-
if __name__ == "__main__":
run_tests()
diff --git a/test/test_proxy_tensor.py b/test/test_proxy_tensor.py
index 115305c313..9c56eef304 100644
--- a/test/test_proxy_tensor.py
+++ b/test/test_proxy_tensor.py
@@ -26,6 +26,7 @@ from torch.fx.experimental.proxy_tensor import make_fx, DecompositionInterpreter
from torch.utils._pytree import tree_map
from torch.fx.passes.runtime_assert import insert_deferred_runtime_asserts
from torch import nn
+import torch._functorch.config
import re
import functools
@@ -1544,6 +1545,22 @@ def forward(self, lengths_1, values_1):
make_fx(f, tracing_mode="symbolic")(torch.randn(4))
+ @torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True)
+ def test_invalidate_nonzero_propagate_real_tensors(self):
+ def f(a):
+ b = a.clone()
+ x = b.nonzero()
+ x1 = b.nonzero()
+ x2 = b.nonzero()
+ assert x1.shape[0] == x2.shape[0]
+ b.normal_()
+ y = b.nonzero()
+ # Because you're not actually going to generate exactly zero with
+ # normal_ lol
+ assert x1.shape[0] == y.shape[0]
+
+ make_fx(f, tracing_mode="symbolic")(torch.randn(4))
+
def test_sqrt_size(self):
def f(a):
return a / a.size(-1) ** 0.5
diff --git a/torch/_dynamo/testing.py b/torch/_dynamo/testing.py
index 8a0f95670b..b4c022e8d8 100644
--- a/torch/_dynamo/testing.py
+++ b/torch/_dynamo/testing.py
@@ -311,7 +311,9 @@ def _make_fn_with_patches(fn, *patches):
return _fn
-def make_test_cls_with_patches(cls, cls_prefix, fn_suffix, *patches, xfail_prop=None):
+def make_test_cls_with_patches(
+ cls, cls_prefix, fn_suffix, *patches, xfail_prop=None, decorator=lambda x: x
+):
DummyTestClass = type(f"{cls_prefix}{cls.__name__}", cls.__bases__, {})
DummyTestClass.__qualname__ = DummyTestClass.__name__
@@ -326,7 +328,7 @@ def make_test_cls_with_patches(cls, cls_prefix, fn_suffix, *patches, xfail_prop=
new_fn.__name__ = new_name
if xfail_prop is not None and hasattr(fn, xfail_prop):
new_fn = unittest.expectedFailure(new_fn)
- setattr(DummyTestClass, new_name, new_fn)
+ setattr(DummyTestClass, new_name, decorator(new_fn))
# NB: Doesn't handle slots correctly, but whatever
elif not hasattr(DummyTestClass, name):
setattr(DummyTestClass, name, getattr(cls, name))
diff --git a/torch/_dynamo/variables/tensor.py b/torch/_dynamo/variables/tensor.py
index 7f06483ab1..e928a9e0ea 100644
--- a/torch/_dynamo/variables/tensor.py
+++ b/torch/_dynamo/variables/tensor.py
@@ -6,6 +6,7 @@ import logging
import operator
import textwrap
import types
+import unittest
from typing import Dict, List
import sympy
@@ -645,11 +646,15 @@ class TensorVariable(VariableTracker):
def tolist(tensor, sub_proxy):
def wrap(i, sub_proxy):
- return SymNodeVariable.create(
- tx,
- sub_proxy.item(),
- sym_num=tx.output.shape_env.create_unbacked_symint(),
- )
+            # Sigh, we forgot to gate this, so this data-dependent behavior is on
+            # by default and is load-bearing in CI
+ with unittest.mock.patch.object(
+ tx.fake_mode, "allow_scalar_outputs", True
+ ):
+ return SymNodeVariable.create(
+ tx,
+ sub_proxy.item(),
+ )
if tensor.dtype not in [
torch.int8,
@@ -963,11 +968,11 @@ class SymNodeVariable(VariableTracker):
}
@classmethod
- def create(cls, tx, proxy, sym_num, **options):
- if "example_value" in proxy.node.meta:
- assert proxy.node.meta["example_value"] == sym_num
+ def create(cls, tx, proxy, sym_num=None, **options):
if sym_num is None:
sym_num = get_fake_value(proxy.node, tx)
+ if "example_value" in proxy.node.meta:
+ assert proxy.node.meta["example_value"] == sym_num
set_example_value(proxy.node, sym_num)
if isinstance(sym_num, (sympy.Integer, int, bool)):
diff --git a/torch/_functorch/config.py b/torch/_functorch/config.py
index aa7235034e..5749477c6e 100644
--- a/torch/_functorch/config.py
+++ b/torch/_functorch/config.py
@@ -106,6 +106,36 @@ fake_tensor_allow_unsafe_data_ptr_access = True
# tokens.
unlift_effect_tokens = False
+# This mode specifies that we should also keep track of the real
+# tensor along with the fake tensor, and do real compute. While
+# seemingly this eliminates the whole point of fake tensors, there are
+# two obvious use cases for it:
+#
+# 1. When users call item()/other data dependent operations,
+# if we propagate_real_tensors we are able to determine what
+# the true value is and keep going.
+#
+# 2. It can be useful for testing, when you want to see if the fake
+# and real tensors agree with each other. (Note that there are
+# currently known inaccuracies in how we clone real tensors, that
+# would have to be tightened up for this to be useful in this
+# case.)
+#
+# Note that fake tensors are typically understood to be cheap to store
+# indefinitely, so we tend to hold on to them longer than we would
+# hold onto the real tensors. So we also support you explicitly
+# deallocating the real tensor associated with a fake tensor, at which
+# point we will stop propagating real tensors.
+#
+# One more thing: when you provide a real tensor to fakeify, we will
+# clone it, so that we can safely perform mutations on it if necessary.
+# This will increase live memory usage. This could potentially be
+# optimized by using COW. We also currently do not faithfully
+# maintain autograd metadata on the real tensor; this is fine because
+# AOTAutograd will only use the fake tensor to determine leafness/etc
+# of tensors in question.
+fake_tensor_propagate_real_tensors = False
+
if TYPE_CHECKING:
from torch.utils._config_typing import * # noqa: F401, F403
diff --git a/torch/_subclasses/fake_impls.py b/torch/_subclasses/fake_impls.py
index dba028b4df..0aed09bd06 100644
--- a/torch/_subclasses/fake_impls.py
+++ b/torch/_subclasses/fake_impls.py
@@ -335,7 +335,10 @@ def repeat_interleave_tensor(fake_mode, func, repeats, output_size=None):
@register_op_impl(torch.ops.aten._local_scalar_dense.default)
def local_scalar_dense(fake_mode, func, arg):
- if fake_mode.shape_env is None or not fake_mode.shape_env.allow_scalar_outputs:
+ if fake_mode.shape_env is None or (
+ not fake_mode.shape_env.allow_scalar_outputs
+ and not fake_mode.allow_scalar_outputs
+ ):
# Without symints/symfloats, cannot handle this
raise DataDependentOutputException(func)
if is_float_dtype(arg.dtype):
diff --git a/torch/_subclasses/fake_tensor.py b/torch/_subclasses/fake_tensor.py
index 28e376de5c..2d8f287cd0 100644
--- a/torch/_subclasses/fake_tensor.py
+++ b/torch/_subclasses/fake_tensor.py
@@ -46,7 +46,7 @@ from torch.utils._python_dispatch import (
is_traceable_wrapper_subclass,
TorchDispatchMode,
)
-from torch.utils._pytree import PyTree, tree_map
+from torch.utils._pytree import PyTree, tree_map, tree_map_
from torch.utils._stats import count
from torch.utils._traceback import CapturedTraceback
@@ -294,6 +294,8 @@ class FakeTensorConverter:
assert not make_constant
def mk_fake_tensor(make_meta_t):
+ from torch._dynamo.utils import clone_input
+
# NB: don't use in_kernel_invocation_manager. to
# ensure FakeTensor can internally do constant computation
# as necessary. Invocation manager is "more correct" as
@@ -306,7 +308,21 @@ class FakeTensorConverter:
fake_mode,
make_meta_t(),
existing_device,
+ # TODO: callback might be used in recursive contexts, in
+ # which case using t is wrong! BUG!
constant=t if make_constant else None,
+ # TODO: This won't preserve aliasing relationships, so if
+ # there is mutation you won't see it reflect elsewhere.
+ # This is fine because propagate_real_tensors isn't
+ # intended to give you exact results and some inaccuracy
+ # is OK, although if its use case expands we would want to
+ # do something similar to meta converter, but poking in
+ # real tensors at the storage cloning phase
+ real_tensor=(
+ (t if make_constant else clone_input(t))
+ if fake_mode.propagate_real_tensors
+ else None
+ ),
)
out = self.meta_converter(
@@ -390,6 +406,7 @@ class FakeTensor(torch.Tensor):
fake_device: torch.device
fake_mode: "FakeTensorMode"
constant: Optional[torch.Tensor]
+ real_tensor: Optional[torch.Tensor]
# This memorizes the unbacked SymInt representing the number of nonzero
# elements in this tensor. This is helpful if you do something like
@@ -478,7 +495,7 @@ class FakeTensor(torch.Tensor):
)
@staticmethod
- def __new__(cls, fake_mode, elem, device, constant=None):
+ def __new__(cls, fake_mode, elem, device, constant=None, real_tensor=None):
self = torch.Tensor._make_subclass(
cls,
elem,
@@ -520,6 +537,8 @@ class FakeTensor(torch.Tensor):
self.fake_device = device # type: ignore[attr-defined]
self.fake_mode = fake_mode # type: ignore[attr-defined]
self.constant = constant # type: ignore[attr-defined]
+ assert not isinstance(real_tensor, FakeTensor)
+ self.real_tensor = real_tensor # type: ignore[attr-defined]
self._nonzero_memo = None # type: ignore[attr-defined]
self._nonzero_memo_vc = None # type: ignore[attr-defined]
self._unique_memo = None # type: ignore[attr-defined]
@@ -850,11 +869,22 @@ class FakeTensorMode(TorchDispatchMode):
import torch._dynamo.config
import torch._functorch.config
+ # This is temporarily patched to True in Dynamo to grandfather in some
+ # places where we unconditionally allow scalar outputs, TO BE REMOVED
+ self.allow_scalar_outputs = False
+
+ self.propagate_real_tensors = (
+ torch._functorch.config.fake_tensor_propagate_real_tensors
+ )
+
self._allow_unsafe_data_ptr_access = (
torch._functorch.config.fake_tensor_allow_unsafe_data_ptr_access
)
self.allow_meta = torch._functorch.config.fake_tensor_allow_meta
- self.cache_enabled = torch._dynamo.config.fake_tensor_cache_enabled
+ self.cache_enabled = (
+ torch._dynamo.config.fake_tensor_cache_enabled
+ and not self.propagate_real_tensors
+ )
self.cache_crosscheck_enabled = (
torch._dynamo.config.fake_tensor_cache_crosscheck_enabled
)
@@ -1428,11 +1458,84 @@ class FakeTensorMode(TorchDispatchMode):
args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
self.invalidate_written_to_constants(func, flat_arg_fake_tensors, args, kwargs)
+ def maybe_to_real_tensor(t):
+ if isinstance(t, FakeTensor):
+ return t.real_tensor
+ elif isinstance(t, SymTypes):
+ return t.node.pytype(
+ t.node.expr.xreplace(self.shape_env.var_to_val).xreplace(
+ self.shape_env.unbacked_var_to_val
+ )
+ )
+ else:
+ return t
+
+ from torch.fx.experimental.symbolic_shapes import (
+ compute_unbacked_bindings,
+ free_unbacked_symbols,
+ SymTypes,
+ )
+
+ nil = object()
+
+ real_out = nil
+ if (
+ self.propagate_real_tensors
+ and all(e.real_tensor is not None for e in flat_arg_fake_tensors)
+ # TODO: Handle SymFloat/SymBool
+ and not any(
+ (
+ isinstance(a, torch.SymInt)
+ and (syms := free_unbacked_symbols(a))
+ and any(s not in self.shape_env.unbacked_var_to_val for s in syms)
+ )
+ for a in flat_args
+ )
+ ):
+ real_flat_args = [maybe_to_real_tensor(a) for a in flat_args]
+ real_args, real_kwargs = pytree.tree_unflatten(real_flat_args, args_spec)
+ real_out = func(*real_args, **real_kwargs)
+ elif self.propagate_real_tensors:
+ # This can happen occasionally legitimately, specifically when you
+ # are inside the meta of a data dependent operation and you create
+ # a tensor on an unbacked SymInt; at this point in time we don't
+ # know what the unbacked SymInt is, but we will know later.
+ # However, if there's a bug in the condition above, this condition
+ # will also trigger.
+ log.debug(
+ "propagate_real_tensors skipped %s(%s, %s) %s",
+ func,
+ flat_arg_fake_tensors,
+ flat_args,
+ self.shape_env.unbacked_var_to_val,
+ )
+
+ def maybe_propagate_real_tensors(fake_out):
+ import sympy
+
+ def go(t, real_t):
+ if isinstance(t, FakeTensor):
+ # NB: unconditionally overwrite
+ t.real_tensor = real_t
+ elif isinstance(t, SymTypes) and free_unbacked_symbols(t):
+ if isinstance(t.node.expr, sympy.Symbol):
+ self.shape_env.set_unbacked_var_to_val(t.node.expr, real_t)
+
+ if real_out is not nil:
+ tree_map_(go, fake_out, real_out)
+
+ # If a data-dependent op is used in a decomposition, we
+ # may need to get the unbacked settings "early"
+ # TODO: Is this really needed?
+ compute_unbacked_bindings(self.shape_env, fake_out, peek=True)
+
+ return fake_out
+
# Try for fastpath
if has_symbolic_sizes:
fast_impl = get_fast_op_impls().get(func)
if fast_impl is not None:
- return fast_impl(self, *args, **kwargs)
+ return maybe_propagate_real_tensors(fast_impl(self, *args, **kwargs))
# If there's a Python meta, prefer that over the decomposition
from torch._decomp import meta_table as meta_table
@@ -1471,7 +1574,9 @@ class FakeTensorMode(TorchDispatchMode):
and not stride_incorrect_op(func)
):
with self:
- return func.prim_meta_impl(*args, **kwargs)
+ return maybe_propagate_real_tensors(
+ func.prim_meta_impl(*args, **kwargs)
+ )
# Users can register FakeTensor rules for custom operators
# Call them if they exist.
@@ -1482,7 +1587,7 @@ class FakeTensorMode(TorchDispatchMode):
ctx = torch._library.abstract_impl.AbstractImplCtx(self, func)
with torch._library.abstract_impl.set_ctx_getter(lambda: ctx), self:
result = maybe_abstract_impl(*args, **kwargs)
- return result
+ return maybe_propagate_real_tensors(result)
# special handling for funcs registered through `register_op_impl`,
# e.g., manipulating args on constructor calls to construct meta tensors
@@ -1491,7 +1596,7 @@ class FakeTensorMode(TorchDispatchMode):
if run_impl_check(func):
op_impl_out = op_impl(self, func, *args, **kwargs)
if op_impl_out != NotImplemented:
- return op_impl_out
+ return maybe_propagate_real_tensors(op_impl_out)
def maybe_run_unsafe_fallback(error=None):
# We infer the meta of a custom ops that return None to just
@@ -1509,7 +1614,7 @@ class FakeTensorMode(TorchDispatchMode):
# Optimization: If there is no Meta kernel, it takes a surprisingly long
# amount of time to catch the NotImplementedError, so we check it here.
if not has_meta(func):
- return maybe_run_unsafe_fallback()
+ return maybe_propagate_real_tensors(maybe_run_unsafe_fallback())
# run kernel registered to meta for func, which include
# python meta registrations, prims, decomps, and c++ meta fns (structured kernels)
@@ -1523,8 +1628,10 @@ class FakeTensorMode(TorchDispatchMode):
log.exception("failed while attempting to run meta for %s", func)
raise
- return self.wrap_meta_outputs_with_default_device_logic(
- r, func, flat_args, device=kwargs.get("device")
+ return maybe_propagate_real_tensors(
+ self.wrap_meta_outputs_with_default_device_logic(
+ r, func, flat_args, device=kwargs.get("device")
+ )
)
# WARNING: DO NOT add any additional namespaces/operators here if they refer to operators
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index fdaeb3a060..28975b08ef 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -496,7 +496,7 @@ class DivideByKey:
return o // self.divisor
-def compute_unbacked_bindings(shape_env, example_value, old_example_value=None):
+def compute_unbacked_bindings(shape_env, example_value, old_example_value=None, peek=False):
"""
After having run fake tensor propagation and producing example_value
result, traverse example_value looking for freshly bound unbacked
@@ -505,40 +505,51 @@ def compute_unbacked_bindings(shape_env, example_value, old_example_value=None):
example_value. (NB: this means if you have a multi-output
function, you must call this on the tuple of tensor output, you
cannot wait!)
+
+ The peek parameter lets you check out what the bindings are without
+ changing the affected list. This is primarily useful for ensuring
+ unbacked_var_to_val is promptly populated when propagate_real_tensors is on.
"""
+ if shape_env is None:
+ return
if shape_env._ignore_fresh_unbacked_symbols_tls():
return
fs = shape_env.pending_fresh_unbacked_symbols
pending = set(fs)
if pending:
- log.info("compute_unbacked_bindings %s", fs)
- fs.clear()
+ if not peek:
+ log.info("compute_unbacked_bindings %s", fs)
+ fs.clear()
def free_unbacked_symbols_with_path(
- a, path
+ a, path, real=None
) -> Dict[sympy.Symbol, pytree.KeyPath]:
r = {}
if isinstance(a, (tuple, list)):
for i in range(len(a)):
r.update(
free_unbacked_symbols_with_path(
- a[i], path + (pytree.SequenceKey(i),)
+ a[i], path + (pytree.SequenceKey(i),),
+ real=real[i] if real is not None else None
)
)
elif isinstance(a, torch.Tensor):
r.update(
free_unbacked_symbols_with_path(
- a.size(), path + (CallMethodKey("size"),)
+ a.size(), path + (CallMethodKey("size"),),
+ real=a.real_tensor.size() if a.real_tensor is not None else None
)
)
r.update(
free_unbacked_symbols_with_path(
- a.stride(), path + (CallMethodKey("stride"),)
+ a.stride(), path + (CallMethodKey("stride"),),
+ real=a.real_tensor.stride() if a.real_tensor is not None else None
)
)
r.update(
free_unbacked_symbols_with_path(
- a.storage_offset(), path + (CallMethodKey("storage_offset"),)
+ a.storage_offset(), path + (CallMethodKey("storage_offset"),),
+ real=a.real_tensor.storage_offset() if a.real_tensor is not None else None
)
)
@@ -550,6 +561,8 @@ def compute_unbacked_bindings(shape_env, example_value, old_example_value=None):
and s in pending
):
r[s] = path
+ if real is not None:
+ shape_env.set_unbacked_var_to_val(s, real)
pending.remove(s)
# When an unbacked SymInt is perfectly divisible by an integer
# constant, we replace it with the integer constant to improve
@@ -566,6 +579,8 @@ def compute_unbacked_bindings(shape_env, example_value, old_example_value=None):
):
# TODO: DivideByKey needs to test divisibility at runtime!
r[s] = path + (DivideByKey(int(lhs)),)
+ if real is not None:
+ shape_env.set_unbacked_var_to_val(s, real // int(lhs))
pending.remove(rhs)
# The annoyance here arises from the fact that SymBool is
# allocated by allocating a SymInt and then testing if it's equal
@@ -579,6 +594,8 @@ def compute_unbacked_bindings(shape_env, example_value, old_example_value=None):
and s.lhs in pending
):
r[s.lhs] = path + (ConvertIntKey(),)
+ if real is not None:
+ shape_env.set_unbacked_var_to_val(s, int(real))
pending.remove(s.lhs)
return r
@@ -2186,6 +2203,9 @@ class ShapeEnv:
# Maps symbolic ints to their original concrete values
# Currently populated from tensors
self.var_to_val: Dict[sympy.Symbol, sympy.Integer] = {}
+ # Like var_to_val, but only set when propagate_real_tensors is on.
+ # Used as last resort to avoid GuardOnDataDependent error
+ self.unbacked_var_to_val: Dict[sympy.Symbol, sympy.Integer] = {}
# Maps symbolic ints to their min/max range. These ranges
# are conservative: the int MUST fall in the range, but the
# range may contain ints which may not actually appear in
@@ -2452,6 +2472,12 @@ class ShapeEnv:
def _eliminate_unbacked(self, orig_s: sympy.Symbol, new_s: sympy.Expr):
self._set_replacement(orig_s, new_s, "eliminate_unbacked")
+ @record_shapeenv_event()
+ def set_unbacked_var_to_val(self, k: sympy.Symbol, v: int) -> None:
+ """Used only when propagate_real_tensors; registers a value for an
+        unbacked symbol, which can be used as a last resort to resolve hints."""
+ self.unbacked_var_to_val[k] = v
+
# Unlike set_replacement, this records a shapeenv event
@record_shapeenv_event()
def _rename_unbacked_to(self, orig_s: sympy.Symbol, new_s: sympy.Symbol):
@@ -2700,7 +2726,7 @@ class ShapeEnv:
Defines the current "state" of the guards we've accumulated in this ShapeEnv.
Determines when we need to invalidate our cache
"""
- return (len(self.replacements), len(self.divisible), self.num_deferred_runtime_asserts)
+ return (len(self.replacements), len(self.divisible), self.num_deferred_runtime_asserts, len(self.unbacked_var_to_val))
def _update_version_counter(self):
# The shape environment is queried orders of magnitude more often than
@@ -3680,6 +3706,12 @@ class ShapeEnv:
if expr in issued:
return
+ # When propagate_real_tensors is on, we may end up with guards on
+ # data dependent variables. These guards are unissuable, so just ignore them
+ if free_unbacked_symbols(expr):
+ log.warning("propagate_real_tensors: ignoring guard %s", expr)
+ return
+
issued.add(expr)
try:
@@ -4181,6 +4213,13 @@ class ShapeEnv:
return r
if allow_none:
return None
+
+ if self.unbacked_var_to_val:
+ unsound_expr = result_expr.xreplace(self.unbacked_var_to_val)
+ if not unsound_expr.free_symbols:
+ log.warning("propagate_real_tensors size_hint(%s) -> %s", expr, unsound_expr)
+ return unsound_expr
+
raise self._make_data_dependent_error(result_expr, expr)
return result_expr
@@ -4682,6 +4721,7 @@ class ShapeEnv:
assert static_expr == hint, f"{static_expr} != {hint}"
return static_expr
+ concrete_val = None
if not (expr.free_symbols <= self.var_to_val.keys()):
# TODO: dedupe this with _maybe_evaluate_static
# Attempt to eliminate the unbacked SymInt
@@ -4695,14 +4735,24 @@ class ShapeEnv:
size_oblivious=True
)
- raise self._make_data_dependent_error(
- expr.xreplace(self.var_to_val),
- expr,
- size_oblivious_result=size_oblivious_result
- )
- expr = new_expr
+ # Last ditch
+ if (
+ self.unbacked_var_to_val and
+ not (unsound_result := orig_expr.xreplace(self.unbacked_var_to_val)).free_symbols
+ ):
+ log.warning("propagate_real_tensors evaluate_expr(%s) -> %s", orig_expr, unsound_result)
+ concrete_val = unsound_result
+ else:
+ raise self._make_data_dependent_error(
+ expr.xreplace(self.var_to_val),
+ expr,
+ size_oblivious_result=size_oblivious_result
+ )
+ else:
+ expr = new_expr
- concrete_val = compute_concrete_val()
+ if concrete_val is None:
+ concrete_val = compute_concrete_val()
self._check_frozen(expr, concrete_val)
if (
|
2.41.0
|
eb7b8eb6085322c96ba0bbacb039ef6f3b2875b
|
Wed, 1 May 2024 23:19:07 -0700
|
[PATCH 0935/1000] [PT2D] Ensure the trace rules are correct with distributed (#125333)
|
Summary: 1. Avoid using `torch._dynamo.disable`. 2. Clear the LRU cache of the trace rules. This won't do anything if rules are not evaluated before PG initialization. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125333 Approved by: https://github.com/yanboliang
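The cache in question is ordinary functools.lru_cache memoization on the trace-rule lookups; a hedged stand-in for the real rule tables (the rule contents below are invented for illustration, only the cache_clear mechanics mirror the patch):
```
import functools
import sys

@functools.lru_cache(None)
def get_mod_inlinelist():
    # Stand-in for the memoized lookups in torch._dynamo.trace_rules: the
    # result depends on what has been imported by the time of the first call,
    # so a value computed during the import phase can miss the
    # distributed-related entries.
    rules = {"torch._dynamo"}
    if "torch.distributed" in sys.modules:
        rules.add("torch.distributed")
    return frozenset(rules)

def clear_lru_cache():
    # Mirrors the new helper: drop the memoized result so the next lookup,
    # made after init_process_group has run, re-evaluates the rules.
    get_mod_inlinelist.cache_clear()
```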
|
diff --git a/torch/_dynamo/trace_rules.py b/torch/_dynamo/trace_rules.py
index ab6f52c7fb..954f3997e2 100644
--- a/torch/_dynamo/trace_rules.py
+++ b/torch/_dynamo/trace_rules.py
@@ -3541,3 +3541,11 @@ def lookup_inner(
return SkipFunctionVariable
else:
return UserFunctionVariable
+
+
+def clear_lru_cache():
+ torch._dynamo.trace_rules.get_torch_obj_rule_map.cache_clear()
+ torch._dynamo.trace_rules.get_tensor_method.cache_clear()
+ torch._dynamo.trace_rules.get_legacy_mod_inlinelist.cache_clear()
+ torch._dynamo.trace_rules.get_mod_inlinelist.cache_clear()
+ torch._dynamo.trace_rules.dynamo_dir.cache_clear()
diff --git a/torch/distributed/_composable/replicate.py b/torch/distributed/_composable/replicate.py
index 3409f3a131..2f1c1dda00 100644
--- a/torch/distributed/_composable/replicate.py
+++ b/torch/distributed/_composable/replicate.py
@@ -61,7 +61,7 @@ class _ReplicateState(_State):
)
def lazy_init(self) -> None:
- @torch._dynamo.disable(recursive=True)
+ @torch._disable_dynamo(recursive=True)
def _lazy_init():
assert self._init_args is not None
self.init(*self._init_args, **self._init_kwargs)
diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py
index c006fbc08c..368d602f47 100644
--- a/torch/distributed/distributed_c10d.py
+++ b/torch/distributed/distributed_c10d.py
@@ -1276,6 +1276,16 @@ def init_process_group(
set_pytorch_distributed_envs_from_justknobs()
+ # Depending on the import order, some trace_rules functions may be evaluated
+ # during the import phase. In such a case, these functions may not correctly
+    # add the distributed-related rules due to a circular import dependency.
+ # We need to clear the lru_cache during the runtime to ensure the correctness
+ # of these trace_rules.
+ #
+    # Since this API must be called before any distributed code is compiled,
+ # clearing the cache here should be safe.
+ torch._dynamo.trace_rules.clear_lru_cache()
+
assert (store is None) or (
init_method is None
), "Cannot specify both init_method and store."
|
2.41.0
|
a991fac221e8c03ed8924498e58c0ea48416a5c
|
Thu, 2 May 2024 17:16:02 +0000
|
[PATCH 0936/1000] [ROCm][CI] upgrade CI to ROCm 6.1 (#124300)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/124300 Approved by: https://github.com/malfet
|
diff --git a/.ci/docker/build.sh b/.ci/docker/build.sh
index 1b8ed8df93..a45516f46a 100755
--- a/.ci/docker/build.sh
+++ b/.ci/docker/build.sh
@@ -204,7 +204,7 @@ case "$image" in
PROTOBUF=yes
DB=yes
VISION=yes
- ROCM_VERSION=5.7
+ ROCM_VERSION=6.0
NINJA_VERSION=1.9.0
CONDA_CMAKE=yes
TRITON=yes
@@ -215,7 +215,7 @@ case "$image" in
PROTOBUF=yes
DB=yes
VISION=yes
- ROCM_VERSION=6.0
+ ROCM_VERSION=6.1
NINJA_VERSION=1.9.0
CONDA_CMAKE=yes
TRITON=yes
diff --git a/.ci/docker/common/install_rocm.sh b/.ci/docker/common/install_rocm.sh
index 1c56918ac9..085304ac7c 100644
--- a/.ci/docker/common/install_rocm.sh
+++ b/.ci/docker/common/install_rocm.sh
@@ -61,6 +61,10 @@ install_ubuntu() {
rocprofiler-dev \
roctracer-dev
+ if [[ $(ver $ROCM_VERSION) -ge $(ver 6.1) ]]; then
+ DEBIAN_FRONTEND=noninteractive apt-get install -y --allow-unauthenticated rocm-llvm-dev
+ fi
+
# precompiled miopen kernels added in ROCm 3.5, renamed in ROCm 5.5
# search for all unversioned packages
# if search fails it will abort this script; use true to avoid case where search fails
diff --git a/.github/workflows/inductor.yml b/.github/workflows/inductor.yml
index 1934d7fd86..0ad799a80b 100644
--- a/.github/workflows/inductor.yml
+++ b/.github/workflows/inductor.yml
@@ -16,28 +16,28 @@ concurrency:
permissions: read-all
jobs:
- linux-focal-rocm6_0-py3_8-inductor-build:
- name: rocm6.0-py3.8-inductor
+ linux-focal-rocm6_1-py3_8-inductor-build:
+ name: rocm6.1-py3.8-inductor
uses: ./.github/workflows/_linux-build.yml
with:
- build-environment: linux-focal-rocm6.0-py3.8
+ build-environment: linux-focal-rocm6.1-py3.8
docker-image-name: pytorch-linux-focal-rocm-n-py3
test-matrix: |
{ include: [
{ config: "inductor", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.2" },
]}
- linux-focal-rocm6_0-py3_8-inductor-test:
+ linux-focal-rocm6_1-py3_8-inductor-test:
permissions:
id-token: write
contents: read
- name: rocm6.0-py3.8-inductor
+ name: rocm6.1-py3.8-inductor
uses: ./.github/workflows/_rocm-test.yml
- needs: linux-focal-rocm6_0-py3_8-inductor-build
+ needs: linux-focal-rocm6_1-py3_8-inductor-build
with:
- build-environment: linux-focal-rocm6.0-py3.8
- docker-image: ${{ needs.linux-focal-rocm6_0-py3_8-inductor-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-rocm6_0-py3_8-inductor-build.outputs.test-matrix }}
+ build-environment: linux-focal-rocm6.1-py3.8
+ docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-inductor-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-inductor-build.outputs.test-matrix }}
linux-focal-cuda12_1-py3_10-gcc9-inductor-build:
name: cuda12.1-py3.10-gcc9-sm86
diff --git a/.github/workflows/periodic.yml b/.github/workflows/periodic.yml
index ca6bcdb94a..716a72cc6d 100644
--- a/.github/workflows/periodic.yml
+++ b/.github/workflows/periodic.yml
@@ -217,11 +217,11 @@ jobs:
docker-image: ${{ needs.linux-vulkan-focal-py3_11-clang10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-vulkan-focal-py3_11-clang10-build.outputs.test-matrix }}
- linux-focal-rocm6_0-py3_8-build:
- name: linux-focal-rocm6.0-py3.8
+ linux-focal-rocm6_1-py3_8-build:
+ name: linux-focal-rocm6.1-py3.8
uses: ./.github/workflows/_linux-build.yml
with:
- build-environment: linux-focal-rocm6.0-py3.8
+ build-environment: linux-focal-rocm6.1-py3.8
docker-image-name: pytorch-linux-focal-rocm-n-py3
test-matrix: |
{ include: [
@@ -229,16 +229,16 @@ jobs:
{ config: "distributed", shard: 2, num_shards: 2, runner: "linux.rocm.gpu" },
]}
- linux-focal-rocm6_0-py3_8-test:
+ linux-focal-rocm6_1-py3_8-test:
permissions:
id-token: write
contents: read
- name: linux-focal-rocm6.0-py3.8
+ name: linux-focal-rocm6.1-py3.8
uses: ./.github/workflows/_rocm-test.yml
needs:
- - linux-focal-rocm6_0-py3_8-build
+ - linux-focal-rocm6_1-py3_8-build
- target-determination
with:
- build-environment: linux-focal-rocm6.0-py3.8
- docker-image: ${{ needs.linux-focal-rocm6_0-py3_8-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-rocm6_0-py3_8-build.outputs.test-matrix }}
+ build-environment: linux-focal-rocm6.1-py3.8
+ docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.test-matrix }}
diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
index 21d29d4ba4..0ca9e0d33c 100644
--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml
@@ -414,13 +414,13 @@ jobs:
{ config: "default", shard: 1, num_shards: 1 },
]}
- linux-focal-rocm6_0-py3_8-build:
+ linux-focal-rocm6_1-py3_8-build:
# don't run build twice on main
if: github.event_name == 'pull_request'
- name: linux-focal-rocm6.0-py3.8
+ name: linux-focal-rocm6.1-py3.8
uses: ./.github/workflows/_linux-build-label.yml
with:
- build-environment: linux-focal-rocm6.0-py3.8
+ build-environment: linux-focal-rocm6.1-py3.8
docker-image-name: pytorch-linux-focal-rocm-n-py3
sync-tag: rocm-build
test-matrix: |
diff --git a/.github/workflows/rocm.yml b/.github/workflows/rocm.yml
index 6d46dc5390..c32abe592b 100644
--- a/.github/workflows/rocm.yml
+++ b/.github/workflows/rocm.yml
@@ -25,11 +25,11 @@ jobs:
id-token: write
contents: read
- linux-focal-rocm6_0-py3_8-build:
- name: linux-focal-rocm6.0-py3.8
+ linux-focal-rocm6_1-py3_8-build:
+ name: linux-focal-rocm6.1-py3.8
uses: ./.github/workflows/_linux-build-label.yml
with:
- build-environment: linux-focal-rocm6.0-py3.8
+ build-environment: linux-focal-rocm6.1-py3.8
docker-image-name: pytorch-linux-focal-rocm-n-py3
sync-tag: rocm-build
test-matrix: |
@@ -42,16 +42,16 @@ jobs:
{ config: "default", shard: 6, num_shards: 6, runner: "linux.rocm.gpu.2" },
]}
- linux-focal-rocm6_0-py3_8-test:
+ linux-focal-rocm6_1-py3_8-test:
permissions:
id-token: write
contents: read
- name: linux-focal-rocm6.0-py3.8
+ name: linux-focal-rocm6.1-py3.8
uses: ./.github/workflows/_rocm-test.yml
needs:
- - linux-focal-rocm6_0-py3_8-build
+ - linux-focal-rocm6_1-py3_8-build
- target-determination
with:
- build-environment: linux-focal-rocm6.0-py3.8
- docker-image: ${{ needs.linux-focal-rocm6_0-py3_8-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-rocm6_0-py3_8-build.outputs.test-matrix }}
+ build-environment: linux-focal-rocm6.1-py3.8
+ docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.test-matrix }}
diff --git a/.github/workflows/slow.yml b/.github/workflows/slow.yml
index 85e9b516aa..ef006c6d11 100644
--- a/.github/workflows/slow.yml
+++ b/.github/workflows/slow.yml
@@ -111,30 +111,30 @@ jobs:
docker-image: ${{ needs.linux-focal-py3_8-clang10-build.outputs.docker-image }}
test-matrix: ${{ needs.linux-focal-py3_8-clang10-build.outputs.test-matrix }}
- linux-focal-rocm6_0-py3_8-build:
- name: linux-focal-rocm6.0-py3.8
+ linux-focal-rocm6_1-py3_8-build:
+ name: linux-focal-rocm6.1-py3.8
uses: ./.github/workflows/_linux-build.yml
with:
- build-environment: linux-focal-rocm6.0-py3.8
+ build-environment: linux-focal-rocm6.1-py3.8
docker-image-name: pytorch-linux-focal-rocm-n-py3
test-matrix: |
{ include: [
{ config: "slow", shard: 1, num_shards: 1, runner: "linux.rocm.gpu" },
]}
- linux-focal-rocm6_0-py3_8-test:
+ linux-focal-rocm6_1-py3_8-test:
permissions:
id-token: write
contents: read
- name: linux-focal-rocm6.0-py3.8
+ name: linux-focal-rocm6.1-py3.8
uses: ./.github/workflows/_rocm-test.yml
needs:
- - linux-focal-rocm6_0-py3_8-build
+ - linux-focal-rocm6_1-py3_8-build
- target-determination
with:
- build-environment: linux-focal-rocm6.0-py3.8
- docker-image: ${{ needs.linux-focal-rocm6_0-py3_8-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-rocm6_0-py3_8-build.outputs.test-matrix }}
+ build-environment: linux-focal-rocm6.1-py3.8
+ docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.test-matrix }}
linux-jammy-py3_10-clang15-asan-build:
name: linux-jammy-py3.10-clang15-asan
diff --git a/.github/workflows/trunk.yml b/.github/workflows/trunk.yml
index 7b8805d63f..50a89ea0fa 100644
--- a/.github/workflows/trunk.yml
+++ b/.github/workflows/trunk.yml
@@ -198,11 +198,11 @@ jobs:
{ config: "force_on_cpu", shard: 1, num_shards: 1, runner: "windows.4xlarge.nonephemeral" },
]}
- linux-focal-rocm6_0-py3_8-build:
- name: linux-focal-rocm6.0-py3.8
+ linux-focal-rocm6_1-py3_8-build:
+ name: linux-focal-rocm6.1-py3.8
uses: ./.github/workflows/_linux-build-label.yml
with:
- build-environment: linux-focal-rocm6.0-py3.8
+ build-environment: linux-focal-rocm6.1-py3.8
docker-image-name: pytorch-linux-focal-rocm-n-py3
sync-tag: rocm-build
test-matrix: |
@@ -210,17 +210,17 @@ jobs:
{ config: "default", shard: 1, num_shards: 1, runner: "linux.rocm.gpu" },
]}
- linux-focal-rocm6_0-py3_8-test:
+ linux-focal-rocm6_1-py3_8-test:
permissions:
id-token: write
contents: read
- name: linux-focal-rocm6.0-py3.8
+ name: linux-focal-rocm6.1-py3.8
uses: ./.github/workflows/_rocm-test.yml
needs:
- - linux-focal-rocm6_0-py3_8-build
+ - linux-focal-rocm6_1-py3_8-build
- target-determination
with:
- build-environment: linux-focal-rocm6.0-py3.8
- docker-image: ${{ needs.linux-focal-rocm6_0-py3_8-build.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-rocm6_0-py3_8-build.outputs.test-matrix }}
+ build-environment: linux-focal-rocm6.1-py3.8
+ docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.test-matrix }}
tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor"
diff --git a/torch/testing/_internal/opinfo/definitions/special.py b/torch/testing/_internal/opinfo/definitions/special.py
index 426eb7e547..5b137799db 100644
--- a/torch/testing/_internal/opinfo/definitions/special.py
+++ b/torch/testing/_internal/opinfo/definitions/special.py
@@ -15,7 +15,11 @@ from torch.testing._internal.common_device_type import (
toleranceOverride,
)
from torch.testing._internal.common_dtype import all_types_and, floating_types
-from torch.testing._internal.common_utils import TEST_SCIPY, torch_to_numpy_dtype_dict
+from torch.testing._internal.common_utils import (
+ TEST_SCIPY,
+ TEST_WITH_ROCM,
+ torch_to_numpy_dtype_dict,
+)
from torch.testing._internal.opinfo.core import (
BinaryUfuncInfo,
DecorateInfo,
@@ -463,6 +467,7 @@ op_db: List[OpInfo] = [
DecorateInfo(unittest.skip("Skipped!"), "TestNNCOpInfo"),
# Greatest absolute difference: inf
DecorateInfo(unittest.expectedFailure, "TestCommon", "test_compare_cpu"),
+ DecorateInfo(unittest.skip("Hangs on ROCm 6.1"), active_if=TEST_WITH_ROCM),
),
supports_one_python_scalar=True,
supports_autograd=False,
|
2.41.0
|
741fb36803febc7e5db28e722746a872b3a4884
|
Thu, 2 May 2024 08:53:52 -0700
|
[PATCH 0937/1000] [DCP] Introduce async staging extension points (#122965)
|
Stack from [ghstack](https://github.com/ezyang/ghstack) (oldest at bottom): * #124944 * #124939 * __->__ #122965 Differential Revision: [D55493240](https://our.internmc.facebook.com/intern/diff/D55493240/) *This PR is now ready for merge and is not an RFC* Major choices are: -- the introduction of the AsyncStager protocol -- removed `executor` from param. -- leave async as a separate method (for now) This proposal seeks to add extension points to dcp.async_save, allowing users to: - Specify a specific staging method when calling async_save - Allow a vehicle for also making the staging method async, to allow for cases where we may want to overlap with the training loop (e.g., overlap d2h with the forward/backward pass and only synchronize at the optim.step) - Potentially specify the execution method for doing async_save in parallel. For example some users may prefer a subprocess over a thread to avoid GIL issues. A totally reasonable alternative to this entire proposal is to expect users who want this level of customization to write their own custom async save methods. Here's an example which addresses the issues mentioned in PR comments. ``` def custom_async_save(...): # this step accomplishes staging and includes the usual 'planning' calls (issue 1) buffered_writer = CpuBufferedWriter() # this is stateful, contains a copy of state_dict dcp.save(state_dict, storage_writer=buffered_writer) final_storage_writer = FileSystemWriter() mp.spawn( # issue 2 is gone, do whatever you want here dcp.save, # or some custom sub-process method which calls dcp.save under the hood buffered_writer.state_dict, # lots of ways to do this, not really the most important part checkpoint_id=checkpoint_id, storage_writer=storage_writer, planner=planner, process_group=process_group, # this actually wouldn't work, but again not the pt. ) # leaving out the rest of the details for managing your extra special subprocess. ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/122965 Approved by: https://github.com/daulet-askarov
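To make the protocol concrete, a hedged sketch of a storage writer that also satisfies AsyncStager; the class name, the reuse of _offload_state_dict_to_cpu, and the commented-out usage are illustrative assumptions, not code from this PR:
```
from torch.distributed._state_dict_utils import _offload_state_dict_to_cpu
from torch.distributed.checkpoint import FileSystemWriter


class EagerCpuStagingWriter(FileSystemWriter):
    """Writer that satisfies the AsyncStager protocol via a blocking CPU copy."""

    _synchronize_after_execute = True

    @property
    def should_synchronize_after_execute(self) -> bool:
        return self._synchronize_after_execute

    def stage(self, state_dict):
        # Copy everything to CPU so the training loop can keep mutating the
        # originals while the background thread serializes the staged copy.
        return _offload_state_dict_to_cpu(state_dict, type_check=False)

    def synchronize_staging(self) -> None:
        # stage() above blocks until the copy finishes, so nothing to wait on.
        pass


# Usage sketch (hypothetical checkpoint path):
#   import torch.distributed.checkpoint as dcp
#   f = dcp.async_save(state_dict, storage_writer=EagerCpuStagingWriter("/tmp/ckpt"))
```
Because stage here is blocking, should_synchronize_after_execute stays True and async_save behaves just like the backwards-compatible CPU-offload path, only with the staging step owned by the writer.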
|
diff --git a/docs/source/distributed.checkpoint.rst b/docs/source/distributed.checkpoint.rst
index 86f69f9d22..e948b66e1e 100644
--- a/docs/source/distributed.checkpoint.rst
+++ b/docs/source/distributed.checkpoint.rst
@@ -29,6 +29,12 @@ The entrypoints to load and save a checkpoint are the following:
.. autofunction:: load
.. autofunction:: load_state_dict
+The following module is also useful for additional customization of the staging mechanisms used for asynchronous checkpointing (`torch.distributed.checkpoint.async_save`):
+
+.. automodule:: torch.distributed.checkpoint.staging
+
+.. autoclass:: torch.distributed.checkpoint.staging.AsyncStager
+ :members:
In addition to the above entrypoints, `Stateful` objects, as described below, provide additional customization during saving/loading
.. automodule:: torch.distributed.checkpoint.stateful
diff --git a/torch/distributed/checkpoint/staging.py b/torch/distributed/checkpoint/staging.py
new file mode 100644
index 0000000000..3dd294eb0a
--- /dev/null
+++ b/torch/distributed/checkpoint/staging.py
@@ -0,0 +1,63 @@
+from typing import runtime_checkable
+
+from typing_extensions import Protocol
+
+from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
+
+__all__ = ["AsyncStager"]
+
+
+@runtime_checkable
+class AsyncStager(Protocol):
+ """
+ This protocol is meant to provide customization and extensibility for dcp.async_save, allowing users
+ to customize how data is staged previous to executing the usual dcp.save path in parallel.
+ The expected order of operations (concretely defined in `torch.distributed.state_dict_saver.async_save`)
+ is the following:
+
+    1. AsyncStager.stage(state_dict):
+ This call gives the AsyncStager the opportunity to 'stage'
+ the state_dict. The expectation and purpose of staging in this context is to create a "training-safe"
+ representation of the state dict, meaning that any updates to module data after staging is complete
+ should not be reflected in the state dict returned from this method. For example, in the default
+ case a copy of the entire state dict is created on CPU RAM and returned here, allowing users
+ to continue training without risking changes to data which is being serialized.
+
+    2. dcp.save is called on the state_dict returned from stage in parallel. This call is responsible
+ for serializing the state_dict and writing it to storage.
+
+ 3. If AsyncStager.should_synchronize_after_execute is True, this method will be called immediately after
+ the serialization thread starts and before returning from dcp.async_save. If this is set to False,
+        the assumption is the user has defined a custom synchronization point for the purpose of further
+ optimizing save latency in the training loop (for example, by overlapping staging with the
+        forward/backward pass), and it is the responsibility of the user to call `AsyncStager.synchronize_staging`
+ at the appropriate time.
+
+ """
+
+ # default to True since the common case is to stage synchronously
+ _synchronize_after_execute: bool = True
+
+ @property
+ def should_synchronize_after_execute(self) -> bool:
+ """
+ Whether to synchronize after executing the stage.
+ """
+
+ return self._synchronize_after_execute
+
+ def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
+ """
+ Returns a "staged" copy of `state_dict`. The expectation of the staged copy is that it is
+        inoculated from any updates incurred after the stage call is complete.
+ """
+ raise NotImplementedError(
+ f"{self.__class__.__name__} must implement stage method"
+ )
+
+ def synchronize_staging(self) -> None:
+ """
+ In the case `stage` is async in some way, this method should be called to ensure staging
+ is complete and it is safe to begin modifying the original `state_dict`
+ """
+ pass
diff --git a/torch/distributed/checkpoint/state_dict_saver.py b/torch/distributed/checkpoint/state_dict_saver.py
index e848a25e04..7df74a5f21 100644
--- a/torch/distributed/checkpoint/state_dict_saver.py
+++ b/torch/distributed/checkpoint/state_dict_saver.py
@@ -7,16 +7,17 @@ import torch
import torch.distributed as dist
from torch.distributed._state_dict_utils import _offload_state_dict_to_cpu
from torch.distributed.checkpoint import FileSystemWriter
+
+from torch.distributed.checkpoint._storage_utils import _storage_setup
+from torch.distributed.checkpoint.default_planner import DefaultSavePlanner
from torch.distributed.checkpoint.logger import _dcp_method_logger
-from torch.distributed.checkpoint.planner import SavePlan
+from torch.distributed.checkpoint.metadata import Metadata, STATE_DICT_TYPE
+from torch.distributed.checkpoint.planner import SavePlan, SavePlanner
+from torch.distributed.checkpoint.staging import AsyncStager
from torch.distributed.checkpoint.stateful import Stateful
+from torch.distributed.checkpoint.storage import StorageWriter
from torch.distributed.distributed_c10d import _get_default_group
-from ._storage_utils import _storage_setup
-from .default_planner import DefaultSavePlanner
-from .metadata import Metadata, STATE_DICT_TYPE
-from .planner import SavePlanner
-from .storage import StorageWriter
from .utils import _api_bc_check, _DistWrapper, _profile
@@ -225,14 +226,16 @@ def async_save(
# buffer makes no sense
storage_writer.per_thread_copy_ahead = 0
- cpu_state_dict = _offload_state_dict_to_cpu(
- _stateful_to_state_dict(state_dict), type_check=False
- )
+ state_dict = _stateful_to_state_dict(state_dict)
+ if isinstance(storage_writer, AsyncStager):
+ staged_state_dict = storage_writer.stage(state_dict)
+ else: # provides bwc for storage_writers not implementing AsyncStager
+ staged_state_dict = _offload_state_dict_to_cpu(state_dict, type_check=False)
executor = ThreadPoolExecutor(max_workers=1)
f: Future = executor.submit(
save,
- cpu_state_dict,
+ staged_state_dict,
checkpoint_id=checkpoint_id,
storage_writer=storage_writer,
planner=planner,
@@ -240,6 +243,12 @@ def async_save(
)
f.add_done_callback(lambda f: executor.shutdown(wait=False))
+ if (
+ isinstance(storage_writer, AsyncStager)
+ and storage_writer.should_synchronize_after_execute
+ ):
+ storage_writer.synchronize_staging()
+
return f
diff --git a/torch/distributed/checkpoint/storage.py b/torch/distributed/checkpoint/storage.py
index 1a0e87aca7..8781b8d022 100644
--- a/torch/distributed/checkpoint/storage.py
+++ b/torch/distributed/checkpoint/storage.py
@@ -3,10 +3,15 @@ import os
from dataclasses import dataclass
from typing import Any, List, Union
-from torch.futures import Future
+from torch.distributed.checkpoint.metadata import Metadata, MetadataIndex
+from torch.distributed.checkpoint.planner import (
+ LoadPlan,
+ LoadPlanner,
+ SavePlan,
+ SavePlanner,
+)
-from .metadata import Metadata, MetadataIndex
-from .planner import LoadPlan, LoadPlanner, SavePlan, SavePlanner
+from torch.futures import Future
__all__ = ["WriteResult", "StorageWriter", "StorageReader"]
|
2.41.0
|
99f1460af159a1417bb3ef1f088347cb8d89434
|
Thu, 2 May 2024 08:53:54 -0700
|
[PATCH 0938/1000] [DCP] Provides default AsyncStager (#124939)
|
Differential Revision: [D56575987](https://our.internmc.facebook.com/intern/diff/D56575987/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124939 Approved by: https://github.com/fegin ghstack dependencies: #122965
|
diff --git a/docs/source/distributed.checkpoint.rst b/docs/source/distributed.checkpoint.rst
index e948b66e1e..573faa429b 100644
--- a/docs/source/distributed.checkpoint.rst
+++ b/docs/source/distributed.checkpoint.rst
@@ -36,6 +36,9 @@ The following module is also useful for additional customization of the staging
.. autoclass:: torch.distributed.checkpoint.staging.AsyncStager
:members:
+.. autoclass:: torch.distributed.checkpoint.staging.BlockingAsyncStager
+ :members:
+
In addition to the above entrypoints, `Stateful` objects, as described below, provide additional customization during saving/loading
.. automodule:: torch.distributed.checkpoint.stateful
diff --git a/test/distributed/checkpoint/e2e/test_e2e_save_and_load.py b/test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
index 8d4733b827..f24bb13166 100644
--- a/test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
+++ b/test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
@@ -211,10 +211,18 @@ class TestE2ESaveAndLoad(DTensorTestBase, VerifyStateDictMixin):
@with_comms
@skip_if_lt_x_gpu(4)
@with_temp_dir
- def test_e2e_async(self):
- self._run_e2e_test(compile=False, model_type=ModelType.FSDP, async_op=True)
+ @parametrize("cache_staged_state_dict", [False, True])
+ def test_e2e_async_cached(self, cache_staged_state_dict):
+ self._run_e2e_test(
+ compile=False,
+ model_type=ModelType.FSDP,
+ async_op=True,
+ cache_staged_state_dict=cache_staged_state_dict,
+ )
- def _run_e2e_test(self, compile, model_type, async_op=False):
+ def _run_e2e_test(
+ self, compile, model_type, async_op=False, cache_staged_state_dict=False
+ ):
model, optim = self._create_model(compile, ModelType.NONE)
_train(model, optim, train_steps=2)
@@ -230,7 +238,10 @@ class TestE2ESaveAndLoad(DTensorTestBase, VerifyStateDictMixin):
}
if async_op:
- f = saver.async_save(sd, checkpoint_id=self.temp_dir)
+ writer = DCP.FileSystemWriter(
+ self.temp_dir, cache_staged_state_dict=cache_staged_state_dict
+ )
+ f = saver.async_save(sd, storage_writer=writer)
t = time.monotonic()
while not f.done():
time.sleep(1)
diff --git a/torch/distributed/checkpoint/filesystem.py b/torch/distributed/checkpoint/filesystem.py
index 9b6345862c..3672e2401b 100644
--- a/torch/distributed/checkpoint/filesystem.py
+++ b/torch/distributed/checkpoint/filesystem.py
@@ -11,6 +11,7 @@ from contextlib import contextmanager
from dataclasses import dataclass
from pathlib import Path
from typing import (
+ Any,
Callable,
cast,
Dict,
@@ -28,6 +29,8 @@ import torch
from torch import Tensor
from torch._utils import _get_available_device_type, _get_device_module
from torch.distributed._shard._utils import narrow_tensor_by_index
+from torch.distributed.checkpoint.staging import BlockingAsyncStager
+
from torch.futures import Future
from .metadata import Metadata, MetadataIndex
@@ -393,7 +396,7 @@ class FileSystem(FileSystemBase):
return False
-class FileSystemWriter(StorageWriter):
+class _FileSystemWriter(StorageWriter):
"""
Basic implementation of StorageWriter using file IO.
@@ -414,6 +417,8 @@ class FileSystemWriter(StorageWriter):
sync_files: bool = True,
thread_count: int = 1,
per_thread_copy_ahead: int = 10_000_000,
+ *args: Any,
+ **kwargs: Any,
) -> None:
"""
Initialize the writer pointing to `path`.
@@ -631,3 +636,51 @@ class FileSystemReader(StorageReader):
@classmethod
def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
return FileSystem.validate_checkpoint_id(checkpoint_id)
+
+
+class FileSystemWriter(_FileSystemWriter, BlockingAsyncStager):
+ """
+ Basic implementation of StorageWriter using file IO.
+
+ This implementation makes the following assumptions and simplifications:
+
+ * The checkpoint path is an empty or non-existing directory.
+ * File creation is atomic
+
+ The checkpoint consists of one file per write request plus
+ a `.metadata` file with the serialized metadata.
+
+ """
+
+ def __init__(
+ self,
+ path: Union[str, os.PathLike],
+ single_file_per_rank: bool = True,
+ sync_files: bool = True,
+ thread_count: int = 1,
+ per_thread_copy_ahead: int = 10_000_000,
+ cache_staged_state_dict: bool = False,
+ ) -> None:
+ """
+ Initialize the writer pointing to `path`.
+
+ Args:
+ path: directory where the checkpoint will be written to.
+ single_file_per_rank: Produce one file per rank instead of one file per tensor/blob. Default to True.
+ sync_files : force files to be synced to permanent storage. Default to True.
+ thread_count: Number of IO threads to use to write. Default to 1.
+ per_thread_copy_ahead: How many bytes to copy from the GPU ahead of saving them. Default 10Mb.
+ cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency
+ at the cost of increased memory usage. Additionally, if this parameter is set to True, it's the expectation
+ that the stager is maintained and re-used for multiple dcp.async_save calls. Default to False.
+
+ N. B. If sync_files is disabled, there's no guarantee that the checkpoint will be consistent in the case of a failure.
+ """
+ super().__init__(
+ path=path,
+ single_file_per_rank=single_file_per_rank,
+ sync_files=sync_files,
+ thread_count=thread_count,
+ per_thread_copy_ahead=per_thread_copy_ahead,
+ cache_staged_state_dict=cache_staged_state_dict,
+ )
diff --git a/torch/distributed/checkpoint/staging.py b/torch/distributed/checkpoint/staging.py
index 3dd294eb0a..f4ce2673df 100644
--- a/torch/distributed/checkpoint/staging.py
+++ b/torch/distributed/checkpoint/staging.py
@@ -1,10 +1,16 @@
-from typing import runtime_checkable
+from typing import Optional, runtime_checkable
from typing_extensions import Protocol
+from torch.distributed._state_dict_utils import (
+ _copy_state_dict,
+ _create_cpu_state_dict,
+ _offload_state_dict_to_cpu,
+)
+
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
-__all__ = ["AsyncStager"]
+__all__ = ["AsyncStager", "BlockingAsyncStager"]
@runtime_checkable
@@ -61,3 +67,54 @@ class AsyncStager(Protocol):
is complete and it is safe to begin modifying the original `state_dict`
"""
pass
+
+
+class BlockingAsyncStager(AsyncStager):
+ """
+ An implementation of AsyncStager which stages the state_dict on CPU RAM and blocks until the copy is complete.
+ This implementation also provides an option to optimize stage latency using pinned memory.
+
+ N.B. synchronize_staging is a no-op in this case.
+
+
+ """
+
+ # set to False since this stager blocks during stage(), so nothing needs to synchronize after execute
+ _synchronize_after_execute: bool = False
+
+ def __init__(
+ self,
+ cache_staged_state_dict: bool = False,
+ type_check: bool = False,
+ ):
+ """
+ Initializes the BlockingAsyncStager.
+
+ Args:
+ cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency
+ at the cost of increased memory usage. Additionally, if this parameter is set to True, it's the expectation
+ that the stager is maintained and re-used for multiple dcp.async_save calls. Default to False.
+ type_check: Whether to perform a type check during cpu_offload. Defaults to False.
+
+ """
+ self.cache_staged_state_dict = cache_staged_state_dict
+ self.type_check = type_check
+ self.state_dict_cache: Optional[STATE_DICT_TYPE] = None
+
+ def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
+ """
+ Returns a copy of `state_dict` on the CPU.
+ """
+
+ if not self.cache_staged_state_dict:
+ return _offload_state_dict_to_cpu(state_dict, type_check=self.type_check)
+
+ if self.state_dict_cache is None:
+ self.state_dict_cache = _create_cpu_state_dict(state_dict, pin_memory=True)
+ return _copy_state_dict(state_dict, self.state_dict_cache)
+
+ def synchronize_staging(self) -> None:
+ """
+ No-op function, since staging is blocking.
+ """
+ pass
|
2.41.0
|
bbfb708315137ad709656f7df76dc7871346f59
|
Thu, 2 May 2024 20:30:49 +0000
|
[PATCH 0939/1000] ignore unsupported module from flop counter (#125346)
|
Summary: TorchScript modules do not support forward hooks, so they cannot participate in the flop_counter context manager's hierarchical output that is enabled by passing a module to FlopCounterMode on construction. Currently, any module that contains a script module causes an exception to be thrown, so this change adds a try/except that skips forward-hook registration for script modules. Test Plan: CI Signals Differential Revision: D56850661 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125346 Approved by: https://github.com/842974287
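As a reference (not part of the change), a small sketch of the hierarchical-output usage that this hardens; the toy model is hypothetical, and a TorchScript submodule inside it is the case that previously raised:

```python
import torch
from torch.utils.flop_counter import FlopCounterMode

model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.Linear(64, 8))
x = torch.randn(4, 64)

# Passing the module on construction enables per-module FLOP reporting,
# which is implemented with forward pre/post hooks under the hood.
with FlopCounterMode(model, display=True) as flop_counter:
    model(x)

print(flop_counter.get_total_flops())
```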
|
diff --git a/torch/utils/flop_counter.py b/torch/utils/flop_counter.py
index 42868dc359..09bb039ea6 100644
--- a/torch/utils/flop_counter.py
+++ b/torch/utils/flop_counter.py
@@ -388,11 +388,19 @@ class FlopCounterMode(TorchDispatchMode):
else:
name = ".".join([prefix, name])
- forward_pre_hook_handle = module.register_forward_pre_hook(self._enter_module(name))
- forward_hook_handle = module.register_forward_hook(self._exit_module(name))
- self._module_to_forward_hook_handles[module] = _ForwardHookHandles(
- forward_pre_hook_handle, forward_hook_handle
- )
+ forward_pre_hook_handle, forward_hook_handle = (None, None)
+
+ try:
+ forward_pre_hook_handle = module.register_forward_pre_hook(self._enter_module(name))
+ forward_hook_handle = module.register_forward_hook(self._exit_module(name))
+ except RuntimeError:
+ # ignore any module that doesn't support forward hook, e.g. script.
+ if forward_pre_hook_handle is not None:
+ forward_pre_hook_handle.remove()
+ else:
+ self._module_to_forward_hook_handles[module] = _ForwardHookHandles(
+ forward_pre_hook_handle, forward_hook_handle
+ )
def _deregister_forward_hooks(self):
for forward_hook_handles in self._module_to_forward_hook_handles.values():
|
2.41.0
|
f62494bf9c6915189ffd2ec1dbef0c5dd8b91ae
|
Thu, 2 May 2024 08:53:56 -0700
|
[PATCH 0940/1000] [DCP] Move async logic into filesystem for better encapsulation (#124944)
|
This logic is specific to FileSystemWriter, and the new AsyncStager class now gives it a better place to live. Differential Revision: [D56578436](https://our.internmc.facebook.com/intern/diff/D56578436/) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124944 Approved by: https://github.com/fegin ghstack dependencies: #122965, #124939
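To illustrate the encapsulation, a hypothetical writer subclass (not from the patch) can keep its own staging behavior on the writer itself rather than relying on special-casing inside async_save:

```python
from torch.distributed.checkpoint.filesystem import FileSystemWriter
from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE


class VerboseFileSystemWriter(FileSystemWriter):
    """Hypothetical writer that layers extra staging behavior on top."""

    def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
        # Writer-specific knobs (like per_thread_copy_ahead in the diff below)
        # belong here, not in async_save().
        print("staging state_dict on CPU before the async write")
        return super().stage(state_dict)
```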
|
diff --git a/torch/distributed/checkpoint/filesystem.py b/torch/distributed/checkpoint/filesystem.py
index 3672e2401b..07bc85560e 100644
--- a/torch/distributed/checkpoint/filesystem.py
+++ b/torch/distributed/checkpoint/filesystem.py
@@ -29,8 +29,8 @@ import torch
from torch import Tensor
from torch._utils import _get_available_device_type, _get_device_module
from torch.distributed._shard._utils import narrow_tensor_by_index
+from torch.distributed.checkpoint.metadata import STATE_DICT_TYPE
from torch.distributed.checkpoint.staging import BlockingAsyncStager
-
from torch.futures import Future
from .metadata import Metadata, MetadataIndex
@@ -684,3 +684,10 @@ class FileSystemWriter(_FileSystemWriter, BlockingAsyncStager):
per_thread_copy_ahead=per_thread_copy_ahead,
cache_staged_state_dict=cache_staged_state_dict,
)
+
+ def stage(self, state_dict: STATE_DICT_TYPE) -> STATE_DICT_TYPE:
+ """Override of AsyncStager.stage"""
+ # in the async case, the state dict is already on CPU, so maintaining this
+ # buffer makes no sense
+ self.per_thread_copy_ahead = 0
+ return super().stage(state_dict)
diff --git a/torch/distributed/checkpoint/state_dict_saver.py b/torch/distributed/checkpoint/state_dict_saver.py
index 7df74a5f21..72ce7f69a9 100644
--- a/torch/distributed/checkpoint/state_dict_saver.py
+++ b/torch/distributed/checkpoint/state_dict_saver.py
@@ -6,7 +6,6 @@ from typing import cast, Optional, Union
import torch
import torch.distributed as dist
from torch.distributed._state_dict_utils import _offload_state_dict_to_cpu
-from torch.distributed.checkpoint import FileSystemWriter
from torch.distributed.checkpoint._storage_utils import _storage_setup
from torch.distributed.checkpoint.default_planner import DefaultSavePlanner
@@ -221,10 +220,6 @@ def async_save(
storage_writer = cast(
StorageWriter, _storage_setup(storage_writer, checkpoint_id, reader=False)
)
- if isinstance(storage_writer, FileSystemWriter):
- # in the async case, the state dict is already on CPU, so maintaining this
- # buffer makes no sense
- storage_writer.per_thread_copy_ahead = 0
state_dict = _stateful_to_state_dict(state_dict)
if isinstance(storage_writer, AsyncStager):
|
2.41.0
|
ae574c7133de4ee36b1802f1ac113be6d644c1e
|
Thu, 2 May 2024 11:22:54 -0400
|
[PATCH 0941/1000] Don't make replacements for i variables (#125398)
|
This was introduced in https://github.com/pytorch/pytorch/pull/110262, but it looks like the original intent was to match unbacked SymInts. Now that unbacked SymInts are renamed to the u prefix, this code is no longer necessary. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/125398 Approved by: https://github.com/lezcano, https://github.com/Skylion007
|
diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index 7126d565cf..a044fd3210 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -1683,7 +1683,6 @@ class Kernel(CodeGen):
x: self.args.size(x)
for x in sorted_symbols
if x.name.startswith(("s", "u", "ps"))
- or (x.name.startswith("i") and not x.name.startswith("idx"))
}
return sympy_subs(index, replacements)
|
2.41.0
|
c76764a567c3e2425432671de04c4cfc705b745
|
Thu, 2 May 2024 20:43:17 +0000
|
[PATCH 0942/1000] Always pass down kernel_file and grid as string (#125384)
|
From my tests with an Ads production workload, I found that kernel_file is sometimes None and grid is sometimes a tuple. This crashes because ExecutionTraceObserver expects a string for both kernel_file and grid. This PR makes sure kernel_file and grid are always passed down as strings. The root cause of kernel_file being None still needs to be found. Unit test: buck test @mode/dev-nosan caffe2/test:profiler -- test_execution_trace_with_pt2 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125384 Approved by: https://github.com/davidberard98, https://github.com/sraikund16
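The normalization, shown standalone as a small sketch (the helper name is illustrative, not from the codebase):

```python
def _as_profiler_strings(grid, filename):
    # grid can be a tuple of ints or an object carrying a grid_fn_str attribute.
    grid_info = str(grid) if isinstance(grid, tuple) else getattr(grid, "grid_fn_str", "")
    # kernel_file must also be a string, never None.
    kernel_file = "" if filename is None else filename
    return grid_info, kernel_file


print(_as_profiler_strings((32, 1, 1), None))          # ('(32, 1, 1)', '')
print(_as_profiler_strings((32, 1, 1), "kernel.py"))   # ('(32, 1, 1)', 'kernel.py')
```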
|
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index 6d49c60b0d..b66bdbf393 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -794,14 +794,15 @@ class CachingAutotuner(KernelInterface):
# manager is a nullcontext.
if autograd_profiler._is_profiler_enabled:
# grid can be a tuple of ints or a string.
- grid_info = (
- grid if isinstance(grid, tuple) else getattr(grid, "grid_fn_str", None)
- )
+ if isinstance(grid, tuple):
+ grid_info = str(grid)
+ else:
+ grid_info = getattr(grid, "grid_fn_str", "")
with torch._C._profiler._RecordFunctionFast(
self.inductor_meta.get("kernel_name", "triton kernel"),
args,
{
- "kernel_file": self.filename,
+ "kernel_file": "" if self.filename is None else self.filename,
"kernel_backend": "triton",
"grid": grid_info,
"stream": stream,
|
2.41.0
|
199ce8d6c7149fb4ba9e1d92cb133abe4ac0667
|
Tue, 30 Apr 2024 17:04:50 -0700
|
[PATCH 0944/1000] [pipelining] Add microbatch split and merge utils (#125273)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125273 Approved by: https://github.com/H-Huang ghstack dependencies: #124776, #124875, #124958
|
diff --git a/docs/source/distributed.pipelining.rst b/docs/source/distributed.pipelining.rst
index ec7423f261..0083fe4226 100644
--- a/docs/source/distributed.pipelining.rst
+++ b/docs/source/distributed.pipelining.rst
@@ -176,3 +176,16 @@ Note that since we split our model into three stages, we must run this script wi
.. code-block:: bash
torchrun --nproc_per_node=3 example.py
+
+Microbatch Utilities
+====================
+
+.. automodule:: torch.distributed.pipelining.microbatch
+
+.. currentmodule:: torch.distributed.pipelining.microbatch
+
+.. autoclass:: TensorChunkSpec
+
+.. autofunction:: split_args_kwargs_into_chunks
+
+.. autofunction:: merge_chunks
diff --git a/test/distributed/pipelining/test_microbatch.py b/test/distributed/pipelining/test_microbatch.py
new file mode 100644
index 0000000000..c526c6ff7b
--- /dev/null
+++ b/test/distributed/pipelining/test_microbatch.py
@@ -0,0 +1,57 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+# Owner(s): ["oncall: distributed"]
+import torch
+from torch.distributed.pipelining.microbatch import (
+ merge_chunks,
+ split_args_kwargs_into_chunks,
+ TensorChunkSpec,
+)
+from torch.testing._internal.common_utils import run_tests, TestCase
+
+
+d_hid = 512
+
+
+class MicrobatchTests(TestCase):
+ def test_split_and_merge(self):
+ x0 = torch.randn(128, d_hid)
+ x1 = torch.randn(256, d_hid)
+ x2 = torch.randn(512, d_hid)
+
+ args = (x0, x1, x2)
+ kwargs = {"x0": x0, "x1": x1, "x2": x2}
+
+ # Default chunking: dim 0
+ arg_chunks, kwarg_chunks = split_args_kwargs_into_chunks(args, kwargs, 2)
+ assert len(arg_chunks) == 2
+ assert len(kwarg_chunks) == 2
+ assert arg_chunks[0][0].shape == torch.Size([64, d_hid])
+ assert arg_chunks[1][0].shape == torch.Size([64, d_hid])
+ assert arg_chunks[0][1].shape == torch.Size([128, d_hid])
+ assert arg_chunks[0][2].shape == torch.Size([256, d_hid])
+ assert kwarg_chunks[0]["x0"].shape == torch.Size([64, d_hid])
+ assert kwarg_chunks[0]["x1"].shape == torch.Size([128, d_hid])
+ assert kwarg_chunks[1]["x2"].shape == torch.Size([256, d_hid])
+
+ # Merge chunks back together
+ merged_args = merge_chunks(
+ arg_chunks,
+ (TensorChunkSpec(0), TensorChunkSpec(0), TensorChunkSpec(0)),
+ )
+ torch.testing.assert_close(merged_args, args)
+
+ merged_kwargs = merge_chunks(
+ kwarg_chunks,
+ {
+ "x0": TensorChunkSpec(0),
+ "x1": TensorChunkSpec(0),
+ "x2": TensorChunkSpec(0),
+ },
+ )
+ torch.testing.assert_close(merged_kwargs, kwargs)
+
+ print("Microbatch test passed")
+
+
+if __name__ == "__main__":
+ run_tests()
diff --git a/torch/distributed/pipelining/microbatch.py b/torch/distributed/pipelining/microbatch.py
new file mode 100644
index 0000000000..5477885e79
--- /dev/null
+++ b/torch/distributed/pipelining/microbatch.py
@@ -0,0 +1,420 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+import logging
+from typing import Any, Dict, List, Optional, Tuple
+
+import torch
+from torch.utils._pytree import tree_flatten, tree_unflatten
+
+
+logger = logging.getLogger(__name__)
+
+"""
+_debug_mask_minibatches specifies to send masked versions of the mini-batch
+through instead of micro-batch slices--this can be used for more stable
+numerical testing (see [A Note About Correctness Testing])
+"""
+_debug_mask_minibatches = False
+
+
+class _CustomReducer:
+ """
+ Custom reducer class that can be used to specify a custom operation that
+ reduces losses of multiple microbatches into one value.
+
+ Example:
+ >>> sum_reducer = _CustomReducer(
+ >>> torch.tensor(0.0),
+ >>> lambda a, b: a + b
+ >>> )
+ """
+
+ def __init__(self, init_value, reduce_fn):
+ self.init_value = init_value
+ self.reduce_fn = reduce_fn
+
+
+class _LossReducer(_CustomReducer):
+ pass
+
+
+sum_reducer = _LossReducer(torch.tensor(0.0), lambda a, b: a + b)
+
+# Default chunking dimension is 0. This is used for the case where the user did
+# not specify a chunking dimension.
+DEFAULT_CHUNK_DIM = 0
+
+
+# Class used to specify chunking of inputs
+class TensorChunkSpec:
+ def __init__(self, split_dim):
+ self.split_dim = split_dim
+
+ split_dim: int
+
+ def __repr__(self):
+ return (
+ f"{self.__class__.__module__}.{self.__class__.__name__}({self.split_dim})"
+ )
+
+ def __str__(self):
+ return f"TensorChunkSpec({self.split_dim})"
+
+
+# Class used to specify replication of inputs
+class _Replicate:
+ pass
+
+
+def _shard_dict_of_args(
+ args_dict,
+ args_chunk_spec,
+ num_chunks,
+):
+ """
+ Given a dictionary of args, and a dictionary of chunking specs, shard the
+ args according to the chunking specs.
+
+ Args:
+ args_dict: Dictionary of args
+ args_chunk_spec: Dictionary of chunking specs
+ num_chunks: Number of chunks to shard the args into
+
+ Returns:
+ args_split: List of sharded args
+ """
+ # Stage 1+2: flatten and shard/replicate
+
+ # args_sharded_replicated : [num args, num flat values, num chunks]
+ args_sharded_replicated = {}
+ arg_specs = []
+
+ real_num_chunks = num_chunks
+ first_tensor = True
+
+ assert len(args_dict) == len(
+ args_chunk_spec
+ ), f"args_dict.keys() = {list(args_dict.keys())} args_chunk_spec.keys() = {list(args_chunk_spec.keys())}"
+
+ for arg_key, arg in args_dict.items():
+ flat, spec = tree_flatten(arg)
+ arg_specs.append(spec)
+
+ chunk_spec = args_chunk_spec[arg_key]
+ assert chunk_spec is not None # Should have been set by caller
+ chunk_spec_flat, _ = tree_flatten(chunk_spec)
+ if len(flat) != len(chunk_spec_flat):
+ raise ValueError(
+ f"Argument value {arg} did not have the same number of "
+ f"values as chunk spec {chunk_spec}"
+ )
+
+ sharded_arg_flat = []
+
+ for v, chunk_v in zip(flat, chunk_spec_flat):
+ if chunk_v is _Replicate or not isinstance(v, torch.Tensor):
+ sharded_arg_flat.append([v] * real_num_chunks)
+ elif isinstance(chunk_v, TensorChunkSpec):
+ # TODO: check type of v. If it's a tensor, use chunk (or debug mask).
+ # If it's a collection type, split it as you would expect. Otherwise,
+ # Throw an error
+ assert isinstance(v, torch.Tensor), f"{v} is not a tensor"
+
+ v_split_dim_size = v.size(chunk_v.split_dim)
+ if v_split_dim_size < real_num_chunks:
+ if first_tensor:
+ # We can only adjust number of chunks when we hit this
+ # issue at the first tensor encountered
+ logger.warning(
+ f"Tensor size on chunking dimension is {v_split_dim_size}, " # noqa: G004
+ f"downsizing the number of chunks from {num_chunks} to {v_split_dim_size}."
+ )
+ real_num_chunks = v_split_dim_size
+ else:
+ raise RuntimeError(
+ f"Arg {arg_key} on chunking dimension has a size of {v_split_dim_size}, "
+ f"smaller than the number of chunks {num_chunks}. "
+ "PiPPy cannot reduce the number of chunks because "
+ "other arguments have bigger chunk-dimension sizes. "
+ "Please adjust your num_chunks setting."
+ )
+
+ chunk_tensors = torch.tensor_split(
+ v, real_num_chunks, chunk_v.split_dim
+ )
+
+ if _debug_mask_minibatches:
+ expanded_chunks = []
+
+ split_dim_idx = 0
+ for chunk_tensor in chunk_tensors:
+ new_val = torch.zeros_like(v)
+ upper_idx = split_dim_idx + chunk_tensor.size(chunk_v.split_dim)
+
+ slice_indices = [slice(None, None, None)] * new_val.ndim
+ slice_indices[chunk_v.split_dim] = slice(
+ split_dim_idx, upper_idx
+ )
+ new_val[slice_indices] = chunk_tensor
+
+ expanded_chunks.append(new_val)
+
+ split_dim_idx += chunk_tensor.size(chunk_v.split_dim)
+
+ sharded_arg_flat.append(expanded_chunks)
+ else:
+ sharded_arg_flat.append(chunk_tensors) # type: ignore[arg-type]
+
+ first_tensor = False
+ else:
+ raise TypeError(f"Unrecognized chunk spec: {chunk_v}")
+
+ args_sharded_replicated[arg_key] = sharded_arg_flat
+
+ # chunks_flat : [num chunks, num args, num flat values]
+ chunks_flat = []
+ for chunk_idx in range(real_num_chunks):
+ chunk_args = {}
+ for key, arg in args_sharded_replicated.items():
+ arg_single_chunk = []
+ for v_flat in arg:
+ arg_single_chunk.append(v_flat[chunk_idx])
+ chunk_args[key] = arg_single_chunk
+ chunks_flat.append(chunk_args)
+
+ # args_split : [num chunks, num args]
+ args_split = []
+
+ for chunk in chunks_flat:
+ per_chunk_args = {}
+ assert len(arg_specs) == len(chunk)
+ for (key, arg), arg_spec in zip(chunk.items(), arg_specs):
+ per_chunk_args[key] = tree_unflatten(arg, arg_spec)
+ args_split.append(per_chunk_args)
+
+ return args_split
+
+
+def split_args_kwargs_into_chunks(
+ args: Tuple[Any, ...],
+ kwargs: Optional[Dict[str, Any]],
+ chunks: int,
+ args_chunk_spec: Optional[Tuple[TensorChunkSpec, ...]] = None,
+ kwargs_chunk_spec: Optional[Dict[str, TensorChunkSpec]] = None,
+) -> Tuple[List[Tuple], List[Dict]]:
+ """
+ Given a sequence of args and kwargs, split them into a number of chunks
+ according to their respective chunking specs.
+
+ Args:
+ args: Tuple of args
+ kwargs: Dict of kwargs
+ chunks: Number of chunks to split the args and kwargs into
+ args_chunk_spec: chunking specs for args, in same shape as args
+ kwargs_chunk_spec: chunking specs for kwargs, in same shape as kwargs
+
+ Returns:
+ args_split: List of sharded args
+ kwargs_split: List of sharded kwargs
+ """
+ # Given `args` and `kwargs`, we want to yield a set of `chunks` args and kwargs such that
+ # the constituent Tensor values have been sharded/replicated according to the `args_chunk_spec`
+ # and `kwargs_chunk_spec` specifications. The steps are as follows:
+ #
+ # 1. Use pytree.tree_flatten to flatten each arg and its spec into a 1d array of values.
+ # To use a running example: suppose our inputs look like
+ #
+ # args = ([A, [B, C]], D) args_spec = ([None, [None, TensorChunkSpec]], None)
+ # (kwargs not shown but it's a similar process)
+ #
+ # Then for this step we would end up with
+ #
+ # args = ([A, B, C], D) args_spec = ([None, None, TensorChunkSpec], None)
+ #
+ # 2. Shard or replicate the arguments subject to the policy in the spec. Suppose chunks = 2
+ #
+ # args = ([[A, A], [B, B], [C_1, C_2]], [D, D])
+ #
+ # 3. Rotate the nesting order such that chunks are the outer dimension
+ #
+ # args_chunks = [
+ # ([A, B, C_1], D),
+ # ([A, B, C_2], D),
+ # ]
+ #
+ # 4. Unflatten each chunk according to the spec
+ #
+ # args_chunks = [
+ # ([A, [B, C_1]], D),
+ # ([A, [B, C_2]], D),
+ # ]
+
+ # TODO: _debug_mask_minibatches
+ # Handle the case where kwargs is None
+ if kwargs is None:
+ kwargs = {}
+
+ # If user did not provide args_chunk_spec or kwargs_chunk_spec, we extend
+ # their format and use default chunking along dim 0
+ if args_chunk_spec is None:
+ args_chunk_spec = (TensorChunkSpec(DEFAULT_CHUNK_DIM),) * len(args)
+
+ if kwargs_chunk_spec is None:
+ kwargs_chunk_spec = dict.fromkeys(kwargs, TensorChunkSpec(DEFAULT_CHUNK_DIM))
+
+ args_split_dict = _shard_dict_of_args(
+ dict(enumerate(args)),
+ dict(enumerate(args_chunk_spec)),
+ chunks,
+ )
+ real_num_chunks = len(args_split_dict)
+
+ kwargs_split = _shard_dict_of_args(
+ kwargs,
+ kwargs_chunk_spec,
+ real_num_chunks,
+ )
+
+ if len(kwargs_split) < real_num_chunks:
+ # In case kwargs are sharded into less chunks
+ # e.g. when `args` has no tensor, just values
+ real_num_chunks = len(kwargs_split)
+ # Re-shard args
+ args_split_dict = _shard_dict_of_args(
+ dict(enumerate(args)),
+ dict(enumerate(args_chunk_spec)),
+ real_num_chunks,
+ )
+
+ if len(args_split_dict) != len(kwargs_split):
+ raise RuntimeError(
+ "args and kwargs are split into different number of chunks: "
+ f"{len(args_split_dict)}, {len(kwargs_split)}"
+ )
+
+ args_split = []
+ for chunk_args in args_split_dict:
+ args_split.append(tuple(chunk_args[i] for i in range(len(chunk_args))))
+
+ return args_split, kwargs_split
+
+
+def merge_chunks(
+ chunks: List[Any],
+ chunk_spec,
+):
+ """
+ Given a list of chunks, merge them into a single value according to
+ the chunk spec.
+
+ Args:
+ chunks: list of chunks
+ chunk_spec: Chunking spec for the chunks
+
+ Returns:
+ value: Merged value
+ """
+ # This is essentially the inverse of `split_args_kwargs_into_chunks`, so the
+ # steps are similar to the steps in that function but in reverse. Given the
+ # input values:
+ #
+ # chunks = [
+ # ([A, [B, C_1]], D),
+ # ([A, [B, C_2]], D),
+ # ]
+ # args_spec = ([None, [None, TensorChunkSpec]], None)
+ #
+ # 1. Flatten the chunks according to the chunk_spec
+ #
+ # chunks_flat = [
+ # ([A, B, C_1], D),
+ # ([A, B, C_2], D),
+ # ]
+ #
+ # 2. Rotate the nesting order such that chunks are the inner dimension
+ #
+ # value_inner = ([A, B, [C_1, C_2]], D)
+ #
+ # 3. Concatenate sharded arguments
+ #
+ # value_combined = ([A, B, C], D)
+ #
+ # 4. Unflatten the combined args given the spec
+ #
+ # value = ([A, [B, C]], D)
+
+ # Preliminary: flatten the chunk spec
+ if chunk_spec is not None:
+ spec_flattened, flatten_spec = tree_flatten(chunk_spec)
+ else:
+ # If chunk_spec is not provided, we will merge chunks along the default dimension (0), for all output fields
+ # We obtain the output structure by flattening chunk 0 and generate the chunk_spec
+ chunk0_flat, flatten_spec = tree_flatten(chunks[0])
+ spec_flattened = [TensorChunkSpec(DEFAULT_CHUNK_DIM)] * len(chunk0_flat)
+
+ # Stage 1: flatten chunks
+ # chunks_flattened : [num chunks, num args]
+ chunks_flattened = []
+
+ for chunk in chunks:
+ chunk_flattened, _ = tree_flatten(chunk)
+ if len(chunk_flattened) != len(spec_flattened):
+ raise ValueError(f"Chunk {chunk} did not match chunk spec {chunk_spec}")
+
+ chunks_flattened.append(chunk_flattened)
+
+ # Stage 2 and 3: Rotate nesting order s.t. chunks are inner dimension and
+ # concatenate sharded operands
+ # args_flattened : [num args]
+ args_flattened = []
+ for arg_idx, arg in enumerate(spec_flattened):
+ if isinstance(arg, TensorChunkSpec):
+ partial_values = [
+ chunks_flattened[chunk_idx][arg_idx]
+ for chunk_idx in range(len(chunks_flattened))
+ ]
+
+ if _debug_mask_minibatches:
+ # Infer size of individual chunks by running `tensor_split` again
+ overall_shape = partial_values[0].shape
+ for val in partial_values[1:]:
+ assert val.shape == overall_shape
+ meta_chunks = torch.tensor_split(
+ torch.empty(*overall_shape, device="meta"),
+ sections=len(partial_values),
+ dim=arg.split_dim,
+ )
+
+ values_to_cat = []
+ chunk_start_idx = 0
+ assert len(partial_values) == len(meta_chunks)
+ for partial_value, meta_chunk in zip(partial_values, meta_chunks):
+ chunk_end_idx = chunk_start_idx + meta_chunk.size(arg.split_dim)
+
+ slice_indices = [slice(None, None, None)] * partial_value.ndim
+ slice_indices[arg.split_dim] = slice(chunk_start_idx, chunk_end_idx)
+ sliced = partial_value[slice_indices]
+ values_to_cat.append(sliced)
+
+ chunk_start_idx = chunk_end_idx
+
+ else:
+ values_to_cat = partial_values
+
+ args_flattened.append(torch.cat(values_to_cat, dim=arg.split_dim))
+ elif isinstance(arg, _CustomReducer):
+ reduced_val = arg.init_value
+
+ for chunk_idx in range(len(chunks_flattened)):
+ reduced_val = arg.reduce_fn(
+ reduced_val, chunks_flattened[chunk_idx][arg_idx]
+ )
+
+ args_flattened.append(reduced_val)
+ else:
+ value = chunks_flattened[0][arg_idx]
+ for chunk_idx in range(1, len(chunks_flattened)):
+ assert chunks_flattened[chunk_idx][arg_idx] == value
+ args_flattened.append(value)
+
+ # Stage 4: Unflatten combined args
+ return tree_unflatten(args_flattened, flatten_spec)
|
2.41.0
|
dad82fc906f7e738bca1f918a97ccbc534c43ab
|
Thu, 2 May 2024 21:28:23 +0000
|
[PATCH 0945/1000] Add private helper for determining which version of FA2 closest matches kernel version (#123653)
|
Fixes #ISSUE_NUMBER Pull Request resolved: https://github.com/pytorch/pytorch/pull/123653 Approved by: https://github.com/mikaylagawarecki
|
diff --git a/torch/nn/attention/__init__.py b/torch/nn/attention/__init__.py
index fca8055ad2..fc4835f046 100644
--- a/torch/nn/attention/__init__.py
+++ b/torch/nn/attention/__init__.py
@@ -115,3 +115,8 @@ def sdpa_kernel(backends: Union[List[SDPBackend], SDPBackend]):
enable_flash_sdp(previous_flash)
enable_mem_efficient_sdp(previous_mem_efficient)
enable_math_sdp(previous_math)
+
+
+def _get_flash_version() -> str:
+ """This returns the closest matching tag for the flash attention backend"""
+ return "2.5.6"
|
2.41.0
|
1b03992d02911ac59f8320ba08eed8bf002ad84
|
Thu, 2 May 2024 21:29:28 +0000
|
[PATCH 0946/1000] Merge the pyi files into py files of optimizer (#125153)
|
Merge the interfaces in pyi files into py files in `torch/optim`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125153 Approved by: https://github.com/janeyx99
|
diff --git a/torch/optim/adadelta.py b/torch/optim/adadelta.py
index 4061a6b68f..d0cf2b32dd 100644
--- a/torch/optim/adadelta.py
+++ b/torch/optim/adadelta.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import Any, Dict, List, Optional
import torch
from torch import Tensor
@@ -13,6 +13,7 @@ from .optimizer import (
_use_grad_for_differentiable,
_view_as_real,
Optimizer,
+ ParamsT,
)
__all__ = ["Adadelta", "adadelta"]
@@ -21,7 +22,7 @@ __all__ = ["Adadelta", "adadelta"]
class Adadelta(Optimizer):
def __init__(
self,
- params,
+ params: ParamsT,
lr=1.0,
rho=0.9,
eps=1e-6,
@@ -73,9 +74,16 @@ class Adadelta(Optimizer):
)
def _init_group(
- self, group, params_with_grad, grads, square_avgs, acc_deltas, state_steps
+ self,
+ group: Dict[str, Any],
+ params_with_grad: List[Tensor],
+ grads: List[Tensor],
+ square_avgs: List[Tensor],
+ acc_deltas: List[Tensor],
+ state_steps: List[Tensor],
):
has_complex = False
+ p: Tensor
for p in group["params"]:
if p.grad is None:
continue
@@ -124,11 +132,11 @@ class Adadelta(Optimizer):
loss = closure()
for group in self.param_groups:
- params_with_grad = []
- grads = []
- square_avgs = []
- acc_deltas = []
- state_steps = []
+ params_with_grad: List[Tensor] = []
+ grads: List[Tensor] = []
+ square_avgs: List[Tensor] = []
+ acc_deltas: List[Tensor] = []
+ state_steps: List[Tensor] = []
(
lr,
rho,
@@ -348,9 +356,9 @@ def _multi_tensor_adadelta(
state_steps: List[Tensor],
*,
lr: float,
- weight_decay: float,
rho: float,
eps: float,
+ weight_decay: float,
maximize: bool,
differentiable: bool,
capturable: bool,
@@ -394,14 +402,14 @@ def _multi_tensor_adadelta(
torch._foreach_add_(device_state_steps, 1)
if maximize:
- device_grads = torch._foreach_neg(device_grads)
+ device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment]
if weight_decay != 0:
# Re-use the intermediate memory (device_grads) already allocated for maximize
if maximize:
torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
else:
- device_grads = torch._foreach_add(
+ device_grads = torch._foreach_add( # type: ignore[assignment]
device_grads, device_params, alpha=weight_decay
)
diff --git a/torch/optim/adadelta.pyi b/torch/optim/adadelta.pyi
deleted file mode 100644
index 0f475331c1..0000000000
--- a/torch/optim/adadelta.pyi
+++ /dev/null
@@ -1,11 +0,0 @@
-from .optimizer import Optimizer, ParamsT
-
-class Adadelta(Optimizer):
- def __init__(
- self,
- params: ParamsT,
- lr: float = ...,
- rho: float = ...,
- eps: float = ...,
- weight_decay: float = ...,
- ) -> None: ...
diff --git a/torch/optim/adagrad.py b/torch/optim/adagrad.py
index e1d0422b4d..4eb9235b3d 100644
--- a/torch/optim/adagrad.py
+++ b/torch/optim/adagrad.py
@@ -13,6 +13,7 @@ from .optimizer import (
_use_grad_for_differentiable,
_view_as_real,
Optimizer,
+ ParamsT,
)
__all__ = ["Adagrad", "adagrad"]
@@ -21,7 +22,7 @@ __all__ = ["Adagrad", "adagrad"]
class Adagrad(Optimizer):
def __init__(
self,
- params,
+ params: ParamsT,
lr=1e-2,
lr_decay=0,
weight_decay=0,
@@ -120,10 +121,10 @@ class Adagrad(Optimizer):
loss = closure()
for group in self.param_groups:
- params_with_grad = []
- grads = []
- state_sums = []
- state_steps = []
+ params_with_grad: List[Tensor] = []
+ grads: List[Tensor] = []
+ state_sums: List[Tensor] = []
+ state_steps: List[Tensor] = []
has_sparse_grad, has_complex = self._init_group(
group, params_with_grad, grads, state_sums, state_steps
@@ -202,7 +203,7 @@ def adagrad(
state_steps: List[Tensor],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting these as kwargs for now as functional API is compiled by torch/distributed/optim
- has_sparse_grad: bool = None,
+ has_sparse_grad: bool = False,
foreach: Optional[bool] = None,
differentiable: bool = False,
has_complex: bool = False,
@@ -372,7 +373,7 @@ def _multi_tensor_adagrad(
_view_as_real(device_params, device_grads, device_state_sums)
if maximize:
- device_grads = torch._foreach_neg(device_grads)
+ device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment]
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
@@ -390,7 +391,7 @@ def _multi_tensor_adagrad(
if maximize:
torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
else:
- device_grads = torch._foreach_add(
+ device_grads = torch._foreach_add( # type: ignore[assignment]
device_grads, device_params, alpha=weight_decay
)
@@ -408,6 +409,6 @@ def _multi_tensor_adagrad(
torch._foreach_mul_(device_grads, minus_clr)
numerator = device_grads
else:
- numerator = torch._foreach_mul(device_grads, minus_clr)
+ numerator = torch._foreach_mul(device_grads, minus_clr) # type: ignore[assignment]
torch._foreach_addcdiv_(device_params, numerator, std)
diff --git a/torch/optim/adagrad.pyi b/torch/optim/adagrad.pyi
deleted file mode 100644
index 4557ece141..0000000000
--- a/torch/optim/adagrad.pyi
+++ /dev/null
@@ -1,12 +0,0 @@
-from .optimizer import Optimizer, ParamsT
-
-class Adagrad(Optimizer):
- def __init__(
- self,
- params: ParamsT,
- lr: float = ...,
- lr_decay: float = ...,
- weight_decay: float = ...,
- initial_accumulator_value: float = ...,
- eps: float = ...,
- ) -> None: ...
diff --git a/torch/optim/optimizer.py b/torch/optim/optimizer.py
index 0fa8a8d8b7..7a86514351 100644
--- a/torch/optim/optimizer.py
+++ b/torch/optim/optimizer.py
@@ -31,13 +31,13 @@ from torch.utils._foreach_utils import (
_get_fused_kernels_supported_devices,
_group_tensors_by_device_and_dtype,
Indices,
- TensorListList,
)
from torch.utils.hooks import RemovableHandle
Args: TypeAlias = Tuple[Any, ...]
Kwargs: TypeAlias = Dict[str, Any]
StateDict: TypeAlias = Dict[str, Any]
+TensorListList: TypeAlias = List[List[torch.Tensor]]
GlobalOptimizerPreHook: TypeAlias = Callable[
["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]]
@@ -463,7 +463,7 @@ class Optimizer:
if is_compiling():
return {(None, None): (tensorlistlist, list(range(len(tensorlistlist[0]))))}
else:
- return _group_tensors_by_device_and_dtype(tensorlistlist, with_indices)
+ return _group_tensors_by_device_and_dtype(tensorlistlist, with_indices) # type: ignore[return-value, arg-type]
def _patch_step_function(self) -> None:
self._zero_grad_profile_name = (
|
2.41.0
|
0e2f62edd79ebdc21c3b8c16bf480db1af168a0
|
Thu, 2 May 2024 21:36:18 +0000
|
[PATCH 0947/1000] Revert "Include support for the scatter gather cuda kernels to allow for comp… (#124809)"
|
This reverts commit 9e24c263f998819f849bb8293323213101e9aefc. Reverted https://github.com/pytorch/pytorch/pull/124809 on behalf of https://github.com/kit1980 due to breaking internal builds ([comment](https://github.com/pytorch/pytorch/pull/124809#issuecomment-2091751002))
|
diff --git a/aten/src/ATen/NumericUtils.h b/aten/src/ATen/NumericUtils.h
index 421fe0efab..788da64b4e 100644
--- a/aten/src/ATen/NumericUtils.h
+++ b/aten/src/ATen/NumericUtils.h
@@ -38,11 +38,7 @@ inline C10_HOST_DEVICE bool _isnan(T val) {
template <typename T, std::enable_if_t<c10::is_complex<T>::value, int> = 0>
inline C10_HOST_DEVICE bool _isnan(T val) {
-#if defined(__CUDACC__) || defined(__HIPCC__)
- return ::isnan(val.real()) || ::isnan(val.imag());
-#else
return std::isnan(val.real()) || std::isnan(val.imag());
-#endif
}
template <typename T, std::enable_if_t<std::is_same_v<T, at::Half>, int> = 0>
diff --git a/aten/src/ATen/cuda/Atomic.cuh b/aten/src/ATen/cuda/Atomic.cuh
index 2fa55902f9..56ee8f87e2 100644
--- a/aten/src/ATen/cuda/Atomic.cuh
+++ b/aten/src/ATen/cuda/Atomic.cuh
@@ -35,26 +35,6 @@ struct AtomicFPOp<at::Half> {
}
};
-template <>
-struct AtomicFPOp<c10::complex<float>> {
- template <typename func_t>
- inline __device__ c10::complex<float> operator() (c10::complex<float> *address, c10::complex<float> val, const func_t& func) {
- unsigned long long int* addr_as_ull = (unsigned long long int*)address;
- unsigned long long int old = *addr_as_ull;
- unsigned long long int assumed, new_val;
-
- c10::complex<float> csum;
- do {
- assumed = old;
- csum = func(csum, val);
- new_val = *reinterpret_cast<unsigned long long*>(&csum);
- old = atomicCAS(addr_as_ull, assumed, new_val);
- } while (assumed != old);
-
- return *reinterpret_cast<c10::complex<float>*>(&addr_as_ull);
- }
-};
-
template <>
struct AtomicFPOp<at::BFloat16> {
template <typename func_t>
@@ -368,14 +348,6 @@ GPU_ATOMIC_INTEGER(Mul, a * b, int16_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int32_t)
GPU_ATOMIC_INTEGER(Mul, a * b, int64_t)
-inline __device__ c10::complex<float> gpuAtomicMul(c10::complex<float> *address, c10::complex<float> val){
- return AtomicFPOp<c10::complex<float>>()(address, val,
- [](c10::complex<float> bsum, c10::complex<float> val) {
- bsum*=(val);
- return bsum;
- });
-}
-
inline __device__ at::Half gpuAtomicMul(at::Half * address, at::Half val) {
return AtomicFPOp<at::Half>()(address, val,
[](at::Half bsum, at::Half val) {
@@ -397,7 +369,7 @@ inline __device__ double gpuAtomicMul(double * address, double val) {
});
}
-// Don't use a templated function for this since the addition function defaults to the CUDA built-in.
+// Dont use a templated function for this since the addition function defaults to the CUDA built-in.
inline __device__ float gpuAtomicMul (float * address, float val) {
unsigned int* address_as_ull = (unsigned int*)address;
unsigned int old = *address_as_ull;
@@ -430,29 +402,6 @@ __host__ __device__ T safe_max(T a, T b) {
return max;
}
-__inline__ __device__ c10::complex<float> complex_max(c10::complex<float> a, c10::complex<float> b) {
- if(at::_isnan(b)) {
- return b;
- } else {
- // Compute the magnitude of the complex numbers and compare each to see which one is greater.
- float a_magnitude = __fsqrt_rn(
- (
- __fmul_rn(a.real(), a.real()) +
- __fmul_rn(a.imag(),a.imag())
- )
- );
- float b_magnitude = __fsqrt_rn(
- (
- __fmul_rn(b.real(), b.real()) +
- __fmul_rn(b.imag(),b.imag())
- )
- );
- return std::max<float>(a_magnitude, b_magnitude);
- }
-
-}
-
-
ATOMIC_INTEGER_IMPL(Max)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), uint8_t)
GPU_ATOMIC_INTEGER(Max, safe_max(a, b), int8_t)
@@ -467,13 +416,6 @@ inline __device__ at::Half gpuAtomicMax(at::Half * address, at::Half val) {
});
}
-inline __device__ c10::complex<float> gpuAtomicMax(c10::complex<float> * address, c10::complex<float> val) {
- return AtomicFPOp<c10::complex<float>>()(address, val,
- [](c10::complex<float> bsum, c10::complex<float> val) {
- return complex_max(bsum, val);
- });
-}
-
inline __device__ at::BFloat16 gpuAtomicMax(at::BFloat16 * address, at::BFloat16 val) {
return AtomicFPOp<at::BFloat16>()(address, val,
[](at::BFloat16 bsum, at::BFloat16 val) {
@@ -520,27 +462,6 @@ __host__ __device__ T safe_min(T a, T b) {
return min;
}
-__inline__ __device__ c10::complex<float> complex_min(c10::complex<float> a, c10::complex<float> b) {
- if(at::_isnan(b)) {
- return b;
- } else {
- // Compute the magnitude of the complex numbers and compare each to see which one is smaller.
- float a_magnitude = __fsqrt_rn(
- (
- __fmul_rn(a.real(), a.real()) +
- __fmul_rn(a.imag(),a.imag())
- )
- );
- float b_magnitude = __fsqrt_rn(
- (
- __fmul_rn(b.real(), b.real()) +
- __fmul_rn(b.imag(),b.imag())
- )
- );
- return std::min<float>(a_magnitude, b_magnitude);
- }
-}
-
ATOMIC_INTEGER_IMPL(Min)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), uint8_t)
GPU_ATOMIC_INTEGER(Min, safe_min(a, b), int8_t)
@@ -555,13 +476,6 @@ inline __device__ at::Half gpuAtomicMin(at::Half * address, at::Half val) {
});
}
-inline __device__ c10::complex<float> gpuAtomicMin(c10::complex<float> * address, c10::complex<float> val) {
- return AtomicFPOp<c10::complex<float>>()(address, val,
- [](c10::complex<float> bsum, c10::complex<float> val) {
- return complex_min(bsum, val);
- });
-}
-
inline __device__ at::BFloat16 gpuAtomicMin(at::BFloat16 * address, at::BFloat16 val) {
return AtomicFPOp<at::BFloat16>()(address, val,
[](at::BFloat16 bsum, at::BFloat16 val) {
diff --git a/aten/src/ATen/native/cuda/ScatterGatherKernel.cu b/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
index 78f5d98dfe..9ef83599cd 100644
--- a/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
+++ b/aten/src/ATen/native/cuda/ScatterGatherKernel.cu
@@ -4,6 +4,7 @@
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
+
#include <ATen/native/ScatterGatherChecks.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <ATen/native/TensorIterator.h>
@@ -200,6 +201,7 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
+
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
@@ -257,6 +259,7 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
+
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
iter.dtype(),
@@ -315,9 +318,9 @@ struct cuda_scatter_gather_base_kernel {
auto index_size = is_scatter_like ? self_dim_size : src_dim_size;
auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride;
- AT_DISPATCH_ALL_TYPES_AND3(
+
+ AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
- at::ScalarType::ComplexFloat,
iter.dtype(),
"cuda_scatter_gather_base_kernel_func", [&] {
using dtype = typename std::conditional<cast_to_opaque,
@@ -447,9 +450,8 @@ struct cuda_scatter_fill_base_kernel {
auto index_size = ensure_nonempty_size(self, dim);
auto index_stride = ensure_nonempty_stride(self, dim);
- AT_DISPATCH_ALL_TYPES_AND3(
+ AT_DISPATCH_ALL_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
- at::ScalarType::ComplexFloat,
iter.dtype(),
"cuda_scatter_fill_base_kernel_reduce_multiply", [&] {
using dtype = typename std::conditional<cast_to_opaque,
diff --git a/test/test_scatter_gather_ops.py b/test/test_scatter_gather_ops.py
index 9074d3e2a4..3351b9d257 100644
--- a/test/test_scatter_gather_ops.py
+++ b/test/test_scatter_gather_ops.py
@@ -221,8 +221,7 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
- include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_prod(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
@@ -230,8 +229,7 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_bool=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
- include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_mean(self, device, dtype):
for include_self in (True, False):
for deterministic in [False, True]:
@@ -241,8 +239,7 @@ class TestScatterGather(TestCase):
include_self=include_self)
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
- include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_amax(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
@@ -261,8 +258,7 @@ class TestScatterGather(TestCase):
@dtypes(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
- @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex32=True,
- include_complex=False, include_bool=False))
+ @dtypesIfCUDA(*get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False, include_bool=False))
def test_scatter_reduce_amin(self, device, dtype):
for include_self in (True, False):
self._test_scatter_base(torch.Tensor.scatter_reduce_, device=device, dtype=dtype,
diff --git a/test/test_torch.py b/test/test_torch.py
index 433ccd5d5b..21318f3b16 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -57,8 +57,8 @@ from torch.testing._internal.common_cuda import (
_create_scaling_case, _create_scaling_models_optimizers)
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_dtype import (
- floating_types_and, get_all_math_dtypes, all_types_and_complex_and, all_types_and, floating_types,
- floating_and_complex_types, integral_types_and,
+ floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
+ all_types_and, floating_types, floating_and_complex_types, integral_types_and,
get_all_qint_dtypes,
)
from torch.testing._internal.two_tensor import TwoTensor
@@ -3837,7 +3837,7 @@ else:
self.assertEqual(input, result, msg=f"result: {result} input: {input} method: {str(operation)}")
@onlyCUDA
- @dtypes(torch.cdouble)
+ @dtypes(*complex_types())
def test_scatter_reduce_multiply_unsupported_dtypes(self, device, dtype):
height = 2
width = 2
|
2.41.0
|
c8237c6aa32ab7470e76bde03f2d3dcb9dd42a1
|
Thu, 2 May 2024 22:26:39 +0000
|
[PATCH 0949/1000] [ATen-VK] Resolve compiler_flags to allow Mac build (#125361)
|
Summary: ## `-Wmissing-prototypes` In ATen-Vulkan, we often define functions in `.cpp` files without declaring them in `.h` files and without hiding them in an anonymous namespace. Example: [`Packing.cpp`'s channel_image_repacking()](https://github.com/pytorch/pytorch/blob/f1f142c44f81384afbdba5e451fc15744868bf26/aten/src/ATen/native/vulkan/impl/Packing.cpp#L299-L348) On Mac, this results in a `-Wmissing-prototypes` warning, which is disabled in this change. ## `-Wshadow` In `Adapter.cpp`, we shadow a variable called `properties`; this change renames the inner variable instead of disabling the warning. Test Plan: CI Differential Revision: D56850324 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125361 Approved by: https://github.com/SS-JIA
|
diff --git a/aten/src/ATen/native/vulkan/api/Adapter.cpp b/aten/src/ATen/native/vulkan/api/Adapter.cpp
index ff1ea41cf6..173479a0c2 100644
--- a/aten/src/ATen/native/vulkan/api/Adapter.cpp
+++ b/aten/src/ATen/native/vulkan/api/Adapter.cpp
@@ -44,10 +44,10 @@ PhysicalDevice::PhysicalDevice(VkPhysicalDevice physical_device_handle)
handle, &queue_family_count, queue_families.data());
// Find the total number of compute queues
- for (const VkQueueFamilyProperties& properties : queue_families) {
+ for (const VkQueueFamilyProperties& p : queue_families) {
// Check if this family has compute capability
- if (properties.queueFlags & VK_QUEUE_COMPUTE_BIT) {
- num_compute_queues += properties.queueCount;
+ if (p.queueFlags & VK_QUEUE_COMPUTE_BIT) {
+ num_compute_queues += p.queueCount;
}
}
}
diff --git a/c2_defs.bzl b/c2_defs.bzl
index 63519a3b20..be08b79ac5 100644
--- a/c2_defs.bzl
+++ b/c2_defs.bzl
@@ -274,6 +274,9 @@ C2_FBOBJC_EXTRA_TARGET_CONFIG = {
"MTL_LANGUAGE_REVISION": "Metal12",
}
+def get_c2_torch_vulkan_compiler_flags():
+ return ["-Wno-missing-prototypes"]
+
def get_c2_default_cxx_args():
return dict(
header_namespace = "",
|
2.41.0
|
b5f6b10add7515064f20dace8f80b6226d26ada
|
Thu, 2 May 2024 22:51:03 +0000
|
[PATCH 0951/1000] [Inductor] default block size for head_dim = 256 for flex attention (#125380)
|
## H100 ### torch.bfloat16 No major change, as expected. ``` | Type | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod | dtype | |---------|-----------|--------------|-------------|-------------|-------------|------------|-------------|----------------| | Average | 1.122 | | | | | | | | | Max | 1.437 | 1 | 16 | 512 | 512 | 128 | head_bias | torch.bfloat16 | | Min | 0.895 | 1 | 16 | 1024 | 1024 | 64 | head_bias | torch.bfloat16 | ``` ### torch.float32 Before: OOM when ```head_dim``` = 256 After: ``` | Type | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod | dtype | |---------|-----------|--------------|-------------|-------------|-------------|------------|-------------|---------------| | Average | 2.231 | | | | | | | | | Max | 3.760 | 16 | 16 | 4096 | 4096 | 64 | noop | torch.float32 | | Min | 1.532 | 1 | 16 | 512 | 512 | 256 | causal_mask | torch.float32 | ``` ## A100 ### torch.bfloat16 Before: ``` | Type | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod | dtype | |---------|-----------|--------------|-------------|-------------|-------------|------------|---------------|----------------| | Average | 0.587 | | | | | | | | | Max | 0.960 | 1 | 16 | 512 | 512 | 64 | noop | torch.bfloat16 | | Min | 0.017 | 8 | 16 | 4096 | 4096 | 256 | relative_bias | torch.bfloat16 | ``` After: ``` | Type | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod | dtype | |---------|-----------|--------------|-------------|-------------|-------------|------------|-------------|----------------| | Average | 0.756 | | | | | | | | | Max | 0.931 | 1 | 16 | 512 | 512 | 64 | noop | torch.bfloat16 | | Min | 0.467 | 16 | 16 | 1024 | 1024 | 256 | noop | torch.bfloat16 | ``` ### torch.float32 Before: OOM when ```head_dim``` = 256 After: ``` | Type | Speedup | batch_size | num_heads | q_seq_len | k_seq_len | head_dim | score_mod | dtype | |---------|-----------|--------------|-------------|-------------|-------------|------------|-------------|---------------| | Average | 2.386 | | | | | | | | | Max | 7.584 | 16 | 16 | 512 | 512 | 64 | noop | torch.float32 | | Min | 0.948 | 1 | 16 | 512 | 512 | 256 | causal_mask | torch.float32 | ``` Pull Request resolved: https://github.com/pytorch/pytorch/pull/125380 Approved by: https://github.com/drisspg
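For reference, a standalone sketch of the new (dtype, head_dim) lookup with its fallback. The table values are copied from the diff below; reading the tuple as (BLOCK_M, BLOCK_N, num_warps, num_stages) is an assumption:

```python
import torch

_h100_default_config = {
    (torch.float32, 64): (128, 32, 4, 3),
    (torch.float32, 128): (32, 64, 4, 3),
    (torch.float32, 256): (32, 32, 4, 3),
    (torch.bfloat16, 64): (128, 64, 4, 3),
    (torch.bfloat16, 128): (64, 32, 4, 3),
    (torch.bfloat16, 256): (64, 32, 4, 3),
}


def h100_default_config(dtype, head_dim):
    # Fallback mirrors the new head_dim <= 256 branch for H100.
    fallback = (64, 64, 4, 3) if dtype == torch.float32 else (128, 64, 4, 3)
    return _h100_default_config.get((dtype, head_dim), fallback)


print(h100_default_config(torch.float32, 256))   # (32, 32, 4, 3), previously OOM
print(h100_default_config(torch.bfloat16, 96))   # falls back to (128, 64, 4, 3)
```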
|
diff --git a/benchmarks/transformer/score_mod.py b/benchmarks/transformer/score_mod.py
index e337049707..0e9e8d11a3 100644
--- a/benchmarks/transformer/score_mod.py
+++ b/benchmarks/transformer/score_mod.py
@@ -211,7 +211,7 @@ def generate_experiment_configs() -> List[ExperimentConfig]:
batch_sizes = [1, 8, 16]
num_heads = [16]
q_kv_seq_lens = [(512, 512), (1024, 1024), (4096, 4096)]
- head_dims = [64, 128]
+ head_dims = [64, 128, 256]
dtypes = [
torch.bfloat16,
]
diff --git a/torch/_inductor/kernel/flex_attention.py b/torch/_inductor/kernel/flex_attention.py
index 635af59f8c..15a99faa7b 100644
--- a/torch/_inductor/kernel/flex_attention.py
+++ b/torch/_inductor/kernel/flex_attention.py
@@ -173,31 +173,44 @@ sdpa_template = TritonTemplate(
)
+_h100_default_config = {
+ (torch.float32, 64): (128, 32, 4, 3),
+ (torch.float32, 128): (32, 64, 4, 3),
+ (torch.float32, 256): (32, 32, 4, 3),
+ (torch.bfloat16, 64): (128, 64, 4, 3),
+ (torch.bfloat16, 128): (64, 32, 4, 3),
+ (torch.bfloat16, 256): (64, 32, 4, 3),
+}
+
+_a100_default_config = {
+ (torch.float32, 64): (128, 32, 4, 3),
+ (torch.float32, 128): (128, 32, 4, 3),
+ (torch.float32, 256): (64, 16, 4, 3),
+ (torch.bfloat16, 64): (128, 64, 4, 3),
+ (torch.bfloat16, 128): (128, 32, 4, 3),
+ (torch.bfloat16, 256): (32, 64, 4, 3),
+}
+
+
def _get_default_config(query):
+ dtype = query.get_dtype()
head_dim = query.get_size()[-1]
default_config = None
- if torch.cuda.get_device_capability() >= (9, 0): # H100
- if query.get_dtype() == torch.float32:
- if head_dim == 64:
- default_config = (128, 32, 4, 3)
- else:
- default_config = (32, 64, 4, 3)
+ if head_dim <= 256 and torch.cuda.get_device_capability() >= (9, 0): # H100
+ if dtype == torch.float32:
+ default_config = (64, 64, 4, 3)
else:
- if head_dim == 64:
- default_config = (128, 64, 4, 3)
- else:
- default_config = (64, 32, 4, 3)
- elif torch.cuda.get_device_capability() >= (8, 0): # A100
- if query.get_dtype() == torch.float32:
- default_config = (128, 32, 4, 3)
+ default_config = (128, 64, 4, 3)
+ default_config = _h100_default_config.get((dtype, head_dim), default_config)
+ elif head_dim <= 256 and torch.cuda.get_device_capability() >= (8, 0): # A100
+ if dtype == torch.float32:
+ default_config = (64, 64, 4, 3)
else:
- if head_dim == 64:
- default_config = (128, 64, 4, 3)
- else:
- default_config = (128, 32, 4, 3)
- else:
- if query.get_dtype() == torch.float32:
+ default_config = (128, 64, 4, 3)
+ default_config = _a100_default_config.get((dtype, head_dim), default_config)
+ else: # modest hardware or extremely large head_dim
+ if dtype == torch.float32:
default_config = (32, 16, 4, 3)
else:
default_config = (64, 32, 4, 3)
|
2.41.0
|
551755cec99adef3b9210fd85996fc304bed349
|
Thu, 2 May 2024 23:34:18 +0000
|
[PATCH 0952/1000] Update tolerance for flex fp32 (#125444)
|
# Summary Updates the tolerances to account for an internal test failure. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125444 Approved by: https://github.com/kit1980
|
diff --git a/test/inductor/test_flex_attention.py b/test/inductor/test_flex_attention.py
index e5f31e7bcb..e5aa2aeb86 100644
--- a/test/inductor/test_flex_attention.py
+++ b/test/inductor/test_flex_attention.py
@@ -143,7 +143,7 @@ class TestTemplatedSDPA(InductorTestCase):
# Note, it seems like we really are less accurate than the float32
# computation, likely due to the online softmax
if dtype == torch.float32:
- fudge_factor = 4.0
+ fudge_factor = 10.0
else:
fudge_factor = 1.1
if compiled_error > ref_error * fudge_factor:
|
2.41.0
|
440d0755ab4659c450de1bc901839e675f253ad
|
Thu, 2 May 2024 23:44:09 +0000
|
[PATCH 0953/1000] Support custom layout call under torch dispatch mode (#125379)
|
Fixes #ISSUE_NUMBER Pull Request resolved: https://github.com/pytorch/pytorch/pull/125379 Approved by: https://github.com/jbschlosser
|
diff --git a/test/test_nestedtensor.py b/test/test_nestedtensor.py
index 3df4b63801..31ea10a930 100644
--- a/test/test_nestedtensor.py
+++ b/test/test_nestedtensor.py
@@ -3878,6 +3878,14 @@ class TestNestedTensorSubclass(TestCase):
self.assertTrue(not nt_noncontiguous.is_contiguous(memory_format=torch.contiguous_format))
self.assertTrue(nt_contiguous_narrow.is_contiguous(memory_format=torch.contiguous_format))
+ def test_layout_under_torch_dispatch_mode(self):
+ from torch.testing._internal.logging_tensor import capture_logs_with_logging_tensor_mode
+
+ nt = random_nt_from_dims([2, None, 3], torch.device('cpu'), torch.float32, layout=torch.jagged)
+
+ with capture_logs_with_logging_tensor_mode():
+ self.assertEqual(nt.layout, torch.jagged)
+
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
@parametrize("func", [torch.empty_like, torch.randn_like],
name_fn=lambda f: f.__name__)
diff --git a/torch/csrc/PyInterpreter.cpp b/torch/csrc/PyInterpreter.cpp
index f71253d400..4582cb2a83 100644
--- a/torch/csrc/PyInterpreter.cpp
+++ b/torch/csrc/PyInterpreter.cpp
@@ -831,12 +831,16 @@ c10::Layout ConcretePyInterpreterVTable::layout(
"torch.ops.prim");
TORCH_CHECK(
- THPLayout_Check(out.ptr()),
+ THPLayout_Check(out.ptr()) || PyLong_Check(out.ptr()),
"layout returned invalid type ",
py::detail::get_fully_qualified_tp_name(Py_TYPE(out.ptr())),
", expected Layout");
- return toLayout(out.ptr());
+ if (THPLayout_Check(out.ptr())) {
+ return toLayout(out.ptr());
+ } else {
+ return c10::Layout(py::cast<int64_t>(out));
+ }
}
int64_t ConcretePyInterpreterVTable::numel(const c10::TensorImpl* self) const {
|
2.41.0
|
18a6f46d01db665cf092d501fb9597cdac91a33
|
Fri, 3 May 2024 00:50:49 +0000
|
[PATCH 0954/1000] Adding Compare in torch.utils.benchmark documentation (#125009)
|
`torch.utils.benchmark.Compare` is not directly exposed in the torch.utils.benchmark documentation. I think it is a valuable addition: it can help people embrace the torch.utils.benchmark workflow and gives other documentation something to link to. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125009 Approved by: https://github.com/mikaylagawarecki
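For context, a minimal usage sketch of the class being documented; the `stmt`/`setup` strings and sizes are made up for illustration:

```python
import torch.utils.benchmark as benchmark

# Collect a few Measurements with Timer, then render them with Compare.
results = []
for n in (64, 256):
    timer = benchmark.Timer(
        stmt="x @ x",
        setup=f"import torch; x = torch.randn({n}, {n})",
        label="matmul",
        description=f"{n}x{n}",
    )
    results.append(timer.timeit(100))

compare = benchmark.Compare(results)
compare.trim_significant_figures()
compare.print()
```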
|
diff --git a/docs/source/benchmark_utils.rst b/docs/source/benchmark_utils.rst
index c93fbfd66c..7546179c50 100644
--- a/docs/source/benchmark_utils.rst
+++ b/docs/source/benchmark_utils.rst
@@ -19,6 +19,9 @@ Benchmark Utils - torch.utils.benchmark
.. autoclass:: FunctionCounts
:members:
+.. autoclass:: Compare
+ :members:
+
.. These are missing documentation. Adding them here until a better place
.. is made in this file.
.. py:module:: torch.utils.benchmark.examples
diff --git a/torch/utils/benchmark/utils/compare.py b/torch/utils/benchmark/utils/compare.py
index 337b742ca0..20122df667 100644
--- a/torch/utils/benchmark/utils/compare.py
+++ b/torch/utils/benchmark/utils/compare.py
@@ -267,6 +267,21 @@ Times are in {common.unit_to_english(self.time_unit)}s ({self.time_unit}).
class Compare:
+ """Helper class for displaying the results of many measurements in a
+ formatted table.
+
+ The table format is based on the information fields provided in
+ :class:`torch.utils.benchmark.Timer` (`description`, `label`, `sub_label`,
+ `num_threads`, etc).
+
+ The table can be directly printed using :meth:`print` or casted as a `str`.
+
+ For a full tutorial on how to use this class, see:
+ https://pytorch.org/tutorials/recipes/recipes/benchmark.html
+
+ Args:
+ results: List of Measurment to display.
+ """
def __init__(self, results: List[common.Measurement]):
self._results: List[common.Measurement] = []
self.extend_results(results)
@@ -278,6 +293,10 @@ class Compare:
return "\n".join(self._render())
def extend_results(self, results):
+ """Append results to already stored ones.
+
+ All added results must be instances of ``Measurement``.
+ """
for r in results:
if not isinstance(r, common.Measurement):
raise ValueError(
@@ -286,15 +305,22 @@ class Compare:
self._results.extend(results)
def trim_significant_figures(self):
+ """Enables trimming of significant figures when building the formatted table."""
self._trim_significant_figures = True
def colorize(self, rowwise=False):
+ """Colorize formatted table.
+
+ Colorize columnwise by default.
+ """
self._colorize = Colorize.ROWWISE if rowwise else Colorize.COLUMNWISE
def highlight_warnings(self):
+ """Enables warning highlighting when building formatted table."""
self._highlight_warnings = True
def print(self):
+ """Print formatted table"""
print(str(self))
def _render(self):
|
2.41.0
|
cac7aa70ffc62090ab12211c36a3ac37e73ed0b
|
Fri, 3 May 2024 01:18:52 +0000
|
[PATCH 0955/1000] [CI] Unskip Linalg tests on ARM (#125377)
|
Removes the obscure "Issue with numpy version on arm" skip added by https://github.com/pytorch/pytorch/pull/82213 and replaces it with 4 targeted skips:

- test_addmv for `float16`
- test_vector_norm for `float16`, `bfloat16` and `float32`

Followups to fix them are tracked in https://github.com/pytorch/pytorch/issues/125438 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125377 Approved by: https://github.com/kit1980
|
diff --git a/test/test_linalg.py b/test/test_linalg.py
index 8976d81c5a..0a5577d54c 100644
--- a/test/test_linalg.py
+++ b/test/test_linalg.py
@@ -53,7 +53,6 @@ def blaslt_supported_device():
return True
return False
-@unittest.skipIf(IS_ARM64, "Issue with numpy version on arm")
class TestLinalg(TestCase):
def setUp(self):
super(self.__class__, self).setUp()
@@ -1224,6 +1223,9 @@ class TestLinalg(TestCase):
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
def test_vector_norm(self, device, dtype):
+ if IS_ARM64 and device == 'cpu' and dtype in [torch.float16, torch.bfloat16, torch.float32]:
+ raise unittest.SkipTest("Fails on ARM, see https://github.com/pytorch/pytorch/issues/125438")
+ # have to use torch.randn(...).to(bfloat16) instead of
# This test compares torch.linalg.vector_norm's output with
# torch.linalg.norm given a flattened tensor
ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
@@ -5583,6 +5585,8 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
torch.half))
@dtypes(torch.bfloat16, torch.half, torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_addmv(self, device, dtype):
+ if IS_ARM64 and device == 'cpu' and dtype == torch.float16:
+ raise unittest.SkipTest("Fails on ARM, see https://github.com/pytorch/pytorch/issues/125438")
# have to use torch.randn(...).to(bfloat16) instead of
# torch.randn(..., dtype=bfloat16). randn does not support
# bfloat16 yet.
|
2.41.0
|
15da7856caf2deacbf0d9adabd53e5e3f0d9a1d
|
Fri, 3 May 2024 01:19:21 +0000
|
[PATCH 0956/1000] [MPS] Fix overflow in cumsum when dtype is bool (#125318)
|
`cumsum` and `cumprod` were (are?) buggy for MPS: https://github.com/pytorch/pytorch/blob/c8d2a55273757c90989fde7c6f05e957aba9a238/aten/src/ATen/native/mps/operations/UnaryOps.mm#L435-L436 A workaround casts the input to int32 prior to performing the op to prevent overflow for certain numeric types. It turns out this issue also affects boolean types:

```python
import torch
print(torch.ones(128, dtype=torch.bool, device="mps").cumsum(0)[-1])
# tensor(-128, device='mps:0')
```

In this PR I'm adding logic to also cast bool dtypes to int32 prior to `cumsum` and `cumprod`, although the output is guaranteed not to overflow for the latter with bools. I'm also adding a test to prevent regressions. Fixes #96614 #106112 #109166 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125318 Approved by: https://github.com/malfet
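A quick sketch of the check the new regression test performs; this is only meaningful on a machine with an MPS device and is a hedged illustration, not the test itself:

```python
import torch

if torch.backends.mps.is_available():
    a = torch.ones(2**16, dtype=torch.bool)
    t_cpu = a.cumsum(0)
    t_mps = a.to("mps").cumsum(0)
    # Before this fix the MPS result overflowed the narrow accumulator.
    print(t_cpu[-1], t_mps[-1].cpu())  # both should be 65536
```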
|
diff --git a/aten/src/ATen/native/mps/operations/UnaryOps.mm b/aten/src/ATen/native/mps/operations/UnaryOps.mm
index c76a60e4a4..7709c79db6 100644
--- a/aten/src/ATen/native/mps/operations/UnaryOps.mm
+++ b/aten/src/ATen/native/mps/operations/UnaryOps.mm
@@ -434,7 +434,7 @@ static void cumulative_op_impl(const Tensor& self,
// issue #103810551: cumsum / cumprod are broken for int8, int16 and as chances for overflow are pretty high, cast to
// int32 fixed in macOS 13.3
- bool castInputData = (isIntegralType(input.scalar_type(), false) && input.scalar_type() != ScalarType::Int &&
+ bool castInputData = (isIntegralType(input.scalar_type(), true) && input.scalar_type() != ScalarType::Int &&
input.scalar_type() != ScalarType::Long);
TORCH_CHECK(macOS13_3_plus || input.scalar_type() != ScalarType::Long,
diff --git a/test/test_mps.py b/test/test_mps.py
index 95828315d5..38fea5b3f8 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -156,11 +156,6 @@ def mps_ops_grad_modifier(ops):
# On the backward pass for `sort` both are used (values and indices), thus resulting in a issmatch between CPU and MPS.
# Running `msort` with stable `sort` passes.
'msort': [torch.float16],
-
- # See https://github.com/pytorch/pytorch/issues/106112 for more information
- 'cumprod': [torch.float32, torch.float16],
- # See https://github.com/pytorch/pytorch/issues/109166 for more information
- 'masked.cumprod': [torch.float16],
}
SKIPLIST_GRAD = {
@@ -4273,6 +4268,13 @@ class TestMPS(TestCaseMPS):
self.assertEqual(e_string, "MPS does not support cumsum_out_mps op with int64 input." +
" Support has been added in macOS 13.3")
+ def test_cumsum_bool(self):
+ a = torch.ones(2**16, dtype=torch.bool)
+ t_cpu = a.cumsum(0)
+ t_mps = a.to("mps").cumsum(0)
+
+ self.assertEqual(t_cpu, t_mps)
+
def test_cumsum_minus_one_axis(self):
def helper(dtype):
# Test with axis -1
|
2.41.0
|
40d6df448de1acb263ed8f6ff9e7d26f5a1a161
|
Fri, 3 May 2024 03:50:55 +0000
|
[PATCH 0957/1000] [MPS] Native nonzero implementation (#125355)
|
Fixes https://github.com/pytorch/pytorch/issues/124850 Replaces the previous MPSGraph nonzero construction with the native nonzero op. For older OSes, fall back to CPU (the previous implementation was not reliable and was comparable to CPU in speed). Pull Request resolved: https://github.com/pytorch/pytorch/pull/125355 Approved by: https://github.com/kulinseth
|
diff --git a/aten/src/ATen/native/mps/operations/Indexing.mm b/aten/src/ATen/native/mps/operations/Indexing.mm
index d86f57c49f..38c3212500 100644
--- a/aten/src/ATen/native/mps/operations/Indexing.mm
+++ b/aten/src/ATen/native/mps/operations/Indexing.mm
@@ -241,14 +241,20 @@ static void index_put_kernel_mps(TensorIterator& iter,
} // namespace mps
static Tensor nonzero_fallback(const Tensor& self) {
- TORCH_WARN_ONCE("MPS: nonzero op is supported natively starting from macOS 13.0. ",
- "Falling back on CPU. This may have performance implications.");
-
return at::nonzero(self.to("cpu")).clone().to("mps");
}
Tensor& nonzero_out_mps(const Tensor& self, Tensor& out_) {
- if (!is_macos_13_or_newer()) {
+ if (!is_macos_13_or_newer(MacOSVersion::MACOS_VER_14_0_PLUS)) {
+ TORCH_WARN_ONCE("MPS: nonzero op is supported natively starting from macOS 13.0. ",
+ "Falling back on CPU. This may have performance implications.");
+ Tensor out_fallback = nonzero_fallback(self);
+ at::native::resize_output(out_, out_fallback.sizes());
+ out_.copy_(out_fallback.to("mps"));
+ return out_;
+ } else if (self.is_complex()) {
+ TORCH_WARN_ONCE("MPS: nonzero op is not supported for complex datatypes. ",
+ "Falling back on CPU. This may have performance implications.");
Tensor out_fallback = nonzero_fallback(self);
at::native::resize_output(out_, out_fallback.sizes());
out_.copy_(out_fallback.to("mps"));
@@ -282,7 +288,6 @@ Tensor& nonzero_out_mps(const Tensor& self, Tensor& out_) {
CachedGraph(MPSGraph* graph) : MPSCachedGraph(graph) {}
MPSGraphTensor* inputTensor_ = nil;
MPSGraphTensor* outputTensor_ = nil;
- MPSGraphTensor* scatterDataTensor_ = nil;
};
dispatch_sync(stream->queue(), ^() {
@@ -300,93 +305,20 @@ Tensor& nonzero_out_mps(const Tensor& self, Tensor& out_) {
out = at::empty(out_.sizes(), out_.scalar_type(), c10::nullopt, kMPS, c10::nullopt, c10::nullopt);
}
- int64_t _apparentInputShape = 1;
- for (auto dim : self.sizes()) {
- _apparentInputShape *= dim;
- }
- MPSShape* apparentOutputShape = @[ @(total_nonzero * nDim) ];
- MPSShape* apparentInputShape = @[ @(_apparentInputShape) ];
-
- // Pseudocode:
- //
- // inputTensor = [1, 0, 0, 3]
- // inputNonZero = [1, 0, 0, 1]
- // indices = [1, 1, 1, 2]
- // maskedIndices = [0, -1, -1, 1]
- // coordinates = [0, 1, 2, 3]
- // scatterResult = [0, 3]
-
@autoreleasepool {
string key = "nonzero_out_mps" + getTensorsStringKey(self);
auto cachedGraph = LookUpOrCreateCachedGraph<CachedGraph>(key, [&](auto mpsGraph, auto newCachedGraph) {
- MPSDataType inputDataType = getMPSDataType(self);
- MPSShape* inputShape = getMPSShape(self);
-
- MPSGraphTensor* inputTensor =
- mpsGraphRankedPlaceHolder(mpsGraph, getMPSScalarType(self.scalar_type()), apparentInputShape);
- MPSGraphTensor* scatterDataTensor = mpsGraphUnrankedPlaceHolder(mpsGraph, getMPSScalarType(out.scalar_type()));
- MPSGraphTensor* zeroTensor = [mpsGraph constantWithScalar:0.0 dataType:inputDataType];
- MPSGraphTensor* oneTensor = [mpsGraph constantWithScalar:1.0 dataType:MPSDataTypeInt32];
- MPSGraphTensor* minusMaxDimTensor = [mpsGraph constantWithScalar:-maxDimensions dataType:MPSDataTypeInt32];
- MPSGraphTensor* inputNotEqualToZeroTensor = [mpsGraph notEqualWithPrimaryTensor:inputTensor
- secondaryTensor:zeroTensor
- name:nil];
- MPSGraphTensor* maskTensor = [mpsGraph castTensor:inputNotEqualToZeroTensor
- toType:MPSDataTypeInt32
- name:@"castToInt32"];
- MPSGraphTensor* indicesTensor = [mpsGraph cumulativeSumWithTensor:maskTensor axis:0 name:nil];
- MPSGraphTensor* indicesMinusOneTensor = [mpsGraph subtractionWithPrimaryTensor:indicesTensor
- secondaryTensor:oneTensor
- name:nil];
- MPSGraphTensor* maskedIndicesTensor = [mpsGraph selectWithPredicateTensor:inputNotEqualToZeroTensor
- truePredicateTensor:indicesMinusOneTensor
- falsePredicateTensor:minusMaxDimTensor
- name:nil];
- MPSGraphTensor* coordinatesTensor = [mpsGraph reshapeTensor:[mpsGraph coordinateAlongAxis:0
- withShape:inputShape
- name:nil]
- withShape:@[ @-1 ]
- name:nil];
- if (nDim > 1) {
- NSMutableArray<MPSGraphTensor*>* maskedIndicesTensorArray = [NSMutableArray arrayWithCapacity:nDim];
- NSMutableArray<MPSGraphTensor*>* coordinatesTensorArray = [NSMutableArray arrayWithCapacity:nDim];
-
- MPSGraphTensor* constantRankTensor = [mpsGraph constantWithScalar:nDim dataType:MPSDataTypeInt32];
- maskedIndicesTensorArray[0] = [mpsGraph multiplicationWithPrimaryTensor:maskedIndicesTensor
- secondaryTensor:constantRankTensor
- name:nil];
- coordinatesTensorArray[0] = coordinatesTensor;
- for (int i = 1; i < nDim; i++) {
- maskedIndicesTensorArray[i] = [mpsGraph additionWithPrimaryTensor:maskedIndicesTensorArray[i - 1]
- secondaryTensor:oneTensor
- name:nil];
- coordinatesTensorArray[i] = [mpsGraph reshapeTensor:[mpsGraph coordinateAlongAxis:i
- withShape:inputShape
- name:nil]
- withShape:@[ @-1 ]
- name:nil];
- }
- maskedIndicesTensor = [mpsGraph concatTensors:maskedIndicesTensorArray dimension:0 interleave:YES name:nil];
- coordinatesTensor = [mpsGraph concatTensors:coordinatesTensorArray dimension:0 interleave:YES name:nil];
- }
+ MPSGraphTensor* inputTensor = mpsGraphRankedPlaceHolder(mpsGraph, getMPSDataType(self), getMPSShape(self));
- MPSGraphTensor* outputTensor = [mpsGraph scatterWithDataTensor:scatterDataTensor
- updatesTensor:coordinatesTensor
- indicesTensor:maskedIndicesTensor
- axis:0
- mode:MPSGraphScatterModeSet
- name:nil];
+ MPSGraphTensor* outputTensor = [mpsGraph nonZeroIndicesOfTensor:inputTensor name:nil];
newCachedGraph->inputTensor_ = inputTensor;
- newCachedGraph->scatterDataTensor_ = scatterDataTensor;
newCachedGraph->outputTensor_ = outputTensor;
});
- Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor_, self, apparentInputShape);
- Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor_, out, apparentOutputShape);
- Placeholder scatterPlaceholder = Placeholder(cachedGraph->scatterDataTensor_, out, apparentOutputShape);
-
- auto feeds = dictionaryFromPlaceholders(selfPlaceholder, scatterPlaceholder);
+ Placeholder selfPlaceholder = Placeholder(cachedGraph->inputTensor_, self);
+ Placeholder outputPlaceholder = Placeholder(cachedGraph->outputTensor_, out);
+ auto feeds = dictionaryFromPlaceholders(selfPlaceholder);
runMPSGraph(stream, cachedGraph->graph(), feeds, outputPlaceholder);
}
@@ -398,7 +330,13 @@ Tensor& nonzero_out_mps(const Tensor& self, Tensor& out_) {
}
Tensor nonzero_mps(const Tensor& self) {
- if (!is_macos_13_or_newer()) {
+ if (!is_macos_13_or_newer(MacOSVersion::MACOS_VER_14_0_PLUS)) {
+ TORCH_WARN_ONCE("MPS: nonzero op is supported natively starting from macOS 13.0. ",
+ "Falling back on CPU. This may have performance implications.");
+ return nonzero_fallback(self);
+ } else if (self.is_complex()) {
+ TORCH_WARN_ONCE("MPS: nonzero op is not supported for complex datatypes ",
+ "Falling back on CPU. This may have performance implications.");
return nonzero_fallback(self);
}
diff --git a/test/test_mps.py b/test/test_mps.py
index 38fea5b3f8..1bc1ca87db 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -227,6 +227,7 @@ def mps_ops_modifier(ops):
'__rmul__',
'__getitem__',
'add',
+ 'argwhere',
'atleast_1d',
'atleast_2d',
'atleast_3d',
@@ -287,6 +288,7 @@ def mps_ops_modifier(ops):
'nn.functional.padcircular',
'nn.functional.feature_alpha_dropoutwithout_train',
'nn.functional.unfold',
+ 'nonzero',
'ones',
'outer',
'permute',
@@ -340,7 +342,6 @@ def mps_ops_modifier(ops):
'any',
'addcdiv',
'addcmul',
- 'argwhere',
'asin',
'atan',
'atanh',
@@ -408,7 +409,6 @@ def mps_ops_modifier(ops):
'nn.functional.pixel_shuffle',
'nn.functional.pixel_unshuffle',
'nn.functional.tanhshrink',
- 'nonzero',
'prod',
'reciprocal',
'roll',
|
2.41.0
|
156cb2e1279bd44d140fdb469d41dda5d0c40c2
|
Fri, 3 May 2024 04:42:38 +0000
|
[PATCH 0958/1000] Fix mem size mismatch from split/chunk in const folding (#125199)
|
Summary: The chunk/split ops on the weights/constants are folded in an fx pass, and each output tensor has the same storage size as the original tensor (which is 3x its actual size for chunk(3)). However, the backend calculates the mem size on device from the tensor shape/stride/dtype. This causes a mismatch when copying weights/constants to the device, as the allocated mem on device is always smaller than the size of the weights/constants, and results in a runtime error when loading the weight/constant (T172125529). This diff fixes the issue by cloning the tensors after const folding so that the tensors have the correct storage size.

Test Plan: Before this change: (18432 = 48 * 64 * 2 * 3)

```
RuntimeError: Failed to load constant getitem_idx0 split (remaining=18432) at fbcode/caffe2/torch/fb/acc_runtime/afg/afg_bindings.cpp:3422: Request failed because an invalid parameter
```

```
buck2 run mode/opt //caffe2/torch/fb/acc_runtime/afg/tests:test_operators-artemis -- -r test_mem_size_mismatch
```

```
Ran 1 test in 7.048s

OK
```

Reviewed By: jfix71 Differential Revision: D56663931 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125199 Approved by: https://github.com/jfix71
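The storage-size mismatch is easy to reproduce outside the folding pass. A small illustration (shapes are made up; `untyped_storage().nbytes()` reports the bytes that would be serialized or copied):

```python
import torch

w = torch.randn(48, 192)        # pretend this is a folded constant
a, b, c = w.chunk(3, dim=1)     # each chunk is a view of w's storage

# The view keeps the full storage: 48 * 192 * 4 bytes, 3x its logical size.
print(a.shape, a.untyped_storage().nbytes())
# After clone, the storage matches the chunk: 48 * 64 * 4 bytes.
print(a.detach().clone().untyped_storage().nbytes())
```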
|
diff --git a/torch/fx/experimental/const_fold.py b/torch/fx/experimental/const_fold.py
index 548d1d3852..8176ccb562 100644
--- a/torch/fx/experimental/const_fold.py
+++ b/torch/fx/experimental/const_fold.py
@@ -60,7 +60,7 @@ class FoldedGraphModule(torch.fx.GraphModule):
def _create_param(i):
return torch.nn.Parameter(
- i
+ i.detach().clone()
if not isinstance(i, int)
else torch.Tensor([i]).to(device=self.device_for_folded_attrs),
requires_grad=i.requires_grad if isinstance(i, torch.Tensor) else False,
|
2.41.0
|
706da2bad835f521a1062b80aa6c631730a2064
|
Thu, 2 May 2024 16:29:05 -0700
|
[PATCH 0959/1000] [dynamo][cpp-guards] Improve recompilation reason logic for NO_TENSOR_ALIASING guard (#125439)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125439 Approved by: https://github.com/williamwen42
|
diff --git a/torch/_dynamo/guards.py b/torch/_dynamo/guards.py
index fb4cb7a039..c04d114336 100644
--- a/torch/_dynamo/guards.py
+++ b/torch/_dynamo/guards.py
@@ -132,6 +132,7 @@ class GuardManager:
self.cache_entry = None
self.extra_state = None
self.id_matched_objs = None
+ self.no_tensor_aliasing_sources = []
def get_guard_lines(self, guard):
guard_name = guard.__class__.__name__
@@ -2094,6 +2095,7 @@ class CheckFunctionManager:
# when the CacheEntry is constructed
guard_fn.cache_entry = None
guard_fn.extra_state = None
+ guard_fn.no_tensor_aliasing_sources = tensor_check_names
return guard_fn
def invalidate(self):
@@ -2184,6 +2186,23 @@ def is_recompiles_verbose_enabled():
return torch._logging._internal.log_state.is_artifact_enabled("recompiles_verbose")
+def recompilation_reason_for_no_tensor_aliasing_guard(guard_manager, scope):
+ duplicate_tensors = []
+ global_scope = dict(guard_manager.global_scope)
+ ids_to_source = collections.defaultdict(list)
+ for tensor_source in guard_manager.no_tensor_aliasing_sources: # type: ignore[attr-defined]
+ global_scope["__compile_source__"] = tensor_source
+ tensor_id = id(eval(tensor_source, global_scope, scope))
+ ids_to_source[tensor_id].append(tensor_source)
+
+ for key in ids_to_source:
+ if len(ids_to_source[key]) > 1:
+ duplicate_tensors.append(f"{ids_to_source[key]}")
+
+ reason = ", ".join(duplicate_tensors)
+ return [f"Duplicate tensors found: {reason}"]
+
+
def get_guard_fail_reason(
guard_fn: GuardFn,
code: types.CodeType,
@@ -2198,6 +2217,8 @@ def get_guard_fail_reason(
scope.update(guard_fn.closure_vars)
reasons: List[str] = []
+ no_tensor_aliasing_check_failed = False
+
verbose_code_parts: List[str] = []
if config.enable_cpp_guard_manager:
guard_manager = guard_fn
@@ -2213,35 +2234,42 @@ def get_guard_fail_reason(
# walk through this list and find the guard that failed. This is
# very important for symbolic shape guards which are currently
# installed as a lambda guard and can encompass a long list of code_parts.
+
if len(verbose_code_parts) == 1:
- reasons = verbose_code_parts
- verbose_code_parts = []
+ if "Duplicate tensor found" in verbose_code_parts[0]:
+ no_tensor_aliasing_check_failed = True
+ else:
+ reasons = verbose_code_parts
+ verbose_code_parts = []
else:
verbose_code_parts = guard_fn.verbose_code_parts
# This is not needed for CPP guard because the verbose check is already
# run in C++.
scope["___check_tensors"] = scope["___check_tensors_verbose"]
- for part in verbose_code_parts:
- global_scope = dict(guard_fn.global_scope)
- global_scope["__compile_source__"] = part
- with report_compile_source_on_error():
- try:
- fail_reason = eval(part, global_scope, scope)
- except Exception as e:
- if is_recompiles_verbose_enabled():
- continue
- else:
- raise
- # Only ___check_tensors knows how to return a fancy fail reason;
- # for everything else we just report the code that failed
-
- if isinstance(fail_reason, bool) and not fail_reason:
- fail_reason = part
- if isinstance(fail_reason, str):
- reasons.append(fail_reason)
- if not is_recompiles_verbose_enabled():
- break
+ if no_tensor_aliasing_check_failed:
+ reasons = recompilation_reason_for_no_tensor_aliasing_guard(guard_fn, scope)
+ else:
+ for part in verbose_code_parts:
+ global_scope = dict(guard_fn.global_scope)
+ global_scope["__compile_source__"] = part
+ with report_compile_source_on_error():
+ try:
+ fail_reason = eval(part, global_scope, scope)
+ except Exception as e:
+ if is_recompiles_verbose_enabled():
+ continue
+ else:
+ raise
+ # Only ___check_tensors knows how to return a fancy fail reason;
+ # for everything else we just report the code that failed
+
+ if isinstance(fail_reason, bool) and not fail_reason:
+ fail_reason = part
+ if isinstance(fail_reason, str):
+ reasons.append(fail_reason)
+ if not is_recompiles_verbose_enabled():
+ break
reason_str = "\n".join(reasons)
guard_failures[orig_code_map[code]].append(reason_str)
diff --git a/torch/csrc/dynamo/guards.cpp b/torch/csrc/dynamo/guards.cpp
index cedaad3e35..1ee8362326 100644
--- a/torch/csrc/dynamo/guards.cpp
+++ b/torch/csrc/dynamo/guards.cpp
@@ -1273,13 +1273,11 @@ class TENSOR_ALIASING : public RelationalGuard {
class NO_TENSOR_ALIASING : public RelationalGuard {
public:
NO_TENSOR_ALIASING(
- long unsigned int num_tensors,
- py::object tensor_names,
+ const py::list& tensor_names,
py::object verbose_code_parts)
: RelationalGuard(std::move(verbose_code_parts)),
- _num_tensors(num_tensors),
- _tensor_names(std::move(tensor_names)) {
- _unique_tensors.reserve(num_tensors);
+ _tensor_names(tensor_names) {
+ _unique_tensors.reserve(tensor_names.size());
}
bool check_nopybind(PyObject* value) override { // borrowed ref
@@ -1303,19 +1301,13 @@ class NO_TENSOR_ALIASING : public RelationalGuard {
bool result = check_nopybind(value);
if (!result) {
- std::stringstream fail_reason;
- fail_reason << "Duplicate tensor found where not expected! ";
- fail_reason << py::cast<std::string>(_tensor_names[_counter])
- << " should not alias to anything, but is aliased."
- << " Total number of tensors are " << _num_tensors;
- return GuardDebugInfo(false, fail_reason.str(), 0);
+ return GuardDebugInfo(
+ false, "Duplicate tensor found where not expected!", 0);
}
- _counter += 1;
return GuardDebugInfo(true, 1);
}
void reset_state() final {
- _counter = 0;
for (auto item : _unique_tensors) {
Py_DECREF(item.first);
}
@@ -1323,10 +1315,8 @@ class NO_TENSOR_ALIASING : public RelationalGuard {
}
private:
- long unsigned int _num_tensors;
py::list _tensor_names;
ska::flat_hash_map<PyObject*, std::nullptr_t> _unique_tensors;
- long unsigned int _counter = 0;
};
class DYNAMIC_INDICES : public LeafGuard {
@@ -3186,9 +3176,7 @@ void install_no_tensor_aliasing_guard(
// relational guard. There is one guard object that is shared between multiple
// guard managers.
std::shared_ptr<RelationalGuard> guard = std::make_shared<NO_TENSOR_ALIASING>(
- guard_managers.size(),
- std::move(tensor_names),
- std::move(verbose_code_parts));
+ std::move(tensor_names), std::move(verbose_code_parts));
// Register the resetter on the toor gaurd mananger, so that it can reset
// the newly added relational guard when the guard eval fails.
|
2.41.0
|
f757a5c004641c7ef4781aad4a20196e7dd8b9e
|
Fri, 3 May 2024 04:59:17 +0000
|
[PATCH 0961/1000] [export] use tree_map for _flatten_dynamic_shapes (#125415)
|
Summary: Fixing the implementation of `_flatten_dynamic_shapes()`, to follow how `_process_dynamic_shapes()` does it. The previous implementation would misinterpret some nested dynamic shapes specs, causing it to miss out on some shapes specs, for example with nested inputs/constant input tuples:

```
inputs = (
    (2, 1),
    (
        torch.randn(2, 1),
        torch.randn(2, 2),
        torch.randn(2, 3),
    )
)
dynamic_shapes = (
    (None, None),
    (
        None,
        None,
        None,
    )
)
```

This would get interpreted as 2 shapes specs for 2d and 3d tensors. Fixing so this doesn't happen.

Test Plan: Existing export tests Differential Revision: D56894923 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125415 Approved by: https://github.com/angelayi
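A hypothetical sketch of the idea behind the fix: walk the (combined) args and the dynamic_shapes spec in lockstep so every leaf input gets exactly one spec, instead of flattening the shapes spec on its own. This is not the export implementation, just the pairing logic:

```python
def flatten_dynamic_shapes(args, shapes):
    # Pair each leaf input (tensor, int, ...) with its shapes spec by walking
    # the args structure; the shapes spec is never flattened on its own.
    flat = []

    def walk(a, s):
        if isinstance(a, (list, tuple)):
            for ai, si in zip(a, s if s is not None else [None] * len(a)):
                walk(ai, si)
        elif isinstance(a, dict):
            for k, v in a.items():
                walk(v, s[k] if s is not None else None)
        else:
            flat.append(s)

    walk(args, shapes)
    return flat

# Applied to the nested example above, this yields 5 specs: one per leaf
# input (two ints and three tensors), not one per "shape-looking" tuple.
```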
|
diff --git a/test/export/test_export.py b/test/export/test_export.py
index c7b6f53aaf..8aede91977 100644
--- a/test/export/test_export.py
+++ b/test/export/test_export.py
@@ -4561,6 +4561,35 @@ def forward(self, x):
self.assertEqual(div_spec.arg.name, "div")
self.assertEqual(div_spec.arg.value, "floor")
+ def test_nested_dynamic_shapes_spec(self):
+ class Foo(torch.nn.Module):
+ def forward(self, x):
+ (a0, a1), (b0, b1), (c0, c1, c2) = x
+ return a0 + a1 + b0 + b1 + c0 + c1 + c2
+
+ f = Foo()
+ inputs = (
+ (1, 2),
+ (
+ torch.randn(4, 4),
+ torch.randn(4, 4),
+ ),
+ (
+ torch.randn(4, 4),
+ torch.randn(4, 4),
+ torch.randn(4, 4),
+ ),
+ )
+ # make sure this gets parsed correctly as 7 individual inputs, not 3 tensors
+ dynamic_shapes = {
+ "x": (
+ (None, None),
+ (None, None),
+ (None, None, None),
+ )
+ }
+ export(f, (inputs,), dynamic_shapes=dynamic_shapes)
+
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo isn't support")
class TestOneOffModelExportResult(TestCase):
diff --git a/torch/_export/__init__.py b/torch/_export/__init__.py
index 5591b40e2f..022f47ad9b 100644
--- a/torch/_export/__init__.py
+++ b/torch/_export/__init__.py
@@ -43,6 +43,7 @@ from torch.export.dynamic_shapes import (
Constraint,
dims,
dynamic_dim,
+ _combine_args,
)
from torch.export.exported_program import (
_disable_prexisiting_fake_mode,
@@ -175,9 +176,11 @@ def capture_pre_autograd_graph(
_restore_state_dict(f, m)
flat_args, _ = pytree.tree_flatten((args, kwargs or {}))
+ combined_args = _combine_args(f, args, kwargs)
range_constraints = make_constraints(
fake_mode,
m,
+ combined_args,
dynamic_shapes,
0,
)
diff --git a/torch/_export/non_strict_utils.py b/torch/_export/non_strict_utils.py
index f102d1bfb0..98627e75b9 100644
--- a/torch/_export/non_strict_utils.py
+++ b/torch/_export/non_strict_utils.py
@@ -15,7 +15,7 @@ from torch._export.passes.add_runtime_assertions_for_constraints_pass import Inp
from torch._guards import Source
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch.export import Constraint
-from torch.export.dynamic_shapes import _Dim
+from torch.export.dynamic_shapes import _tree_map
from torch.export.graph_signature import CustomObjArgument
from torch.fx.experimental.symbolic_shapes import (
ConstraintViolationError,
@@ -30,7 +30,6 @@ from torch.utils._pytree import (
KeyPath,
MappingKey,
SequenceKey,
- tree_flatten,
tree_map_with_path,
)
@@ -180,25 +179,17 @@ def make_fake_inputs(nn_module, args, kwargs, dynamic_shapes):
def _flatten_dynamic_shapes(
- dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any]]
-):
- def _is_dynamic_shape_leaf(x):
- if isinstance(x, dict):
- x = list(x.values())
- return x is None or all(isinstance(y, (_Dim, int)) or y is None for y in x)
-
- if isinstance(dynamic_shapes, (list, tuple)):
- flat_dynamic_shapes = []
- for item in dynamic_shapes:
- flat_shapes, _ = tree_flatten(
- dynamic_shapes, is_leaf=_is_dynamic_shape_leaf
- )
- flat_dynamic_shapes += flat_shapes
- else:
- flat_dynamic_shapes, _ = tree_flatten(
- dynamic_shapes, is_leaf=_is_dynamic_shape_leaf
- )
- return flat_dynamic_shapes
+ combined_args: Dict[str, Any],
+ dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any]],
+) -> List[Any]:
+ flat_shapes = []
+
+ def _tree_map_helper(t, shape):
+ nonlocal flat_shapes
+ flat_shapes.append(shape)
+
+ _tree_map(_tree_map_helper, combined_args, dynamic_shapes)
+ return flat_shapes
def produce_guards_and_solve_constraints(
@@ -260,6 +251,7 @@ def produce_guards_and_solve_constraints(
def make_constraints(
fake_mode: FakeTensorMode,
gm: torch.fx.GraphModule,
+ combined_args: Dict[str, Any],
dynamic_shapes: Union[Dict[str, Any], Tuple[Any], List[Any], None],
num_lifted_inputs: int,
):
@@ -280,7 +272,16 @@ def make_constraints(
if not dynamic_shapes:
return range_constraints
- flat_dynamic_shapes = _flatten_dynamic_shapes(dynamic_shapes)
+ # get individual dynamic shapes spec for each input
+ if not isinstance(dynamic_shapes, dict):
+ assert isinstance(dynamic_shapes, (tuple, list))
+ combined_args = type(dynamic_shapes)(combined_args.values()) # type: ignore[assignment, misc]
+ flat_dynamic_shapes = _flatten_dynamic_shapes(combined_args, dynamic_shapes)
+
+ # check number of shapes vs. number of inputs
+ num_placeholders = [node.op == "placeholder" for node in gm.graph.nodes].count(True)
+ assert len(flat_dynamic_shapes) == num_placeholders - num_lifted_inputs
+
input_dims = defaultdict(list)
free_symbols = set()
for input_index, node in enumerate(gm.graph.nodes):
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index 96570a1a9b..fe70fc2f99 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -36,6 +36,7 @@ from torch._functorch.aot_autograd import aot_export_module
from torch._guards import detect_fake_mode
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._utils_internal import log_export_usage
+from torch.export.dynamic_shapes import _combine_args
from torch.export.exported_program import OutputKind
from torch.fx._utils import first_call_function_nn_module_stack
from torch.fx.experimental.symbolic_shapes import (
@@ -1061,9 +1062,11 @@ def _export(
except (ConstraintViolationError, ValueRangeError) as e:
raise UserError(UserErrorType.CONSTRAINT_VIOLATION, str(e)) # noqa: TRY200
+ combined_args = _combine_args(mod, args, kwargs)
range_constraints = make_constraints(
fake_mode,
ep_non_strict.gm,
+ combined_args,
dynamic_shapes,
num_lifted,
)
@@ -1269,9 +1272,11 @@ def _export(
),
len(export_graph_signature.input_specs),
)
+ combined_args = _combine_args(mod, args, kwargs)
range_constraints = make_constraints(
dynamo_fake_mode,
gm,
+ combined_args,
dynamic_shapes,
num_lifted,
)
|
2.41.0
|
71ee40793c21858cc7ccb9855f930549c35777d
|
Thu, 2 May 2024 16:29:05 -0700
|
[PATCH 0962/1000] [dynamo][nn module] Check for duplicate tensors in register_attr_or_module (#125421)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125421 Approved by: https://github.com/mlazos ghstack dependencies: #125439
|
diff --git a/torch/_dynamo/output_graph.py b/torch/_dynamo/output_graph.py
index 0be89d59e2..7e0d2e6197 100644
--- a/torch/_dynamo/output_graph.py
+++ b/torch/_dynamo/output_graph.py
@@ -767,22 +767,32 @@ class OutputGraph:
# are registered as get_attr nodes in the root graph.
tracer = self.root_tracer
- if get_static_address_type(target) == "guarded":
- install_guard(source.make_guard(GuardBuilder.ID_MATCH))
- elif not is_constant_source(source):
- install_guard(source.make_guard(GuardBuilder.TENSOR_MATCH))
-
def wrap_name(module_key):
assert self.param_name_to_source is not None
self.param_name_to_source[module_key] = source
- return wrap_fx_proxy(
+ # Check if the attr has already been registered. This can happen
+ # when two different sources point to the same tensor.
+ if target in self.root_tx.output.side_effects:
+ return self.root_tx.output.side_effects[target]
+
+ if get_static_address_type(target) == "guarded":
+ install_guard(source.make_guard(GuardBuilder.ID_MATCH))
+ elif not is_constant_source(source):
+ install_guard(source.make_guard(GuardBuilder.TENSOR_MATCH))
+
+ vt = wrap_fx_proxy(
self.root_tx,
tracer.create_proxy("get_attr", module_key, tuple(), {}),
example_value=target,
**options,
)
+ # Track the object so to avoid duplicate registration in case of
+ # different sources pointing to the same tensor object.
+ vt = self.root_tx.output.side_effects.track_object_existing(target, vt)
+ return vt
+
elif isinstance(target, torch.nn.Module):
assert isinstance(target, torch.nn.Module)
|
2.41.0
|
c8478974367753eb1ff3c82dbd8922193ab8416
|
Fri, 3 May 2024 05:55:25 +0000
|
[PATCH 0963/1000] [vision hash update] update the pinned vision hash (#123227)
|
This PR is auto-generated nightly by [this action](https://github.com/pytorch/pytorch/blob/main/.github/workflows/nightly.yml). Update the pinned vision hash. Pull Request resolved: https://github.com/pytorch/pytorch/pull/123227 Approved by: https://github.com/pytorchbot
|
diff --git a/.github/ci_commit_pins/vision.txt b/.github/ci_commit_pins/vision.txt
index 15cb85caca..50412178ab 100644
--- a/.github/ci_commit_pins/vision.txt
+++ b/.github/ci_commit_pins/vision.txt
@@ -1 +1 @@
-2c4665ffbb64f03f5d18016d3398af4ac4da5f03
+06ad737628abc3a1e617571dc03cbdd5b36ea96a
|
2.41.0
|
d92637f445d2787f83829079276f71b1ad1fc7c
|
Thu, 2 May 2024 21:19:48 -0700
|
[PATCH 0964/1000] Add `write_record_metadata` to PyTorchFileWriter (#125184)
|
Add `PyTorchFileWriter.write_record_metadata(record_name, num_bytes)` that

- writes the zipfile header/end of central directory metadata for an entry*
- reserves `num_bytes` in the zipfile for the payload.

*Since the payload is not provided, the CRC32 computation is skipped and 0s are written in the corresponding entry of the zipfile header

Pull Request resolved: https://github.com/pytorch/pytorch/pull/125184 Approved by: https://github.com/albanD
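A hedged usage sketch based on the test added below: write real records first, then reserve space for payloads to be filled in later. The private `torch.serialization._open_zipfile_writer` helper is what the test uses; the record names and byte counts here are illustrative, and this snippet does not by itself produce a loadable checkpoint:

```python
import torch

payload = b"example pickled data"
with torch.serialization._open_zipfile_writer("checkpoint.pt") as zf:
    zf.write_record("data.pkl", payload, len(payload))
    # Reserve 1024 bytes for 'data/0' without providing the bytes: the zip
    # header gets a zero CRC32 and the payload region is left to fill later.
    zf.write_record_metadata("data/0", 1024)
```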
|
diff --git a/caffe2/serialize/inline_container.cc b/caffe2/serialize/inline_container.cc
index 533fd42a04..173153e805 100644
--- a/caffe2/serialize/inline_container.cc
+++ b/caffe2/serialize/inline_container.cc
@@ -612,15 +612,35 @@ size_t ostream_write_func(
return ret;
}
+// This func will not update combined_uncomp_crc32_ with the uncomp_crc32
+// since there is no way to get the uncomp_crc32 when no buffer is provided.
+size_t ostream_seek_func(
+ void* pOpaque,
+ mz_uint64 file_ofs,
+ size_t n) {
+ auto self = static_cast<PyTorchStreamWriter*>(pOpaque);
+ if (self->current_pos_ != file_ofs) {
+ CAFFE_THROW("unexpected pos ", self->current_pos_, " vs ", file_ofs);
+ }
+ size_t ret = self->seek_func_(n);
+ if (self->current_pos_ + n != ret) {
+ self->err_seen_ = true;
+ }
+ self->current_pos_ += n;
+ return n;
+}
+
PyTorchStreamWriter::PyTorchStreamWriter(const std::string& file_name)
: archive_name_(basename(file_name)) {
setup(file_name);
}
PyTorchStreamWriter::PyTorchStreamWriter(
- const std::function<size_t(const void*, size_t)> writer_func)
+ const std::function<size_t(const void*, size_t)> writer_func,
+ const std::function<size_t(size_t)> seek_func)
: archive_name_("archive"),
- writer_func_(writer_func) {
+ writer_func_(writer_func),
+ seek_func_(seek_func) {
setup(archive_name_);
}
@@ -649,10 +669,15 @@ void PyTorchStreamWriter::setup(const string& file_name) {
file_stream_.write(static_cast<const char*>(buf), nbytes);
return !file_stream_ ? 0 : nbytes;
};
+ seek_func_ = [this](size_t nbytes) -> size_t {
+ file_stream_.seekp(nbytes, std::ios_base::cur);
+ return file_stream_.tellp();
+ };
}
ar_->m_pIO_opaque = this;
ar_->m_pWrite = ostream_write_func;
+ ar_->m_pSeek = ostream_seek_func;
mz_zip_writer_init_v2(ar_.get(), 0, MZ_ZIP_FLAG_WRITE_ZIP64);
valid("initializing archive ", file_name.c_str());
@@ -682,20 +707,20 @@ void PyTorchStreamWriter::writeRecord(
detail::getPadding(ar_->m_archive_size, full_name.size(), size, padding_);
uint32_t flags = compress ? MZ_BEST_COMPRESSION : 0;
mz_zip_writer_add_mem_ex_v2(
- ar_.get(),
- full_name.c_str(),
- data,
- size,
- nullptr,
- 0,
- flags,
- 0,
- 0,
- nullptr,
- padding_.c_str(),
- padding_size,
- nullptr,
- 0);
+ /*pZip=*/ar_.get(),
+ /*pArchive_name=*/full_name.c_str(),
+ /*pBuf=*/data,
+ /*buf_size=*/size,
+ /*pComment=*/nullptr,
+ /*comment_size=*/0,
+ /*level_and_flags=*/flags,
+ /*uncomp_size=*/0,
+ /*uncomp_crc32=*/0,
+ /*last_modified=*/nullptr,
+ /*user_extra_data=*/padding_.c_str(),
+ /*user_extra_data_len=*/padding_size,
+ /*user_extra_data_central=*/nullptr,
+ /*user_extra_data_central_len=*/0);
valid("writing file ", name.c_str());
files_written_.insert(name);
}
diff --git a/caffe2/serialize/inline_container.h b/caffe2/serialize/inline_container.h
index 6a13d414fe..6dea54f9eb 100644
--- a/caffe2/serialize/inline_container.h
+++ b/caffe2/serialize/inline_container.h
@@ -203,11 +203,21 @@ class TORCH_API PyTorchStreamReader final {
size_t additional_reader_size_threshold_;
};
+namespace {
+
+size_t default_seek_func(size_t nbytes) {
+ TORCH_CHECK(false, "attempting to write record metadata but seek_func unimplemented, please implement seek_func");
+ return 0;
+}
+
+} // namespace
+
class TORCH_API PyTorchStreamWriter final {
public:
explicit PyTorchStreamWriter(const std::string& archive_name);
explicit PyTorchStreamWriter(
- const std::function<size_t(const void*, size_t)> writer_func);
+ const std::function<size_t(const void*, size_t)> writer_func,
+ const std::function<size_t(size_t)> seek_func = default_seek_func);
void setMinVersion(const uint64_t version);
@@ -246,6 +256,7 @@ class TORCH_API PyTorchStreamWriter final {
std::string padding_;
std::ofstream file_stream_;
std::function<size_t(const void*, size_t)> writer_func_;
+ std::function<size_t(size_t)> seek_func_;
uint64_t combined_uncomp_crc32_ = 0;
std::string serialization_id_;
@@ -259,6 +270,10 @@ class TORCH_API PyTorchStreamWriter final {
uint64_t file_ofs,
const void* pBuf,
size_t n);
+ friend size_t ostream_seek_func(
+ void* pOpaque,
+ uint64_t file_ofs,
+ size_t n);
};
namespace detail {
diff --git a/test/test_serialization.py b/test/test_serialization.py
index 2f7e6babde..e3e7b8c592 100644
--- a/test/test_serialization.py
+++ b/test/test_serialization.py
@@ -4000,6 +4000,50 @@ class TestSerialization(TestCase, SerializationMixin):
y['even'][0] = torch.tensor(-0.25, dtype=dtype)
self.assertEqual(y['x'][:2].to(dtype=torch.float32), torch.tensor([-0.25, 0.25]))
+ @parametrize('filename', (True, False))
+ @unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows")
+ def test_filewriter_metadata_writing(self, filename):
+ sd = torch.nn.Linear(3, 5).state_dict()
+ weight_nbytes = sd['weight'].untyped_storage().nbytes()
+ bias_nbytes = sd['bias'].untyped_storage().nbytes()
+ # TemporaryFileName will give a string
+ # NamedTemporaryFile will be treated as a buffer
+ file_creation_func = TemporaryFileName if filename else tempfile.NamedTemporaryFile
+
+ with file_creation_func() as f, file_creation_func() as g:
+ # save state_dict in f
+ torch.save(sd, f)
+ if not filename:
+ f.seek(0)
+ # extract 'data.pkl' for use in our fake checkpoint
+ with torch.serialization._open_file_like(f, 'rb') as opened_file:
+ with torch.serialization._open_zipfile_reader(opened_file) as zip_file:
+ data_file = io.BytesIO(zip_file.get_record('data.pkl'))
+ data_0_offset = zip_file.get_record_offset('data/0')
+ data_1_offset = zip_file.get_record_offset('data/1')
+
+ # write nulls for 'data/0' and 'data/1'
+ with open(f if filename else f.name, 'rb+') as opened_f:
+ opened_f.seek(data_0_offset)
+ opened_f.write(b'0' * weight_nbytes)
+ opened_f.seek(data_1_offset)
+ opened_f.write(b'0' * bias_nbytes)
+
+ with torch.serialization._open_zipfile_writer(g) as zip_file:
+ data_value = data_file.getvalue()
+ zip_file.write_record('data.pkl', data_value, len(data_value))
+ zip_file.write_record('byteorder', sys.byteorder, len(sys.byteorder))
+ # Only write metadata for storages
+ zip_file.write_record_metadata('data/0', weight_nbytes)
+ zip_file.write_record_metadata('data/1', bias_nbytes)
+
+ if not filename:
+ f.seek(0)
+ g.seek(0)
+ sd_loaded = torch.load(g)
+ sd_loaded_ref = torch.load(f)
+ self.assertEqual(sd_loaded, sd_loaded_ref)
+
def run(self, *args, **kwargs):
with serialization_method(use_zip=True):
return super().run(*args, **kwargs)
diff --git a/third_party/miniz-2.1.0/miniz.c b/third_party/miniz-2.1.0/miniz.c
index 4b5d53f817..7d526cf3c6 100755
--- a/third_party/miniz-2.1.0/miniz.c
+++ b/third_party/miniz-2.1.0/miniz.c
@@ -6250,6 +6250,7 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n
mz_uint32 extra_size = 0;
mz_uint8 extra_data[MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE];
mz_uint16 bit_flags = 0;
+ mz_bool write_metadata_only = buf_size && !pBuf;
if ((int)level_and_flags < 0)
level_and_flags = MZ_DEFAULT_LEVEL;
@@ -6263,7 +6264,7 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n
level = level_and_flags & 0xF;
store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));
- if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
+ if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
pState = pZip->m_pState;
@@ -6308,7 +6309,9 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n
if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
{
- uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
+ if (!write_metadata_only) {
+ uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
+ }
uncomp_size = buf_size;
if (uncomp_size <= 3)
{
@@ -6330,8 +6333,8 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n
if (!pState->m_zip64)
{
/* Bail early if the archive would obviously become too large */
- if ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + archive_name_size
- + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size + user_extra_data_len +
+ if ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + archive_name_size
+ + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size + user_extra_data_len +
pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + user_extra_data_central_len
+ MZ_ZIP_DATA_DESCRIPTER_SIZE32) > 0xFFFFFFFF)
{
@@ -6442,7 +6445,14 @@ mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_n
if (store_data_uncompressed)
{
- if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size)
+ mz_bool write_failed;
+ if (write_metadata_only) {
+ write_failed = pZip->m_pSeek(pZip->m_pIO_opaque, cur_archive_file_ofs, buf_size) != buf_size;
+ } else {
+ write_failed = pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size;
+ }
+
+ if (write_failed)
{
pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
diff --git a/third_party/miniz-2.1.0/miniz.h b/third_party/miniz-2.1.0/miniz.h
index 2cad1370c6..cb7eb9d926 100755
--- a/third_party/miniz-2.1.0/miniz.h
+++ b/third_party/miniz-2.1.0/miniz.h
@@ -116,7 +116,7 @@
-/* Defines to completely disable specific portions of miniz.c:
+/* Defines to completely disable specific portions of miniz.c:
If all macros here are defined the only functionality remaining will be CRC-32, adler-32, tinfl, and tdefl. */
/* Define MINIZ_NO_STDIO to disable all usage and any functions which rely on stdio for file I/O. */
@@ -139,7 +139,7 @@
/* Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent conflicts against stock zlib. */
#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES
-/* Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
+/* Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
Note if MINIZ_NO_MALLOC is defined then the user must always provide custom user alloc/free/realloc
callbacks to the zlib and archive API's, and a few stand-alone helper API's which don't provide custom user
functions (such as tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't work. */
@@ -980,6 +980,7 @@ typedef struct
typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n);
+typedef size_t (*mz_file_seek_func)(void *pOpaque, mz_uint64 file_ofs, size_t n);
typedef mz_bool (*mz_file_needs_keepalive)(void *pOpaque);
struct mz_zip_internal_state_tag;
@@ -1071,6 +1072,7 @@ typedef struct mz_zip_archive /* note: added name so it can be forward declared
mz_file_read_func m_pRead;
mz_file_write_func m_pWrite;
+ mz_file_seek_func m_pSeek;
mz_file_needs_keepalive m_pNeeds_keepalive;
void *m_pIO_opaque;
diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp
index a5e3c6059b..8b3e606d4d 100644
--- a/torch/csrc/jit/python/init.cpp
+++ b/torch/csrc/jit/python/init.cpp
@@ -1394,9 +1394,21 @@ void initJITBindings(PyObject* module) {
buffer.attr("write")(std::move(memory_view));
return size;
};
- return std::make_unique<PyTorchStreamWriter>(std::move(writer_func));
+ auto seek_func = [=](size_t offset) {
+ auto current_pos = py::cast<size_t>(buffer.attr("tell")());
+ buffer.attr("seek")(
+ offset, py::module::import("os").attr("SEEK_CUR"));
+ return current_pos + offset;
+ };
+ return std::make_unique<PyTorchStreamWriter>(
+ std::move(writer_func), std::move(seek_func));
}))
.def(py::init<const std::function<size_t(const void*, size_t)>&>())
+ .def(
+ "write_record_metadata",
+ [](PyTorchStreamWriter& self, const std::string& name, size_t size) {
+ return self.writeRecord(name, nullptr, size);
+ })
.def(
"write_record",
[](PyTorchStreamWriter& self,
|
2.41.0
|
5cc7ada6706f737959d8488d96028b3eb29aeea
|
Thu, 2 May 2024 20:50:18 -0700
|
[PATCH 0965/1000] skip triton template precompilation in 311.0-3.11.7 to workaround 311 cpython bug (#125446)
|
Fix for https://github.com/pytorch/pytorch/issues/125374. We don't have CI for these specific versions, but I verified locally. There is a CPython bug from 3.11.0 to 3.11.7 where the AST parsing state is global and errors out with multiple threads. When the dust settles a little around the new process-based compilation we can look into migrating. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125446 Approved by: https://github.com/Chillee ghstack dependencies: #125289
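The guard itself is just a version check; a sketch mirroring the diff below (the function name is illustrative, not the inductor API):

```python
import sys

def parallel_precompile_disabled() -> bool:
    # Skip parallel Triton template precompilation on the affected CPython 3.11
    # patch releases (https://github.com/python/cpython/issues/106905).
    return sys.version_info[:2] == (3, 11) and sys.version_info.micro <= 8
```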
|
diff --git a/torch/_inductor/select_algorithm.py b/torch/_inductor/select_algorithm.py
index 8fcb441ed8..23f73c6207 100644
--- a/torch/_inductor/select_algorithm.py
+++ b/torch/_inductor/select_algorithm.py
@@ -976,6 +976,14 @@ class AlgorithmSelectorCache(PersistentCache):
if num_workers <= 0:
return no_op
+ # https://github.com/python/cpython/issues/106905
+ if (
+ sys.version_info.major == 3
+ and sys.version_info.minor == 11
+ and sys.version_info.micro <= 8
+ ):
+ return no_op
+
# TODO - debug issue
if torch.version.hip:
return no_op
|
2.41.0
|
ebefcf84520e46e4b156a9426ef76c6e110668d
|
Fri, 3 May 2024 09:10:11 +0000
|
[PATCH 0966/1000] Driver folder check (#117548)
|
Added an extra check for drive root folders for Libtorch, as the stat struct does not recognize drive roots, so torch.save should work for them as well (e.g. saving model.pt directly under C:\). Fixes [#111121](https://github.com/pytorch/pytorch/issues/111121) and #105488 Co-authored-by: Ozan Aydin <148207261+ozanMSFT@users.noreply.github.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/117548 Approved by: https://github.com/malfet
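A toy Python illustration of the parent-directory edge case the C++ change handles (not the Libtorch code, just the string logic): the parent of `C:\file.pt` must keep the trailing slash so the existence check targets `C:\`, not `C:`.

```python
def parentdir(name: str) -> str:
    end = max(name.rfind("/"), name.rfind("\\"))
    if end > 1 and name[end - 1] == ":":
        end += 1  # drive root: keep the separator, e.g. "C:\"
    return "" if end == -1 else name[:end]

print(parentdir("C:\\file.pt"))     # C:\
print(parentdir("models/file.pt"))  # models
```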
|
diff --git a/caffe2/serialize/inline_container.cc b/caffe2/serialize/inline_container.cc
index 173153e805..d3bba2c797 100644
--- a/caffe2/serialize/inline_container.cc
+++ b/caffe2/serialize/inline_container.cc
@@ -93,7 +93,15 @@ static std::string parentdir(const std::string& name) {
end = name.find_last_of('\\');
}
- if(end == std::string::npos) {
+ #ifdef WIN32
+ if (end != std::string::npos && end > 1 && name[end - 1] == ':') {
+ // This is a Windows root directory, so include the slash in
+ // the parent directory
+ end++;
+ }
+ #endif
+
+ if (end == std::string::npos) {
return "";
}
diff --git a/test/cpp/jit/test_save_load.cpp b/test/cpp/jit/test_save_load.cpp
index 067892a28e..9c66db990e 100644
--- a/test/cpp/jit/test_save_load.cpp
+++ b/test/cpp/jit/test_save_load.cpp
@@ -1,6 +1,7 @@
#include <gtest/gtest.h>
#include <test/cpp/jit/test_utils.h>
+#include <cstdlib>
#include <iostream>
#include <sstream>
@@ -263,6 +264,37 @@ TEST(SerializationTest, ParentDirNotExist) {
"Parent directory ./doesnotexist does not exist.");
}
+#ifdef WIN32
+TEST(SerializationTest, WindowsDrivePathTest) {
+ // "ZZZ" is typically not a valid drive letter.
+ // We expect to see "ZZZ:\\" or "ZZZ:/" in the error message.
+ // Note: slash should be included for the drive letter parent in Windows.
+ expectThrowsEq(
+ []() {
+ auto t = torch::nn::Linear(5, 5);
+ torch::save(t, "ZZZ:\\file.pt");
+ },
+ "Parent directory ZZZ:\\ does not exist.");
+ expectThrowsEq(
+ []() {
+ auto t = torch::nn::Linear(5, 5);
+ torch::save(t, "ZZZ:/file.pt");
+ },
+ "Parent directory ZZZ:/ does not exist.");
+}
+
+TEST(SerializationTest, WindowsTempPathTest) {
+ // Test for verifying file saving and loading in the temporary folder
+ std::string temp_dir = std::getenv("TEMP");
+ std::string file_path = temp_dir + "/file.pt";
+ auto t1 = torch::tensor(1.0);
+ torch::save(t1, file_path);
+ torch::Tensor t2;
+ torch::load(t2, file_path);
+ ASSERT_TRUE(t1.allclose(t2, 0.0, 0.0));
+}
+#endif
+
TEST(SerializationTest, CalculateNecessaryArgsTest) {
auto schema = torch::schema(
"sync_stream(int stream_id = -1) -> ()",
|
2.41.0
|
89b4586e95752dc65a1821a4383b9679ccd5b6b
|
Fri, 3 May 2024 13:57:32 +0800
|
[PATCH 0967/1000] [optim]fix ut and sgd kernel (#124904)
|
- Original `test_grad_scaling_autocast_fused_optimizers` does not work since there is no "fused" in `optim_inputs` - We should use different `grad_scaler`, they should not share 1 `scale`, there is no issue exposed here because the default `_growth_interval` is 2000 so it will not growth and there is also no inf is found so it will not reduced. The one in `test_cuda.py` should also have this issue, - I set a manual seed to reproduce purpose if there is any numerical failure - I use Tensor tracker here because we failed this UT in dynamo case, the cpp generated code are not exactly same with fused/non fused kernel. - I make it check both `cuda` and `cpu`. - I find some SGD numerical issue with `clang`, and fixed it by using `fmadd` instead of `add/mul` in fused sgd veckernel. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124904 Approved by: https://github.com/jgong5, https://github.com/janeyx99
|
diff --git a/aten/src/ATen/native/cpu/FusedSGDKernel.cpp b/aten/src/ATen/native/cpu/FusedSGDKernel.cpp
index 3383585675..c19aa249a1 100644
--- a/aten/src/ATen/native/cpu/FusedSGDKernel.cpp
+++ b/aten/src/ATen/native/cpu/FusedSGDKernel.cpp
@@ -52,8 +52,8 @@ typename std::enable_if<
grad_vec2 = grad_vec2 * fVec(opmath_t(-1.0));
}
if (weight_decay != 0.0){
- grad_vec1 += param_vec1 * fVec(scalar_t(weight_decay));
- grad_vec2 += param_vec2 * fVec(scalar_t(weight_decay));
+ grad_vec1 = vec::fmadd(param_vec1, fVec(scalar_t(weight_decay)), grad_vec1);
+ grad_vec2 = vec::fmadd(param_vec2, fVec(scalar_t(weight_decay)), grad_vec2);
}
if (momentum != 0.0) {
fVec momentum_vec1, momentum_vec2;
@@ -61,17 +61,16 @@ typename std::enable_if<
momentum_vec1 = grad_vec1;
momentum_vec2 = grad_vec2;
} else {
- momentum_vec1 =
- fVec::loadu(momentum_buf_ptr + d) * fVec(scalar_t(momentum)) +
- grad_vec1 * fVec(scalar_t(1 - dampening));
- momentum_vec2 =
- fVec::loadu(momentum_buf_ptr + d + fVec::size()) * fVec(scalar_t(momentum)) +
- grad_vec2 * fVec(scalar_t(1 - dampening));
+
+ momentum_vec1 = fVec::loadu(momentum_buf_ptr + d) * fVec(scalar_t(momentum));
+ momentum_vec2 = fVec::loadu(momentum_buf_ptr + d + fVec::size()) * fVec(scalar_t(momentum));
+ momentum_vec1 = vec::fmadd(fVec(scalar_t(1 - dampening)), grad_vec1, momentum_vec1);
+ momentum_vec2 = vec::fmadd(fVec(scalar_t(1 - dampening)), grad_vec2, momentum_vec2);
}
vec::convert_from_float<scalar_t>(momentum_vec1, momentum_vec2).store(momentum_buf_ptr + d);;
if (nesterov) {
- grad_vec1 += momentum_vec1 * fVec(scalar_t(momentum));
- grad_vec2 += momentum_vec2 * fVec(scalar_t(momentum));
+ grad_vec1 = vec::fmadd(momentum_vec1, fVec(scalar_t(momentum)), grad_vec1);
+ grad_vec2 = vec::fmadd(momentum_vec2, fVec(scalar_t(momentum)), grad_vec2);
} else {
grad_vec1 = momentum_vec1;
grad_vec2 = momentum_vec2;
@@ -142,7 +141,7 @@ typename std::enable_if<
}
if (maximize) grad_vec = grad_vec * Vec(scalar_t(-1.0));
if (weight_decay != 0.0){
- grad_vec += param_vec * Vec(scalar_t(weight_decay));
+ grad_vec = vec::fmadd(param_vec, Vec(scalar_t(weight_decay)), grad_vec);
}
if (momentum != 0.0) {
Vec momentum_vec;
@@ -150,12 +149,12 @@ typename std::enable_if<
momentum_vec = grad_vec;
} else {
momentum_vec =
- Vec::loadu(momentum_buf_ptr + d) * Vec(scalar_t(momentum)) +
- grad_vec * Vec(scalar_t(1 - dampening));
+ Vec::loadu(momentum_buf_ptr + d) * Vec(scalar_t(momentum));
+ momentum_vec = vec::fmadd(Vec(scalar_t(1 - dampening)), grad_vec, momentum_vec);
}
momentum_vec.store(momentum_buf_ptr + d);
if (nesterov) {
- grad_vec += momentum_vec * Vec(scalar_t(momentum));
+ grad_vec = vec::fmadd(momentum_vec, Vec(scalar_t(momentum)), grad_vec);
} else {
grad_vec = momentum_vec;
}
diff --git a/test/test_cuda.py b/test/test_cuda.py
index 24acfb0dc2..778bdd3136 100644
--- a/test/test_cuda.py
+++ b/test/test_cuda.py
@@ -29,7 +29,6 @@ from torch.cuda._memory_viz import (
)
from torch.testing._internal.autocast_test_lists import AutocastTestLists
from torch.testing._internal.common_cuda import (
- _create_scaling_case,
_get_torch_cuda_version,
TEST_CUDNN,
TEST_MULTIGPU,
@@ -1274,109 +1273,6 @@ torch.cuda.synchronize()
)
self.assertTrue(r != 0)
- # Compare non-fused optimizer vs fused one as the fused one unscales gradients
- # inside its cuda kernel unlike the other.
- def test_grad_scaling_autocast_fused_optimizers(self):
- for optimizer_ctor, optimizer_kwargs, separate_unscale in list(
- product(
- (torch.optim.Adam, torch.optim.AdamW),
- ({"fused": True, "amsgrad": False}, {"fused": True, "amsgrad": True}),
- (False, True),
- )
- ) + list(
- product(
- (torch.optim.SGD,),
- [
- {
- "momentum": 0.0,
- "dampening": d,
- "weight_decay": w,
- "nesterov": n,
- "fused": True,
- }
- for d, w, n in product((0.0, 0.5), (0.0, 0.5), (False,))
- ]
- + [
- {
- "momentum": 0.5,
- "dampening": d,
- "weight_decay": w,
- "nesterov": n,
- "fused": True,
- }
- for d, w, n in product((0.0,), (0.0, 0.5), (True, False))
- ],
- (False, True),
- )
- ):
- with self.subTest(
- optim=optimizer_ctor,
- kwargs=optimizer_kwargs,
- separate_unscale=separate_unscale,
- ):
- self._grad_scaling_autocast_fused_optimizers(
- optimizer_ctor=optimizer_ctor,
- optimizer_kwargs=optimizer_kwargs,
- separate_unscale=separate_unscale,
- )
-
- def _grad_scaling_autocast_fused_optimizers(
- self, optimizer_ctor, optimizer_kwargs, separate_unscale
- ):
- (
- mod_control,
- mod_scaling,
- opt_control,
- opt_scaling,
- data,
- loss_fn,
- _,
- ) = _create_scaling_case(
- optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs
- )
- kwargs = deepcopy(optimizer_kwargs)
- kwargs["fused"] = False
- opt_control = optimizer_ctor(mod_control.parameters(), lr=1.0, **kwargs)
-
- scaler = torch.cuda.amp.GradScaler(init_scale=128.0)
-
- for input, target in data:
- opt_control.zero_grad()
- with torch.autocast("cuda"):
- output_control = mod_control(input)
- loss_control = loss_fn(output_control, target)
- scaler.scale(loss_control).backward()
- scaler.step(opt_control)
- scaler.update()
-
- opt_scaling.zero_grad()
- with torch.autocast("cuda"):
- output_scaling = mod_scaling(input)
- loss_scaling = loss_fn(output_scaling, target)
- scaler.scale(loss_scaling).backward()
- if separate_unscale:
- scaler.unscale_(opt_scaling)
- scaler.step(opt_scaling)
- scaler.update()
-
- self.assertEqual(loss_control, loss_scaling)
- for param_control, param_scaling in zip(
- mod_control.parameters(), mod_scaling.parameters()
- ):
- self.assertEqual(param_control.grad, param_scaling.grad)
- self.assertEqual(param_control, param_scaling)
-
- state_control, state_scaling = (
- opt_control.state[param_control],
- opt_scaling.state[param_scaling],
- )
-
- for k in state_control:
- actual = state_scaling[k]
- if k == "step":
- actual = actual.squeeze()
- self.assertEqual(state_control[k], actual)
-
@unittest.skipIf(TEST_CUDAMALLOCASYNC, "FAIL")
def test_cublas_multiple_threads_same_device(self):
# Note, these parameters should be very carefully tuned
diff --git a/test/test_optim.py b/test/test_optim.py
index 031f2aa6ca..709c28fdc8 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -1677,7 +1677,7 @@ class TestOptimRenewed(TestCase):
optimizers.append(optimizer)
self._compare_between(inpts, models, optimizers)
- @onlyCPU
+ @onlyNativeDeviceTypes
@optims([optim for optim in optim_db if "fused" in optim.supported_impls], dtypes=[torch.float32])
def test_grad_scaling_autocast_fused_optimizers(self, device, dtype, optim_info):
# This ut is from test_cuda.py test_grad_scaling_autocast_fused_optimizers
@@ -1689,11 +1689,13 @@ class TestOptimRenewed(TestCase):
optim_cls = optim_info.optim_cls
for optim_input in optim_inputs:
kwargs = optim_input.kwargs
+ kwargs["fused"] = True
for _separate_unscale in (True, False):
self._grad_scaling_autocast_fused_optimizers(
- optimizer_ctor=optim_cls, optimizer_kwargs=kwargs, separate_unscale=_separate_unscale)
+ device=device, optimizer_ctor=optim_cls, optimizer_kwargs=kwargs, separate_unscale=_separate_unscale)
- def _grad_scaling_autocast_fused_optimizers(self, optimizer_ctor, optimizer_kwargs, separate_unscale):
+ def _grad_scaling_autocast_fused_optimizers(self, device, optimizer_ctor, optimizer_kwargs, separate_unscale):
+ torch.manual_seed(20)
(
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, _,
) = _create_scaling_case(optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs, device='cpu')
@@ -1704,30 +1706,35 @@ class TestOptimRenewed(TestCase):
kwargs['lr'] = 1.0
opt_control = optimizer_ctor(mod_control.parameters(), **kwargs)
- scaler = torch.cpu.amp.GradScaler(init_scale=128.0)
+ scaler_scaling = torch.amp.GradScaler(device, init_scale=128.0)
+ scaler_control = torch.amp.GradScaler(device, init_scale=128.0)
+ tracker = TensorTracker()
for input, target in data:
opt_control.zero_grad()
- with torch.autocast('cpu', dtype=torch.half):
+ with torch.autocast(device_type=device, dtype=torch.half):
output_control = mod_control(input)
loss_control = loss_fn(output_control, target)
- scaler.scale(loss_control).backward()
- scaler.step(opt_control)
- scaler.update()
+ scaler_control.scale(loss_control).backward()
+ scaler_control.step(opt_control)
+ scaler_control.update()
opt_scaling.zero_grad()
- with torch.autocast('cpu', dtype=torch.half):
+ with torch.autocast(device_type=device, dtype=torch.half):
output_scaling = mod_scaling(input)
loss_scaling = loss_fn(output_scaling, target)
- scaler.scale(loss_scaling).backward()
+ scaler_scaling.scale(loss_scaling).backward()
if separate_unscale:
- scaler.unscale_(opt_scaling)
- scaler.step(opt_scaling)
- scaler.update()
+ scaler_scaling.unscale_(opt_scaling)
+ scaler_scaling.step(opt_scaling)
+ scaler_scaling.update()
- self.assertEqual(loss_control, loss_scaling,)
+ tracker.add(loss_control)
+ tracker.pop_check_set(loss_scaling, self)
for param_control, param_scaling in zip(mod_control.parameters(), mod_scaling.parameters()):
- self.assertEqual(param_control.grad, param_scaling.grad,)
- self.assertEqual(param_control, param_scaling,)
+ tracker.add(param_control.grad)
+ tracker.pop_check_set(param_scaling.grad, self)
+ tracker.add(param_control)
+ tracker.pop_check_set(param_scaling, self)
state_control, state_scaling = opt_control.state[param_control], opt_scaling.state[param_scaling]
@@ -1735,7 +1742,8 @@ class TestOptimRenewed(TestCase):
actual = state_scaling[k]
if k == "step":
actual = actual.squeeze()
- self.assertEqual(state_control[k], actual,)
+ tracker.add(state_control[k])
+ tracker.pop_check_set(actual, self)
@onlyCUDA
@optims([o for o in optim_db if "foreach" in o.supported_impls], dtypes=[torch.float32])
|
2.41.0
|
a98c2a932132e49559bf777c02798633d585e66
|
Fri, 3 May 2024 10:24:14 +0000
|
[PATCH 0968/1000] inductor: Add Conv3d support (#124361)
|
This PR adds Conv3d support in inductor, basically reusing and expanding the Conv2d logic and unit tests for Conv3d. Conv3d inductor support improves the performance of C2D_R50, I3D_R50, I3D_R101, Slow and SlowFast-R50 from the OOB models:

| | C2D_R50 | I3D_R50 | I3D_R101 | Slow | SlowFast-R50 |
| -- | -- | -- | -- | -- | -- |
| eager | 15.805 | 13.909 | 11.639 | 12.101 | 6.606 |
| Compile w/o conv3d | 17.244 | 14.893 | 12.109 | 13.015 | 6.603 |
| Compile w/ conv3d | 21.212 | 17.707 | 14.974 | 16.130 | 8.537 |

Pull Request resolved: https://github.com/pytorch/pytorch/pull/124361 Approved by: https://github.com/leslie-fang-intel, https://github.com/CaoE, https://github.com/jgong5, https://github.com/jansel
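For context, a minimal sketch of the user-facing path this enables, compiling a model that contains `nn.Conv3d` (the toy model below is illustrative, not part of this PR):
```
import torch
import torch.nn as nn

class TinyVideoNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv3d(3, 8, kernel_size=3, padding=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.conv(x))

model = TinyVideoNet().eval()
compiled = torch.compile(model)  # with this PR, Conv3d goes through inductor's conv lowering
with torch.no_grad():
    out = compiled(torch.randn(1, 3, 4, 16, 16))  # (batch, channels, depth, height, width)
```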
|
diff --git a/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp b/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
index ea5464f285..b2901bc522 100644
--- a/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
+++ b/aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp
@@ -198,24 +198,40 @@ Tensor mkldnn_reorder_conv3d_weight(
IntArrayRef padding,
IntArrayRef stride,
IntArrayRef dilation,
- int64_t groups) {
+ int64_t groups,
+ c10::OptionalArrayRef<int64_t> input_size) {
mkldnn_check_low_precision(self.scalar_type(), "mkldnn_reorder_conv3d_weight");
const auto padding_expanded = expand_param_if_needed(padding, "padding", 3);
const auto stride_expanded = expand_param_if_needed(stride, "stride", 3);
const auto dilation_expanded = expand_param_if_needed(dilation, "dilation", 3);
- auto w = itensor_from_mkldnn(self);
-
- auto desc =
- ideep::convolution_forward::expected_weights_desc(
- w.get_dims(),
- w.get_data_type(),
- stride_expanded,
- padding_expanded,
- padding_expanded,
- dilation_expanded,
- groups,
- ideep::algorithm::convolution_direct);
+ ideep::dims src_dims = ideep::dims();
+ bool is_channels_last = false;
+ auto memory_format = at::MemoryFormat::Contiguous;
+ if (input_size.has_value()) {
+ src_dims = input_size.value().vec();
+ // if has input size, we always use channels last.
+ is_channels_last = true;
+ memory_format = at::MemoryFormat::ChannelsLast3d;
+ }
+
+ auto self_ = self.is_mkldnn() ? self : self.contiguous(memory_format);
+ auto w = itensor_from_tensor(self_);
+
+ auto desc = ideep::convolution_forward::expected_weights_desc(
+ w.get_dims(),
+ w.get_data_type(),
+ stride_expanded,
+ padding_expanded,
+ padding_expanded,
+ dilation_expanded,
+ groups,
+ ideep::algorithm::convolution_direct,
+ ideep::prop_kind::forward,
+ w.get_data_type(),
+ src_dims,
+ ideep::attr_t(),
+ is_channels_last);
ideep::tensor result;
result.init(desc);
result.feed_from(w);
@@ -223,6 +239,21 @@ Tensor mkldnn_reorder_conv3d_weight(
return new_with_itensor_mkldnn(std::move(result), optTypeMetaToScalarType(self.options().dtype_opt()), self.options().device_opt());
}
+static Tensor mkldnn_reorder_conv_weight(
+ const Tensor& self,
+ IntArrayRef padding,
+ IntArrayRef stride,
+ IntArrayRef dilation,
+ int64_t groups,
+ c10::OptionalArrayRef<int64_t> input_size) {
+ TORCH_CHECK((self.dim() == 4 || self.dim() == 5), "mkldnn_reorder_conv_weight only supports conv2d and conv3d");
+ if (self.dim() == 4) {
+ return at::native::mkldnn_reorder_conv2d_weight(self, padding, stride, dilation, groups, input_size);
+ } else {
+ return at::native::mkldnn_reorder_conv3d_weight(self, padding, stride, dilation, groups, input_size);
+ }
+}
+
static Tensor mkldnn_reorder_linear_weight(
const Tensor& self,
c10::optional<int64_t> batch_size_opt) {
@@ -486,7 +517,7 @@ TORCH_LIBRARY_IMPL(mkldnn, CPU, m) {
TORCH_FN(mkldnn_reorder_linear_weight));
m.impl(
TORCH_SELECTIVE_NAME("mkldnn::_reorder_convolution_weight"),
- TORCH_FN(mkldnn_reorder_conv2d_weight));
+ TORCH_FN(mkldnn_reorder_conv_weight));
m.impl(
TORCH_SELECTIVE_NAME("mkldnn::_reorder_mkldnn_rnn_layer_weight"),
TORCH_FN(mkldnn_reorder_mkldnn_rnn_layer_weight));
@@ -517,7 +548,8 @@ Tensor mkldnn_reorder_conv3d_weight(
IntArrayRef padding,
IntArrayRef stride,
IntArrayRef dilation,
- int64_t groups) {
+ int64_t groups,
+ c10::OptionalArrayRef<int64_t> input_size) {
TORCH_CHECK(false, "mkldnn_reorder_conv3d_weight: MKL-DNN build is disabled");
}
diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 8c1173683b..ab538753fa 100644
--- a/aten/src/ATen/native/native_functions.yaml
+++ b/aten/src/ATen/native/native_functions.yaml
@@ -7463,7 +7463,7 @@
MkldnnCPU: mkldnn_reorder_conv2d_weight
autogen: mkldnn_reorder_conv2d_weight.out
|
+- func: mkldnn_reorder_conv3d_weight(Tensor self, SymInt[3] padding=0, SymInt[3] stride=1, SymInt[3] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor
|
9af8143692571c7dec07465fe93605f82815ce8
|
Thu, 2 May 2024 18:04:01 -0700
|
[PATCH 0969/1000] [FSDP] Added private `_unshard` API (#124304)
|
Some toy example: <img width="998" alt="Screenshot 2024-04-17 at 2 00 05 PM" src="https://github.com/pytorch/pytorch/assets/31054793/b5665a63-beb0-4ca1-92c6-c57a052812fd">

We define `FullyShardedDataParallel._unshard(async_op: bool = False)` that can be used to prefetch all-gathers. The user should make sure to:
1. Run lazy init before the first `_unshard` call of training. For example, this can hackily be done via `root_module.check_is_root()` on the root FSDP module `root_module`.
2. Call `root_module._wait_unshard_streams_on_current_stream()` before the first `_unshard` call of the current iteration (it only needs to be called once, after the last optimizer step and before the first `_unshard` of this iteration).

Differential Revision: [D56262876](https://our.internmc.facebook.com/intern/diff/D56262876) Pull Request resolved: https://github.com/pytorch/pytorch/pull/124304 Approved by: https://github.com/wanchaol
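For illustration, a minimal sketch of the intended prefetch pattern under those two requirements (the training-loop wiring and names such as `prefetch_module` are placeholders; only the underscored FSDP calls come from this PR):
```
import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

def train_step(root_module: FSDP, prefetch_module: FSDP, optimizer, batch):
    # Assumes root_module.check_is_root() was already called once before the
    # first _unshard of training (lazy init, as noted above).
    # Once per iteration, after the previous optimizer step and before the
    # first _unshard of this iteration:
    root_module._wait_unshard_streams_on_current_stream()
    # Kick off the all-gather for a submodule early; the pre-forward hook will
    # wait on the recorded unshard event when the forward reaches that module.
    prefetch_module._unshard(async_op=True)
    loss = root_module(batch).sum()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return loss
```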
|
diff --git a/test/distributed/fsdp/test_fsdp_comm.py b/test/distributed/fsdp/test_fsdp_comm.py
index 2e5993b23e..c20637061d 100644
--- a/test/distributed/fsdp/test_fsdp_comm.py
+++ b/test/distributed/fsdp/test_fsdp_comm.py
@@ -3,18 +3,23 @@
import sys
from contextlib import nullcontext
from enum import auto, Enum
-from typing import Optional
+from typing import List, Optional
from unittest.mock import patch
import torch
+import torch.nn as nn
+import torch.nn.functional as F
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
+from torch.distributed.fsdp.wrap import ModuleWrapPolicy
+from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
CUDAInitMode,
FSDPInitMode,
FSDPTest,
+ MLP,
NestedWrappedModule,
TransformerWithSharedParams,
)
@@ -283,7 +288,102 @@ class TestCommunication(FSDPTest):
self.assertEqual(num_reduce_scatters, ref_num_reduce_scatters)
+class TestExplicitUnshard(FSDPTest):
+ @property
+ def world_size(self) -> int:
+ return min(torch.cuda.device_count(), 2)
+
+ @skip_if_lt_x_gpu(2)
+ @parametrize("use_orig_params", [False, True])
+ def test_unshard_async(self, use_orig_params: bool):
+ class ReduceModule(nn.Module):
+ def __init__(self, dim: int, group: dist.ProcessGroup):
+ super().__init__()
+ self.group = group
+ self.weight = nn.Parameter(torch.randn(dim, dim))
+
+ def forward(self, x: torch.Tensor):
+ y = F.relu(x @ self.weight)
+ # NOTE: This all-reduce is not differentiable and is included
+ # to exercise the overlap.
+ work = dist.all_reduce(y, group=self.group, async_op=True)
+ return y, work
+
+ class MLPs(nn.Module):
+ def __init__(self, dim: int):
+ super().__init__()
+ self.mlp1 = MLP(dim)
+ self.mlp2 = MLP(dim)
+ self.mlp3 = MLP(dim)
+
+ def forward(self, ys: List[torch.Tensor], works: List[dist.Work]):
+ (y1, y2, y3), (work1, work2, work3) = ys, works
+ work1.wait()
+ z1 = self.mlp1(y1)
+ work2.wait()
+ z2 = self.mlp2(y2)
+ work3.wait()
+ z3 = self.mlp3(y3)
+ return z1 + z2 + z3
+
+ class ReduceModel(nn.Module):
+ def __init__(self, dim: int, group: dist.ProcessGroup):
+ super().__init__()
+ self.reduce_module1 = ReduceModule(dim, group)
+ self.reduce_module2 = ReduceModule(dim, group)
+ self.reduce_module3 = ReduceModule(dim, group)
+ self.mlps = MLPs(dim)
+
+ def forward(self, x: torch.Tensor):
+ y1, work1 = self.reduce_module1(x)
+ if isinstance(self.mlps.mlp1, FSDP):
+ self.mlps.mlp1._unshard(async_op=True)
+ y2, work2 = self.reduce_module2(x)
+ if isinstance(self.mlps.mlp2, FSDP):
+ self.mlps.mlp2._unshard(async_op=True)
+ y3, work3 = self.reduce_module3(x)
+ if isinstance(self.mlps.mlp3, FSDP):
+ self.mlps.mlp3._unshard(async_op=True)
+ return self.mlps([y1, y2, y3], [work1, work2, work3])
+
+ group = self.process_group
+ batch_size, dim = 2, 8
+ torch.manual_seed(42)
+ ref_model = DDP(ReduceModel(dim, group).cuda(), device_ids=[self.rank])
+ ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
+
+ torch.manual_seed(42)
+ model = ReduceModel(dim, group)
+ model.mlps = FSDP(
+ model.mlps,
+ sharding_strategy=ShardingStrategy.SHARD_GRAD_OP,
+ auto_wrap_policy=ModuleWrapPolicy((MLP,)),
+ device_id=self.rank,
+ use_orig_params=use_orig_params,
+ )
+ model.mlps.check_is_root()
+ mlp_params = set(model.mlps.parameters())
+ mlp_param_names = {n for n, p in model.named_parameters() if p in mlp_params}
+ DDP._set_params_and_buffers_to_ignore_for_model(model, mlp_param_names)
+ model = DDP(model.cuda(), device_ids=[self.rank])
+ optim = torch.optim.Adam(model.parameters(), lr=1e-2)
+
+ torch.manual_seed(42 + self.rank + 1)
+ inp = torch.randn((batch_size, dim), device="cuda")
+
+ for _ in range(10):
+ losses: List[torch.Tensor] = []
+ for _model, _optim in ((ref_model, ref_optim), (model, optim)):
+ losses.append(_model(inp).sum())
+ losses[-1].backward()
+ _optim.step()
+ _optim.zero_grad()
+ self.assertEqual(losses[0], losses[1])
+ model.module.mlps._wait_unshard_streams_on_current_stream()
+
+
instantiate_parametrized_tests(TestCommunication)
+instantiate_parametrized_tests(TestExplicitUnshard)
if __name__ == "__main__":
run_tests()
diff --git a/torch/distributed/fsdp/_common_utils.py b/torch/distributed/fsdp/_common_utils.py
index 5da5437879..d2b8b95859 100644
--- a/torch/distributed/fsdp/_common_utils.py
+++ b/torch/distributed/fsdp/_common_utils.py
@@ -139,6 +139,7 @@ class _FSDPState(_State):
self._gradient_postdivide_factor: int = 0
self._comm_hook: Optional[Callable] = None
self._comm_hook_state: Optional[Any] = None
+ self._unshard_event: Optional[torch.cuda.Event] = None
# Abstract device handle for fsdp compute device. For now,
# the compute device must implement cuda semantics used by fsdp
self._device_handle: _FSDPDeviceHandle = _UninitializedDeviceHandle()
diff --git a/torch/distributed/fsdp/_flat_param.py b/torch/distributed/fsdp/_flat_param.py
index 6e2ef8ec71..2f344d19e9 100644
--- a/torch/distributed/fsdp/_flat_param.py
+++ b/torch/distributed/fsdp/_flat_param.py
@@ -1394,7 +1394,7 @@ class FlatParamHandle:
tensor_list = list(
torch.chunk(padded_unsharded_flat_param, dist.get_world_size(pg))
)
- work = dist.all_gather(tensor_list, sharded_flat_param, group=pg)
+ dist.all_gather(tensor_list, sharded_flat_param, group=pg)
else:
dist.all_gather_into_tensor(
padded_unsharded_flat_param,
diff --git a/torch/distributed/fsdp/_init_utils.py b/torch/distributed/fsdp/_init_utils.py
index 369439da55..0a592ea5c7 100644
--- a/torch/distributed/fsdp/_init_utils.py
+++ b/torch/distributed/fsdp/_init_utils.py
@@ -469,6 +469,7 @@ def _init_core_state(
backward_prefetch_limit,
forward_prefetch_limit,
)
+ state._unshard_event = None
# Mapping from fully sharded module to the handles it is responsible to
# unshard and reshard (see [Note: Fully Sharded Module])
_fully_sharded_module_to_handle: Dict[nn.Module, FlatParamHandle] = dict()
diff --git a/torch/distributed/fsdp/_runtime_utils.py b/torch/distributed/fsdp/_runtime_utils.py
index 5fbb727a01..2b60c14757 100644
--- a/torch/distributed/fsdp/_runtime_utils.py
+++ b/torch/distributed/fsdp/_runtime_utils.py
@@ -416,7 +416,12 @@ def _pre_forward_unshard(
handle._needs_pre_forward_unshard = False
# Don't wait during trace
if not torch.distributed._functional_collectives.is_torchdynamo_compiling():
- state._device_handle.current_stream().wait_stream(state._unshard_stream)
+ current_stream = state._device_handle.current_stream()
+ if state._unshard_event is not None:
+ current_stream.wait_event(state._unshard_event)
+ state._unshard_event = None
+ else:
+ current_stream.wait_stream(state._unshard_stream)
with torch.profiler.record_function(
"FullyShardedDataParallel._pre_forward_prefetch"
):
diff --git a/torch/distributed/fsdp/fully_sharded_data_parallel.py b/torch/distributed/fsdp/fully_sharded_data_parallel.py
index f298f2b75d..3b549c8229 100644
--- a/torch/distributed/fsdp/fully_sharded_data_parallel.py
+++ b/torch/distributed/fsdp/fully_sharded_data_parallel.py
@@ -36,6 +36,7 @@ from torch.distributed.fsdp._common_utils import (
_get_param_to_fqns,
FSDP_PREFIX,
FSDP_WRAPPED_MODULE,
+ HandleTrainingState,
TrainingState,
)
from torch.distributed.fsdp._dynamo_utils import _annotate_modules_for_dynamo
@@ -63,6 +64,8 @@ from torch.distributed.fsdp._runtime_utils import (
_pre_forward,
_pre_forward_unshard,
_root_pre_forward,
+ _unshard,
+ _wait_for_computation_stream,
)
from torch.distributed.fsdp._wrap_utils import _auto_wrap
from torch.distributed.fsdp.api import (
@@ -82,7 +85,7 @@ from torch.distributed.fsdp.api import (
StateDictType,
)
from torch.distributed.utils import _p_assert
-from ._flat_param import FlatParameter
+from ._flat_param import FlatParameter, FlatParamHandle
from ._optim_utils import (
_flatten_optim_state_dict,
@@ -1987,6 +1990,62 @@ class FullyShardedDataParallel(nn.Module, _FSDPState):
fsdp_state._comm_hook = hook
fsdp_state._comm_hook_state = state
+ def _unshard(self, async_op: bool = False):
+ class UnshardHandle:
+ def __init__(
+ self,
+ flat_param_handle: Optional[FlatParamHandle],
+ unshard_event: torch.cuda.Event,
+ ):
+ self._flat_param_handle = flat_param_handle
+ self._unshard_event = unshard_event
+
+ def wait(self):
+ if self._flat_param_handle is not None:
+ current_stream = (
+ self._flat_param_handle._device_handle.current_stream()
+ )
+ current_stream.wait_event(self._unshard_event)
+ self._flat_param_handle = None
+
+ if self._handle:
+ with self._use_training_state(
+ TrainingState.FORWARD_BACKWARD, HandleTrainingState.FORWARD
+ ):
+ _unshard(
+ self, self._handle, self._unshard_stream, self._pre_unshard_stream
+ )
+ self._unshard_event = self._unshard_stream.record_event()
+ self._handle._prefetched = True
+ unshard_handle = UnshardHandle(self._handle, self._unshard_stream)
+ if async_op:
+ return unshard_handle
+ unshard_handle.wait()
+ return None
+
+ def _wait_unshard_streams_on_current_stream(self):
+ _wait_for_computation_stream(
+ self._device_handle.current_stream(),
+ self._unshard_stream,
+ self._pre_unshard_stream,
+ )
+
+ @contextlib.contextmanager
+ def _use_training_state(
+ self, training_state: TrainingState, handle_training_state: HandleTrainingState
+ ):
+ prev_training_state = self.training_state
+ self.training_state = training_state
+ if self._handle:
+ prev_handle_training_state = self._handle._training_state
+ self._handle._training_state = handle_training_state
+ try:
+ yield
+ finally:
+ self.training_state = prev_training_state
+ if self._handle:
+ self._handle._training_state = prev_handle_training_state
+
def _get_grad_norm(
params: Iterable[nn.Parameter],
|
2.41.0
|
99ada5b27a1032e4bda4849b34411031d893b5e
|
Fri, 3 May 2024 14:54:15 +0000
|
[PATCH 0970/1000] call `super().__post_init__` in `ForeachFuncInfo.__post_init__` (#125457)
|
On the current main branch, `ForeachFuncInfo`'s `__post_init__` does not call `super().__post_init__()`, which performs setup including setting `dtypesIfCUDA` and `dtypesIfROCM`. Fixes #125295. Related: #125001. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125457 Approved by: https://github.com/janeyx99
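To illustrate why the chained call matters, a minimal standalone sketch (the classes here are illustrative, not the actual OpInfo hierarchy):
```
from dataclasses import dataclass
from typing import Optional, Tuple

@dataclass
class BaseInfo:
    dtypes: Tuple = ()
    dtypes_if_cuda: Optional[Tuple] = None

    def __post_init__(self):
        # Derive the device-specific dtypes from the defaults when not given.
        if self.dtypes_if_cuda is None:
            self.dtypes_if_cuda = self.dtypes

@dataclass
class ForeachInfo(BaseInfo):
    supports_scalars: bool = True

    def __post_init__(self):
        # Without this call, BaseInfo.__post_init__ never runs and
        # dtypes_if_cuda stays None, which is the kind of bug fixed here.
        super().__post_init__()

info = ForeachInfo(dtypes=(int, float))
assert info.dtypes_if_cuda == (int, float)
```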
|
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index 182bf42cce..001d93de18 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -9479,7 +9479,6 @@ foreach_unary_op_db: List[OpInfo] = [
backward_requires_result=True,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9489,7 +9488,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9499,7 +9497,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9509,7 +9506,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9519,7 +9515,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9529,7 +9524,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9539,7 +9533,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9549,7 +9542,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9559,7 +9551,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9570,7 +9561,6 @@ foreach_unary_op_db: List[OpInfo] = [
backward_requires_result=True,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9594,7 +9584,6 @@ foreach_unary_op_db: List[OpInfo] = [
backward_requires_result=True,
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9614,7 +9603,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9624,7 +9612,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types(),
dtypesIfCUDA=floating_and_complex_types_and(torch.half,),
- dtypesIfROCM=floating_and_complex_types_and(torch.half,),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9633,8 +9620,6 @@ foreach_unary_op_db: List[OpInfo] = [
'neg',
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and_complex(),
- dtypesIfCUDA=all_types_and_complex(),
- dtypesIfROCM=all_types_and_complex(),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9644,7 +9629,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
- dtypesIfROCM=floating_and_complex_types_and(torch.half),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9655,7 +9639,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
- dtypesIfROCM=all_types_and(torch.half, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9665,7 +9648,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9675,7 +9657,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9685,7 +9666,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
- dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9696,7 +9676,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
- dtypesIfROCM=all_types_and(torch.half, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9706,7 +9685,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.bfloat16),
dtypesIfCUDA=floating_and_complex_types_and(torch.half),
- dtypesIfROCM=floating_and_complex_types_and(torch.half),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9716,7 +9694,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
- dtypesIfROCM=all_types_and(torch.half, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9726,7 +9703,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
- dtypesIfROCM=floating_types_and(torch.half, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9736,7 +9712,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
- dtypesIfROCM=floating_types_and(torch.half),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9747,7 +9722,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bfloat16),
dtypesIfCUDA=floating_types_and(torch.half),
- dtypesIfROCM=floating_types_and(torch.half),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9758,7 +9732,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.half, torch.bfloat16),
- dtypesIfROCM=all_types_and(torch.half, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9768,7 +9741,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
- dtypesIfROCM=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9784,7 +9756,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half),
- dtypesIfROCM=all_types_and_complex_and(torch.bfloat16, torch.half),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9795,7 +9766,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=floating_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
- dtypesIfROCM=floating_types_and(torch.bfloat16, torch.float16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9805,7 +9775,6 @@ foreach_unary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(1, False, False),
dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half),
dtypesIfCUDA=all_types_and(torch.bool, torch.float16),
- dtypesIfROCM=all_types_and(torch.bool, torch.float16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9825,8 +9794,6 @@ foreach_binary_op_db: List[OpInfo] = [
"add",
sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
- dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
- dtypesIfROCM=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
supports_autograd=True,
supports_inplace_autograd=True,
@@ -9848,8 +9815,6 @@ foreach_binary_op_db: List[OpInfo] = [
"sub",
sample_inputs_func=foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
- dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
- dtypesIfROCM=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
supports_alpha_param=True,
supports_autograd=True,
supports_inplace_autograd=True,
@@ -9868,8 +9833,6 @@ foreach_binary_op_db: List[OpInfo] = [
ForeachFuncInfo(
"mul",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
- dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
- dtypesIfROCM=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
supports_autograd=True,
supports_inplace_autograd=True,
@@ -9889,8 +9852,6 @@ foreach_binary_op_db: List[OpInfo] = [
ForeachFuncInfo(
"div",
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
- dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
- dtypesIfROCM=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16),
sample_inputs_func=foreach_inputs_sample_func(2, True, True, True),
supports_autograd=True,
supports_inplace_autograd=True,
@@ -9921,7 +9882,6 @@ foreach_binary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- dtypesIfROCM=all_types_and(torch.bfloat16, torch.float16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9941,7 +9901,6 @@ foreach_binary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- dtypesIfROCM=all_types_and(torch.bfloat16, torch.float16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -9962,7 +9921,6 @@ foreach_binary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- dtypesIfROCM=all_types_and(torch.bfloat16, torch.float16),
supports_autograd=True,
supports_inplace_autograd=False,
supports_forward_ad=False,
@@ -9983,7 +9941,6 @@ foreach_binary_op_db: List[OpInfo] = [
sample_inputs_func=foreach_inputs_sample_func(2, True, True),
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- dtypesIfROCM=all_types_and(torch.bfloat16, torch.float16),
supports_autograd=True,
supports_forward_ad=False,
supports_inplace_autograd=False,
@@ -10002,7 +9959,6 @@ foreach_binary_op_db: List[OpInfo] = [
"pow",
dtypes=all_types_and(torch.bfloat16),
dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16),
- dtypesIfROCM=all_types_and(torch.bfloat16, torch.float16),
supports_alpha_param=False,
supports_scalar_self_arg=True,
sample_inputs_func=foreach_inputs_sample_func(2, True, True),
@@ -10025,8 +9981,6 @@ foreach_binary_op_db: List[OpInfo] = [
"copy",
sample_inputs_func=foreach_inputs_sample_func(2, False, False),
dtypes=all_types_and_complex_and(torch.bfloat16, torch.half),
- dtypesIfCUDA=all_types_and_complex_and(torch.bfloat16, torch.half),
- dtypesIfROCM=all_types_and_complex_and(torch.bfloat16, torch.half),
supports_out=False,
supports_forward_ad=False,
supports_autograd=False,
@@ -10040,7 +9994,6 @@ foreach_pointwise_op_db: List[ForeachFuncInfo] = [
sample_inputs_func=foreach_pointwise_sample_func(4, True, True),
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
- dtypesIfROCM=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -10065,7 +10018,6 @@ foreach_pointwise_op_db: List[ForeachFuncInfo] = [
sample_inputs_func=foreach_pointwise_sample_func(4, True, True),
dtypes=all_types_and_complex(),
dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16),
- dtypesIfROCM=all_types_and_complex_and(torch.half, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -10093,8 +10045,6 @@ foreach_reduce_op_db: List[ForeachFuncInfo] = [
"norm",
sample_inputs_func=foreach_norm_sample_func(1, False, False),
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
- dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16),
- dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16),
supports_autograd=True,
supports_inplace_autograd=True,
supports_forward_ad=True,
@@ -10115,8 +10065,6 @@ foreach_other_op_db: List[ForeachFuncInfo] = [
supports_inplace_autograd=True,
supports_forward_ad=True,
dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16),
- dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
- dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16),
),
]
diff --git a/torch/testing/_internal/opinfo/core.py b/torch/testing/_internal/opinfo/core.py
index 2237da1c19..70c643d2b8 100644
--- a/torch/testing/_internal/opinfo/core.py
+++ b/torch/testing/_internal/opinfo/core.py
@@ -2750,6 +2750,9 @@ class ForeachFuncInfo(OpInfo):
self.ref = torch.clamp_min
self.ref_inplace = torch.Tensor.clamp_min_
+ # The following sets `dtypesIfCUDA` and `dtypesIfROCM` accordingly.
+ super().__post_init__()
+
def sample_zero_size_inputs(self, device, dtype, requires_grad=False, **kwargs):
if not hasattr(self.sample_inputs_func, "sample_zero_size_tensor_inputs"):
return []
|
2.41.0
|
6052a35d462ed783a32d37f5278cab2f5cfa17d
|
Thu, 2 May 2024 07:38:49 -0700
|
[PATCH 0975/1000] [RFC][FSDP2] Added `register_fsdp_forward_method` for user fwd methods (#125394)
|
FSDP only runs its pre/post-forward hooks on `nn.Module.forward`. This means that if the user runs a custom method meant as a forward pass, then FSDP will not all-gather the parameters. Examples include HuggingFace models' `generate()` (https://github.com/pytorch/pytorch/issues/123962, https://github.com/pytorch/pytorch/issues/100069) or others (https://github.com/pytorch/pytorch/issues/109385). This PR adds a monkey patching API `register_fsdp_forward_method(module: nn.Module, method_name: str)` to allow FSDP pre/post-forward hooks to run on the method. The function is a no-op if the passed-in `module` is not an FSDP module so that the register function can be called even if the FSDP wrapping changes. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125394 Approved by: https://github.com/weifengpy, https://github.com/wanchaol
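A minimal usage sketch, assuming a process group is already initialized (the `Encoder` model is illustrative; `fully_shard` and `register_fsdp_forward_method` are the composable FSDP APIs touched here):
```
import torch.nn as nn
from torch.distributed._composable.fsdp import fully_shard, register_fsdp_forward_method

class Encoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(16, 16)

    def forward_features(self, x):
        # Custom entry point that FSDP's hooks would otherwise not wrap.
        return self.proj(x)

    def forward(self, x):
        return self.forward_features(x).sum(dim=-1)

model = Encoder()
fully_shard(model)  # requires torch.distributed to be initialized (e.g. via torchrun)
# Run FSDP's pre/post-forward hooks around forward_features too, so parameters
# are all-gathered even when it is called directly instead of forward().
register_fsdp_forward_method(model, "forward_features")
```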
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_training.py b/test/distributed/_composable/fsdp/test_fully_shard_training.py
index bf90bd165c..8d1e1dbfb6 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_training.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_training.py
@@ -16,6 +16,7 @@ from torch.distributed._composable.fsdp import (
FSDPModule,
fully_shard,
OffloadPolicy,
+ register_fsdp_forward_method,
)
from torch.distributed._tensor import DTensor, init_device_mesh
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
@@ -1139,5 +1140,57 @@ class TestFullyShardHSDPTraining(FSDPTest):
check_sharded_parity(self, ref_model, model)
+class TestFullyShardCustomForwardMethod(FSDPTestMultiThread):
+ @property
+ def world_size(self) -> int:
+ return 2
+
+ @unittest.skipIf(not TEST_CUDA, "no cuda")
+ def test_register_fsdp_forward_method(self):
+ """Based on https://github.com/pytorch/pytorch/issues/109385"""
+
+ class VisionTransformer(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.patch_proj = nn.Conv2d(3, 1024, kernel_size=14, stride=14)
+
+ def forward_features(self, imgs: torch.Tensor) -> torch.Tensor:
+ return self.patch_proj(imgs).flatten(2).transpose(1, 2)
+
+ def forward(self, imgs: torch.Tensor) -> torch.Tensor:
+ return self.forward_features(imgs).sum(dim=1)
+
+ class Model(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.vit, self.projector = VisionTransformer(), nn.Linear(1024, 256)
+
+ def forward(self, imgs: torch.Tensor) -> torch.Tensor:
+ # Run `vit.forward_features`, which is not `forward`!
+ patch_embeddings = self.vit.forward_features(imgs)
+ return self.projector(patch_embeddings)
+
+ torch.manual_seed(42)
+ model = Model()
+ for param in model.parameters():
+ dist.broadcast(param.detach(), src=0)
+ ref_model = copy.deepcopy(model).cuda()
+ fully_shard(model.vit)
+ fully_shard(model.projector)
+ fully_shard(model)
+ register_fsdp_forward_method(model.vit, "forward_features")
+
+ torch.manual_seed(42 + self.rank + 1)
+ inp = torch.randn(4, 3, 224, 224, device="cuda")
+ ref_loss = ref_model(inp).sum()
+ loss = model(inp).sum()
+ self.assertEqual(ref_loss, loss)
+ ref_loss.backward()
+ loss.backward()
+ for param in ref_model.parameters():
+ dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+ check_sharded_parity(self, ref_model, model)
+
+
if __name__ == "__main__":
run_tests()
diff --git a/torch/distributed/_composable/fsdp/__init__.py b/torch/distributed/_composable/fsdp/__init__.py
index 4a0523f0a0..191a836632 100644
--- a/torch/distributed/_composable/fsdp/__init__.py
+++ b/torch/distributed/_composable/fsdp/__init__.py
@@ -1,2 +1,2 @@
from ._fsdp_api import CPUOffloadPolicy, MixedPrecisionPolicy, OffloadPolicy
-from .fully_shard import FSDPModule, fully_shard
+from .fully_shard import FSDPModule, fully_shard, register_fsdp_forward_method
diff --git a/torch/distributed/_composable/fsdp/fully_shard.py b/torch/distributed/_composable/fsdp/fully_shard.py
index e1538b00f6..a520470173 100644
--- a/torch/distributed/_composable/fsdp/fully_shard.py
+++ b/torch/distributed/_composable/fsdp/fully_shard.py
@@ -1,3 +1,4 @@
+import functools
from typing import Any, cast, Optional, Union
import typing_extensions
@@ -314,3 +315,35 @@ class UnshardHandle:
self._fsdp_param_group.wait_for_unshard()
# Avoid keeping a reference
self._fsdp_param_group = None
+
+
+def register_fsdp_forward_method(module: nn.Module, method_name: str) -> None:
+ """
+ Registers a method on ``module`` to be a forward method for FSDP.
+
+ FSDP only knows to run its pre-forward and post-forward hooks on the
+ default :meth:`nn.Module.forward` method. This function patches a user
+ specified method to run the pre/post-forward hooks before/after the method,
+ respectively. If ``module`` is not an :class:`FSDPModule`, then this is a
+ no-op.
+
+ Args:
+ module (nn.Module): Module to register the forward method on.
+ method_name (str): Name of the forward method.
+ """
+ if not isinstance(module, FSDPModule):
+ # Make no-op to allow including both when using/not using FSDP
+ return
+ if not hasattr(module, method_name):
+ raise ValueError(f"{type(module)} does not have a method {method_name}")
+ orig_method = getattr(module, method_name)
+
+ @functools.wraps(orig_method)
+ def wrapped_method(self, *args, **kwargs):
+ fsdp_state = self._get_fsdp_state()
+ args, kwargs = fsdp_state._pre_forward(self, args, kwargs)
+ out = orig_method(*args, **kwargs)
+ return fsdp_state._post_forward(self, args, out)
+
+ # Use `__get__` to make `wrapped_method` an instance method
+ setattr(module, method_name, wrapped_method.__get__(module, type(module)))
|
2.41.0
|
84a5b6cc0ecf0d75dbdbe438f2dee4909dc8db4
|
Wed, 1 May 2024 13:02:42 -0700
|
[PATCH 0976/1000] [AOTI] Add missing std::move for constant args (#125329)
|
Summary: fix https://github.com/pytorch/pytorch/issues/123187 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125329 Approved by: https://github.com/angelayi, https://github.com/chenyang78
|
diff --git a/test/inductor/test_cpu_cpp_wrapper.py b/test/inductor/test_cpu_cpp_wrapper.py
index 828ed8eb0e..c28451613c 100644
--- a/test/inductor/test_cpu_cpp_wrapper.py
+++ b/test/inductor/test_cpu_cpp_wrapper.py
@@ -88,7 +88,6 @@ if config.abi_compatible:
"test_qlinear_cpu",
"test_qlinear_dequant_promotion_cpu",
"test_qlinear_relu_cpu",
- "test_tensor2_cpu",
]
for test_name in xfail_list:
test_failures_cpp_wrapper[test_name] = test_torchinductor.TestFailure(
diff --git a/torch/_inductor/codegen/cpp_wrapper_cpu.py b/torch/_inductor/codegen/cpp_wrapper_cpu.py
index 95e4ef3ac7..ea544ae88c 100644
--- a/torch/_inductor/codegen/cpp_wrapper_cpu.py
+++ b/torch/_inductor/codegen/cpp_wrapper_cpu.py
@@ -593,9 +593,14 @@ class CppWrapperCpu(WrapperCodeGen):
else:
# Append constants as inputs to the graph
constants_idx = inputs_len + idx
- self.prefix.writeline(
- f"auto {constants_key} = inputs[{constants_idx}];"
- )
+ if config.abi_compatible:
+ self.prefix.writeline(
+ f"auto {constants_key} = std::move(inputs[{constants_idx}]);"
+ )
+ else:
+ self.prefix.writeline(
+ f"auto {constants_key} = inputs[{constants_idx}];"
+ )
self.codegen_inputs(self.prefix, V.graph.graph_inputs)
|
2.41.0
|
1a7455b996a2bed2f716f54315509b6b9a6dcce
|
Fri, 3 May 2024 17:19:43 +0200
|
[PATCH 0978/1000] [Inductor cutlass backend] Fix cutlass_utils.get_max_alignment() for strided layouts. (#124930)
|
Fixes cutlass_utils.get_max_alignment(), which was so far not checking the alignment properly: the method assumed that the passed layout is contiguous and row-major, which does not have to be true. Test Plan: CI - test_cutlass_backend.py to prevent regressions; added a unit test. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124930 Approved by: https://github.com/int3 ghstack dependencies: #124929
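For intuition, a standalone sketch of the stride-aware check the fixed helper performs (simplified: static sizes only and a fixed candidate list instead of CUTLASS's per-dtype alignments; the asserts mirror cases from the new unit test):
```
def max_alignment(size, stride, offset, candidates=(8, 4, 2, 1)):
    """Largest vector width (in elements) dividing the contiguous dim's
    extent, the storage offset, and every other stride."""
    try:
        contiguous_dim = stride.index(1)
    except ValueError:
        return 1  # no unit-stride dim found
    for a in candidates:
        if size[contiguous_dim] % a or offset % a:
            continue
        if all(d == contiguous_dim or stride[d] % a == 0 for d in range(len(size))):
            return a
    return 1

# A transposed layout still aligns to 4, an odd leading stride of 23 forces
# alignment 1, and a non-multiple offset drops the alignment to 2.
assert max_alignment((1, 4, 2), (0, 1, 4), 0) == 4
assert max_alignment((2, 4, 2), (23, 1, 4), 0) == 1
assert max_alignment((1, 2, 4), (0, 4, 1), 6) == 2
```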
|
diff --git a/test/inductor/test_cutlass_backend.py b/test/inductor/test_cutlass_backend.py
index 7da02e7077..aed07919a8 100644
--- a/test/inductor/test_cutlass_backend.py
+++ b/test/inductor/test_cutlass_backend.py
@@ -9,7 +9,8 @@ import torch
from torch._dynamo.utils import counters
from torch._inductor import config
from torch._inductor.codegen.cuda.cuda_kernel import CUDATemplateCaller
-from torch._inductor.ir import ChoiceCaller
+from torch._inductor.codegen.cuda.cutlass_utils import get_max_alignment
+from torch._inductor.ir import ChoiceCaller, FixedLayout
from torch._inductor.select_algorithm import NoValidChoicesError
from torch._inductor.test_case import run_tests, TestCase
from torch._inductor.utils import fresh_inductor_cache
@@ -616,6 +617,52 @@ class TestCutlassBackend(TestCase):
cuda_template_count += 1
assert cuda_template_count > 0, "No CUDATemplateCaller choices"
+ @unittest.skipIf(not SM80OrLater, "need sm_90")
+ @unittest.skipIf(config.is_fbcode(), "fbcode requires different CUTLASS path setup")
+ @unittest.mock.patch.dict(os.environ, {"PATH": _get_path_without_sccache()})
+ def test_get_max_alignment(self):
+ l4 = FixedLayout("cpu", torch.half, size=(1, 2, 4), stride=(0, 4, 1))
+ m4 = get_max_alignment(l4)
+ self.assertEqual(
+ m4, 4, "Wrong max alignment. Should have been 4. (simple, contiguous case)"
+ )
+
+ l4_2 = FixedLayout("cpu", torch.half, size=(1, 4, 2), stride=(0, 1, 4))
+ m4_2 = get_max_alignment(l4_2)
+ self.assertEqual(
+ m4_2,
+ 4,
+ "Wrong max alignment. Should have been 4. Did not deal with strides correctly",
+ )
+
+ l1 = FixedLayout("cpu", torch.half, size=(2, 4, 2), stride=(23, 1, 4))
+ m1 = get_max_alignment(l1)
+ self.assertEqual(
+ m1,
+ 1,
+ "Wrong max alignment. Should have been 1. Did not take stride into account correctly",
+ )
+
+ l2 = FixedLayout("cpu", torch.half, size=(1, 2, 4), stride=(0, 4, 1), offset=6)
+ m2 = get_max_alignment(l2)
+ self.assertEqual(
+ m2, 2, "Wrong max alignment. Should have been 2. (due to choice of offset)"
+ )
+
+ l8 = FixedLayout(
+ "cpu", torch.half, size=(2, 2, 8), stride=(32, 8, 1), offset=24
+ )
+ m8 = get_max_alignment(l8)
+ self.assertEqual(m8, 8, "Wrong max alignment. Should have been 8.")
+
+ l4 = FixedLayout(
+ "cpu", torch.float32, size=(2, 2, 8), stride=(32, 8, 1), offset=24
+ )
+ m4 = get_max_alignment(l4)
+ self.assertEqual(
+ m4, 4, "Wrong max alignment. Should have been 4 (due to float32 dtype )."
+ )
+
if __name__ == "__main__":
from torch._inductor.utils import is_big_gpu
diff --git a/torch/_inductor/codegen/cuda/cutlass_utils.py b/torch/_inductor/codegen/cuda/cutlass_utils.py
index 62465b0883..0247b2ae38 100644
--- a/torch/_inductor/codegen/cuda/cutlass_utils.py
+++ b/torch/_inductor/codegen/cuda/cutlass_utils.py
@@ -297,12 +297,29 @@ def get_max_alignment(inductor_layout: Layout) -> int:
def is_static_int(number):
return isinstance(number, (int, sympy.Integer))
- if is_static_int(size[-1]) and is_static_int(offset):
+ try:
+ contiguous_dim = inductor_layout.stride.index(1)
+ except ValueError:
+ # No dim with stride 1 found, return 1
+ return 1
+ if (
+ is_static_int(size[contiguous_dim])
+ and is_static_int(offset)
+ and all(is_static_int(s) for s in inductor_layout.stride)
+ ):
alignments = get_alignments(dtype)
for alignment in alignments:
- if int(size[-1]) % alignment == 0 and int(offset) % alignment == 0:
+ if (
+ int(size[contiguous_dim]) % alignment != 0
+ or int(offset) % alignment != 0
+ ):
+ continue
+ if all(
+ (dim == contiguous_dim)
+ or (inductor_layout.stride[dim] % alignment == 0)
+ for dim in range(len(size))
+ ):
return alignment
-
return 1
|
2.41.0
|
45baef05da3b2624382b9c3242233bba2a61c83
|
Fri, 3 May 2024 20:52:01 +0000
|
[PATCH 0979/1000] s390x: remove workaround for sleef issue (#124730)
|
This workaround is no longer needed since sleef was updated. Pull Request resolved: https://github.com/pytorch/pytorch/pull/124730 Approved by: https://github.com/soulitzer
|
diff --git a/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h b/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h
index 9b53745b03..b70b494649 100644
--- a/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h
+++ b/aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h
@@ -13,8 +13,6 @@
#include <ATen/cpu/vec/vec_base.h>
#include <c10/util/complex.h>
-#define SLEEF_MEMORY_WORKAROUND
-
namespace at {
namespace vec {
@@ -1148,32 +1146,20 @@ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
}
Vectorized<T> sin() const {
-#ifndef SLEEF_MEMORY_WORKAROUND
return mapSleef(Sleef_sinf4_u10, Sleef_sind2_u10);
-#else
- return mapOrdinary(std::sin);
-#endif
}
Vectorized<T> sinh() const {
return mapSleef(Sleef_sinhf4_u10, Sleef_sinhd2_u10);
}
Vectorized<T> cos() const {
-#ifndef SLEEF_MEMORY_WORKAROUND
return mapSleef(Sleef_cosf4_u10, Sleef_cosd2_u10);
-#else
- return mapOrdinary(std::cos);
-#endif
}
Vectorized<T> cosh() const {
return mapSleef(Sleef_coshf4_u10, Sleef_coshd2_u10);
}
Vectorized<T> tan() const {
-#ifndef SLEEF_MEMORY_WORKAROUND
return mapSleef(Sleef_tanf4_u10, Sleef_tand2_u10);
-#else
- return mapOrdinary(std::tan);
-#endif
}
Vectorized<T> tanh() const {
return mapSleef(Sleef_tanhf4_u10, Sleef_tanhd2_u10);
|
2.41.0
|
941fee7ea6d069cd79c420dbe5528decb86ac3e
|
Fri, 3 May 2024 21:10:36 +0000
|
[PATCH 0980/1000] [CPP extention] Baton lock is called regardless the code version (#125404)
|
Greetings! Fixes #125403. Please assist me with the testing, as it is possible for my reproducer to miss the error in the code: several (at least two) threads have to enter the same part of the code at the same time to check that the file lock is actually working. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125404 Approved by: https://github.com/ezyang
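The essence of the fix as a standalone sketch of the intended control flow, using a toy lock-file class in place of torch's FileBaton (all names here are illustrative):
```
import os
import time

class FileLock:
    """Toy stand-in for torch.utils.file_baton.FileBaton."""
    def __init__(self, path):
        self.path = path
    def try_acquire(self):
        try:
            self.fd = os.open(self.path, os.O_CREAT | os.O_EXCL)
            return True
        except FileExistsError:
            return False
    def wait(self):
        while os.path.exists(self.path):
            time.sleep(0.05)
    def release(self):
        os.close(self.fd)
        os.remove(self.path)

def jit_compile(build_dir, version, old_version, build_fn):
    # After the fix: take the lock unconditionally, *then* decide whether a
    # rebuild is needed, so concurrent callers can never race on the check.
    lock = FileLock(os.path.join(build_dir, "lock"))
    if lock.try_acquire():
        try:
            if version != old_version:
                build_fn()
        finally:
            lock.release()
    else:
        lock.wait()
```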
|
diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py
index 8961dfcae3..ee5584315e 100644
--- a/torch/utils/cpp_extension.py
+++ b/torch/utils/cpp_extension.py
@@ -1691,10 +1691,10 @@ def _jit_compile(name,
file=sys.stderr)
name = f'{name}_v{version}'
- if version != old_version:
- baton = FileBaton(os.path.join(build_directory, 'lock'))
- if baton.try_acquire():
- try:
+ baton = FileBaton(os.path.join(build_directory, 'lock'))
+ if baton.try_acquire():
+ try:
+ if version != old_version:
with GeneratedFileCleaner(keep_intermediates=keep_intermediates) as clean_ctx:
if IS_HIP_EXTENSION and (with_cuda or with_cudnn):
hipify_result = hipify_python.hipify(
@@ -1727,14 +1727,13 @@ def _jit_compile(name,
verbose=verbose,
with_cuda=with_cuda,
is_standalone=is_standalone)
- finally:
- baton.release()
- else:
- baton.wait()
- elif verbose:
- print('No modifications detected for re-loaded extension '
- f'module {name}, skipping build step...',
- file=sys.stderr)
+ elif verbose:
+ print('No modifications detected for re-loaded extension '
+ f'module {name}, skipping build step...', file=sys.stderr)
+ finally:
+ baton.release()
+ else:
+ baton.wait()
if verbose:
print(f'Loading extension module {name}...', file=sys.stderr)
|
2.41.0
|
a578df57cc0f417f671634e564c62ef5d9a97e2
|
Fri, 3 May 2024 11:19:09 -0700
|
[PATCH 0981/1000] [FSDP2] Added test to show rank 0 broadcast for HSDP replicas (#125431)
|
This PR shows a simple utility to broadcast the parameters across replicas for HSDP:
```
replicate_group = mesh.get_group("replicate")
for param in model.parameters():
    # E.g. for mesh [[0, 1, 2, 3], [4, 5, 6, 7]] sharding on dim-1 and
    # replicating on dim-0, broadcast with sources 0, 1, 2, 3
    src_rank = dist.get_process_group_ranks(replicate_group)[0]
    torch.distributed.broadcast(
        param.to_local(), src=src_rank, group=replicate_group
    )
```
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125431 Approved by: https://github.com/weifengpy, https://github.com/wanchaol
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_init.py b/test/distributed/_composable/fsdp/test_fully_shard_init.py
index 2e2e97e3a3..958f375fe2 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_init.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_init.py
@@ -725,5 +725,72 @@ class TestFullyShardProcessGroupInit(FSDPTestMultiThread):
self.assertEqual(param.grad, ref_param.grad)
+class TestFullyShardHSDPBroadcast(FSDPTestMultiThread):
+ @property
+ def world_size(self) -> int:
+ return 4
+
+ @unittest.skipIf(not TEST_CUDA, "no cuda")
+ def test_hsdp_broadcast_across_replicas(self):
+ shard_size, replicate_size = 2, 2
+ mesh = init_device_mesh(
+ "cuda", (replicate_size, shard_size), mesh_dim_names=("replicate", "shard")
+ )
+ model_args = ModelArgs()
+ model = Transformer(model_args)
+ # Add a buffer to show that this flow works for buffers too
+ model.register_buffer("buf", torch.randn((model_args.dim,)))
+ for module in model.modules():
+ if isinstance(module, TransformerBlock):
+ fully_shard(module, mesh=mesh)
+ fully_shard(model, mesh=mesh)
+
+ # Only preserve the model states on the replicate mesh's rank 0
+ if mesh.get_local_rank("replicate") > 0:
+ for tensor in itertools.chain(model.parameters(), model.buffers()):
+ tensor.detach().fill_(1337)
+
+ # Check that replicas are different
+ for tensor in itertools.chain(model.parameters(), model.buffers()):
+ local_tensor = tensor.to_local() if isinstance(tensor, DTensor) else tensor
+ local_tensor_list = [
+ torch.empty_like(local_tensor) for _ in range(mesh["replicate"].size())
+ ]
+ dist.all_gather(
+ local_tensor_list, local_tensor, group=mesh.get_group("replicate")
+ )
+ for other_local_tensor in local_tensor_list[1:]:
+ self.assertEqual(other_local_tensor.shape, local_tensor_list[0].shape)
+ self.assertNotEqual(other_local_tensor, local_tensor_list[0])
+
+ # Broadcast from replicate mesh's rank 0
+ replicate_group = mesh.get_group("replicate")
+ for tensor in itertools.chain(model.parameters(), model.buffers()):
+ # E.g. for mesh [[0, 1, 2, 3], [4, 5, 6, 7]] sharding on dim-1 and
+ # replicating on dim-0, broadcast with sources 0, 1, 2, 3
+ src_rank = dist.get_process_group_ranks(replicate_group)[0]
+ torch.distributed.broadcast(
+ tensor.to_local() if isinstance(tensor, DTensor) else tensor,
+ src=src_rank,
+ group=replicate_group,
+ )
+
+ # Check that replicas are the same
+ for tensor in itertools.chain(model.parameters(), model.buffers()):
+ local_tensor = tensor.to_local() if isinstance(tensor, DTensor) else tensor
+ local_tensor_list = [
+ torch.empty_like(local_tensor) for _ in range(mesh["replicate"].size())
+ ]
+ dist.all_gather(
+ local_tensor_list, local_tensor, group=mesh.get_group("replicate")
+ )
+ for other_local_tensor in local_tensor_list[1:]:
+ self.assertEqual(other_local_tensor, local_tensor_list[0])
+
+ # Check that we can run an iteration without erroring
+ inp = torch.randint(0, model_args.vocab_size, (2, 16), device="cuda")
+ model(inp).sum().backward()
+
+
if __name__ == "__main__":
run_tests()
|
2.41.0
|
503c29357f8ec7a5f97779a83f3a752eeb1654e
|
Fri, 3 May 2024 11:13:52 -0700
|
[PATCH 0982/1000] Introduce torch.utils._sympy.symbol (#125395)
|
This provides utilities for creating and querying properties on sympy.Symbol. I want to use this refactor to get a better handle on how the 's' prefix is being used in Inductor. To start, I only do symbolic_shapes code because that's what I'm familiar with. Signed-off-by: Edward Z. Yang <ezyang@meta.com> Pull Request resolved: https://github.com/pytorch/pytorch/pull/125395 Approved by: https://github.com/Skylion007
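A small usage sketch of the new `torch.utils._sympy.symbol` helpers added in the diff below (available only once this module exists):
```
from torch.utils._sympy.symbol import make_symbol, symbol_is_type, SymT

s0 = make_symbol(SymT.SIZE, 0, positive=True, integer=True)  # Symbol("s0")
u3 = make_symbol(SymT.UNBACKED_INT, 3, integer=True)         # Symbol("u3")

assert symbol_is_type(s0, SymT.SIZE)
assert symbol_is_type(u3, (SymT.UNBACKED_INT, SymT.UNBACKED_FLOAT))
assert not symbol_is_type(s0, SymT.UNBACKED_FLOAT)
```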
|
diff --git a/test/inductor/test_compiled_autograd.py b/test/inductor/test_compiled_autograd.py
index d0e6949450..4c52630f8f 100644
--- a/test/inductor/test_compiled_autograd.py
+++ b/test/inductor/test_compiled_autograd.py
@@ -1516,11 +1516,13 @@ def wrap_test_class(orig_cls):
elif name.startswith("test_"):
dct[name] = make_wrapped(fn)
- return type(
+ cls = type(
orig_cls.__name__ + "WithCompiledAutograd",
orig_cls.__bases__,
dct,
)
+ cls.__file__ = __file__
+ return cls
# These groups of tests aren't supported yet
diff --git a/torch/_inductor/codegen/common.py b/torch/_inductor/codegen/common.py
index a044fd3210..6f340a906e 100644
--- a/torch/_inductor/codegen/common.py
+++ b/torch/_inductor/codegen/common.py
@@ -26,6 +26,7 @@ import torch
import torch.fx
from torch._prims_common import ELEMENTWISE_TYPE_PROMOTION_KIND
from torch.utils import _pytree as pytree
+from torch.utils._sympy.symbol import symbol_is_type, SymT
from torch.utils._sympy.value_ranges import ValueRanges
from .. import config, metrics
@@ -1682,7 +1683,8 @@ class Kernel(CodeGen):
replacements = {
x: self.args.size(x)
for x in sorted_symbols
- if x.name.startswith(("s", "u", "ps"))
+ if symbol_is_type(x, (SymT.UNBACKED_INT, SymT.SIZE))
+ or x.name.startswith("ps")
}
return sympy_subs(index, replacements)
diff --git a/torch/_inductor/codegen/triton.py b/torch/_inductor/codegen/triton.py
index b49aa3aa32..3f15cb5d30 100644
--- a/torch/_inductor/codegen/triton.py
+++ b/torch/_inductor/codegen/triton.py
@@ -37,6 +37,7 @@ from torch._inductor.metrics import is_metric_table_enabled, log_kernel_metadata
from torch._inductor.runtime.hints import AutotuneHint, DeviceProperties
from torch._prims_common import is_integer_dtype
from torch.utils._sympy.functions import FloorDiv, ModularIndexing
+from torch.utils._sympy.symbol import symbol_is_type, SymT
from torch.utils._sympy.value_ranges import ValueRanges
from torch.utils._triton import has_triton_package
@@ -1671,7 +1672,9 @@ class TritonKernel(Kernel):
# indirect indexing
cse_var = self.cse.varname_map[var.name]
mask_vars.update(cse_var.mask_vars)
- elif var.name.startswith(("s", "ps", "i", "u")):
+ elif var.name.startswith(("ps", "i")) or symbol_is_type(
+ var, (SymT.UNBACKED_INT, SymT.SIZE)
+ ):
pass
else:
# var is one of xN, yN or rN
diff --git a/torch/_inductor/lowering.py b/torch/_inductor/lowering.py
index 99fe0a9f03..5cd56aac7b 100644
--- a/torch/_inductor/lowering.py
+++ b/torch/_inductor/lowering.py
@@ -831,6 +831,8 @@ def trunc(x):
@register_lowering(aten.expand, type_promotion_kind=None)
def expand(x, sizes):
+ from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
+
(x,) = promote_constants([x])
if isinstance(x, ir.BaseConstant):
return ExpandView.create(x, tuple(sizes))
@@ -839,15 +841,13 @@ def expand(x, sizes):
if tuple(x.get_size()) == tuple(sizes):
return x
- if not any(V.graph.sizevars.shape_env.is_unbacked_symint(s) for s in x.get_size()):
+ if not free_unbacked_symbols(x.get_size()):
x_size_product = V.graph.sizevars.size_hint(sympy_product(x.get_size()))
# TODO: It would be better to realize the input if any of its sizes
# are unbacked, because typically the size will be non-zero. However,
# this cannot be done directly as below as we'll choke on the size_hint
# here
- if x_size_product > 0 and not any(
- V.graph.sizevars.shape_env.is_unbacked_symint(s) for s in sizes
- ):
+ if x_size_product > 0 and not free_unbacked_symbols(sizes):
# maybe realize input before broadcasting it
x.mark_reuse(
V.graph.sizevars.size_hint(sympy_product(sizes)) // x_size_product
diff --git a/torch/fx/experimental/symbolic_shapes.py b/torch/fx/experimental/symbolic_shapes.py
index 28975b08ef..2ee3df3ced 100644
--- a/torch/fx/experimental/symbolic_shapes.py
+++ b/torch/fx/experimental/symbolic_shapes.py
@@ -68,6 +68,7 @@ from torch.utils._traceback import format_frame, CapturedTraceback
from torch._utils_internal import signpost_event
from torch._subclasses.meta_utils import is_sparse_any
import torch.utils._pytree as pytree
+from torch.utils._sympy.symbol import SymT, make_symbol, symbol_is_type
from torch._logging import LazyString
@@ -439,7 +440,7 @@ def has_free_symbols(val: Union[SymInt, torch.Tensor]) -> bool:
# Like free_symbols, but filtered to only report unbacked symbols
def free_unbacked_symbols(x):
# NB: keep synced with is_unbacked_symint
- return {s for s in free_symbols(x) if s.name.startswith(("u", "f"))}
+ return {s for s in free_symbols(x) if symbol_is_type(s, (SymT.UNBACKED_INT, SymT.UNBACKED_FLOAT))}
# WARNING: Don't use this on Dynamo produced graphs, they don't have meta
# setup!
@@ -1661,7 +1662,7 @@ class DimConstraints:
# We are given a congruence of the form base % divisor == 0 with a free variable s. So:
# - we transform this into an equation of the form base = divisor * tmp;
# - we solve this equation for s to get a linear solution with free variable tmp.
- tmp = sympy.Symbol("tmp", integer=True)
+ tmp = sympy.Symbol("reduce_congruences_tmp", integer=True)
symbol, solution = sympy.solve_linear(base - divisor * tmp, symbols=[s])
# See https://docs.sympy.org/latest/modules/solvers/solvers.html#sympy.solvers.solvers.solve_linear
# for how to interpret the results.
@@ -3025,7 +3026,7 @@ class ShapeEnv:
def create_unbacked_symfloat(self):
"""Create a symbolic float without a hint value
"""
- symbol: sympy.Symbol = sympy.Symbol(f"f{next(self.unbacked_symfloat_counter)}")
+ symbol: sympy.Symbol = make_symbol(SymT.UNBACKED_FLOAT, next(self.unbacked_symfloat_counter))
self.counter["create_unbacked_symbol"] += 1
if not self._ignore_fresh_unbacked_symbols_tls():
self.pending_fresh_unbacked_symbols.append(symbol)
@@ -3043,7 +3044,7 @@ class ShapeEnv:
def create_unbacked_symint(self):
"""Create a symbolic integer without a hint value
"""
- symbol: sympy.Symbol = sympy.Symbol(f"u{next(self.unbacked_symint_counter)}", integer=True)
+ symbol: sympy.Symbol = make_symbol(SymT.UNBACKED_INT, next(self.unbacked_symint_counter), integer=True)
if not self._ignore_fresh_unbacked_symbols_tls():
self.pending_fresh_unbacked_symbols.append(symbol)
self.counter["create_unbacked_symbol"] += 1
@@ -3060,14 +3061,13 @@ class ShapeEnv:
def is_unbacked_symint(self, symbol: sympy.Symbol) -> bool:
"""Check if a sympy symbol matches the naming convention for unbacked symbols
"""
- # NB: keep synced with free_unbacked_symbols
- return str(symbol).startswith("u")
+ return symbol_is_type(symbol, SymT.UNBACKED_INT)
@record_shapeenv_event()
def create_unbacked_symbool(self):
"""Create a symbolic boolean without a hint value
"""
- symbol: sympy.Symbol = sympy.Symbol(f"u{next(self.unbacked_symint_counter)}", integer=True)
+ symbol: sympy.Symbol = make_symbol(SymT.UNBACKED_INT, next(self.unbacked_symint_counter), integer=True)
if not self._ignore_fresh_unbacked_symbols_tls():
self.pending_fresh_unbacked_symbols.append(symbol)
self.counter["create_unbacked_symbol"] += 1
@@ -3179,7 +3179,7 @@ class ShapeEnv:
# If we're not duck shaping, we always create a new symbol
# Even if we're duck shaping, if we haven't seen this particular
# value before, we also create a new symbol
- sympy_expr = sympy.Symbol(f"s{len(self.var_to_val)}", positive=positive, integer=True)
+ sympy_expr = make_symbol(SymT.SIZE, len(self.var_to_val), positive=positive, integer=True)
# We always associate vars to vals
if isinstance(val, int):
self.var_to_val[sympy_expr] = sympy.Integer(val)
@@ -4094,7 +4094,7 @@ class ShapeEnv:
# we have to increase it by offset (and conversely, the new
# variables have to have their value range bounds adjusted as
# well)
- s = sympy.Symbol(f"shape_{idx}", positive=True, integer=True)
+ s = sympy.Symbol(f"evaluate_static_shape_{idx}", positive=True, integer=True)
# Note:
# Offset might be a fraction(e.g. aten.split.Tensor), but shapes are always integers.
@@ -4896,7 +4896,7 @@ class ShapeEnv:
stack = CapturedTraceback.extract(skip=1)
ra = RuntimeAssert(expr, msg, stack)
# TODO: Do this in a way that is less janky than int(s.name[1:])
- cands = sorted([s for s in expr.free_symbols if s.name.startswith("u")], key=lambda s: int(s.name[1:]))
+ cands = sorted((s for s in expr.free_symbols if symbol_is_type(s, SymT.UNBACKED_INT)), key=lambda s: int(s.name[1:]))
# Is None when prefer_deferred_runtime_asserts_over_guards=True
# and the guard in question has no unbacked SymInts in front
ix = cands[-1] if cands else None
diff --git a/torch/utils/_sympy/symbol.py b/torch/utils/_sympy/symbol.py
new file mode 100644
index 0000000000..6fdced2bbf
--- /dev/null
+++ b/torch/utils/_sympy/symbol.py
@@ -0,0 +1,44 @@
+"""
+This file contains canonical definitions for our symbol naming conventions,
+across torch.fx.experimental.symbolic_shapes and torch._inductor. The
+intention is:
+
+1. To make it easily greppable where all the sites we use a prefix are
+2. Make it possible to easily tell if we can introduce a new prefix without
+ introducing a conflict
+
+You can occasionally test if prefixes have been hardcoded by renaming prefixes
+in this file and seeing what breaks.
+"""
+
+from enum import auto, Enum
+from typing import Sequence, Union
+
+import sympy
+
+
+class SymT(Enum):
+ SIZE = auto()
+ UNBACKED_INT = auto()
+ UNBACKED_FLOAT = auto()
+
+
+# Invariant: there must not be a prefix which is a prefix of another string,
+# as this introduces ambiguity
+prefix_str = {
+ SymT.SIZE: "s", # integer
+ SymT.UNBACKED_INT: "u", # integer
+ SymT.UNBACKED_FLOAT: "f",
+}
+
+
+def make_symbol(prefix: SymT, idx: int, **kwargs) -> sympy.Symbol:
+ # TODO: maybe put the assumptions here directly
+ return sympy.Symbol(f"{prefix_str[prefix]}{idx}", **kwargs)
+
+
+def symbol_is_type(sym: sympy.Symbol, prefix: Union[SymT, Sequence[SymT]]) -> bool:
+ if isinstance(prefix, SymT):
+ return sym.name.startswith(prefix_str[prefix])
+ else:
+ return sym.name.startswith(tuple(prefix_str[p] for p in prefix))
|
2.41.0
|
b5ae2611e22d992565f202df9267fe66469efaa
|
Fri, 3 May 2024 21:34:34 +0000
|
[PATCH 0983/1000] s390x: use runtime detection for vectorization support (#123936)
|
s390x: use runtime detection for vectorization support Pull Request resolved: https://github.com/pytorch/pytorch/pull/123936 Approved by: https://github.com/malfet, https://github.com/jansel, https://github.com/xuhancn
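A standalone sketch of the runtime check that the Inductor change below performs, simplified to look for the `vxe` flag in the `features` line of `/proc/cpuinfo` (the ATen change uses `getauxval(AT_HWCAP)` instead):
```
import re

def s390x_has_vxe(cpuinfo_path="/proc/cpuinfo"):
    # vxe is needed for fp32 vector instructions
    with open(cpuinfo_path) as f:
        for line in f:
            m = re.match(r"^features\s*:\s*(.*)$", line)
            if m and "vxe" in m.group(1).split():
                return True
    return False
```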
|
diff --git a/aten/src/ATen/native/DispatchStub.cpp b/aten/src/ATen/native/DispatchStub.cpp
index c7db94889c..93c004acdc 100644
--- a/aten/src/ATen/native/DispatchStub.cpp
+++ b/aten/src/ATen/native/DispatchStub.cpp
@@ -10,8 +10,19 @@
#include <cstdlib>
#include <cstring>
+#ifdef HAVE_ZVECTOR_CPU_DEFINITION
+#include <sys/auxv.h>
+#endif
+
namespace at::native {
+#ifdef HAVE_ZVECTOR_CPU_DEFINITION
+static inline bool cpu_has_vxe()
+{
+ return (getauxval(AT_HWCAP) & HWCAP_S390_VXE);
+}
+#endif
+
static CPUCapability compute_cpu_capability() {
auto envar = std::getenv("ATEN_CPU_CAPABILITY");
if (envar) {
@@ -60,10 +71,16 @@ static CPUCapability compute_cpu_capability() {
#endif
}
#endif
+
+#ifdef HAVE_ZVECTOR_CPU_DEFINITION
+ // vxe is needed for fp32 vector instructions
+ if (cpu_has_vxe()) {
+ return CPUCapability::ZVECTOR;
+ }
+#endif
+
#ifdef HAVE_VSX_CPU_DEFINITION
return CPUCapability::VSX;
-#elif HAVE_ZVECTOR_CPU_DEFINITION
- return CPUCapability::ZVECTOR;
#else
return CPUCapability::DEFAULT;
#endif
diff --git a/cmake/Modules/FindZVECTOR.cmake b/cmake/Modules/FindZVECTOR.cmake
index cf4cf1126f..b4e8994d0a 100644
--- a/cmake/Modules/FindZVECTOR.cmake
+++ b/cmake/Modules/FindZVECTOR.cmake
@@ -1,22 +1,5 @@
-
IF(CMAKE_SYSTEM_NAME MATCHES "Linux")
message("-- <FindZVECTOR>")
- set(Z_ARCH_LIST "")
- #firstly, tries to add the arch of the platform
- EXEC_PROGRAM(LD_SHOW_AUXV=1 ARGS "/bin/true" OUTPUT_VARIABLE bintrue)
- if(bintrue MATCHES "AT_PLATFORM:[ \\t\\n\\r]*([a-zA-Z0-9_]+)[ \\t\\n\\r]*")
- if(CMAKE_MATCH_COUNT GREATER 0)
- string(TOLOWER ${CMAKE_MATCH_1} platform)
- if(${platform} MATCHES "^z(14|15|16)")
- message("-- Z ARCH Platform: ${platform}")
- list( APPEND Z_ARCH_LIST "${platform}" )
- endif()
- endif()
- endif()
- #adds other archs in descending order. as its cached nothing will be checked twice
- list( APPEND Z_ARCH_LIST "z16" )
- list( APPEND Z_ARCH_LIST "z15" )
- list( APPEND Z_ARCH_LIST "z14" )
SET(VECTORIZATION_CODE "
#include <vecintrin.h>
@@ -32,25 +15,25 @@ IF(CMAKE_SYSTEM_NAME MATCHES "Linux")
vuint32 selector= {0xFFFFFFFF, 0, 0xFFFFFFFF, 0xFFFFFFFF};
vfloat32 hf = vsel_ext(selector, h1,h2);
int ret = (int)(hf[0]*1000+hf[1]*100+hf[2]*10+hf[3]);
- return ret==3856;
+ return (ret == 3856) ? 0 : -1;
}
")
- foreach(Z_ARCH ${Z_ARCH_LIST})
- SET(ARCH_SIMD_TEST_FLAGS_${Z_ARCH} " -mvx -mzvector -march=${Z_ARCH} -mtune=${Z_ARCH}")
- message("-- check ${Z_ARCH}")
- SET(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
- SET(CMAKE_REQUIRED_FLAGS "${ARCH_SIMD_TEST_FLAGS_${Z_ARCH}}")
- set(VECTORIZATION_CODE_${Z_ARCH} "${VECTORIZATION_CODE}")
- CHECK_CXX_SOURCE_COMPILES("${VECTORIZATION_CODE_${Z_ARCH}}" COMPILE_OUT_${Z_ARCH})
- SET(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
- if(COMPILE_OUT_${Z_ARCH})
- message("-- ${Z_ARCH} SIMD flags were set.")
- set(CXX_ZVECTOR_FOUND TRUE)
- SET(CXX_ZVECTOR_FLAGS "${ARCH_SIMD_TEST_FLAGS_${Z_ARCH}}" )
- break()
- endif()
- endforeach()
+ SET(ARCH_SIMD_TEST_FLAGS " -mvx -mzvector")
+ SET(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+ SET(CMAKE_REQUIRED_FLAGS "${ARCH_SIMD_TEST_FLAGS}")
+ # Do compilation check instead of runtime check
+ # in case it is compiled on older hardware
+ # or crosscompiled
+ CHECK_CXX_SOURCE_COMPILES("${VECTORIZATION_CODE}" COMPILE_OUT_ZVECTOR)
+ SET(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
+ if(COMPILE_OUT_ZVECTOR)
+ message("-- ZVECTOR flags were set.")
+ set(CXX_ZVECTOR_FOUND TRUE)
+ SET(CXX_ZVECTOR_FLAGS "${ARCH_SIMD_TEST_FLAGS}" )
+ else()
+ message("-- ZVECTOR flags were NOT set.")
+ endif()
message("-- </FindZVECTOR>")
endif()
diff --git a/torch/_inductor/codecache.py b/torch/_inductor/codecache.py
index 5d884ee62b..15b7d93b29 100644
--- a/torch/_inductor/codecache.py
+++ b/torch/_inductor/codecache.py
@@ -1294,7 +1294,18 @@ def valid_vec_isa_list() -> List[VecISA]:
return []
if platform.machine() == "s390x":
- return [VecZVECTOR()]
+ with open("/proc/cpuinfo") as _cpu_info:
+ while True:
+ line = _cpu_info.readline()
+ if not line:
+ break
+ # process line
+ featuresmatch = re.match(r"^features\s*:\s*(.*)$", line)
+ if featuresmatch:
+ for group in featuresmatch.groups():
+ if re.search(r"[\^ ]+vxe[\$ ]+", group):
+ return [VecZVECTOR()]
+ return []
isa_list = []
with open("/proc/cpuinfo") as _cpu_info:
|
2.41.0
|
783fef9904ea785ed1489cdcc0d4c7f55af4a83
|
Fri, 3 May 2024 14:58:27 -0700
|
[PATCH 0984/1000] [AOTI] Add a missing mypy ignore (#125508)
|
Summary: Caused by https://github.com/pytorch/pytorch/pull/125397, but somehow was not caught by CI. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125508 Approved by: https://github.com/izaitsevfb
|
diff --git a/torch/_inductor/ir.py b/torch/_inductor/ir.py
index f240efa7ae..49261cfd58 100644
--- a/torch/_inductor/ir.py
+++ b/torch/_inductor/ir.py
@@ -5547,7 +5547,7 @@ class FallbackKernel(ExternKernelAlloc):
self.set_cpp_kernel(kernel)
else:
self.cpp_kernel_name = get_aten_cpp_kernel_name(kernel)
- schema = kernel._schema
+ schema = kernel._schema # type: ignore[union-attr]
self.init_args_default_value(schema)
else:
self.python_kernel_name = str(kernel)
|
2.41.0
|
9abd1dccbbf6a20c76f82a2e9d7670126299a99
|
Fri, 3 May 2024 22:58:20 +0000
|
[PATCH 0986/1000] Fix lint after PR 122611 (#125512)
|
Fix lint after https://github.com/pytorch/pytorch/pull/122611 Pull Request resolved: https://github.com/pytorch/pytorch/pull/125512 Approved by: https://github.com/clee2000
|
diff --git a/c10/util/Exception.h b/c10/util/Exception.h
index 55de996683..bc01ad8d4e 100644
--- a/c10/util/Exception.h
+++ b/c10/util/Exception.h
@@ -68,7 +68,10 @@ class C10_API Error : public std::exception {
const void* caller = nullptr);
// Base constructor
- Error(std::string msg, std::string backtrace = "", const void* caller = nullptr);
+ Error(
+ std::string msg,
+ std::string backtrace = "",
+ const void* caller = nullptr);
// Add some new context to the message stack. The last added context
// will be formatted at the end of the context list upon printing.
diff --git a/torch/csrc/jit/python/script_init.cpp b/torch/csrc/jit/python/script_init.cpp
index fdeeb912ad..22809069f8 100644
--- a/torch/csrc/jit/python/script_init.cpp
+++ b/torch/csrc/jit/python/script_init.cpp
@@ -974,7 +974,10 @@ void initJitScriptBindings(PyObject* module) {
[mm_name](const Object& self, py::args args, py::kwargs kwargs) {
auto method = self.find_method(mm_name);
if (!method) {
- std::string msg = fmt::format("'{}' is not implemented for {}", mm_name, self.type()->str());
+ std::string msg = fmt::format(
+ "'{}' is not implemented for {}",
+ mm_name,
+ self.type()->str());
throw c10::NotImplementedError(msg);
}
return invokeScriptMethodFromPython(
|
2.41.0
|
2ab96a57e5b19c72cf11bc098fd690d238c1d86
|
Wed, 1 May 2024 14:45:19 -0700
|
[PATCH 0987/1000] [dynamo] fix crash when context manager is passed to a function (#125321)
|
Fixes https://github.com/pytorch/pytorch/issues/125274. The main change was to reconstruct `ContextWrappingVariables` as objects in general, but we can replace them with the class on the caller side when generating the resume function. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125321 Approved by: https://github.com/jansel
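A minimal sketch of the pattern this fixes, mirroring the new `test_inactive_context_graph_break_stack` test below: an inactive context manager crosses a graph break by being passed through a function.
```
import torch

def gn(ctx):
    torch._dynamo.graph_break()
    return ctx

def fn(x):
    x = x + 1
    ctx = gn(torch.set_grad_enabled(True))  # ctx crosses the graph break
    with ctx:  # another graph break is expected here
        x = x + 1
    return x

opt_fn = torch.compile(fn, backend="eager")
opt_fn(torch.zeros(10))  # previously crashed (issue #125274)
```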
|
diff --git a/test/dynamo/test_ctx_manager.py b/test/dynamo/test_ctx_manager.py
index cc6e39de4d..eab8fdb41a 100644
--- a/test/dynamo/test_ctx_manager.py
+++ b/test/dynamo/test_ctx_manager.py
@@ -1304,7 +1304,7 @@ class GraphModule(torch.nn.Module):
self.assertEqual(fn(x), opt_fn(x))
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
- def test_inactive_context_graph_break(self):
+ def test_inactive_context_graph_break_local(self):
def fn(x):
x = x + 1
ctx = torch.set_grad_enabled(True)
@@ -1320,6 +1320,42 @@ class GraphModule(torch.nn.Module):
self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
self.assertEqual(cnts.frame_count, 2)
+ def test_inactive_context_graph_break_stack(self):
+ def gn(ctx):
+ torch._dynamo.graph_break()
+ return ctx
+
+ def fn(x):
+ x = x + 1
+ ctx = gn(torch.set_grad_enabled(True))
+ # we expect a graph break on next line as well
+ with ctx:
+ x = x + 1
+ return x
+
+ x = torch.zeros(10, requires_grad=False)
+ cnts = torch._dynamo.testing.CompileCounter()
+ opt_fn = torch.compile(fn, backend=cnts)
+ self.assertEqual(fn(x), opt_fn(x))
+ self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
+
+ def test_inactive_context_graph_break_stack2(self):
+ def gn(x, ctx, y, z, dummy):
+ with ctx:
+ return x * y * z
+
+ def fn(x):
+ x = x + 1
+ x = gn(x, torch.set_grad_enabled(True), 2, 3, torch._dynamo.graph_break())
+ return x
+
+ x = torch.zeros(10, requires_grad=False)
+ cnts = torch._dynamo.testing.CompileCounter()
+ opt_fn = torch.compile(fn, backend=cnts)
+ self.assertEqual(fn(x), opt_fn(x))
+ self.assertEqual(fn(x).requires_grad, opt_fn(x).requires_grad)
+ self.assertEqual(cnts.frame_count, 2)
+
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
diff --git a/torch/_dynamo/bytecode_transformation.py b/torch/_dynamo/bytecode_transformation.py
index 83f77626e0..dec673b0e9 100644
--- a/torch/_dynamo/bytecode_transformation.py
+++ b/torch/_dynamo/bytecode_transformation.py
@@ -244,6 +244,53 @@ def create_load_method(name) -> Instruction:
return create_instruction("LOAD_METHOD", argval=name)
+def create_setup_with(target) -> Instruction:
+ opname = "BEFORE_WITH" if sys.version_info >= (3, 11) else "SETUP_WITH"
+ return create_instruction(opname, target=target)
+
+
+def create_swap(n) -> List[Instruction]:
+ if sys.version_info >= (3, 11):
+ return [create_instruction("SWAP", arg=n)]
+ # in Python < 3.11, SWAP is a macro that expands to multiple instructions
+ if n == 1:
+ return []
+ """
+ e.g. swap "a" and "b" in this stack:
+ 0 a 1 2 3 b
+ 0 a [1 2 3 b]
+ 0 a [1 2 3 b] [1 2 3 b]
+ 0 a [1 2 3 b] [1 2 3 b] -1
+ 0 a [1 2 3 b] b
+ 0 b a [1 2 3 b]
+ 0 b a [1 2 3 b] [1 2 3 b]
+ 0 b [1 2 3 b] a [1 2 3 b]
+ 0 b [1 2 3 b] a [1 2 3 b] -1
+ 0 b [1 2 3 a]
+ 0 b [1 2 3 a] [1 2 3 a]
+ 0 b [1 2 3 a] [1 2 3 a] reverse
+ 0 b [a 3 2 1] None
+ 0 b [a 3 2 1]
+ 0 b 1 2 3 a
+ """
+ return [
+ create_instruction("BUILD_LIST", arg=n - 1),
+ create_instruction("DUP_TOP"),
+ create_instruction("LOAD_CONST", argval=-1),
+ create_instruction("BINARY_SUBSCR"),
+ create_instruction("ROT_THREE"),
+ create_instruction("DUP_TOP"),
+ create_instruction("ROT_THREE"),
+ create_instruction("LOAD_CONST", argval=-1),
+ create_instruction("STORE_SUBSCR"),
+ create_instruction("DUP_TOP"),
+ create_load_method("reverse"),
+ *create_call_method(0),
+ create_instruction("POP_TOP"),
+ create_instruction("UNPACK_SEQUENCE", arg=n - 1),
+ ]
+
+
def lnotab_writer(
lineno: int, byteno: int = 0
) -> Tuple[List[int], Callable[[int, int], None]]:
@@ -982,6 +1029,17 @@ def get_const_index(code_options, val) -> int:
def fix_vars(instructions: List[Instruction], code_options, varname_from_oparg=None):
# compute instruction arg from argval if arg is not provided
names = {name: idx for idx, name in enumerate(code_options["co_names"])}
+
+ def get_name_index(name) -> int:
+ try:
+ idx = names[name]
+ except KeyError:
+ # Add a missing item to co_names
+ idx = names[name] = len(names)
+ code_options["co_names"] = (*code_options["co_names"], name)
+ assert len(code_options["co_names"]) == len(names)
+ return idx
+
if sys.version_info < (3, 11):
assert varname_from_oparg is None
varnames = {name: idx for idx, name in enumerate(code_options["co_varnames"])}
@@ -1016,27 +1074,27 @@ def fix_vars(instructions: List[Instruction], code_options, varname_from_oparg=N
assert instructions[i].arg is not None
assert instructions[i].argval is not _NotProvided
if sys.version_info >= (3, 11):
- instructions[i].arg = (names[instructions[i].argval] << 1) + (
+ instructions[i].arg = (get_name_index(instructions[i].argval) << 1) + (
cast(int, instructions[i].arg) % 2
)
else:
- instructions[i].arg = names[instructions[i].argval]
+ instructions[i].arg = get_name_index(instructions[i].argval)
elif instructions[i].opname == "LOAD_ATTR":
# 3.12 LOAD_ATTR requires both arg and argval, like LOAD_GLOBAL
assert instructions[i].arg is not None
assert instructions[i].argval is not _NotProvided
if sys.version_info >= (3, 12):
- instructions[i].arg = (names[instructions[i].argval] << 1) + (
+ instructions[i].arg = (get_name_index(instructions[i].argval) << 1) + (
cast(int, instructions[i].arg) % 2
)
else:
- instructions[i].arg = names[instructions[i].argval]
+ instructions[i].arg = get_name_index(instructions[i].argval)
elif instructions[i].opname == "LOAD_SUPER_ATTR":
assert instructions[i].arg is not None
assert instructions[i].argval is not _NotProvided
# Copy low bit, force second bit on for explicit super (the "+ 2")
instructions[i].arg = (
- (names[instructions[i].argval] << 2)
+ (get_name_index(instructions[i].argval) << 2)
+ (cast(int, instructions[i].arg) % 2)
+ 2
)
@@ -1045,14 +1103,7 @@ def fix_vars(instructions: List[Instruction], code_options, varname_from_oparg=N
instructions[i].arg = varnames[instructions[i].argval]
elif instructions[i].opcode in HAS_NAME:
if should_compute_arg():
- name = instructions[i].argval
- try:
- instructions[i].arg = names[name]
- except KeyError:
- # Add a missing item to co_names
- instructions[i].arg = names[name] = len(names)
- code_options["co_names"] = (*code_options["co_names"], name)
- assert len(code_options["co_names"]) == len(names)
+ instructions[i].arg = get_name_index(instructions[i].argval)
elif instructions[i].opcode in HAS_FREE:
if should_compute_arg():
instructions[i].arg = freenames[instructions[i].argval]
diff --git a/torch/_dynamo/resume_execution.py b/torch/_dynamo/resume_execution.py
index 969a679c9e..ced0013cad 100644
--- a/torch/_dynamo/resume_execution.py
+++ b/torch/_dynamo/resume_execution.py
@@ -51,6 +51,7 @@ class ReenterWith:
finally:
exit context
"""
+ # NOTE: we assume that TOS is a context manager CLASS!
load_args = []
if self.target_values:
load_args = [
@@ -156,6 +157,7 @@ class ReenterWith:
with ctx(args):
(rest)
"""
+ # NOTE: we assume that TOS is a context manager CLASS!
load_args = []
if self.target_values:
load_args = [
@@ -455,8 +457,8 @@ class ContinueExecutionCache:
old_hook_target_remap[old_hook_target] = exn_target
real_i = i + null_idxes_i
if real_i in stack_ctx_vars_d:
- # current stack variable is a context var -
- # load args for context variable and construct it
+ # NOTE: we assume that current stack var is a context manager CLASS!
+ # Load args for context variable and construct it
prefix.extend(_load_tuple_and_call(stack_ctx_vars_d[real_i]))
if is_py311_plus:
@@ -468,6 +470,7 @@ class ContinueExecutionCache:
assert not hooks
+ # NOTE: we assume that local var is a context manager CLASS!
# initialize inactive context vars in argnames
for name, vals in argnames_ctx_vars:
prefix.append(create_instruction("LOAD_FAST", argval=name))
diff --git a/torch/_dynamo/symbolic_convert.py b/torch/_dynamo/symbolic_convert.py
index 1ebf5ec26f..2085d0813b 100644
--- a/torch/_dynamo/symbolic_convert.py
+++ b/torch/_dynamo/symbolic_convert.py
@@ -18,7 +18,7 @@ import traceback
import types
import typing
import weakref
-from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type
+from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Type
from unittest.mock import patch
import torch
@@ -37,6 +37,7 @@ from .bytecode_transformation import (
create_call_function,
create_instruction,
create_jump_absolute,
+ create_swap,
get_code_keys,
Instruction,
is_generator,
@@ -560,10 +561,10 @@ def break_graph_if_unsupported(*, push):
self.output.compile_subgraph(self, reason=reason)
cg = PyCodegen(self)
cleanup: List[Instruction] = []
- # Reconstruct the context variables in the block stack
+ # Reconstruct the context variable CLASS in the block stack
for b in self.block_stack:
assert b.with_context is not None
- cg(b.with_context)
+ b.with_context.reconstruct_type(cg)
cg.extend_output(b.resume_fn().try_except(cg.code_options, cleanup))
self.output.add_output_instructions(cg.get_instructions())
del cg
@@ -2285,24 +2286,32 @@ class InstructionTranslator(InstructionTranslatorBase):
if sys.version_info < (3, 12):
assert len(argnames_null) == 0, "variables should not be NULL in < 3.12"
- # Handle inactive context variables - inactive context variables
- # are reconstructed to be the class, NOT the object.
- # So the resume function needs to construct the context object
- # from the class and the context object's target values.
- # e.g. torch.set_grad_enabled(True) will be reconstructed as
- # torch.set_grad_enabled
+ cg = PyCodegen(self)
+
+ # Handle inactive context variables.
+ # The resume function assumes that context variables are the class, NOT the object.
+ # e.g. torch.set_grad_enabled(True) will be reconstructed as torch.set_grad_enabled
stack_ctx_vars = []
for i, var in enumerate(self.stack):
if type.__instancecheck__(ContextWrappingVariable, var):
- stack_ctx_vars.append((i, tuple(var.target_values))) # type: ignore[attr-defined]
+ ctx = cast(ContextWrappingVariable, var)
+ stack_ctx_vars.append((i, tuple(ctx.target_values)))
+ # Replace the current stack var with the context class
+ ctx.reconstruct_type(cg)
+ cg.extend_output(create_swap(len(self.stack) - i + 1))
+ cg.append_output(create_instruction("POP_TOP"))
+
argnames_ctx_vars = []
for name in argnames:
if type.__instancecheck__(
ContextWrappingVariable, var := self.symbolic_locals[name]
):
- argnames_ctx_vars.append((name, tuple(var.target_values))) # type: ignore[attr-defined]
-
- cg = PyCodegen(self)
+ ctx = cast(ContextWrappingVariable, var)
+ argnames_ctx_vars.append((name, tuple(ctx.target_values)))
+ # Replace the local with the context class
+ cg.append_output(create_instruction("LOAD_FAST", argval=name))
+ ctx.reconstruct_type(cg)
+ cg.append_output(create_instruction("STORE_FAST", argval=name))
# Python does not allow null to be an arg to a function, so
# we remove nulls from the stack and restore them in the
diff --git a/torch/_dynamo/variables/ctx_manager.py b/torch/_dynamo/variables/ctx_manager.py
index fa6d7d4f71..637636f1e0 100644
--- a/torch/_dynamo/variables/ctx_manager.py
+++ b/torch/_dynamo/variables/ctx_manager.py
@@ -1,6 +1,7 @@
# mypy: ignore-errors
import dataclasses
import inspect
+import sys
import warnings
from typing import Callable, Dict, List, Optional
@@ -8,7 +9,11 @@ import torch._C
from torch._guards import Guard
from .. import variables
-from ..bytecode_transformation import create_call_function, create_instruction
+from ..bytecode_transformation import (
+ create_call_function,
+ create_instruction,
+ create_setup_with,
+)
from ..device_interface import get_interface_for_device
from ..exc import unimplemented, Unsupported
from ..guards import GuardBuilder, install_guard
@@ -77,11 +82,21 @@ class ContextWrappingVariable(VariableTracker):
self.state.cleanup_assert()
return variables.ConstantVariable.create(None)
- def reconstruct(self, codegen):
+ def reconstruct_type(self, codegen):
codegen(
AttrSource(codegen.tx.import_source(self.module_name()), self.fn_name())
)
+ def reconstruct(self, codegen):
+ if sys.version_info >= (3, 11):
+ codegen.append_output(create_instruction("PUSH_NULL"))
+ self.reconstruct_type(codegen)
+ target_values = self.target_values
+ if not target_values:
+ target_values = ()
+ codegen.extend_output([codegen.create_load_const(val) for val in target_values])
+ codegen.extend_output(create_call_function(len(target_values), False))
+
def module_name(self):
raise NotImplementedError("module_name called on base")
@@ -963,18 +978,16 @@ class WithExitFunctionVariable(VariableTracker):
# Note here we reconstruct the context manager rather than the
# exit function. The handler generated by BlockStackEntry
# will re-enter the context in the resume function.
- codegen(
- AttrSource(
- codegen.tx.import_source(self.ctx.module_name()), self.ctx.fn_name()
- )
- )
-
+ self.ctx.reconstruct_type(codegen)
if codegen.tx.output.partial_convert:
+ if sys.version_info >= (3, 11):
+ codegen.append_output(create_instruction("PUSH_NULL"))
+ codegen.append_output(create_instruction("SWAP", arg=2))
codegen.extend_output(
[codegen.create_load_const(val) for val in self.ctx.target_values]
)
codegen.extend_output(
- create_call_function(len(self.ctx.target_values), True)
+ create_call_function(len(self.ctx.target_values), False)
)
- codegen.append_output(create_instruction("SETUP_WITH", target=self.target))
+ codegen.append_output(create_setup_with(self.target))
codegen.append_output(create_instruction("POP_TOP"))
diff --git a/torch/_dynamo/variables/lazy.py b/torch/_dynamo/variables/lazy.py
index 4c68c7bf78..fb4f5cfa76 100644
--- a/torch/_dynamo/variables/lazy.py
+++ b/torch/_dynamo/variables/lazy.py
@@ -19,17 +19,10 @@ class LazyCache:
assert self.vt is None
from ..symbolic_convert import InstructionTranslator
from .builder import VariableBuilder
- from .ctx_manager import ContextWrappingVariable, NullContextVariable
- from .misc import NullVariable
tx = InstructionTranslator.current_tx()
self.vt = VariableBuilder(tx, self.source)(self.value)
- # we do not expect wrapping these variables in lazy VTs
- assert not isinstance(
- self.vt, (NullVariable, ContextWrappingVariable)
- ) or isinstance(self.vt, NullContextVariable)
-
del self.value
del self.source
|
2.41.0
|
96bb74077ddc4349ba928e2f6b011d7ed35058c
|
Fri, 3 May 2024 11:19:09 -0700
|
[PATCH 0989/1000] [FSDP2] Added HSDP grad acc tests and some minor changes (#125479)
|
This adds HSDP to the existing gradient accumulation tests and includes some minor changes to simplify things a tiny bit. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125479 Approved by: https://github.com/wanchaol ghstack dependencies: #125431
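A sketch of the collective-counting pattern the updated test relies on, using the `patch_all_reduce` helper added to `common_fsdp.py` in this diff (the training loop itself is elided):
```
import torch.distributed as dist
from torch.testing._internal.common_fsdp import patch_all_reduce

orig_all_reduce = dist.all_reduce
all_reduce_count = 0

def all_reduce_with_count(*args, **kwargs):
    global all_reduce_count
    all_reduce_count += 1
    return orig_all_reduce(*args, **kwargs)

with patch_all_reduce(all_reduce_with_count):
    ...  # run microbatched forward/backward; for HSDP, the gradient
         # all-reduce count should match the reduce-scatter count
         # (loss all-reduces excluded)
```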
|
diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh
index c903a26998..72a3943039 100755
--- a/.ci/pytorch/test.sh
+++ b/.ci/pytorch/test.sh
@@ -322,6 +322,7 @@ test_inductor_distributed() {
pytest test/distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_2d_mlp
pytest test/distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_hsdp
pytest test/distributed/_composable/fsdp/test_fully_shard_training.py -k test_train_parity_2d_transformer_checkpoint_resume
+ pytest test/distributed/_composable/fsdp/test_fully_shard_training.py -k test_gradient_accumulation
pytest test/distributed/_composable/fsdp/test_fully_shard_frozen.py
pytest test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py -k test_compute_dtype
pytest test/distributed/_composable/fsdp/test_fully_shard_mixed_precision.py -k test_reduce_dtype
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_training.py b/test/distributed/_composable/fsdp/test_fully_shard_training.py
index 8d1e1dbfb6..ef79906df9 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_training.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_training.py
@@ -42,6 +42,7 @@ from torch.testing._internal.common_fsdp import (
FSDPTestMultiThread,
MLP,
patch_all_gather,
+ patch_all_reduce,
patch_reduce_scatter,
test_compiled_fsdp,
)
@@ -650,7 +651,7 @@ class TestFullyShardSharedParams(FSDPTest):
class TestFullyShardGradientAccumulation(FSDPTest):
@property
def world_size(self) -> int:
- return min(2, torch.cuda.device_count())
+ return min(4, torch.cuda.device_count())
@skip_if_lt_x_gpu(2)
def test_gradient_accumulation(self):
@@ -658,8 +659,13 @@ class TestFullyShardGradientAccumulation(FSDPTest):
Tests gradient accumulation with/without gradient reduction and
with/without resharding after backward.
"""
+ meshes = [init_device_mesh("cuda", (self.world_size,))] # always test FSDP
+ if self.world_size == 4: # test HSDP too if enough GPUs
+ shard_size, replicate_size = 2, 2
+ meshes.append(init_device_mesh("cuda", (replicate_size, shard_size)))
self.run_subtests(
{
+ "mesh": meshes,
"reshard_after_forward": [True, False, 2],
# "all": disable reduce-scatter for all modules
# "root_only": disable reduce-scatter for root's linear only
@@ -673,6 +679,7 @@ class TestFullyShardGradientAccumulation(FSDPTest):
def _test_gradient_accumulation(
self,
+ mesh: DeviceMesh,
reshard_after_forward: Union[bool, int],
mode: str,
reshard_after_backward: bool,
@@ -692,15 +699,13 @@ class TestFullyShardGradientAccumulation(FSDPTest):
global_batch_size = local_batch_size * self.world_size
if mode == "some_mlps":
num_mlps_to_disable_reduce_scatter = 2
- model = nn.Sequential(
- *(
- [nn.Linear(lin_dim, lin_dim)]
- + [MLP(lin_dim, torch.device("cpu")) for _ in range(num_mlps)]
- )
- )
+ modules = [nn.Linear(lin_dim, lin_dim)]
+ modules.extend(MLP(lin_dim) for _ in range(num_mlps))
+ model = nn.Sequential(*modules)
ref_model = copy.deepcopy(model).cuda()
fully_shard_fn = functools.partial(
fully_shard,
+ mesh=mesh,
reshard_after_forward=reshard_after_forward,
offload_policy=offload_policy,
)
@@ -710,10 +715,11 @@ class TestFullyShardGradientAccumulation(FSDPTest):
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
+ # TODO: Migrate to `CommDebugMode` once it supports c10d collectives.
orig_all_gather = dist.all_gather_into_tensor
- all_gather_count = 0
orig_reduce_scatter = dist.reduce_scatter_tensor
- reduce_scatter_count = 0
+ orig_all_reduce = dist.all_reduce
+ all_gather_count, reduce_scatter_count, all_reduce_count = 0, 0, 0
def all_gather_with_count(*args, **kwargs):
nonlocal all_gather_count
@@ -725,11 +731,16 @@ class TestFullyShardGradientAccumulation(FSDPTest):
reduce_scatter_count += 1
return orig_reduce_scatter(*args, **kwargs)
+ def all_reduce_with_count(*args, **kwargs):
+ nonlocal all_reduce_count
+ all_reduce_count += 1
+ return orig_all_reduce(*args, **kwargs)
+
torch.manual_seed(1) # same on all ranks
for iter_idx in range(5):
with patch_all_gather(all_gather_with_count), patch_reduce_scatter(
reduce_scatter_with_count
- ):
+ ), patch_all_reduce(all_reduce_with_count):
for microbatch_idx in range(num_microbatches):
is_last_microbatch = microbatch_idx == num_microbatches - 1
if mode == "all":
@@ -757,10 +768,7 @@ class TestFullyShardGradientAccumulation(FSDPTest):
* local_batch_size
].detach()
losses: List[torch.Tensor] = []
- for _model, _optim, inp in (
- (ref_model, ref_optim, global_inp),
- (model, optim, local_inp),
- ):
+ for _model, inp in ((ref_model, global_inp), (model, local_inp)):
losses.append(_model(inp).sum())
losses[-1].backward()
dist.all_reduce(losses[1]) # partial -> replicated
@@ -779,7 +787,13 @@ class TestFullyShardGradientAccumulation(FSDPTest):
# Expect additional reduce-scatters for all MLPs
expected_reduce_scatter_count += (num_mlps) * (num_microbatches - 1)
self.assertEqual(reduce_scatter_count, expected_reduce_scatter_count)
- reduce_scatter_count = 0
+ # Exclude the loss all-reduce per microbatch in our training loop
+ all_reduce_count -= num_microbatches
+ if mesh.ndim == 2:
+ self.assertEqual(all_reduce_count, expected_reduce_scatter_count)
+ else:
+ self.assertEqual(all_reduce_count, 0)
+ reduce_scatter_count = all_reduce_count = 0
# Expect one all-gather per MLP plus one for the root's linear in
# the first microbatch's forward
@@ -873,8 +887,7 @@ class TestFullyShardGradientAccumulation(FSDPTest):
ref_losses.append(ref_model(inp).sum())
ref_losses[-1].backward()
for param in ref_model.parameters():
- dist.all_reduce(param.grad)
- param.grad.detach().div_(self.world_size)
+ dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
for loss, ref_loss in zip(losses, ref_losses):
self.assertEqual(loss, ref_loss)
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
index 737c2fe801..8644146135 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param_group.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
@@ -167,7 +167,7 @@ class FSDPParamGroup:
def _init_grad_divide_factors(self):
data_parallel_world_size = 1
data_parallel_world_size *= self.mesh_info.shard_mesh_size
- if isinstance(self.mesh_info, HSDPMeshInfo):
+ if self._is_hsdp:
data_parallel_world_size *= self.mesh_info.replicate_mesh_size
if self._reduce_dtype in (torch.float32, torch.bfloat16):
# Use NCCL's AVG op to divide after reduction since it is more
@@ -348,7 +348,7 @@ class FSDPParamGroup:
self.device,
self._grad_divide_factors,
self._all_reduce_process_group
- if self._should_all_reduce_grads()
+ if self._is_hsdp and self.all_reduce_grads
else None,
self.comm_ctx.all_reduce_stream,
)
@@ -481,6 +481,10 @@ class FSDPParamGroup:
and self.mesh_info != self.post_forward_mesh_info
)
+ @property
+ def _is_hsdp(self) -> bool:
+ return isinstance(self.mesh_info, HSDPMeshInfo)
+
@property
def _all_gather_process_group(self) -> dist.ProcessGroup:
mesh_info = (
@@ -493,18 +497,13 @@ class FSDPParamGroup:
@property
def _reduce_scatter_process_group(self) -> dist.ProcessGroup:
- mesh_info = self.mesh_info
- assert isinstance(mesh_info, FSDPMeshInfo)
- return mesh_info.shard_process_group
+ assert isinstance(self.mesh_info, FSDPMeshInfo)
+ return self.mesh_info.shard_process_group
@property
def _all_reduce_process_group(self) -> dist.ProcessGroup:
- mesh_info = self.mesh_info
- assert isinstance(mesh_info, HSDPMeshInfo)
- return mesh_info.replicate_process_group
-
- def _should_all_reduce_grads(self) -> bool:
- return isinstance(self.mesh_info, HSDPMeshInfo) and self.all_reduce_grads
+ assert isinstance(self.mesh_info, HSDPMeshInfo)
+ return self.mesh_info.replicate_process_group
def _get_param_module_infos(
diff --git a/torch/testing/_internal/common_fsdp.py b/torch/testing/_internal/common_fsdp.py
index 1f8487290e..31bf3fafd2 100644
--- a/torch/testing/_internal/common_fsdp.py
+++ b/torch/testing/_internal/common_fsdp.py
@@ -907,6 +907,18 @@ def patch_reduce_scatter(new_reduce_scatter_tensor: Callable):
dist.reduce_scatter_tensor = orig_reduce_scatter
+@contextlib.contextmanager
+def patch_all_reduce(new_all_reduce: Callable):
+ orig_all_reduce = dist.all_reduce
+ dist.barrier()
+ dist.all_reduce = new_all_reduce
+ try:
+ yield
+ finally:
+ dist.barrier()
+ dist.all_reduce = orig_all_reduce
+
+
@no_type_check
@contextlib.contextmanager
def patch_unshard(new_unshard: Callable):
|
2.41.0
|
aa7699185e4ec39077e3046dfd63244dffa9ddb
|
Fri, 3 May 2024 12:09:18 -0700
|
[PATCH 0990/1000] [FSDP2] Computed grad divide factors at runtime (#125484)
|
**Context**

We are interested in supporting the case where HSDP reduce-scatters but does not all-reduce in a microbatch backward. This saves communication while still saving memory. Only on the last microbatch do we need to both reduce-scatter and all-reduce. This is not implemented yet and will hopefully come in a future PR.

There is one notable part of doing this. On the last microbatch, we need to perform an accumulation step after reduce-scatter and before all-reduce. If not, then the preceding microbatch's gradients will not be contributed across the replica group. (In other words, we cannot simply accumulate _after_ all-reduce.) Consider 32 GPUs with 4-way replication and 8-way sharding and 2 microbatches, and focus on global rank 0.

- After the first microbatch, rank 0 will have its shard of $\frac{1}{8} \sum_{i \in S(0)} g_i^{(1)}$, where we define $S(0) = \{0, 1, \dots, 7\}$ to be the ranks in its shard group and we define the $(1)$ superscript to denote the first microbatch.
- Upon the second microbatch, rank 0 after its reduce-scatter will additionally have its shard of $\frac{1}{8} \sum_{i \in S(0)} g_i^{(2)}$. If we only all-reduce this, then this second microbatch's gradients become $\frac{1}{32} \sum_{i=0, 1, \dots, 31} g_i^{(2)}$, so in total, rank 0 has $\frac{1}{8} \sum_{i \in S(0)} g_i^{(1)} + \frac{1}{32} \sum_{i=0, 1, \dots, 31} g_i^{(2)}$, which is wrong.
- Importantly, we must accumulate $\frac{1}{8} \sum_{i \in S(0)} g_i^{(1)} + \frac{1}{8} \sum_{i \in S(0)} g_i^{(2)} = \frac{1}{8}\sum_{i \in S(0)} (g_i^{(1)} + g_i^{(2)})$ first before all-reducing to get $\frac{1}{32} \sum_{i=0, 1, \dots, 31} (g_i^{(1)} + g_i^{(2)})$.

Now, note how under this approach, we want a factor of $\frac{1}{8}$ only (i.e. the reciprocal of the shard group size), not $\frac{1}{32}$, for the first microbatch's gradients.

- For bf16/fp32, since we use `ReduceOp.AVG` and we only reduce-scatter on the first microbatch, we correctly have a factor of $\frac{1}{8}$ on the first microbatch.
- For fp16, since we precompute the gradient divide factors at init time assuming we always reduce over both shard and replica groups, we incorrectly have a factor of $\frac{1}{32}$ on the first microbatch, deviating from the bf16/fp32 case.

We can address this issue and match the bf16/fp32 vs. fp16 semantics by computing the divide factors at runtime based on which process groups were passed into the reduction function (`foreach_reduce`).

**Additional Notes**

How to implement the HSDP reduce-scatter without all-reduce is not entirely clear yet. (What is the cleanest way to do this?) We need to store the partial reduce-scatter output and check for it upon the next backward. We should also be sure to error if the set of parameters receiving gradients changes, in which case we cannot support this easily. Anyway, we will implement this in a follow-up.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/125484 Approved by: https://github.com/wanchaol ghstack dependencies: #125431, #125479
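The fp16 pre-/post-divide factors mentioned above are chosen near $\sqrt{N}$; a standalone sketch of the computation now done at runtime in `_get_gradient_divide_factors` (see the diff below), with the 32-GPU example worked out:
```
def gradient_divide_factors(data_parallel_size: int):
    # Split N into predivide * postdivide with both factors near sqrt(N)
    # to limit fp16 overflow/underflow during the reduction.
    factor = 1
    while data_parallel_size % factor == 0 and data_parallel_size / factor > factor:
        factor *= 2
    return float(factor), data_parallel_size / factor

# 32 GPUs (4-way replicate x 8-way shard): divide by 8 before the reduction
# and by 4 after, for an end-to-end factor of 1/32.
assert gradient_divide_factors(32) == (8.0, 4.0)
```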
|
diff --git a/test/distributed/_composable/fsdp/test_fully_shard_comm.py b/test/distributed/_composable/fsdp/test_fully_shard_comm.py
index 33c6e61ac4..115c1f9322 100644
--- a/test/distributed/_composable/fsdp/test_fully_shard_comm.py
+++ b/test/distributed/_composable/fsdp/test_fully_shard_comm.py
@@ -18,6 +18,8 @@ from torch.distributed._composable.fsdp import (
OffloadPolicy,
)
from torch.distributed._composable.fsdp._fsdp_collectives import (
+ _div_if_needed,
+ _get_gradient_divide_factors,
foreach_all_gather,
foreach_all_gather_copy_out,
foreach_reduce,
@@ -207,6 +209,18 @@ class TestFullyShardCollectiveOps(FSDPTestMultiThread):
reduce_scatter_dtype=torch.float32,
)
+ @unittest.skipIf(not TEST_CUDA, "no cuda")
+ def test_reduce_scatter_fp16(self):
+ param_sizes = self._get_param_sizes()
+ default_stream = torch.cuda.current_stream()
+ stream = torch.cuda.Stream()
+ for reduce_scatter_stream in (default_stream, stream):
+ self._test_reduce_scatter(
+ param_sizes,
+ reduce_scatter_stream=reduce_scatter_stream,
+ reduce_scatter_dtype=torch.float16,
+ )
+
def _test_reduce_scatter(
self,
param_sizes: List[torch.Size],
@@ -238,17 +252,24 @@ class TestFullyShardCollectiveOps(FSDPTestMultiThread):
orig_dtype=orig_params[0].dtype,
reduce_dtype=reduce_scatter_dtype,
device=self.device,
- divide_factors=fsdp_param_group._grad_divide_factors,
all_reduce_group=None,
all_reduce_stream=all_reduce_stream,
)
torch.cuda.current_stream().wait_event(view_out_event)
# Check reduce-scatter correctness
+ predivide_factor, postdivide_factor = _get_gradient_divide_factors(
+ group, None, reduce_scatter_dtype
+ )
reduced_grads = [grad.detach().clone() for grad in unsharded_grads]
for grad in reduced_grads:
- dist.all_reduce(grad, group=group)
- grad /= self.world_size
+ _div_if_needed(grad, predivide_factor)
+ dist.all_reduce(
+ grad,
+ group=group,
+ op=dist.ReduceOp.AVG if predivide_factor is None else dist.ReduceOp.SUM,
+ )
+ _div_if_needed(grad, postdivide_factor)
for fsdp_param, reduced_grad in zip(fsdp_params, reduced_grads):
sharded_grad = fsdp_param.sharded_param.grad
self.assertIsInstance(sharded_grad, DTensor)
diff --git a/torch/distributed/_composable/fsdp/_fsdp_collectives.py b/torch/distributed/_composable/fsdp/_fsdp_collectives.py
index 9adbd6b8a6..f279703151 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_collectives.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_collectives.py
@@ -125,7 +125,6 @@ def foreach_reduce(
orig_dtype: torch.dtype,
reduce_dtype: Optional[torch.dtype],
device: torch.device,
- divide_factors: Union[Tuple[None, None], Tuple[float, float]],
all_reduce_group: Optional[dist.ProcessGroup],
all_reduce_stream: torch.cuda.Stream,
) -> torch.cuda.Event:
@@ -142,7 +141,9 @@ def foreach_reduce(
)
grad_dtype = unsharded_grads[0].dtype
reduce_dtype = reduce_dtype or grad_dtype
- predivide_factor, postdivide_factor = divide_factors
+ predivide_factor, postdivide_factor = _get_gradient_divide_factors(
+ reduce_scatter_group, all_reduce_group, reduce_dtype
+ )
world_size = reduce_scatter_group.size()
padded_unsharded_sizes = tuple(
_get_dim0_padded_size(grad.size(), world_size) for grad in unsharded_grads
@@ -166,18 +167,22 @@ def foreach_reduce(
(reduce_scatter_output_numel,)
)
_div_if_needed(reduce_scatter_input, predivide_factor)
- _reduce_scatter(
- post_reduce_output,
- reduce_scatter_input,
- reduce_scatter_group,
- divide_factors,
+ dist.reduce_scatter_tensor(
+ output=post_reduce_output,
+ input=reduce_scatter_input,
+ group=reduce_scatter_group,
+ op=ReduceOp.AVG if predivide_factor is None else ReduceOp.SUM,
)
view_out_stream = reduce_scatter_stream
if all_reduce_group is not None:
view_out_stream = all_reduce_stream
all_reduce_stream.wait_stream(reduce_scatter_stream)
with torch.cuda.stream(all_reduce_stream):
- _all_reduce(post_reduce_output, all_reduce_group, divide_factors)
+ dist.all_reduce(
+ post_reduce_output,
+ group=all_reduce_group,
+ op=ReduceOp.AVG if predivide_factor is None else ReduceOp.SUM,
+ )
with torch.cuda.stream(view_out_stream):
_div_if_needed(post_reduce_output, postdivide_factor)
post_reduce_output = _to_dtype_if_needed(post_reduce_output, orig_dtype)
@@ -257,30 +262,27 @@ def _get_all_gather_input_metadatas(
)
-def _reduce_scatter(
- output: torch.Tensor,
- input: torch.Tensor,
- group: dist.ProcessGroup,
- divide_factors: Union[Tuple[None, None], Tuple[float, float]],
-) -> None:
- if divide_factors[0]:
- dist.reduce_scatter_tensor(output, input, group=group)
- else:
- # Using NCCL's reduce-scatter to do the division by world size saves
- # extra memory read/write from a separate division kernel
- dist.reduce_scatter_tensor(output, input, op=ReduceOp.AVG, group=group)
-
-
-def _all_reduce(
- tensor: torch.Tensor,
- group: dist.ProcessGroup,
- divide_factors: Union[Tuple[None, None], Tuple[float, float]],
-) -> None:
- if divide_factors[0]:
- dist.all_reduce(tensor, group=group)
- else:
- # saves extra memory read/write from a separate division kernel
- dist.all_reduce(tensor, op=ReduceOp.AVG, group=group)
+def _get_gradient_divide_factors(
+ reduce_scatter_group: dist.ProcessGroup,
+ all_reduce_group: Optional[dist.ProcessGroup],
+ reduce_dtype: torch.dtype,
+) -> Union[Tuple[None, None], Tuple[float, float]]:
+ # For fp32/bf16, we do not need to worry about overflow/underflow, so we
+ # use NCCL's built-in division to avoid separate div kernels
+ if reduce_dtype in (torch.float32, torch.bfloat16):
+ return None, None
+ data_parallel_size = reduce_scatter_group.size()
+ if all_reduce_group is not None:
+ data_parallel_size *= all_reduce_group.size()
+ # Since fp16 has smaller dynamic range than fp32/bf16, we want to avoid
+ # overflow/underflow. For N data parallel workers, each worker computes
+ # g_i, and they collectively reduce (g_1 + ... + g_N) / N. To avoid
+ # overflow/underflow, we divide by ~sqrt(N) before/after the reduction.
+ factor: int = 1
+ while data_parallel_size % factor == 0 and data_parallel_size / factor > factor:
+ factor *= 2
+ factor = float(factor)
+ return (factor, data_parallel_size / factor)
def _div_if_needed(tensor: torch.Tensor, div_factor: Optional[float]) -> None:
diff --git a/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
index 8644146135..9e9813102d 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param_group.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
@@ -1,6 +1,6 @@
import contextlib
-from typing import Any, cast, Dict, List, NamedTuple, Optional, Set, Tuple, Union
+from typing import Any, cast, Dict, List, NamedTuple, Optional, Set, Tuple
import torch
import torch.distributed as dist
@@ -164,32 +164,6 @@ class FSDPParamGroup:
)
self._reduce_dtype = next(iter(reduce_dtypes))
- def _init_grad_divide_factors(self):
- data_parallel_world_size = 1
- data_parallel_world_size *= self.mesh_info.shard_mesh_size
- if self._is_hsdp:
- data_parallel_world_size *= self.mesh_info.replicate_mesh_size
- if self._reduce_dtype in (torch.float32, torch.bfloat16):
- # Use NCCL's AVG op to divide after reduction since it is more
- # performant and fp32 has sufficient precision
- self._grad_divide_factors: Union[Tuple[None, None], Tuple[float, float]] = (
- None,
- None,
- )
- return
- # Since fp16 has smaller dynamic range than fp32/bf16, we want to avoid
- # overflow/underflow. For N data parallel workers, each worker computes
- # g_i, and they collectively reduce (g_1 + ... + g_N) / N. To avoid
- # overflow/underflow, we divide by ~sqrt(N) before/after the reduction.
- factor: int = 1
- while (
- data_parallel_world_size % factor == 0
- and data_parallel_world_size / factor > factor
- ):
- factor *= 2
- factor = float(factor)
- self._grad_divide_factors = (factor, data_parallel_world_size / factor)
-
def lazy_init(self):
# Lazy init should be idempotent
param_names_on_meta = [
@@ -207,7 +181,6 @@ class FSDPParamGroup:
# Initialize mixed precision attributes lazily in case the user changes
# the parameter dtypes after construction time but before forward
self._init_mp_dtypes()
- self._init_grad_divide_factors()
self._register_state_dict_hooks()
# Runtime #
@@ -346,7 +319,6 @@ class FSDPParamGroup:
self._orig_dtype,
self._reduce_dtype,
self.device,
- self._grad_divide_factors,
self._all_reduce_process_group
if self._is_hsdp and self.all_reduce_grads
else None,
|
2.41.0
|
a1af95b0979d85c4fe32a75e797323ad81f298d
|
Sat, 4 May 2024 00:10:53 +0000
|
[PATCH 0991/1000] [Inductor] Properly package target info for triton.compile (#125241)
|
Triton updated the interface for `triton.compile` (https://github.com/openai/triton/commit/5162346487b3e3ebc062d9697429bafad25f22f6): the `target` argument to `compile` now needs to be wrapped in a `GPUTarget` object. Without proper wrapping, we hit an assert in `compile`. If that assert is removed, Triton attempts to read device info from Torch while inside a torch thread, which hits an "in bad fork" assert. This change is required for compatibility with the latest commits in Triton. The implementation is backwards compatible, so existing versions of Triton that work now continue to work. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125241 Approved by: https://github.com/jansel
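A condensed sketch of the compatibility shim the diff below adds; `make_target` is a hypothetical helper (the real code inlines this logic in `CachingAutotuner`):
```
try:
    from triton.backends.compiler import GPUTarget  # newer Triton only
except ImportError:
    GPUTarget = None

def make_target(device_type, cc, warp_size, is_hip):
    # Newer Triton wants a GPUTarget; older Triton expects a tuple (or a
    # list carrying the warp size on ROCm).
    if GPUTarget:
        return GPUTarget(device_type, cc, warp_size)
    return [device_type, cc, warp_size] if is_hip else (device_type, cc)
```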
|
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index b66bdbf393..0dc43f0649 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -55,11 +55,17 @@ if triton is not None:
from triton.compiler.compiler import ASTSource
except ImportError:
ASTSource = None
+
+ try:
+ from triton.backends.compiler import GPUTarget
+ except ImportError:
+ GPUTarget = None
else:
Config = object
KernelInterface = object
OutOfResources = object
ASTSource = None
+ GPUTarget = None
try:
autograd_profiler = torch.autograd.profiler
@@ -334,11 +340,22 @@ class CachingAutotuner(KernelInterface):
else:
rocm_warp_size = 64
- target = (
- (compile_meta["device_type"], compile_meta["cc"])
- if not torch.version.hip
- else [compile_meta["device_type"], compile_meta["cc"], rocm_warp_size]
- )
+ if GPUTarget:
+ target = GPUTarget(
+ compile_meta["device_type"],
+ compile_meta["cc"],
+ rocm_warp_size if torch.version.hip else 32,
+ )
+ else:
+ target = (
+ (compile_meta["device_type"], compile_meta["cc"])
+ if not torch.version.hip
+ else [
+ compile_meta["device_type"],
+ compile_meta["cc"],
+ rocm_warp_size,
+ ]
+ )
options = {
"num_warps": compile_meta["num_warps"],
|
2.41.0
|
325c55896648ccecc5cfec11d3220dd93f75f23
|
Sat, 4 May 2024 00:29:35 +0000
|
[PATCH 0992/1000] Add CUDA paths to `CODEOWNERS` (#125409)
|
CC @ptrblck @albanD Pull Request resolved: https://github.com/pytorch/pytorch/pull/125409 Approved by: https://github.com/albanD
|
diff --git a/CODEOWNERS b/CODEOWNERS
index 6999f8553b..e481e66112 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -144,3 +144,14 @@ caffe2/utils/hip @jeffdaily @jithunnair-amd
/torch/csrc/Storage* @mikaylagawarecki
# subscribing for PyTorchFileWriter/PyTorchFileReader changes
/torch/csrc/jit/python/init.cpp @mikaylagawarecki
+
+# CUDA and CUDA math libraries
+aten/src/ATen/cuda/ @eqy
+aten/src/ATen/cudnn/ @eqy
+aten/src/ATen/native/cuda/ @eqy
+aten/src/ATen/native/cudnn/ @eqy
+c10/cuda @eqy
+torch/cuda/ @eqy
+torch/csrc/cuda/ @eqy
+torch/backends/cuda/ @eqy
+torch/backends/cudnn/ @eqy
|
2.41.0
|
fd5bb0c44dfaef36961eaa67008df4d6134199d
|
Fri, 3 May 2024 13:38:04 -0700
|
[PATCH 0993/1000] [c10d] only PG0 should dump when monitoring thread timed out (#125356)
|
Summary: We found that some dumps are missing when the monitoring thread times out. This is likely because multiple PGs could still dump the same records at the same time, so we should allow only PG0 to actually dump. Test Plan: unit test: python test/run_test.py --cpp --verbose -i cpp/ProcessGroupNCCLErrorsTest Pull Request resolved: https://github.com/pytorch/pytorch/pull/125356 Approved by: https://github.com/c-p-i-o
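
A Python-flavored sketch of the intended protocol (the actual change is C++ in ProcessGroupNCCL; the names here are illustrative only): the watchdog that detects the exception sets a per-PG dump flag, and the heartbeat monitor only performs the debug dump when its own flag was set, so concurrent PGs no longer all dump the same records.

    import threading

    class ProcessGroupSketch:
        def __init__(self):
            self.should_dump = threading.Event()

        def watchdog_on_exception(self):
            # the first watchdog to see an exception signals its own monitor
            self.should_dump.set()

        def heartbeat_monitor(self, check_dump_signal, dump_debug_info):
            # dump only if this PG actually observed the dump signal
            if check_dump_signal and self.should_dump.is_set():
                dump_debug_info()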
|
diff --git a/test/cpp/c10d/ProcessGroupNCCLErrorsTest.cpp b/test/cpp/c10d/ProcessGroupNCCLErrorsTest.cpp
index 991a8c5dc7..aef97daae2 100644
--- a/test/cpp/c10d/ProcessGroupNCCLErrorsTest.cpp
+++ b/test/cpp/c10d/ProcessGroupNCCLErrorsTest.cpp
@@ -389,6 +389,7 @@ TEST_F(ProcessGroupNCCLErrorsTest, testNCCLErrorsNoHeartbeat) {
setenv("TORCH_NCCL_DEBUG_INFO_TEMP_FILE", tempFilename.c_str(), 1) == 0);
// Enable nccl flight recorder.
ASSERT_TRUE(setenv("TORCH_NCCL_TRACE_BUFFER_SIZE", "10", 1) == 0);
+ ASSERT_TRUE(setenv(c10d::TORCH_NCCL_DUMP_ON_TIMEOUT[0].c_str(), "1", 1) == 0);
auto options = c10d::ProcessGroupNCCL::Options::create();
// Set a long watchdog timeout, so that we have enough time to lock the
// watchdog and let the heartbeat monitor thread to kick in.
diff --git a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
index 46b2923651..228fb79c54 100644
--- a/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
+++ b/torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp
@@ -1250,6 +1250,11 @@ void ProcessGroupNCCL::heartbeatMonitor() {
lastTimePollStore = currentTime;
if (globalStore_->check({std::string(EXCEPTION_DUMP)})) {
int timeOutRank = -1;
+ if (!shouldDump_.load()) {
+ LOG(ERROR)
+ << logPrefix()
+ << "First PG on this rank detecting the dump signal through tcpstore.";
+ }
shouldDump_.store(true);
try {
auto vec = globalStore_->get(std::string(EXCEPTION_DUMP));
@@ -1295,6 +1300,11 @@ void ProcessGroupNCCL::heartbeatMonitor() {
if (heartbeat != heartBeatCounter) {
heartBeatCounter = heartbeat;
} else {
+ if (!shouldDump_.load()) {
+ LOG(ERROR)
+ << logPrefix()
+ << "First PG on this rank that detected no heartbeat of its watchdog.";
+ }
shouldDump_.store(true);
// No heartbeat increase detected and timeout.
errorMsg = c10::str(
@@ -1337,16 +1347,18 @@ void ProcessGroupNCCL::heartbeatMonitor() {
cpp_dumper.value()([](const std::string& line) { LOG(INFO) << line; });
}
- // Store debug info to storage if no other thread does it. (By default to
- // local disk)
- std::future<bool> asyncDebugDump = std::async(
- std::launch::async, [this]() { return this->dumpDebuggingInfo(); });
-
- // wait for the dump until timeout
- waitForFutureOrTimeout(
- asyncDebugDump,
- std::chrono::milliseconds(waitTimeoutDumpInMilSec_),
- "Flight recorder dump in heartbeatMonitor");
+ if (checkDumpSignal && shouldDump_.load()) {
+ // Store debug info to storage if no other thread does it. (By default to
+ // local disk)
+ std::future<bool> asyncDebugDump = std::async(
+ std::launch::async, [this]() { return this->dumpDebuggingInfo(); });
+
+ // wait for the dump until timeout
+ waitForFutureOrTimeout(
+ asyncDebugDump,
+ std::chrono::milliseconds(waitTimeoutDumpInMilSec_),
+ "Flight recorder dump in heartbeatMonitor");
+ }
if (get_gil_checker() != nullptr) {
auto fut = launchAsyncGilCheck();
@@ -1567,6 +1579,16 @@ void ProcessGroupNCCL::watchdogHandler() {
// If work hits an exception (either an error or timeout)
if (work.exception()) {
+ // log as soon as exception is detected
+ LOG(ERROR) << c10::str(
+ logPrefix(),
+ "Exception (either an error or timeout) detected by watchdog at work: ",
+ work.seq_,
+ ", last enqueued NCCL work: ",
+ lastEnqueuedSeq_,
+ ", last completed NCCL work: ",
+ lastCompletedSeq_,
+ ".");
// try to dump flight records if exception happens.
// Flight recorder behavior should be independent of desync Debug
if (dumpOnException_) {
@@ -1576,6 +1598,10 @@ void ProcessGroupNCCL::watchdogHandler() {
reinterpret_cast<uint8_t*>(&rank),
reinterpret_cast<uint8_t*>(&rank) + sizeof(rank));
globalStore_->set(std::string(EXCEPTION_DUMP), vec);
+ if (!shouldDump_.load()) {
+ LOG(ERROR) << logPrefix()
+ << "First watchdog to set the dump signal.";
+ }
// signal the monitor thread to start dumping
shouldDump_.store(true);
// This sleep is used to give time for dumping before throwing
|
2.41.0
|
302dc68bf76a0af6dd4bb0488aaf22998374a0e
|
Sat, 4 May 2024 02:39:39 +0000
|
[PATCH 0994/1000] [Reland] Fakify script object inputs and attributes for non-strict ex… (#125490)
|
A re-land of #124239. This PR fakifies ScriptObject inputs and attributes in export non-strict mode by default. The basic idea is to only fakify the script objects during tracing (i.e. aot_export); after we get the traced graph module, eager execution, serialization, or further passes will use the real script objects. This essentially treats the script object as a constant tensor. Concretely, we fakify all the script object inputs and module attributes (gathered by constant_attrs), patch the module's attributes with the fakified script objects, and right after aot_export remove the patching (to avoid changing the original module) and then modify the exported graph module's attributes back to the real script objects. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125490 Approved by: https://github.com/angelayi
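
As a rough illustration of the patch-then-restore idea, here is a minimal sketch with made-up names (not the actual torch.export implementation; the real logic is `_fakify_script_objects` in the diff below):

    import contextlib

    @contextlib.contextmanager
    def swap_script_object_attrs(mod, fqn_to_fake):
        # fqn_to_fake: {"sub.attr": fake_obj, ...} built from the gathered constants
        originals = {}

        def owner_and_name(root, fqn):
            *prefix, name = fqn.split(".")
            for p in prefix:
                root = getattr(root, p)
            return root, name

        try:
            for fqn, fake in fqn_to_fake.items():
                owner, name = owner_and_name(mod, fqn)
                originals[fqn] = getattr(owner, name)
                setattr(owner, name, fake)  # trace against the fake object
            yield mod
        finally:
            # always undo the patching so the user's module is left unchanged
            for fqn, real in originals.items():
                owner, name = owner_and_name(mod, fqn)
                setattr(owner, name, real)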
|
diff --git a/test/export/test_passes.py b/test/export/test_passes.py
index 41597a6030..e2724ead88 100644
--- a/test/export/test_passes.py
+++ b/test/export/test_passes.py
@@ -13,6 +13,10 @@ from typing import List, Set
import torch
from functorch.experimental.control_flow import cond
from torch._dynamo.eval_frame import is_dynamo_supported
+from torch._export.non_strict_utils import (
+ _fakify_script_objects,
+ _gather_constant_attrs,
+)
from torch._export.pass_base import _ExportPassBaseDeprecatedDoNotUse
from torch._export.passes.functionalize_side_effectful_ops_pass import (
_FunctionalizeSideEffectfulOpsPass,
@@ -34,26 +38,24 @@ from torch._export.utils import (
sequential_split,
)
from torch._higher_order_ops.auto_functionalize import auto_functionalized
-from torch._higher_order_ops.torchbind import enable_torchbind_tracing
+from torch._subclasses.fake_tensor import FakeTensorMode
from torch.export import export
from torch.export._remove_auto_functionalized_pass import (
unsafe_remove_auto_functionalized_pass,
)
from torch.export._remove_effect_tokens_pass import _remove_effect_tokens
+from torch.fx.experimental.symbolic_shapes import ShapeEnv
from torch.fx.passes.infra.partitioner import Partition
from torch.fx.passes.operator_support import OperatorSupport
from torch.library import _scoped_library, impl
from torch.testing import FileCheck
from torch.testing._internal.common_utils import (
- find_library_location,
- IS_FBCODE,
- IS_MACOS,
- IS_SANDCASTLE,
IS_WINDOWS,
run_tests,
skipIfTorchDynamo,
TestCase,
)
+from torch.testing._internal.torchbind_impls import init_torchbind_implementations
from torch.utils import _pytree as pytree
@@ -87,6 +89,53 @@ def _get_output_names(gm: torch.fx.GraphModule) -> List[str]:
return [str(arg) for arg in args]
+class ModelsWithScriptObjectAttr:
+ class Simple(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
+
+ class SimpleWithAttrInContainer(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
+ self.pytree_attr2 = [
+ torch.classes._TorchScriptTesting._Foo(1, 2),
+ {
+ torch.classes._TorchScriptTesting._Foo(3, 4),
+ },
+ {"foo": torch.classes._TorchScriptTesting._Foo(5, 6)},
+ ]
+
+ class NestedWithAttrInContainer(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
+ self.pytree_attr2 = [
+ torch.classes._TorchScriptTesting._Foo(1, 2),
+ {
+ torch.classes._TorchScriptTesting._Foo(3, 4),
+ },
+ {"foo": torch.classes._TorchScriptTesting._Foo(5, 6)},
+ ]
+ self.sub_mod = ModelsWithScriptObjectAttr.Simple()
+ self.sub_mod2 = ModelsWithScriptObjectAttr.SimpleWithAttrInContainer()
+
+ class MoreNestedWithAttrInContainer(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
+ self.pytree_attr2 = [
+ torch.classes._TorchScriptTesting._Foo(1, 2),
+ {
+ torch.classes._TorchScriptTesting._Foo(3, 4),
+ },
+ {"foo": torch.classes._TorchScriptTesting._Foo(5, 6)},
+ ]
+ self.sub_mod = ModelsWithScriptObjectAttr.Simple()
+ self.sub_mod2 = ModelsWithScriptObjectAttr.NestedWithAttrInContainer()
+
+
def _set_grad_enabled_tests():
from torch.export._trace import _export
@@ -213,17 +262,7 @@ class TestPasses(TestCase):
self.SEQUENTIAL_SPLIT_INLINE_TESTS = _sequential_split_inline_tests()
self.SET_GRAD_ENABLED_TESTS = _set_grad_enabled_tests()
- if IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library(
- "//caffe2/test/cpp/jit:test_custom_class_registrations"
- )
- elif IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- if IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
+ init_torchbind_implementations()
def tearDown(self):
self.SEQUENTIAL_SPLIT_INLINE_TESTS.clear()
@@ -421,8 +460,7 @@ class TestPasses(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- with enable_torchbind_tracing():
- ep = torch.export.export(m, inputs, strict=False)
+ ep = torch.export.export(m, inputs, strict=False)
inp = torch.randn(2, 3)
orig_res = m(inp)
@@ -435,6 +473,48 @@ class TestPasses(TestCase):
self.assertTrue(torch.allclose(orig_res, ep_res))
self.assertTrue(torch.allclose(orig_res, without_token_res))
+ def test_fakify_script_objects(self):
+ for m in [
+ ModelsWithScriptObjectAttr.Simple(),
+ ModelsWithScriptObjectAttr.SimpleWithAttrInContainer(),
+ ModelsWithScriptObjectAttr.NestedWithAttrInContainer(),
+ ModelsWithScriptObjectAttr.MoreNestedWithAttrInContainer(),
+ ]:
+ constant_attrs = _gather_constant_attrs(m)
+ fake_mode = FakeTensorMode(
+ shape_env=ShapeEnv(tracked_fakes=[]),
+ allow_non_fake_inputs=True,
+ )
+ with _fakify_script_objects(m, tuple(), {}, fake_mode) as (
+ patched_mod,
+ _,
+ _,
+ fake_constant_attrs,
+ fake_to_real,
+ ):
+ self.assertEqual(len(fake_constant_attrs), len(constant_attrs))
+ for fake_obj, fqn in fake_constant_attrs.items():
+ self.assertEqual(constant_attrs[fake_to_real[fake_obj]], fqn)
+
+ # TODO: _gather_constants doesn't recursively look into the pytree containers.
+ @unittest.expectedFailure
+ def test_fakify_script_objects_properly_handle_containers(self):
+ m = ModelsWithScriptObjectAttr.SimpleWithAttrInContainer()
+ constant_attrs = _gather_constant_attrs(m)
+ fake_mode = FakeTensorMode(
+ shape_env=ShapeEnv(tracked_fakes=[]),
+ allow_non_fake_inputs=True,
+ )
+ with _fakify_script_objects(m, tuple(), {}, fake_mode) as (
+ patched_mod,
+ _,
+ _,
+ fake_constant_attrs,
+ fake_to_real,
+ ):
+ self.assertTrue("attr" in fake_constant_attrs.values())
+ self.assertTrue("pytree_attr2" in fake_constant_attrs.values())
+
def test_runtime_assert_inline_constraints_for_item(self) -> None:
class M(torch.nn.Module):
def __init__(self):
diff --git a/test/export/test_serialize.py b/test/export/test_serialize.py
index 27681d48c2..9709241e9a 100644
--- a/test/export/test_serialize.py
+++ b/test/export/test_serialize.py
@@ -3,6 +3,7 @@ PYTEST_DONT_REWRITE (prevents pytest from rewriting assertions, which interferes
with test_sym_bool)
"""
+
# Owner(s): ["oncall: export"]
import copy
import io
@@ -30,11 +31,7 @@ from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch.export import Dim, export, load, save
from torch.fx.experimental.symbolic_shapes import is_concrete_int, ValueRanges
from torch.testing._internal.common_utils import (
- find_library_location,
instantiate_parametrized_tests,
- IS_FBCODE,
- IS_MACOS,
- IS_SANDCASTLE,
IS_WINDOWS,
parametrize,
run_tests,
@@ -42,6 +39,8 @@ from torch.testing._internal.common_utils import (
TestCase,
)
+from torch.testing._internal.torchbind_impls import init_torchbind_implementations
+
def get_filtered_export_db_tests():
return [
@@ -347,17 +346,8 @@ class TestSerialize(TestCase):
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo doesn't support")
class TestDeserialize(TestCase):
def setUp(self):
- if IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library(
- "//caffe2/test/cpp/jit:test_custom_class_registrations"
- )
- elif IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- if IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
+ super().setUp()
+ init_torchbind_implementations()
def _check_graph_nodes(self, gm1, gm2, _check_meta=True):
# TODO: The _check_meta flag bypasses checking for
@@ -837,8 +827,7 @@ class TestDeserialize(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- with enable_torchbind_tracing():
- self.check_graph(m, inputs, strict=False)
+ self.check_graph(m, inputs, strict=False)
def test_custom_obj(self):
class MyModule(torch.nn.Module):
@@ -853,8 +842,7 @@ class TestDeserialize(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- with enable_torchbind_tracing():
- self.check_graph(m, inputs, strict=False)
+ self.check_graph(m, inputs, strict=False)
def test_custom_obj_list_out(self):
class MyModule(torch.nn.Module):
@@ -870,8 +858,7 @@ class TestDeserialize(TestCase):
m = MyModule()
inputs = (torch.ones(2, 3),)
- with enable_torchbind_tracing():
- self.check_graph(m, inputs, strict=False)
+ self.check_graph(m, inputs, strict=False)
instantiate_parametrized_tests(TestDeserialize)
@@ -1061,17 +1048,8 @@ class TestSaveLoad(TestCase):
@unittest.skipIf(not torchdynamo.is_dynamo_supported(), "dynamo doesn't support")
class TestSerializeCustomClass(TestCase):
def setUp(self):
- if IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library(
- "//caffe2/test/cpp/jit:test_custom_class_registrations"
- )
- elif IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- if IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
+ super().setUp()
+ init_torchbind_implementations()
def test_custom_class(self):
custom_obj = torch.classes._TorchScriptTesting._PickleTester([3, 4])
diff --git a/test/export/test_torchbind.py b/test/export/test_torchbind.py
index 872c713571..3a6445482c 100644
--- a/test/export/test_torchbind.py
+++ b/test/export/test_torchbind.py
@@ -1,6 +1,5 @@
# Owner(s): ["oncall: export"]
-import unittest
import torch
import torch.utils._pytree as pytree
@@ -11,38 +10,25 @@ from torch.export import export
from torch.export._trace import _export
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
- find_library_location,
instantiate_parametrized_tests,
- IS_FBCODE,
- IS_MACOS,
- IS_SANDCASTLE,
- IS_WINDOWS,
parametrize,
run_tests,
skipIfTorchDynamo,
TestCase,
)
-from torch.testing._internal.torchbind_impls import register_fake_operators
-
-
-def load_torchbind_test_lib():
- if IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library("//caffe2/test/cpp/jit:test_custom_class_registrations")
- elif IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- if IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
-
- register_fake_operators()
+from torch.testing._internal.torchbind_impls import init_torchbind_implementations
@skipIfTorchDynamo("torchbind not supported with dynamo yet")
class TestExportTorchbind(TestCase):
def setUp(self):
- load_torchbind_test_lib()
+ init_torchbind_implementations()
+
+ test = self
+ test.tq_push_counter = 0
+ test.tq_pop_counter = 0
+ test.tq_size_counter = 0
+ test.foo_add_tensor_counter = 0
@torch._library.register_fake_class("_TorchScriptTesting::_Foo")
class FakeFoo:
@@ -56,13 +42,9 @@ class TestExportTorchbind(TestCase):
return cls(x, y)
def add_tensor(self, z):
+ test.foo_add_tensor_counter += 1
return (self.x + self.y) * z
- test = self
- test.tq_push_counter = 0
- test.tq_pop_counter = 0
- test.tq_size_counter = 0
-
@torch._library.register_fake_class("_TorchScriptTesting::_TensorQueue")
class FakeTensorQueue:
def __init__(self, q):
@@ -248,7 +230,16 @@ def forward(self, token, obj_attr, x):
)
@parametrize("pre_dispatch", [True, False])
- def test_input(self, pre_dispatch):
+ @parametrize("fakify_script_obj", [True, False])
+ def test_input(self, pre_dispatch, fakify_script_obj):
+ cc = torch.classes._TorchScriptTesting._Foo(10, 20)
+ if not fakify_script_obj:
+ qual_name = cc._type().qualified_name() # type: ignore[att-defined]
+ if torch._library.fake_class_registry.has_fake_class(qual_name):
+ torch._library.fake_class_registry.deregister_fake_class(
+ "_TorchScriptTesting::_Foo"
+ )
+
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -256,7 +247,6 @@ def forward(self, token, obj_attr, x):
def forward(self, x, cc):
return x + cc.add_tensor(x)
- cc = torch.classes._TorchScriptTesting._Foo(10, 20)
ep = self._test_export_same_as_eager(
MyModule(), (torch.ones(2, 3), cc), strict=False, pre_dispatch=pre_dispatch
)
@@ -277,9 +267,23 @@ def forward(self, x, cc):
add = torch.ops.aten.add.Tensor(x, call_torchbind); x = call_torchbind = None
return (add,)""",
)
+ # aot_export_function runs the program twice
+ # in run_functionalized_fw_and_collect_metadata and create_aot_dispatcher_function
+ # We also have a re-tracing test, which doubles the count.
+ if fakify_script_obj:
+ self.assertEqual(self.foo_add_tensor_counter, 4)
@parametrize("pre_dispatch", [True, False])
- def test_input_as_custom_op_argument(self, pre_dispatch):
+ @parametrize("fakify_script_obj", [True, False])
+ def test_input_as_custom_op_argument(self, pre_dispatch, fakify_script_obj):
+ cc = torch.classes._TorchScriptTesting._Foo(10, 20)
+ if not fakify_script_obj:
+ qual_name = cc._type().qualified_name() # type: ignore[att-defined]
+ if torch._library.fake_class_registry.has_fake_class(qual_name):
+ torch._library.fake_class_registry.deregister_fake_class(
+ "_TorchScriptTesting::_Foo"
+ )
+
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -287,10 +291,33 @@ def forward(self, x, cc):
def forward(self, x, cc):
return x + torch.ops._TorchScriptTesting.takes_foo(cc, x)
- cc = torch.classes._TorchScriptTesting._Foo(10, 20)
+ del torch.ops._TorchScriptTesting.takes_foo.default.py_kernels[
+ torch._C.DispatchKey.Meta
+ ]
+ torch.ops._TorchScriptTesting.takes_foo.default._dispatch_cache.clear()
+ # Even though a C++ implementation for takes_foo.default is registered,
+ # we still need the python implementation for takes_foo.default to trace with FakeFoo.
+ if fakify_script_obj:
+ with self.assertRaisesRegex(
+ RuntimeError, "no python implementation is found"
+ ):
+ self._test_export_same_as_eager(
+ MyModule(),
+ (torch.ones(2, 3), cc),
+ strict=False,
+ pre_dispatch=pre_dispatch,
+ )
+
+ torch.ops._TorchScriptTesting.takes_foo.default.py_impl(
+ torch._C.DispatchKey.Meta
+ )(lambda cc, x: cc.add_tensor(x))
ep = self._test_export_same_as_eager(
- MyModule(), (torch.ones(2, 3), cc), strict=False, pre_dispatch=pre_dispatch
+ MyModule(),
+ (torch.ones(2, 3), cc),
+ strict=False,
+ pre_dispatch=pre_dispatch,
)
+
self.assertExpectedInline(
ep.module().code.strip(),
"""\
@@ -805,7 +832,7 @@ def forward(self, arg0_1, arg1_1, arg2_1):
@skipIfTorchDynamo("torchbind not supported with dynamo yet")
class TestRegisterFakeClass(TestCase):
def setUp(self):
- load_torchbind_test_lib()
+ init_torchbind_implementations()
def tearDown(self):
torch._library.fake_class_registry.global_fake_class_registry.clear()
@@ -851,21 +878,6 @@ class TestRegisterFakeClass(TestCase):
torch._library.register_fake_class("_TorchScriptTesting::_Foo", FakeFoo)
- def test_register_fake_class_duplicate_registration(self):
- @torch._library.register_fake_class("_TorchScriptTesting::_Foo")
- class FakeFoo:
- def __init__(self, x, y):
- self.x = x
- self.y = y
-
- @classmethod
- def from_real(cls, foo_obj):
- x, y = foo_obj.__getstate__()
- return cls(x, y)
-
- with self.assertWarnsRegex(UserWarning, "already registered"):
- torch._library.register_fake_class("_TorchScriptTesting::_Foo", FakeFoo)
-
instantiate_parametrized_tests(TestExportTorchbind)
diff --git a/test/export/test_unflatten.py b/test/export/test_unflatten.py
index 8dfca01112..35fbf2e1d6 100644
--- a/test/export/test_unflatten.py
+++ b/test/export/test_unflatten.py
@@ -41,6 +41,8 @@ from torch.testing._internal.common_utils import (
skipIfTorchDynamo,
TestCase,
)
+
+from torch.testing._internal.torchbind_impls import init_torchbind_implementations
from torch.utils._pytree import (
LeafSpec,
tree_flatten,
@@ -562,18 +564,21 @@ class TestUnflatten(TestCase):
@skipIfTorchDynamo("custom objects not supported in dynamo yet")
def test_unflatten_constant_obj(self):
- if IS_MACOS:
- raise unittest.SkipTest("non-portable load_library call used in test")
- elif IS_SANDCASTLE or IS_FBCODE:
- torch.ops.load_library(
- "//caffe2/test/cpp/jit:test_custom_class_registrations"
- )
- elif IS_WINDOWS:
- lib_file_path = find_library_location("torchbind_test.dll")
- torch.ops.load_library(str(lib_file_path))
- else:
- lib_file_path = find_library_location("libtorchbind_test.so")
- torch.ops.load_library(str(lib_file_path))
+ init_torchbind_implementations()
+
+ @torch._library.register_fake_class("_TorchScriptTesting::_Foo")
+ class FakeFoo:
+ def __init__(self, x: int, y: int):
+ self.x = x
+ self.y = y
+
+ @classmethod
+ def from_real(cls, foo):
+ (x, y), _ = foo.__getstate__()
+ return cls(x, y)
+
+ def add_tensor(self, z):
+ return (self.x + self.y) * z
class SubMod(torch.nn.Module):
def __init__(self):
diff --git a/torch/_export/non_strict_utils.py b/torch/_export/non_strict_utils.py
index 98627e75b9..3ce909bb1e 100644
--- a/torch/_export/non_strict_utils.py
+++ b/torch/_export/non_strict_utils.py
@@ -1,8 +1,10 @@
+import contextlib
import inspect
from collections import defaultdict
from typing import Any, Callable, Dict, List, Tuple, Union
import torch
+import torch.utils._pytree as pytree
from torch._dynamo.source import (
AttrSource,
GetItemSource,
@@ -12,7 +14,9 @@ from torch._dynamo.source import (
)
from torch._dynamo.variables.builder import TrackedFake
from torch._export.passes.add_runtime_assertions_for_constraints_pass import InputDim
+from torch._export.passes.lift_constants_pass import ConstantAttrMap
from torch._guards import Source
+from torch._library.fake_class_registry import FakeScriptObject
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch.export import Constraint
from torch.export.dynamic_shapes import _tree_map
@@ -66,6 +70,7 @@ def fakify(
source = key_path_to_source(kp)
if _is_constant_argument(t) or isinstance(t, torch.ScriptObject):
return t
+
if not isinstance(t, torch.Tensor):
raise ValueError(f"Unsupported input type {type(t)}")
n_dims = len(t.shape)
@@ -320,3 +325,111 @@ def make_constraints(
range_constraints[symbol] = shape_env.var_to_range[symbol]
return range_constraints
+
+
+def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap:
+ """Search the module hierarchy, gathering up all tensor and ScriptObject constants.
+
+ Returns a dictionary mapping hash(value) to the name of the constant. We
+ have to abuse `hash` here unfortunately, see: [ScriptObject hash].
+ """
+ constants = ConstantAttrMap()
+ buffers_parameters = set(m.buffers())
+ buffers_parameters.update(m.parameters())
+
+ def inner(m: torch.nn.Module, prefix_atoms: List[str], constants):
+ for k, v in m.__dict__.items():
+ if isinstance(
+ v,
+ (
+ torch.Tensor,
+ torch.ScriptObject,
+ FakeScriptObject,
+ ),
+ ):
+ if v in buffers_parameters:
+ # filter out buffers and parameters, leaving only constants
+ continue
+
+ fqn = ".".join(prefix_atoms + [k])
+ if v in constants:
+ raise ValueError(
+ f"Duplicate reference to constant attribute found: '{constants[v]}' and '{fqn}'."
+ )
+
+ constants[v] = fqn
+ for k, v in m.named_children():
+ inner(v, prefix_atoms + [k], constants)
+
+ inner(m, [], constants)
+ return constants
+
+
+@contextlib.contextmanager
+def _fakify_script_objects(
+ mod: torch.nn.Module,
+ args: Tuple[Any],
+ kwargs: Dict[Any, Any],
+ fake_mode: torch._subclasses.fake_tensor.FakeTensorMode,
+):
+ # This context manager is used to fakify script objects into FakeScriptObject.
+ # Inputs:
+ # mod: the module to be exported, it (and its recursive submodules)'s script object attrs haven't been fakified.
+ # args, kwargs: the args and kwargs inputs for mod, script object inputs haven't been fakified.
+ # fake_mode: the fake mode to be used for fakifying script objects. It's the same mode that fakify input tensors.
+ #
+ # Returns:
+ # mod: the patched module, its (and its recursive submodules) script object attrs have been fakified.
+ # fake_args, fake_kwargs: new fakified args and kwargs.
+ # Script object inputs have been fakified. Don't touch the tensors.
+ # fake_constant_attrs: a new map from FakeScriptObject to the fqn of the original script object.
+ # fake_to_real: a mapping between FakeScriptObject and the original script object in order to un-do the patching.
+
+ constant_attrs: ConstantAttrMap = _gather_constant_attrs(mod)
+ assert not any(
+ isinstance(obj, FakeScriptObject) for obj in constant_attrs.values()
+ ), "Mod shouldn't contain any FakeScriptObject."
+ assert not pytree.tree_any(
+ lambda obj: isinstance(obj, FakeScriptObject), (args, kwargs)
+ ), "args and kwargs shouldn't contain any FakeScriptObject."
+
+ patched_attr = {}
+ fake_constant_attrs = ConstantAttrMap()
+ fake_to_real = {}
+
+ def _maybe_fakify_obj(obj):
+ if not torch._library.fake_class_registry.has_fake_class(obj._type().qualified_name()): # type: ignore[attr-defined]
+ return obj
+ fake_obj = torch._library.fake_class_registry.to_fake_obj(fake_mode, obj)
+ fake_to_real[fake_obj] = obj
+ return fake_obj
+
+ def _leaf_mod_and_attr(
+ mod: torch.nn.Module, attr_fqn: str
+ ) -> Tuple[torch.nn.Module, str]:
+ *prefix_attr, last_attr = attr_fqn.split(".")
+ cur_mod = mod
+ for attr in prefix_attr:
+ cur_mod = getattr(cur_mod, attr)
+ return cur_mod, last_attr
+
+ try:
+ for obj, fqn in constant_attrs.items():
+ if isinstance(obj, torch.ScriptObject):
+ cur_mod, attr = _leaf_mod_and_attr(mod, fqn)
+ assert obj is getattr(cur_mod, attr)
+ fake_script_obj = _maybe_fakify_obj(obj)
+ setattr(cur_mod, attr, fake_script_obj)
+ fake_constant_attrs[fake_script_obj] = fqn
+ patched_attr[fqn] = obj
+ else:
+ fake_constant_attrs[obj] = fqn
+
+ fake_args, fake_kwargs = pytree.tree_map_only(
+ torch.ScriptObject, _maybe_fakify_obj, (args, kwargs)
+ )
+ yield (mod, fake_args, fake_kwargs, fake_constant_attrs, fake_to_real)
+ finally:
+ for fqn, orig_obj in patched_attr.items():
+ cur_mod, attr = _leaf_mod_and_attr(mod, fqn)
+ setattr(cur_mod, attr, orig_obj)
diff --git a/torch/_export/passes/lift_constants_pass.py b/torch/_export/passes/lift_constants_pass.py
index fc13403a3f..877ecc23aa 100644
--- a/torch/_export/passes/lift_constants_pass.py
+++ b/torch/_export/passes/lift_constants_pass.py
@@ -4,6 +4,8 @@ from typing import Any, Dict, Union
import torch
from torch._export.verifier import SpecViolationError
from torch._guards import detect_fake_mode
+
+from torch._library.fake_class_registry import FakeScriptObject
from torch.export.exported_program import (
ArgumentSpec,
CustomObjArgument,
@@ -15,33 +17,35 @@ from torch.export.exported_program import (
class ConstantAttrMap(collections.abc.MutableMapping):
- """A mapping class that understands how to use module constants (tensors and
- ScriptObjects) as keys. We store tensors normally, but ScriptObjects are
- stored by hash, because different torch.ScriptObjects can point to the same
- underlying value (but we guarantee that they will `hash()` to the same value
+ """A mapping class that understands how to use module constants (tensors,
+ ScriptObjects, FakeScriptObjects) as keys. We store tensors and FakeScriptObjects normally,
+ but ScriptObjects are stored by hash, because different torch.ScriptObjects can point to
+ the same underlying value (but we guarantee that they will `hash()` to the same value
if that's the case).
"""
def __init__(self):
# Underlying dict that we use to implement this mapping.
- self._constant_attrs: Dict[Union[int, torch.Tensor], Any] = {}
+ self._constant_attrs: Dict[Union[int, torch.Tensor, FakeScriptObject], Any] = {}
# Map from the hash(ScriptObject) to the ScriptObject itself. Used for
# APIs like `__iter__` that should look like they're returning the
# original ScriptObjects.
self._script_object_map: Dict[int, torch.ScriptObject] = {}
- def __getitem__(self, key: Union[torch.Tensor, torch.ScriptObject]) -> Any:
+ def __getitem__(
+ self, key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]
+ ) -> Any:
real_key = hash(key) if isinstance(key, torch.ScriptObject) else key
- assert isinstance(real_key, (int, torch.Tensor))
+ assert isinstance(real_key, (int, torch.Tensor, FakeScriptObject))
return self._constant_attrs[real_key]
def __setitem__(
- self, key: Union[torch.Tensor, torch.ScriptObject], value: Any
+ self, key: Union[torch.Tensor, torch.ScriptObject, FakeScriptObject], value: Any
) -> None:
if isinstance(key, torch.ScriptObject):
self._constant_attrs[hash(key)] = value
self._script_object_map[hash(key)] = key
- elif isinstance(key, torch.Tensor):
+ elif isinstance(key, (torch.Tensor, FakeScriptObject)):
self._constant_attrs[key] = value
else:
raise TypeError(
@@ -83,7 +87,7 @@ def lift_constants_pass(
gm: torch.fx.GraphModule,
graph_signature: ExportGraphSignature,
constant_attrs: ConstantAttrMap,
-) -> Dict[str, Union[torch.Tensor, torch._C.ScriptObject]]:
+) -> Dict[str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]]:
"""
Takes a graph module, graph signature, and modifies them implace to lift any
constants (tensors or custom classes) as inputs to the graph. Returns a
@@ -101,7 +105,9 @@ def lift_constants_pass(
Returns:
A dictionary of fqn => constant value.
"""
- all_constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {}
+ all_constants: Dict[
+ str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject]
+ ] = {}
inputs = graph_signature.input_specs
num_custom_obj = sum(
@@ -135,7 +141,7 @@ def lift_constants_pass(
gm.graph.erase_node(node)
continue
- # For ScriptObject and Tensor constants:
+ # For ScriptObject, Tensor and FakeScriptObject constants:
# First check if the constant was an attribute on some module by
# consulting `constant_attrs` map. If it is, use the fqn that keeps
# its location consistent with the eager module.
@@ -144,7 +150,7 @@ def lift_constants_pass(
# constant (e.g. x + torch.tensor(0)), and thus did not have a
# specific location in the eager module. In that case, just generate
# some name and attach it to the module in which it was used.
- if isinstance(constant_val, torch.ScriptObject):
+ if isinstance(constant_val, (torch.ScriptObject, FakeScriptObject)):
constant_kind = InputKind.CUSTOM_OBJ
constant_fqn = constant_attrs.get(constant_val)
if constant_fqn is not None:
@@ -203,6 +209,14 @@ def lift_constants_pass(
input_spec_arg = CustomObjArgument(
name=const_placeholder_node.name, class_fqn=class_fqn
)
+ elif isinstance(constant_val, FakeScriptObject):
+ class_fqn = constant_val.script_class_name
+ const_placeholder_node.meta["val"] = CustomObjArgument(
+ constant_fqn, class_fqn
+ )
+ input_spec_arg = CustomObjArgument(
+ name=const_placeholder_node.name, class_fqn=class_fqn
+ )
else:
raise SpecViolationError(
f"tried to lift unsupported type {type(constant_val)} from node {node.format_node()}"
@@ -229,24 +243,36 @@ def lift_constants_pass(
def rewrite_script_object_meta(
gm: torch.fx.GraphModule,
-) -> Dict[str, Union[torch.Tensor, torch.ScriptObject]]:
- """When tracing, we produce a graph with an actual ScriptObject in the
- meta["val"]. Eventually we want to change this behavior, when FakeMode infra
- for ScriptObjects lands.
+) -> Dict[str, Union[torch.Tensor, torch.ScriptObject, FakeScriptObject],]:
+ """When tracing, we produce a graph with FakeScriptObject in the
+ meta["val"].
For now, we rewrie meta["val"] to be a placeholder CustomObjArgument
"""
- constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]] = {}
+ constants: Dict[
+ str,
+ Union[
+ torch.Tensor,
+ torch.ScriptObject,
+ FakeScriptObject,
+ ],
+ ] = {}
for node in gm.graph.nodes:
- if "val" not in node.meta or not isinstance(
- node.meta["val"], torch.ScriptObject
- ):
+ if "val" not in node.meta:
continue
- old_meta = node.meta["val"]
- class_fqn = old_meta._type().qualified_name() # type: ignore[attr-defined]
- new_meta = CustomObjArgument(node.name, class_fqn)
- constants[node.name] = old_meta
- node.meta["val"] = new_meta
+ if isinstance(node.meta["val"], torch.ScriptObject):
+ old_meta = node.meta["val"]
+ class_fqn = old_meta._type().qualified_name() # type: ignore[attr-defined]
+ new_meta = CustomObjArgument(node.name, class_fqn)
+ constants[node.name] = old_meta
+ node.meta["val"] = new_meta
+
+ elif isinstance(node.meta["val"], FakeScriptObject):
+ old_meta = node.meta["val"] # type: ignore[assignment]
+ class_fqn = old_meta.script_class_name # type: ignore[attr-defined]
+ new_meta = CustomObjArgument(node.name, class_fqn)
+ constants[node.name] = old_meta
+ node.meta["val"] = new_meta
return constants
diff --git a/torch/_library/fake_class_registry.py b/torch/_library/fake_class_registry.py
index 7eff756284..49fe43e8b9 100644
--- a/torch/_library/fake_class_registry.py
+++ b/torch/_library/fake_class_registry.py
@@ -1,5 +1,4 @@
import logging
-import warnings
from typing import Any, Dict, Optional, Protocol, Tuple
import torch
@@ -10,9 +9,12 @@ log = logging.getLogger(__name__)
class FakeScriptObject:
- def __init__(self, wrapped_obj):
+ def __init__(self, wrapped_obj: Any, script_class_name: str):
self.wrapped_obj = wrapped_obj
+ # The fully qualified name of the class of original script object
+ self.script_class_name = script_class_name
+
class HasStaticMethodFromReal(Protocol):
@classmethod
@@ -33,19 +35,22 @@ class FakeClassRegistry:
def register(self, full_qualname: str, fake_class=None) -> None:
if self.has_impl(full_qualname):
- warnings.warn(
- f"{full_qualname} is already registered. Previous fake class is overrided with {fake_class}."
+ log.warning(
+ "%s is already registered. Previous fake class is overrided with %s.",
+ full_qualname,
+ fake_class,
)
self._registered_class[full_qualname] = fake_class
def deregister(self, full_qualname: str) -> Any:
if not self.has_impl(full_qualname):
- raise RuntimeError(
- f"Cannot deregister {full_qualname}. Please use register_fake_class to register it first."
- f" Or do you dereigster it twice?"
+ log.warning(
+ "Cannot deregister %s. Please use register_fake_class to register it first."
+ " Or do you dereigster it twice?",
+ full_qualname,
)
- self._check_registered(full_qualname)
- return self._registered_class.pop(full_qualname)
+ else:
+ return self._registered_class.pop(full_qualname)
def clear(self) -> None:
self._registered_class.clear()
@@ -71,12 +76,13 @@ def to_fake_obj(fake_mode, x: torch.ScriptObject) -> FakeScriptObject:
return wrapped
- fake_x_wrapped = FakeScriptObject(fake_x)
+ fake_x_wrapped = FakeScriptObject(fake_x, x._type().qualified_name()) # type: ignore[attr-defined]
for name in x._method_names(): # type: ignore[attr-defined]
attr = getattr(fake_x, name, None)
if attr:
if not callable(attr):
raise RuntimeError(f"Expect {name} to be a callable but got {attr}.")
+
setattr(
fake_x_wrapped,
name,
diff --git a/torch/_ops.py b/torch/_ops.py
index 6e2119f16a..f5d7313591 100644
--- a/torch/_ops.py
+++ b/torch/_ops.py
@@ -807,6 +807,7 @@ class TorchBindOpOverload(OpOverload):
DispatchKey.AutogradCPU,
DispatchKey.AutogradCUDA,
DispatchKey.ADInplaceOrView,
+ DispatchKey.BackendSelect,
DispatchKey.PythonTLSSnapshot,
DispatchKey.PythonDispatcher,
]
@@ -889,8 +890,13 @@ class TorchBindOpOverload(OpOverload):
)
raise RuntimeError(
- f"Cannot handle FakeScriptObject with python dispatcher with dispatch key {handler}."
- f"Please implement it by annotating a python callable with py_impl({handler})."
+ f"Torchbind op {self} received a FakeScriptObject input when dispatching {handler}."
+ f" but no python implementation is found."
+ f" Please file an issue on this when you encounter this error."
+ f" This error can happen when you export or compile the model."
+ f" It can still happpen even if a C++ implementation for {dispatch_key}. "
+ f" has been registered. That's because FakeScriptObject purely lives in python and cannot work "
+ f" with a C++ implementation."
)
assert isinstance(handler, Callable) # type: ignore[arg-type]
diff --git a/torch/export/_trace.py b/torch/export/_trace.py
index fe70fc2f99..d4584a9a63 100644
--- a/torch/export/_trace.py
+++ b/torch/export/_trace.py
@@ -15,6 +15,8 @@ import torch.fx
import torch.utils._pytree as pytree
from torch._dynamo.exc import UserError, UserErrorType
from torch._export.non_strict_utils import (
+ _fakify_script_objects,
+ _gather_constant_attrs,
make_constraints,
make_fake_inputs,
make_fake_params_buffers,
@@ -34,6 +36,8 @@ from torch._export.verifier import SpecViolationError
from torch._export.wrappers import _wrap_submodules
from torch._functorch.aot_autograd import aot_export_module
from torch._guards import detect_fake_mode
+
+from torch._library.fake_class_registry import FakeScriptObject
from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
from torch._utils_internal import log_export_usage
from torch.export.dynamic_shapes import _combine_args
@@ -70,7 +74,6 @@ from .graph_signature import (
TokenArgument,
)
-
log = logging.getLogger(__name__)
@@ -454,37 +457,6 @@ def _export_to_torch_ir(
return gm_torch_level
-def _gather_constant_attrs(m: torch.nn.Module) -> ConstantAttrMap:
- """Search the module hierarchy, gathering up all tensor and ScriptObject constants.
-
- Returns a dictionary mapping hash(value) to the name of the constant. We
- have to abuse `hash` here unfortunately, see: [ScriptObject hash].
- """
- constants = ConstantAttrMap()
- buffers_parameters = set(m.buffers())
- buffers_parameters.update(m.parameters())
-
- def inner(m: torch.nn.Module, prefix_atoms: List[str], constants):
- for k, v in m.__dict__.items():
- if isinstance(v, (torch.Tensor, torch.ScriptObject)):
- if v in buffers_parameters:
- # filter out buffers and parameters, leaving only constants
- continue
-
- fqn = ".".join(prefix_atoms + [k])
- if v in constants:
- raise ValueError(
- f"Duplicate reference to constant attribute found: '{constants[v]}' and '{fqn}'."
- )
-
- constants[v] = fqn
- for k, v in m.named_children():
- inner(v, prefix_atoms + [k], constants)
-
- inner(m, [], constants)
- return constants
-
-
def _export_non_strict(
mod: torch.nn.Module,
fake_args,
@@ -587,9 +559,9 @@ def _export_non_strict(
elif isinstance(val, torch.SymInt):
return SymIntArgument(name=node.name)
elif isinstance(val, torch.ScriptObject):
- return CustomObjArgument(
- name=node.name, class_fqn=val._type().qualified_name() # type: ignore[attr-defined]
- )
+ return CustomObjArgument(name=node.name, class_fqn=val._type().qualified_name()) # type: ignore[attr-defined]
+ elif isinstance(val, FakeScriptObject):
+ return CustomObjArgument(name=node.name, class_fqn=val.script_class_name)
elif isinstance(val, (int, bool, str, float, type(None))):
return ConstantArgument(name=node.name, value=val)
else:
@@ -644,7 +616,14 @@ def _export_non_strict(
class _ExportedProgramNonStrict:
gm: torch.fx.GraphModule
sig: ExportGraphSignature
- constants: Dict[str, Union[torch.Tensor, torch._C.ScriptObject]]
+ constants: Dict[
+ str,
+ Union[
+ torch.Tensor,
+ FakeScriptObject,
+ torch.ScriptObject,
+ ],
+ ]
return _ExportedProgramNonStrict(
gm,
@@ -942,8 +921,6 @@ def _export(
if isinstance(dynamic_shapes, torch.export.ShapesCollection):
dynamic_shapes = dynamic_shapes.dynamic_shapes(mod, args, kwargs)
- constant_attrs = _gather_constant_attrs(mod)
-
flat_args, orig_in_spec = pytree.tree_flatten((args, kwargs))
original_state_dict = mod.state_dict(keep_vars=True)
forward_arg_names = _get_forward_arg_names(mod, args, kwargs)
@@ -1030,16 +1007,32 @@ def _export(
fake_params_buffers = make_fake_params_buffers(
fake_mode, _get_params_buffers(mod)
)
+
with fake_mode:
- ep_non_strict = _export_non_strict(
- mod,
- fake_args,
- fake_kwargs,
- fake_params_buffers,
- constant_attrs,
- pre_dispatch=pre_dispatch,
- transform=_tuplify_outputs,
- )
+ with _fakify_script_objects(mod, fake_args, fake_kwargs, fake_mode) as (
+ patched_mod,
+ new_fake_args,
+ new_fake_kwargs,
+ new_fake_constant_attrs,
+ map_fake_to_real,
+ ):
+ ep_non_strict = _export_non_strict(
+ patched_mod,
+ new_fake_args,
+ new_fake_kwargs,
+ fake_params_buffers,
+ new_fake_constant_attrs,
+ pre_dispatch=pre_dispatch,
+ transform=_tuplify_outputs,
+ )
+ # ep_non_strict.constants contains only fake script objects, we need to map them back
+ ep_non_strict.constants = {
+ fqn: map_fake_to_real[obj]
+ if isinstance(obj, FakeScriptObject)
+ else obj
+ for fqn, obj in ep_non_strict.constants.items()
+ }
+
ep_non_strict.gm.meta["inline_constraints"] = {
k: v
for k, v in fake_mode.shape_env.var_to_range.items()
@@ -1220,6 +1213,7 @@ def _export(
_normalize_nn_module_stack(gm_torch_level, type(mod))
# NOTE: graph module expects only positional args
+ constant_attrs = _gather_constant_attrs(mod)
ep_non_strict = _export_non_strict(
gm_torch_level,
_convert_to_positional_args(orig_arg_names, fake_args, fake_kwargs),
diff --git a/torch/export/unflatten.py b/torch/export/unflatten.py
index 8b8d3132cd..891ed24047 100644
--- a/torch/export/unflatten.py
+++ b/torch/export/unflatten.py
@@ -10,6 +10,7 @@ from typing import Any, cast, Dict, List, Optional, Tuple, Union
import torch
import torch.fx._pytree as fx_pytree
import torch.utils._pytree as pytree
+from torch._library.fake_class_registry import FakeScriptObject
from torch.export._tree_utils import reorder_kwargs
from torch.export.exported_program import (
ConstantArgument,
@@ -56,7 +57,16 @@ def _assign_attr(
assert isinstance(from_obj, torch.Tensor)
to_module.register_buffer(field, from_obj, persistent=persistent)
elif attr_kind == _AttrKind.CONSTANT:
- assert isinstance(from_obj, (torch.Tensor, torch.ScriptObject))
+ assert not isinstance(
+ from_obj, FakeScriptObject
+ ), "FakeScriptObject should only exist during tracing."
+ assert isinstance(
+ from_obj,
+ (
+ torch.Tensor,
+ torch.ScriptObject,
+ ),
+ )
setattr(to_module, field, from_obj)
diff --git a/torch/fx/_symbolic_trace.py b/torch/fx/_symbolic_trace.py
index b3524dbde4..24b1428b83 100644
--- a/torch/fx/_symbolic_trace.py
+++ b/torch/fx/_symbolic_trace.py
@@ -24,6 +24,7 @@ from typing import (
import torch
import torch.utils._pytree as pytree
from torch._C import ScriptObject # type: ignore[attr-defined]
+from torch._library.fake_class_registry import FakeScriptObject
from ._compatibility import compatibility
from .graph import _PyTreeCodeGen, _PyTreeInfo, Graph
@@ -366,7 +367,7 @@ class Tracer(TracerBase):
# a get_attr to retrieve that tensor. Otherwise, we'll store away the
# tensor value into a special attribute on the Module s.t. we can
# retrieve it with a get_attr.
- if isinstance(a, (torch.Tensor, ScriptObject)):
+ if isinstance(a, (torch.Tensor, ScriptObject, FakeScriptObject)):
qualname: Optional[str] = self.tensor_attrs.get(a)
# Tensor was not found in the Module hierarchy, stow it away in a
@@ -729,11 +730,17 @@ class Tracer(TracerBase):
# is some other attribute on the model. Construct a dict mapping Tensor
# values to the qualified name here for efficiency. This is used downstream
# in create_arg
- self.tensor_attrs: Dict[Union[torch.Tensor, ScriptObject], str] = {}
+ self.tensor_attrs: Dict[
+ Union[
+ torch.Tensor,
+ ScriptObject,
+ FakeScriptObject
+ ], str
+ ] = {}
def collect_tensor_attrs(m: torch.nn.Module, prefix_atoms: List[str]):
for k, v in m.__dict__.items():
- if isinstance(v, (torch.Tensor, ScriptObject)):
+ if isinstance(v, (torch.Tensor, ScriptObject, FakeScriptObject)):
self.tensor_attrs[v] = ".".join(prefix_atoms + [k])
for k, v in m.named_children():
collect_tensor_attrs(v, prefix_atoms + [k])
diff --git a/torch/testing/_internal/torchbind_impls.py b/torch/testing/_internal/torchbind_impls.py
index f66388d2ed..7babba0530 100644
--- a/torch/testing/_internal/torchbind_impls.py
+++ b/torch/testing/_internal/torchbind_impls.py
@@ -1,32 +1,120 @@
+import contextlib
+
import torch
-def register_if_not(qualname):
- entry = torch._library.simple_registry.singleton.find(qualname)
- if entry.abstract_impl.kernel is None:
- return torch.library.impl_abstract(qualname)
- else:
+_TORCHBIND_IMPLS_INITIALIZED = False
+
- def dummy_wrapper(fn):
- return fn
+def init_torchbind_implementations():
+ global _TORCHBIND_IMPLS_INITIALIZED
+ if _TORCHBIND_IMPLS_INITIALIZED:
+ return
- return dummy_wrapper
+ load_torchbind_test_lib()
+ register_fake_operators()
+ register_fake_classes()
+ _TORCHBIND_IMPLS_INITIALIZED = True
# put these under a function because the corresponding library might not be loaded yet.
def register_fake_operators():
- @register_if_not("_TorchScriptTesting::takes_foo_python_meta")
+ @torch.library.register_fake("_TorchScriptTesting::takes_foo_python_meta")
def fake_takes_foo(foo, z):
return foo.add_tensor(z)
- @register_if_not("_TorchScriptTesting::queue_pop")
+ @torch.library.register_fake("_TorchScriptTesting::queue_pop")
def fake_queue_pop(tq):
return tq.pop()
- @register_if_not("_TorchScriptTesting::queue_push")
+ @torch.library.register_fake("_TorchScriptTesting::queue_push")
def fake_queue_push(tq, x):
return tq.push(x)
- @register_if_not("_TorchScriptTesting::queue_size")
+ @torch.library.register_fake("_TorchScriptTesting::queue_size")
def fake_queue_size(tq):
return tq.size()
+
+ def meta_takes_foo_list_return(foo, x):
+ a = foo.add_tensor(x)
+ b = foo.add_tensor(a)
+ c = foo.add_tensor(b)
+ return [a, b, c]
+
+ def meta_takes_foo_tuple_return(foo, x):
+ a = foo.add_tensor(x)
+ b = foo.add_tensor(a)
+ return (a, b)
+
+ torch.ops._TorchScriptTesting.takes_foo_list_return.default.py_impl(
+ torch._C.DispatchKey.Meta
+ )(meta_takes_foo_list_return)
+
+ torch.ops._TorchScriptTesting.takes_foo_tuple_return.default.py_impl(
+ torch._C.DispatchKey.Meta
+ )(meta_takes_foo_tuple_return)
+
+ torch.ops._TorchScriptTesting.takes_foo.default.py_impl(torch._C.DispatchKey.Meta)(
+ lambda cc, x: cc.add_tensor(x)
+ )
+
+
+def register_fake_classes():
+ @torch._library.register_fake_class("_TorchScriptTesting::_Foo")
+ class FakeFoo:
+ def __init__(self, x: int, y: int):
+ self.x = x
+ self.y = y
+
+ @classmethod
+ def from_real(cls, foo):
+ (x, y), _ = foo.__getstate__()
+ return cls(x, y)
+
+ def add_tensor(self, z):
+ return (self.x + self.y) * z
+
+ @torch._library.register_fake_class("_TorchScriptTesting::_ContainsTensor")
+ class FakeContainsTensor:
+ def __init__(self, x: torch.Tensor):
+ self.x = x
+
+ @classmethod
+ def from_real(cls, foo):
+ ctx = torch.library.get_ctx()
+ return cls(ctx.to_fake_tensor(foo.get()))
+
+ def get(self):
+ return self.x
+
+
+def load_torchbind_test_lib():
+ import unittest
+
+ from torch.testing._internal.common_utils import ( # type: ignore[attr-defined]
+ find_library_location,
+ IS_FBCODE,
+ IS_MACOS,
+ IS_SANDCASTLE,
+ IS_WINDOWS,
+ )
+
+ if IS_SANDCASTLE or IS_FBCODE:
+ torch.ops.load_library("//caffe2/test/cpp/jit:test_custom_class_registrations")
+ elif IS_MACOS:
+ raise unittest.SkipTest("non-portable load_library call used in test")
+ else:
+ lib_file_path = find_library_location("libtorchbind_test.so")
+ if IS_WINDOWS:
+ lib_file_path = find_library_location("torchbind_test.dll")
+ torch.ops.load_library(str(lib_file_path))
+
+
+@contextlib.contextmanager
+def _register_py_impl_temporarily(op_overload, key, fn):
+ try:
+ op_overload.py_impl(key)(fn)
+ yield
+ finally:
+ del op_overload.py_kernels[key]
+ op_overload._dispatch_cache.clear()
|
2.41.0
|
4727fd4ebd42936a1bae7b7f44ee9a038fd643e
|
Sat, 4 May 2024 03:08:44 +0000
|
[PATCH 0995/1000] [TD][ez] Better check for is pr or not (#125485)
|
You can trigger ciflow tags on main-branch commits, so we should be more conservative when checking whether a workflow is running on a PR or on the main branch. get_pr_number checks for the PR number based on the PR_NUMBER env var or a tag of the form `ciflow/workflow/pr number`. If we fail to find something like this, then assume the run is on the main branch. Pull Request resolved: https://github.com/pytorch/pytorch/pull/125485 Approved by: https://github.com/huydhn
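
A rough sketch of the check described above (the environment-variable names other than PR_NUMBER are assumptions; the real helper lives in tools/testing/target_determination/heuristics/utils.py):

    import os
    import re
    from typing import Optional

    def get_pr_number() -> Optional[int]:
        pr = os.environ.get("PR_NUMBER")
        if pr:
            return int(pr)
        # ciflow tags look like "ciflow/<workflow>/<pr number>"
        ref = os.environ.get("GITHUB_REF", "")
        m = re.search(r"ciflow/[^/]+/(\d+)", ref)
        if m:
            return int(m.group(1))
        return None  # no PR number found: assume this is a main-branch run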
|
diff --git a/test/run_test.py b/test/run_test.py
index 1b95bcb465..5120ac6513 100755
--- a/test/run_test.py
+++ b/test/run_test.py
@@ -59,6 +59,7 @@ from tools.testing.discover_tests import (
)
from tools.testing.do_target_determination_for_s3 import import_results
from tools.testing.target_determination.gen_artifact import gen_ci_artifact
+from tools.testing.target_determination.heuristics.utils import get_pr_number
from tools.testing.test_run import TestRun
from tools.testing.test_selections import (
@@ -1187,7 +1188,7 @@ def parse_args():
)
or (IS_WINDOWS and not TEST_CUDA)
)
- and os.getenv("BRANCH", "") != "main"
+ and get_pr_number() is not None
and not strtobool(os.environ.get("NO_TD", "False"))
and "slow" not in os.getenv("TEST_CONFIG", "")
and "slow" not in os.getenv("BUILD_ENVIRONMENT", ""),
|
2.41.0
|
f061baa94f437e54c5f4a12f48bcb1dfa662918
|
Fri, 3 May 2024 16:03:08 -0700
|
[PATCH 0996/1000] [comm_mode] adding some initial c10d ops to CommDebugMode (#125475)
|
looks like we can make it work :) Pull Request resolved: https://github.com/pytorch/pytorch/pull/125475 Approved by: https://github.com/awgu
|
diff --git a/test/distributed/_tensor/debug/test_comm_mode.py b/test/distributed/_tensor/debug/test_comm_mode.py
index 893131fe4e..d674905f2b 100644
--- a/test/distributed/_tensor/debug/test_comm_mode.py
+++ b/test/distributed/_tensor/debug/test_comm_mode.py
@@ -9,11 +9,13 @@ from torch.distributed._tensor import DeviceMesh, DTensor
from torch.distributed._tensor.debug.comm_mode import CommDebugMode
from torch.distributed._tensor.placement_types import Shard
+from torch.testing._internal.common_distributed import requires_nccl
from torch.testing._internal.common_utils import run_tests, TestCase
from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule
from torch.testing._internal.distributed.fake_pg import FakeStore
c10d_functional = torch.ops.c10d_functional
+c10d_ops = torch.ops.c10d
class TestCommMode(TestCase):
@@ -79,6 +81,26 @@ class TestCommMode(TestCase):
self.assertEqual(comm_counts[c10d_functional.all_gather_into_tensor], 1)
self.assertEqual(comm_counts[c10d_functional.reduce_scatter_tensor], 0)
+ @requires_nccl()
+ def test_comm_mode_with_c10d(self):
+ world_pg = self.world_pg
+
+ inp = torch.rand(2, 8, 16).cuda()
+ all_gather_out = inp.new_empty(self.world_size * 2, 8, 16)
+
+ comm_mode = CommDebugMode()
+ with comm_mode:
+ dist.all_reduce(inp)
+ dist.all_gather_into_tensor(all_gather_out, inp)
+ dist.reduce_scatter_tensor(inp, all_gather_out)
+ dist.broadcast(inp, 0)
+
+ comm_counts = comm_mode.get_comm_counts()
+ self.assertEqual(comm_counts[c10d_ops.allreduce_], 1)
+ self.assertEqual(comm_counts[c10d_ops._allgather_base_], 1)
+ self.assertEqual(comm_counts[c10d_ops._reduce_scatter_base_], 1)
+ self.assertEqual(comm_counts[c10d_ops.broadcast_], 1)
+
if __name__ == "__main__":
run_tests()
diff --git a/test/distributed/_tensor/test_utils.py b/test/distributed/_tensor/test_utils.py
index 7ba49ae520..3d6608a491 100644
--- a/test/distributed/_tensor/test_utils.py
+++ b/test/distributed/_tensor/test_utils.py
@@ -144,9 +144,8 @@ class Test2DStridedLocalShard(DTensorTestBase):
global_tensor, tp_mesh, placements=[Shard(0)]
)
dtensor_2d = DTensor.from_local(
- dtensor_tp.to_local(), mesh_2d, [Replicate(), Shard(0)]
+ dtensor_tp.to_local(), mesh_2d, [Replicate(), Shard(0)], run_check=False
).redistribute(mesh_2d, [Shard(0), Shard(0)])
- self.assertEqual(len(comm_mode.get_comm_counts()), 1)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 1
)
@@ -196,7 +195,6 @@ class Test2DStridedLocalShard(DTensorTestBase):
stride=global_tensor.stride(),
)
- self.assertEqual(len(comm_mode.get_comm_counts()), 0)
self.assertEqual(
comm_mode.get_comm_counts()[c10d_functional.all_gather_into_tensor], 0
)
diff --git a/test/distributed/fsdp/test_fsdp_tp_integration.py b/test/distributed/fsdp/test_fsdp_tp_integration.py
index 391f3b0b52..496dff21d6 100644
--- a/test/distributed/fsdp/test_fsdp_tp_integration.py
+++ b/test/distributed/fsdp/test_fsdp_tp_integration.py
@@ -201,7 +201,7 @@ class TestTPFSDPIntegration(FSDPTest):
all_grads_as_flattened = torch.cat(
[torch.empty_like(local_grads_as_flattened) for _ in range(fsdp_pg.size())]
).contiguous()
- dist._all_gather_base(
+ dist.all_gather_into_tensor(
all_grads_as_flattened, local_grads_as_flattened, group=fsdp_pg
)
if not uses_tp:
@@ -387,11 +387,16 @@ class TestTPFSDPIntegration(FSDPTest):
fsdp_2d_model(torch.rand(2, 10).cuda(self.rank)).sum().backward()
funcol = torch.ops.c10d_functional
+ c10d_ops = torch.ops.c10d
comm_counts = comm_mode.get_comm_counts()
- self.assertEqual(comm_mode.get_total_counts(), 5)
+ self.assertEqual(comm_mode.get_total_counts(), 7)
+ # TP comms
self.assertEqual(comm_counts[funcol.reduce_scatter_tensor], 2)
self.assertEqual(comm_counts[funcol.all_gather_into_tensor], 2)
self.assertEqual(comm_counts[funcol.all_reduce], 1)
+ # FSDP comms
+ self.assertEqual(comm_counts[c10d_ops._allgather_base_], 1)
+ self.assertEqual(comm_counts[c10d_ops._reduce_scatter_base_], 1)
grads = [p.grad for p in fsdp_2d_model.parameters() if p.grad is not None]
diff --git a/test/distributed/tensor/parallel/test_tp_style.py b/test/distributed/tensor/parallel/test_tp_style.py
index 520f04ec59..01c301e33a 100644
--- a/test/distributed/tensor/parallel/test_tp_style.py
+++ b/test/distributed/tensor/parallel/test_tp_style.py
@@ -49,10 +49,8 @@ class TensorParallelStyleTest(DTensorTestBase):
model = nn.Linear(16, 16, device=self.device_type)
default_col_parallel = ColwiseParallel()
+ colwise_mod = parallelize_module(deepcopy(model), mesh, default_col_parallel)
with comm_mode:
- colwise_mod = parallelize_module(
- deepcopy(model), mesh, default_col_parallel
- )
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (8, 16 // self.world_size))
@@ -65,10 +63,8 @@ class TensorParallelStyleTest(DTensorTestBase):
self.assertEqual(comm_mode.get_total_counts(), 1)
sharded_col_parallel = ColwiseParallel(input_layouts=Shard(0))
+ colwise_mod = parallelize_module(deepcopy(model), mesh, sharded_col_parallel)
with comm_mode:
- colwise_mod = parallelize_module(
- deepcopy(model), mesh, sharded_col_parallel
- )
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (8 * self.world_size, 16 // self.world_size))
@@ -94,10 +90,8 @@ class TensorParallelStyleTest(DTensorTestBase):
model = nn.Embedding(16, 16, device=self.device_type)
default_col_parallel = ColwiseParallel()
+ colwise_mod = parallelize_module(deepcopy(model), mesh, default_col_parallel)
with comm_mode:
- colwise_mod = parallelize_module(
- deepcopy(model), mesh, default_col_parallel
- )
out = colwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (4, 2, 16 // self.world_size))
@@ -119,10 +113,8 @@ class TensorParallelStyleTest(DTensorTestBase):
model = nn.Linear(16, 16, device=self.device_type)
default_row_parallel = RowwiseParallel()
+ rowwise_mod = parallelize_module(deepcopy(model), mesh, default_row_parallel)
with comm_mode:
- rowwise_mod = parallelize_module(
- deepcopy(model), mesh, default_row_parallel
- )
out = rowwise_mod(tensor)
# ensure output replicated
self.assertEqual(out.shape, (8, 16))
@@ -135,10 +127,8 @@ class TensorParallelStyleTest(DTensorTestBase):
self.assertEqual(comm_mode.get_total_counts(), 1)
sharded_row_parallel = RowwiseParallel(output_layouts=Shard(0))
+ rowwise_mod = parallelize_module(deepcopy(model), mesh, sharded_row_parallel)
with comm_mode:
- rowwise_mod = parallelize_module(
- deepcopy(model), mesh, sharded_row_parallel
- )
out = rowwise_mod(tensor)
# ensure output replicated
self.assertEqual(out.shape, (8 // self.world_size, 16))
@@ -163,10 +153,10 @@ class TensorParallelStyleTest(DTensorTestBase):
tensor = torch.arange(8, device=self.device_type).reshape(4, 2)
model = nn.Embedding(16, 16, device=self.device_type)
+ rowwise_mod = parallelize_module(
+ deepcopy(model), mesh, RowwiseParallel(input_layouts=Replicate())
+ )
with comm_mode:
- rowwise_mod = parallelize_module(
- deepcopy(model), mesh, RowwiseParallel(input_layouts=Replicate())
- )
out = rowwise_mod(tensor)
# ensure output shard on the last dim
self.assertEqual(out.shape, (4, 2, 16))
diff --git a/torch/distributed/_tensor/debug/comm_mode.py b/torch/distributed/_tensor/debug/comm_mode.py
index 604380a37f..254a02d8f0 100644
--- a/torch/distributed/_tensor/debug/comm_mode.py
+++ b/torch/distributed/_tensor/debug/comm_mode.py
@@ -9,6 +9,7 @@ from torch.utils._python_dispatch import TorchDispatchMode
funcol_native = torch.ops._c10d_functional
funcol_py = torch.ops.c10d_functional
funcol_autograd = torch.ops._c10d_functional_autograd
+c10d_ops = torch.ops.c10d
NATIVE_TO_PY_MAPPING = {
funcol_native.all_gather_into_tensor: funcol_py.all_gather_into_tensor,
@@ -22,6 +23,13 @@ NATIVE_TO_PY_MAPPING = {
funcol_autograd.all_to_all_single: funcol_py.all_to_all_single,
}
+c10d_collective_ops = {
+ c10d_ops.allreduce_,
+ c10d_ops._allgather_base_,
+ c10d_ops._reduce_scatter_base_,
+ c10d_ops.broadcast_,
+}
+
class CommDebugMode(TorchDispatchMode):
"""
@@ -88,7 +96,8 @@ class CommDebugMode(TorchDispatchMode):
# the need to modify all tests to accommodate the two implementations,
# we make CommDebugMode translate native funcol ops into legacy funcol
# ops until the migration finishes.
- if func_packet in self.comm_registry:
+
+ if func_packet in self.comm_registry or func_packet in c10d_collective_ops:
if func_packet in NATIVE_TO_PY_MAPPING:
func_packet = NATIVE_TO_PY_MAPPING[func_packet]
self.comm_counts[func_packet] += 1
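
The hunk above extends CommDebugMode so that, besides the functional collectives, it also counts the in-place c10d ops that FSDP issues (allreduce_, _allgather_base_, _reduce_scatter_base_, broadcast_). Below is a minimal sketch of how the extended mode might be exercised outside of DTensor; it assumes a default process group has already been initialized (e.g. via torchrun) and that dist.all_gather_into_tensor dispatches to torch.ops.c10d._allgather_base_, as the test changes above indicate.

import torch
import torch.distributed as dist
from torch.distributed._tensor.debug.comm_mode import CommDebugMode

c10d_ops = torch.ops.c10d

def count_fsdp_style_allgather(local: torch.Tensor) -> int:
    """Run a plain c10d all-gather under CommDebugMode and return its count."""
    world_size = dist.get_world_size()
    # all_gather_into_tensor expects an output that holds world_size copies of the input.
    output = torch.cat([torch.empty_like(local) for _ in range(world_size)])
    comm_mode = CommDebugMode()
    with comm_mode:
        dist.all_gather_into_tensor(output, local)
    # With this patch, the non-functional collective shows up in the counts.
    return comm_mode.get_comm_counts()[c10d_ops._allgather_base_]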
|
2.41.0
|
62e89c1b8b27e848f30991773d689db3af24532
|
Fri, 3 May 2024 11:47:00 -0700
|
[PATCH 0997/1000] [dynamo] Do not turn on record replay with TORCH_COMPILE_DEBUG (#125488)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125488 Approved by: https://github.com/yanboliang, https://github.com/mlazos
|
diff --git a/torch/_dynamo/config.py b/torch/_dynamo/config.py
index 1360782db1..6a870a6d12 100644
--- a/torch/_dynamo/config.py
+++ b/torch/_dynamo/config.py
@@ -130,7 +130,7 @@ suppress_errors = bool(os.environ.get("TORCHDYNAMO_SUPPRESS_ERRORS", False))
# Record and write an execution record of the current frame to a file
# if an exception is encountered
# @compile_ignored[debug]
-replay_record_enabled = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
+replay_record_enabled = os.environ.get("TORCH_COMPILE_REPLAY_RECORD", "0") == "1"
# Rewrite assert statement in python with torch._assert
rewrite_assert_with_torch_assert = True
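
With this change, TORCH_COMPILE_DEBUG no longer implies execution record/replay. A hedged sketch of how one might opt in explicitly is shown below, either via the new environment variable or by flipping the config flag in code; the note about setting the env var before import is an assumption based on the module-level read visible in the hunk above.

import os

# Option 1: opt in via the dedicated env var. It is read once, when
# torch._dynamo.config is imported, so set it before that import.
os.environ["TORCH_COMPILE_REPLAY_RECORD"] = "1"

import torch
import torch._dynamo

# Option 2: flip the config flag directly at runtime.
torch._dynamo.config.replay_record_enabled = True

@torch.compile
def fn(x):
    return x.sin() + 1

fn(torch.randn(4))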
|
2.41.0
|
b41e1d6fc05428008875e3cfe8be17184e57491
|
Fri, 3 May 2024 16:22:16 -0700
|
[PATCH 0998/1000] try to fix the warning in distribute_tensor (#125476)
|
Pull Request resolved: https://github.com/pytorch/pytorch/pull/125476 Approved by: https://github.com/albanD, https://github.com/awgu ghstack dependencies: #125475
|
diff --git a/test/distributed/_tensor/test_api.py b/test/distributed/_tensor/test_api.py
index 196bd6407b..4318054117 100644
--- a/test/distributed/_tensor/test_api.py
+++ b/test/distributed/_tensor/test_api.py
@@ -70,6 +70,7 @@ class DTensorAPITest(DTensorTestBase):
)
tensor_shape = [3 * self.world_size, 3 * self.world_size]
tensor_to_distribute = torch.randn(*tensor_shape)
+ tensor_requires_grad_distribute = torch.randn(*tensor_shape, requires_grad=True)
with self.assertRaisesRegex(ValueError, "must have the same length"):
shard_spec = [Shard(0)]
@@ -81,6 +82,13 @@ class DTensorAPITest(DTensorTestBase):
global_tensor_to_distribute = global_tensor + 2
distribute_tensor(global_tensor_to_distribute, device_mesh, shard_spec)
+ def replicate_tensor_fn():
+ return distribute_tensor(
+ tensor_requires_grad_distribute, device_mesh, [Replicate(), Replicate()]
+ )
+
+ self.assertNotWarn(replicate_tensor_fn)
+
spec = [Shard(0), Shard(1)]
dtensor = distribute_tensor(tensor_to_distribute, device_mesh, spec)
diff --git a/torch/distributed/_tensor/api.py b/torch/distributed/_tensor/api.py
index 16cd41adc8..42c01b69cc 100644
--- a/torch/distributed/_tensor/api.py
+++ b/torch/distributed/_tensor/api.py
@@ -573,7 +573,7 @@ def distribute_tensor(
# OffsetBasedRNGTracker to perform random operators.
# TODO: the value assignment to global variable is not the ideal solution
# we can replace it in future.
- if is_rng_supported_mesh(device_mesh) and not random._rng_tracker:
+ if not random._rng_tracker and is_rng_supported_mesh(device_mesh):
random._rng_tracker = OffsetBasedRNGTracker(device_type)
if not tensor.is_leaf:
@@ -612,7 +612,7 @@ def distribute_tensor(
)
return tensor
- local_tensor = tensor
+ local_tensor = tensor.detach()
# distribute the tensor according to the placements.
placements = list(placements)
@@ -637,7 +637,7 @@ def distribute_tensor(
# detach the local tensor passed to DTensor since after the construction
# of DTensor, autograd would work on top of DTensor instead of local tensor
return DTensor(
- local_tensor.detach().requires_grad_(tensor.requires_grad),
+ local_tensor.requires_grad_(tensor.requires_grad),
device_mesh,
placements,
shape=tensor.size(),
diff --git a/torch/distributed/_tensor/dispatch.py b/torch/distributed/_tensor/dispatch.py
index f5c3537f8a..b4143391af 100644
--- a/torch/distributed/_tensor/dispatch.py
+++ b/torch/distributed/_tensor/dispatch.py
@@ -1,4 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates
+import contextlib
import functools
import operator
from typing import cast, Dict, List, Optional, Sequence, Tuple
@@ -179,15 +180,15 @@ class OpDispatcher:
# run local op computation with potentially modified args/kwargs
local_tensor_args = cast(Tuple[object, ...], local_tensor_args)
- if op_call in self._random_ops and is_rng_supported_mesh(mesh):
- if not random._rng_tracker:
+ if op_call in self._random_ops:
+ if not random._rng_tracker and is_rng_supported_mesh(mesh):
# Default to `OffsetBasedRNGTracker` if the parallelism API
# did not already construct one
random._rng_tracker = random.OffsetBasedRNGTracker(mesh.device_type)
# For DTensor random operator, run it within a distribute region
with random._rng_tracker._distribute_region(
cast(dtensor.DTensor, args[0])._spec
- ):
+ ) if random._rng_tracker else contextlib.nullcontext():
local_results = op_call(*local_tensor_args, **op_info.local_kwargs)
else:
local_results = op_call(*local_tensor_args, **op_info.local_kwargs)
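
A small sketch mirroring the new assertNotWarn test above: after this change, distributing a leaf tensor that requires grad builds the DTensor from a detached local tensor, so the warning should no longer fire while requires_grad is still preserved on the resulting DTensor. The snippet assumes the default process group is already initialized (e.g. via torchrun) and that each rank has a GPU; the 8x8 shape is arbitrary.

import torch
import torch.distributed as dist
from torch.distributed._tensor import Replicate, distribute_tensor
from torch.distributed.device_mesh import init_device_mesh

# Assumes dist.init_process_group() has already run on every rank.
mesh = init_device_mesh("cuda", (dist.get_world_size(),))

t = torch.randn(8, 8, requires_grad=True)
dtensor = distribute_tensor(t, mesh, [Replicate()])

# Autograd now operates on the DTensor; requires_grad carries over.
assert dtensor.requires_grad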
|
2.41.0
|