| /external/pytorch/aten/src/ATen/functorch/ |
| D | LegacyBatchingRegistrations.cpp |
      299 // x.as_strided(sizes, strides, maybe_storage_offset)
      321 "result = tensor.as_strided(", sizes, ", ", strides, ", ", storage_offset, ") ", in checkBasicAsStridedValidForSlice()
      325 "`as_strided` call as a sequence of PyTorch view operations"); in checkBasicAsStridedValidForSlice()
      330 "result = tensor.as_strided(", sizes, ", ", strides, ", ", storage_offset, ") ", in checkBasicAsStridedValidForSlice()
      335 "rewrite the `as_strided` call as a sequence of PyTorch view operations"); in checkBasicAsStridedValidForSlice()
      338 // What are the semantics of as_strided inside of vmap?
      339 // y = vmap(lambda x: x.as_strided(sizes, strides, offset))(xs)
      346 // offset equal to xs.offset() and called as_strided(sizes, strides, offset).
      347 // (that is equivalent to x[i].as_strided(
      350 // Note that this *may* be different from actually running as_strided
      [all …]
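The comments matched above pin down per-slice semantics: `y[i]` is carved out of `xs`'s storage at `offset + i * xs.stride(batch_dim)`. A minimal runnable sketch of that contract (the shapes, strides, and offset here are illustrative values, not ones from the source):

```python
import torch

xs = torch.arange(2 * 3 * 5, dtype=torch.float32).reshape(2, 3, 5)
sizes, strides, offset = (3, 4), (5, 1), 1

y = torch.vmap(lambda x: x.as_strided(sizes, strides, offset))(xs)

# Reference: each slice's view starts at offset + i * xs.stride(0).
expected = torch.stack([
    xs.as_strided(sizes, strides, offset + i * xs.stride(0))
    for i in range(xs.size(0))
])
assert torch.equal(y, expected)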
|
| /external/pytorch/aten/src/ATen/ |
| D | LegacyBatchingRegistrations.cpp |
      501 "vmap: Calling Tensor.as_strided is not supported unless the batch dims being ", in checkBatchDimsAtFrontInLayout()
      505 "express the as_strided operation in terms of PyTorch view operations"); in checkBatchDimsAtFrontInLayout()
      522 // x.as_strided(sizes, strides, maybe_storage_offset)
      544 "result = tensor.as_strided(", sizes, ",", strides, ",", storage_offset, ")", in checkBasicAsStridedValidForSlice()
      548 "`as_strided` call as a sequence of PyTorch view operations"); in checkBasicAsStridedValidForSlice()
      553 "result = tensor.as_strided(", sizes, ",", strides, ",", storage_offset, ")", in checkBasicAsStridedValidForSlice()
      558 "rewrite the `as_strided` call as a sequence of PyTorch view operations"); in checkBasicAsStridedValidForSlice()
      586 // tensor because using as_strided to access storage locations not indexable in _has_same_storage_numel_batching_rule()
      591 // What are the semantics of as_strided inside of vmap?
      592 // y = vmap(lambda x: x.as_strided(sizes, strides, offset))(xs)
      [all …]
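Both files' error messages ask the user to rewrite `as_strided` as composable view operations. A hypothetical rewrite showing what that looks like (the tensor and geometry are made up for illustration):

```python
import torch

x = torch.arange(20.0).reshape(4, 5)

# An as_strided call that vmap's checks may reject...
strided = x.as_strided((2, 3), (5, 1), 6)
# ...expressed as plain view operations instead (offset 6 = row 1, col 1):
viewed = x[1:3, 1:4]
assert torch.equal(strided, viewed)
```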
|
| D | FunctionalStorageImpl.cpp |
      51 // NB: We only actually need tmp_values for ops like select/slice/diagonal/squeeze/as_strided in apply_update()
      122 " was an as_strided() call. as_strided() is non-compositional, and therefore is not possible to fun… in add_update()
|
| /external/pytorch/benchmarks/operator_benchmark/pt/ |
| D | as_strided_test.py |
      8 """Microbenchmarks for as_strided operator"""
      11 # Configs for PT as_strided operator
      44 self.set_module_name("as_strided")
      49 return torch.as_strided(input_one, size, stride, storage_offset)
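From the matched lines, this file follows the standard operator_benchmark skeleton. A sketch of that skeleton (the config names and values below are illustrative, not the file's actual ones):

```python
import operator_benchmark as op_bench
import torch

as_strided_configs = op_bench.config_list(
    attr_names=["M", "N", "size", "stride", "storage_offset"],
    attrs=[[256, 256, (32, 32), (1, 1), 0]],
    cross_product_configs={"device": ["cpu"]},
    tags=["short"],
)

class AsStridedBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, M, N, size, stride, storage_offset, device):
        self.inputs = {
            "input_one": torch.rand(M, N, device=device),
            "size": size,
            "stride": stride,
            "storage_offset": storage_offset,
        }
        self.set_module_name("as_strided")

    def forward(self, input_one, size, stride, storage_offset):
        return torch.as_strided(input_one, size, stride, storage_offset)

op_bench.generate_pt_test(as_strided_configs, AsStridedBenchmark)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()
```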
|
| /external/pytorch/test/ |
| D | test_fx_reinplace_pass.py |
      208 good_mirror_of_b = a.as_strided((4,), (4,), 1)
      234 as_strided = torch.ops.aten.as_strided.default(clone, [4], [4], 1); clone = None
      235 return as_strided
      247 good_mirror_of_b = a.as_strided((4,), (4,), 1)
      270 as_strided = torch.ops.aten.as_strided.default(clone, [4], [4], 1); clone = None
      271 select_int = torch.ops.aten.select.int(as_strided, 0, 0)
      273 return as_strided
      282 bad_mirror_of_b = a.as_strided((4,), (4,), 0)
      303 as_strided = torch.ops.aten.as_strided.default(clone, [4], [4], 0); clone = None
      304 select_int = torch.ops.aten.select.int(as_strided, 0, 1)
      [all …]
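The "good mirror" / "bad mirror" names capture the aliasing question the reinplace pass must answer: does a hand-built as_strided view observe writes made through another view? A small illustration (the layout is chosen for clarity and is not the test's exact tensor):

```python
import torch

a = torch.zeros(2, 4)
b = a[1]                                   # size (4,), stride (1,), offset 4
good_mirror = a.as_strided((4,), (1,), 4)  # same memory as b
bad_mirror = a.as_strided((4,), (1,), 0)   # row 0: disjoint from b

b.add_(1)
assert torch.equal(good_mirror, b)     # a true mirror sees the in-place write
assert not torch.equal(bad_mirror, b)  # a bad mirror would make reinplacing unsound
```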
|
| D | test_jiterator.py |
      34 a = a_buffer.as_strided(*shape_strides[0])
      35 b = b_buffer.as_strided(*shape_strides[1])
      55 a = a_buffer.as_strided(*shape_strides[0])
      56 b = b_buffer.as_strided(*shape_strides[1])
|
| /external/pytorch/torch/csrc/autograd/functions/ |
| D | tensor.h |
      82 // from forward pass, so that we can recover the view when as_strided is not
      87 // When as_strided is supported (e.g. strided CPU/CUDA Tensors), view_fn_
      89 // With the TensorGeometry information we can use `as_strided` call which
      99 // In CPU/CUDA case where we support efficient as_strided implementation,
      102 // grad_view_n = grad_base.as_strided(view_sizes, view_strides, view_offset);
      104 // But in XLA backend where we don't have full support of as_strided,
      113 // efficient than the as_strided one so we should be careful to only use it when
      117 // That's all we need to pass into as_strided.
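The formula quoted at line 102 of the header is directly runnable: the saved TensorGeometry of a view is enough to re-create the matching slice of a gradient with one as_strided call. A minimal sketch with an illustrative base/view pair:

```python
import torch

base = torch.randn(4, 4)
view = base[1:3, 1:3]                # the forward-pass view

grad_base = torch.randn_like(base)   # stand-in for the gradient w.r.t. the base
# grad_view_n = grad_base.as_strided(view_sizes, view_strides, view_offset);
grad_view = grad_base.as_strided(view.size(), view.stride(), view.storage_offset())
assert torch.equal(grad_view, grad_base[1:3, 1:3])
```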
|
| /external/pytorch/test/profiler/ |
| D | profiler_utils_mock_events.json | 1 …as_strided", "_start_us": 1656454173444282, "_duration_us": 4, "_linked_correlation_id": 0, "_devi…
|
| D | test_profiler_tree.py |
      612 aten::as_strided
      615 aten::as_strided
      659 aten::as_strided
      662 aten::as_strided
      969 aten::as_strided
      977 aten::as_strided
      1013 aten::as_strided
      1020 aten::as_strided
      1038 aten::as_strided
|
| /external/pytorch/test/inductor/ |
| D | test_torchinductor_strided_blocks.py |
      167 return torch.as_strided(full, view_size, view_stride, storage_offset=offset)
      212 view = torch.as_strided(full, view_size, full.stride())
      264 view = torch.as_strided(full, view_size, full.stride())
      302 view = torch.as_strided(full, view_size, full.stride())
      328 view = torch.as_strided(full, view_size, full.stride())
      351 view = torch.as_strided(full, view_size, full.stride())
      366 view = torch.as_strided(full, view_size, full.stride())
      424 return torch.as_strided(full, view_size, full.stride())
|
| /external/pytorch/torch/_functorch/_aot_autograd/ |
| D | functional_utils.py |
      264 # fall back to .as_strided() if we can't.
      269 # Don't unnecessarily call as_strided if nothing changed; as_strided's
      276 reshaped_base_tensor = aliased_base_tensor.as_strided(
      287 # As a stopgap, we'll fall back to as_strided.
      295 aliased_out = torch.view_as_real(aliased_base_tensor).as_strided(
      299 aliased_out = torch.view_as_complex(aliased_base_tensor).as_strided(
      303 aliased_out = aliased_base_tensor.as_strided(size, stride, storage_offset)
      307 # as_strided() is the "most generic" view, but it does not cover cross-dtype views
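The fallback these comments describe rebuilds an alias from its base using only the target's geometry; as_strided is "most generic" because any same-dtype view can be regenerated this way (cross-dtype views need the view_as_real/view_as_complex branches, since as_strided alone cannot change dtype). A sketch of the idea, with an arbitrary alias standing in for one produced by an unknown chain of views:

```python
import torch

base = torch.randn(3, 4)
target = base.t()[1:]    # alias produced by some chain of views

# Regenerate the alias with one as_strided call from the target's metadata.
regenerated = base.as_strided(target.size(), target.stride(), target.storage_offset())
assert torch.equal(regenerated, target)
assert regenerated.data_ptr() == target.data_ptr()
```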
|
| /external/pytorch/aten/src/ATen/native/ |
| D | UnfoldBackward.h |
      55 auto grad_out_restrided = grad_out.as_strided( in _make_unfold_backward_iter_over_grad_out()
      73 auto grad_in_restrided = grad_in.squeeze(-1).as_strided( in _make_unfold_backward_iter_over_grad_out()
      95 auto idx_dim_restrided = idx_dim.as_strided(idx_dim_sizes, idx_dim_strides); in _make_unfold_backward_iter_over_grad_out()
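unfold's forward produces overlapping windows, which is exactly what as_strided expresses; the backward above re-strides its operands so one TensorIterator pass can walk them in lockstep. The forward-side equivalence, as a sketch (sizes are illustrative):

```python
import torch

x = torch.arange(8.0)
size, step = 4, 2
n_windows = (x.numel() - size) // step + 1

# Each row starts `step` storage slots after the previous one.
windows = x.as_strided((n_windows, size), (step, 1))
assert torch.equal(windows, x.unfold(0, size, step))
```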
|
| D | FunctionOfAMatrixUtils.cpp |
      67 auto output_restrided = output.as_strided( in _compute_linear_combination_out()
      78 auto input_restrided = input.as_strided( in _compute_linear_combination_out()
      90 auto coefficients_restrided = coefficients.as_strided( in _compute_linear_combination_out()
|
| D | Fill.cpp |
      124 auto main_diag = self.as_strided(sizes, strides, storage_offset); in fill_diagonal_()
      136 auto wrap_diag = self.as_strided(wrap_sizes, strides, storage_offset + offset); in fill_diagonal_()
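fill_diagonal_ works by re-striding the matrix so the diagonal becomes a 1-D view: in an N x N row-major matrix, consecutive diagonal elements sit N + 1 storage slots apart. A sketch of that trick (not the function's actual C++ code):

```python
import torch

n = 4
a = torch.zeros(n, n)

# The main diagonal as a 1-D view: one element every n + 1 storage slots.
main_diag = a.as_strided((n,), (n + 1,), a.storage_offset())
main_diag.fill_(7.0)   # writes through the view, like fill_diagonal_

assert torch.equal(a.diagonal(), torch.full((n,), 7.0))
```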
|
| /external/pytorch/torch/_inductor/ |
| D | freezing.py |
      190 by adding aten.as_strided nodes with the expected strides.
      220 Make sure the as_strided node's input's layout does not change due to compiler
      221 optimizations, because the as_strided strides info depends on input tensor stride info.
      225 torch.ops.aten.as_strided.default,
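The inserted as_strided node acts as a layout assertion: it re-states the sizes/strides the downstream code was compiled against, so the input's layout cannot silently change. A rough illustration of why the node is a no-op while the layout matches (this is an analogy, not the pass's actual code):

```python
import torch

x = torch.randn(4, 6).t()    # a non-contiguous "frozen" input

# Re-asserting the current geometry yields an equivalent view...
pinned = torch.as_strided(x, x.size(), x.stride(), x.storage_offset())
assert torch.equal(pinned, x) and pinned.stride() == x.stride()

# ...but the recorded strides would be wrong if a later optimization made
# the input contiguous, which is why the producer's layout must be pinned.
```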
|
| /external/pytorch/test/typing/pass/ |
| D | creation_ops.py |
      39 # torch.as_strided
      41 torch.as_strided(x, (2, 2), (1, 2))
      42 torch.as_strided(x, (2, 2), (1, 2), 1)
|
| /external/pytorch/aten/src/ATen/native/cuda/ |
| D | ScatterGatherKernel.cu |
      181 : self.as_strided(index_sizes, self_strides); in operator ()()
      183 src.as_strided(index_sizes, src_strides) in operator ()()
      239 : self.as_strided(index_sizes, self_strides); in operator ()()
      241 src.as_strided(index_sizes, src_strides) in operator ()()
      298 : self.as_strided(index_sizes, self_strides); in operator ()()
      300 src.as_strided(index_sizes, src_strides) in operator ()()
|
| /external/libopus/dnn/torch/fargan/ |
| D | dataset.py |
      31 …self.data = np.lib.stride_tricks.as_strided(self.data, shape=(self.nb_sequences, pcm_chunk_size*2),
      36 …self.features = np.lib.stride_tricks.as_strided(self.features, shape=(self.nb_sequences, self.sequ…
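NumPy's stride_tricks.as_strided plays the same role here as torch.as_strided elsewhere in this listing: carving overlapping training sequences out of one flat buffer without copying. A sketch of the pattern (the buffer size, chunk, and hop are illustrative; the snippet's own shapes are truncated above):

```python
import numpy as np

pcm = np.arange(20, dtype=np.int16)    # stand-in for the flat PCM buffer
chunk, hop = 8, 4
nb_sequences = (pcm.size - chunk) // hop + 1

sequences = np.lib.stride_tricks.as_strided(
    pcm,
    shape=(nb_sequences, chunk),
    strides=(hop * pcm.itemsize, pcm.itemsize),  # rows overlap by chunk - hop
)
assert sequences.shape == (4, 8)
assert sequences[1, 0] == pcm[hop]    # row i starts hop samples after row i-1
```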
|
| /external/pytorch/test/typing/reveal/ |
| D | tensor_constructors.py |
      46 # torch.as_strided
      48 reveal_type(torch.as_strided(x, (2, 2), (1, 2))) # E: {Tensor}
      49 reveal_type(torch.as_strided(x, (2, 2), (1, 2), 1)) # E: {Tensor}
|
| /external/pytorch/torch/_functorch/ |
| D | config.py |
      50 # before falling back to the autograd engine's view replay or as_strided.
      54 # at runtime can have more overhead compared to a single as_strided call
      57 # (3) Some backends like XLA do not support as_strided
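The trade-off these comments weigh: replaying the recorded chain of view ops costs one call per op, while as_strided recovers the final alias in a single call, but only on backends that implement it (hence the XLA caveat). Both paths land on the same view; an illustrative geometry:

```python
import torch

base = torch.randn(2, 3, 4)
alias = base.permute(2, 0, 1)[1:]    # forward-pass chain of views

replayed = base.permute(2, 0, 1)[1:]                 # view replay: two ops
restrided = base.as_strided(alias.size(), alias.stride(),
                            alias.storage_offset())  # one as_strided call
assert torch.equal(replayed, restrided)
```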
|
| /external/pytorch/torch/csrc/autograd/ |
| D | autograd_meta.cpp |
      48 // because of as_strided. conj/neg bit must be part of this metadata because
      90 // - Make sure that when the same as_strided is applied to both primal and
      238 new_fw_grad_value = new_base_fw_grad.as_strided( in set_fw_grad()
      305 new_val = base_val.as_strided( in fw_grad()
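Line 90's invariant, that the same as_strided must be applied to both primal and tangent, can be observed through the public forward-mode AD API. A sketch (the geometry is illustrative):

```python
import torch
import torch.autograd.forward_ad as fwAD

with fwAD.dual_level():
    primal = torch.randn(4, 4)
    tangent = torch.randn(4, 4)
    dual = fwAD.make_dual(primal, tangent)

    view = dual.as_strided((2, 2), (4, 1), 1)
    _, view_tangent = fwAD.unpack_dual(view)

    # The tangent went through the same as_strided as the primal.
    assert torch.equal(view_tangent, tangent.as_strided((2, 2), (4, 1), 1))
```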
|
| /external/pytorch/test/cpp_extensions/open_registration_extension/pytorch_openreg/csrc/ |
| D | OpenRegMem.cpp |
      103 return at::cpu::as_strided(self, size, stride, storage_offset_); in as_strided_openreg()
      119 m.impl("as_strided", as_strided_openreg); in TORCH_LIBRARY_IMPL()
|
| /external/pytorch/benchmarks/instruction_counts/definitions/ |
| D | standard.py |
      193 # @as_strided | // @as_strided
      194 torch.as_strided(x, (2, 3), (4, 1), 2) | torch::as_strided(x, {2, 3}, {4, 1}, 2);
|
| /external/pytorch/torch/_prims/ |
| D | __init__.py |
      141 "as_strided",
      1225 # as_strided to shapes with no elements are trivially valid, so it's OK
      1232 return torch.as_strided(a, size, stride, storage_offset)
      1238 return torch.as_strided(a, size, stride, storage_offset)
      1246 as_strided = _make_prim( variable
      1247 …schema="as_strided(Tensor(a!) a, SymInt[] size, SymInt[] stride, SymInt storage_offset) -> Tensor(…
      1310 return a.as_strided(shape, new_strides, a.storage_offset())
      1440 return a.as_strided(new_shape, new_strides, a.storage_offset())
      1477 out = a.as_strided(a.shape, a.stride(), a.storage_offset())
      1585 return a.as_strided(new_shape, new_strides, a.storage_offset())
      [all …]
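The comment at line 1225 notes a corner case worth seeing concretely: when the requested shape has zero elements, no storage is ever read, so any strides and offset pass validation. A sketch (values are arbitrary):

```python
import torch

a = torch.empty(0)

# Zero elements means nothing is dereferenced: arbitrary strides are fine.
v = torch.as_strided(a, (0, 5), (123, 456), 0)
assert v.numel() == 0 and v.shape == (0, 5)
```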
|
| /external/pytorch/test/functorch/ |
| D | test_aotdispatch.py |
      2579 a = x.as_strided((4, 4), (8, 1), storage_offset=0)
      2580 b = x.as_strided((4, 4), (8, 1), storage_offset=28)
      2595 a = x.as_strided((4, 4), (9, 1), storage_offset=0)
      2596 b = x.as_strided((4, 4), (9, 1), storage_offset=22)
      2603 a = x.as_strided((4, 4), (9, 1), storage_offset=0)
      2604 b = x.as_strided((4, 4), (9, 1), storage_offset=23)
      2612 a = x.as_strided((2, 4, 3), (110, 24, 4), storage_offset=5)
      2621 a = x.as_strided((4, 4), (9, 1), storage_offset=0)
      2622 b = x.as_strided((4, 4), (9, 1), storage_offset=24)
      2629 a = x.as_strided((4, 4), (9, 1), storage_offset=0)
      [all …]
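These tests vary only the storage offset because that alone decides whether two equally-strided views overlap: a (4, 4) view with strides (9, 1) touches offsets 0-3 modulo 9, leaving a gap in each 9-slot row. A sketch of the distinction being exercised (the offsets below are chosen for the demonstration, not copied from the tests):

```python
import torch

x = torch.zeros(64)
a = x.as_strided((4, 4), (9, 1), storage_offset=0)
disjoint = x.as_strided((4, 4), (9, 1), storage_offset=4)     # offsets 4-7 mod 9
overlapping = x.as_strided((4, 4), (9, 1), storage_offset=1)  # offsets 1-4 mod 9

a.fill_(1.0)
assert (disjoint == 0).all()     # never shares a storage slot with `a`
assert (overlapping == 1).any()  # shares slots 1-3 of each row with `a`
```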
|