
Searched full:contiguous (Results 1 – 25 of 4148) sorted by relevance


/external/pytorch/aten/src/ATen/test/
memory_format_test.cpp
13 for (auto memory_format : {at::MemoryFormat::ChannelsLast, at::MemoryFormat::Contiguous}) { in TEST()
20 EXPECT_TRUE(t.suggest_memory_format() == at::MemoryFormat::Contiguous); in TEST()
23 // Ambiguous case where we fallback to Contiguous; in TEST()
25 EXPECT_TRUE(t.suggest_memory_format() == at::MemoryFormat::Contiguous); in TEST()
30 EXPECT_TRUE(t.suggest_memory_format() == at::MemoryFormat::Contiguous); in TEST()
81 sliceStepTwo(t, 1, MemoryFormat::Contiguous); in TEST()
82 sliceStepTwo(t, 2, MemoryFormat::Contiguous); in TEST()
83 sliceStepTwo(t, 3, MemoryFormat::Contiguous); in TEST()
86 sliceStepTwo(t, 2, MemoryFormat::Contiguous); in TEST()
87 sliceStepTwo(t, 3, MemoryFormat::Contiguous); in TEST()
[all …]
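The test above checks at::MemoryFormat handling and the fallback of suggest_memory_format() to Contiguous when the strides are ambiguous. A minimal Python sketch of the same behavior (illustration only; the test itself is C++, and the Python API spells the formats torch.contiguous_format / torch.channels_last):

    import torch

    # A fresh 4-D tensor is dense NCHW, i.e. the default contiguous layout.
    t = torch.rand(2, 3, 4, 5)
    assert t.is_contiguous()

    # Channels-last (NHWC) keeps the shape but permutes the strides.
    cl = t.contiguous(memory_format=torch.channels_last)
    assert cl.is_contiguous(memory_format=torch.channels_last)
    assert not cl.is_contiguous()  # not contiguous in the default sense

    # Size-1 dims make the layout ambiguous: both checks pass, and the
    # "Ambiguous case" in the test falls back to MemoryFormat::Contiguous.
    amb = torch.rand(1, 1, 4, 5)
    assert amb.is_contiguous()
    assert amb.is_contiguous(memory_format=torch.channels_last)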
/external/executorch/backends/example/example_backend_delegate_passes/
permute_memory_formats_pass.py
22 after pass: x -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> out
25 … -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> conv -> to_dim_…
28 …-> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> linear -> to_dim…
53 … the pattern is conv, x -> conv -> out will become x -> conv -> to_dim(contiguous) -> out when per…
54 …conv -> conv -> out, it will become x -> conv -> to_dim(contiguous) -> conv -> to_dim(contiguous) …
59 … # like, x -> conv -> out will become x -> conv -> to_dim(contiguous) -> out
77 … # like, x -> conv -> conv -> out will become x -> conv -> to_dim(contiguous) -> conv -> out
103 …tern is conv, x -> conv -> to_dim(contiguous) -> out will become x -> to_dim(channel_last) -> conv…
104 …contiguous) -> conv -> to_dim(contiguous) -> out, it will become x -> to_dim(channel_last) -> conv…
merge_to_dim_pass.py
19 … -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> conv -> to_dim_…
20 after pass: x -> to_dim(channel_last) -> conv -> conv -> to_dim_(contiguous) -> out
23 … -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> conv -> to_dim_…
24 … |-------------> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> out
25 … -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> conv -> to_dim_…
26 … |--------------> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> out
29 … -> to_dim(channel_last) -> conv -> to_dim_(contiguous) -> to_dim(channel_last) -> conv -> to_dim_…
30 y -> to_dim(channel_last) -> conv -> to_dim_(contiguous) ---------|
31 after pass: x -> to_dim(channel_last) -> conv -> conv -> to_dim_(contiguous) -> out
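Both passes above bracket each delegated conv with dim-order conversions and then cancel adjacent to_dim_(contiguous) -> to_dim(channel_last) pairs. A rough eager-mode analogue of what the unmerged and merged graphs compute (a sketch for intuition; this is plain PyTorch, not the ExecuTorch pass API):

    import torch
    import torch.nn as nn

    conv1 = nn.Conv2d(3, 8, 3, padding=1)
    conv2 = nn.Conv2d(8, 8, 3, padding=1)
    x = torch.rand(1, 3, 16, 16)

    # Unmerged: to_dim(channel_last) before each conv, to_dim_(contiguous) after.
    y = conv1(x.contiguous(memory_format=torch.channels_last)).contiguous()
    y = conv2(y.contiguous(memory_format=torch.channels_last)).contiguous()

    # Merged: one conversion in, one conversion out; the results agree.
    z = conv2(conv1(x.contiguous(memory_format=torch.channels_last))).contiguous()
    torch.testing.assert_close(y, z)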
/external/pytorch/aten/src/ATen/native/
BucketizationUtils.h
15 // original values given by raw_*. If an original value is not contiguous, will make a contiguous c…
19 // corresponding raw_* version should be used since it was already contiguous of the right type.
32 …TORCH_WARN_ONCE("torch.searchsorted(): input value tensor is non-contiguous, this will lower the p… in searchsorted_maybe_trim_input_tensors()
33 …"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous inp… in searchsorted_maybe_trim_input_tensors()
35 trimmed_input = raw_input.contiguous(); in searchsorted_maybe_trim_input_tensors()
38 …TORCH_WARN_ONCE("torch.searchsorted(): boundary tensor is non-contiguous, this will lower the perf… in searchsorted_maybe_trim_input_tensors()
39 …"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous bou… in searchsorted_maybe_trim_input_tensors()
41 trimmed_boundaries = raw_boundaries.contiguous(); in searchsorted_maybe_trim_input_tensors()
44 …TORCH_WARN_ONCE("torch.searchsorted(): sorter tensor is non-contiguous, this will lower the perfor… in searchsorted_maybe_trim_input_tensors()
45 …"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous sor… in searchsorted_maybe_trim_input_tensors()
[all …]
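The helper above warns once and copies when torch.searchsorted gets non-contiguous inputs. From Python the copy can be made explicit instead (a small usage sketch):

    import torch

    boundaries = torch.tensor([1.0, 3.0, 5.0, 7.0, 9.0])  # must be sorted
    values = torch.rand(10, 2).t()        # the transpose is non-contiguous
    assert not values.is_contiguous()

    # Passing values as-is would hit the TORCH_WARN_ONCE path and copy
    # internally; calling .contiguous() up front does the copy exactly once.
    idx = torch.searchsorted(boundaries, values.contiguous())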
WeightNorm.cpp
32 // I assume tensor.contiguous(), view(), norm(), etc. here will dispatch through VariableType. in norm_except_dim()
38 return v.contiguous().view({v.size(0), -1}).norm(pow, 1).view(output_size); in norm_except_dim()
42 return v.contiguous().view({-1, v.size(v.dim() - 1)}).norm(pow, 0).view(output_size); in norm_except_dim()
54 auto w = at::empty_like(v, at::MemoryFormat::Contiguous); in weight_norm_cpu()
71 TORCH_CHECK(saved_v.is_contiguous(), "saved_v must be contiguous"); in weight_norm_backward_cpu()
72 TORCH_CHECK(saved_g.is_contiguous(), "saved_g must be contiguous"); in weight_norm_backward_cpu()
73 TORCH_CHECK(saved_norm.is_contiguous(), "saved_norm must be contiguous"); in weight_norm_backward_cpu()
75 auto grad_v = at::empty_like(saved_v, at::MemoryFormat::Contiguous); in weight_norm_backward_cpu()
76 auto grad_g = at::empty_like(saved_g, at::MemoryFormat::Contiguous); in weight_norm_backward_cpu()
93 auto v = v_in.contiguous(); in _weight_norm()
[all …]
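norm_except_dim() above reduces the norm over every dimension except the first (or last), which weight_norm then uses to rescale v. The dim == 0 branch translates almost line-for-line to Python (a sketch mirroring the C++, not the public API):

    import torch

    v = torch.rand(4, 3, 5)

    # Mirrors: v.contiguous().view({v.size(0), -1}).norm(pow, 1).view(output_size)
    output_size = [v.size(0)] + [1] * (v.dim() - 1)  # keep dims for broadcasting
    norm_v = v.contiguous().view(v.size(0), -1).norm(2, 1).view(output_size)

    # weight_norm then computes w = g * (v / norm_except_dim(v, ...)).
    g = torch.rand(output_size)
    w = v * (g / norm_v)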
/external/cronet/stable/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/
mod.rs
11 contiguous. The names reflect their internal representation, and consequently,
20 * A [`contiguous::NFA`] uses a single allocation to represent all states,
25 starting state), a contiguous NFA better balances memory usage with search
26 speed. The single contiguous allocation also uses less overhead per state and
30 contiguous NFA. It takes only a little longer to build, but both its memory
33 so many patterns that a contiguous NFA could not be built. (Currently, because
34 of both memory and search speed improvements, a contiguous NFA has a smaller
39 pub mod contiguous; module
/external/cronet/tot/third_party/rust/chromium_crates_io/vendor/aho-corasick-1.1.3/src/nfa/
mod.rs
11 contiguous. The names reflect their internal representation, and consequently,
20 * A [`contiguous::NFA`] uses a single allocation to represent all states,
25 starting state), a contiguous NFA better balances memory usage with search
26 speed. The single contiguous allocation also uses less overhead per state and
30 contiguous NFA. It takes only a little longer to build, but both its memory
33 so many patterns that a contiguous NFA could not be built. (Currently, because
34 of both memory and search speed improvements, a contiguous NFA has a smaller
39 pub mod contiguous; module
/external/rust/android-crates-io/crates/bytemuck/src/
contiguous.rs
22 /// # use bytemuck::Contiguous;
32 /// unsafe impl Contiguous for Foo {
50 /// Precisely, the guarantees you must uphold when implementing `Contiguous` for
67 /// gets a `C` that implements `Contiguous`, it is in the appropriate range.
70 /// `Contiguous::from_integer` and `Contiguous::into_integer`.
80 pub unsafe trait Contiguous: Copy + 'static { trait
84 /// Contiguous is broadly intended for use with fieldless enums, and for
87 /// *unsound* to implement `Contiguous`!).
111 /// `Contiguous` on your type you **must not** override this method.
115 /// We will not panic for any correct implementation of `Contiguous`, but
[all …]
/external/cronet/tot/third_party/rust/chromium_crates_io/vendor/bytemuck-1.19.0/src/
contiguous.rs
22 /// # use bytemuck::Contiguous;
32 /// unsafe impl Contiguous for Foo {
50 /// Precisely, the guarantees you must uphold when implementing `Contiguous` for
67 /// gets a `C` that implements `Contiguous`, it is in the appropriate range.
70 /// `Contiguous::from_integer` and `Contiguous::into_integer`.
80 pub unsafe trait Contiguous: Copy + 'static { trait
84 /// Contiguous is broadly intended for use with fieldless enums, and for
87 /// *unsound* to implement `Contiguous`!).
111 /// `Contiguous` on your type you **must not** override this method.
115 /// We will not panic for any correct implementation of `Contiguous`, but
[all …]
/external/cronet/stable/third_party/rust/chromium_crates_io/vendor/bytemuck-1.19.0/src/
contiguous.rs
22 /// # use bytemuck::Contiguous;
32 /// unsafe impl Contiguous for Foo {
50 /// Precisely, the guarantees you must uphold when implementing `Contiguous` for
67 /// gets a `C` that implements `Contiguous`, it is in the appropriate range.
70 /// `Contiguous::from_integer` and `Contiguous::into_integer`.
80 pub unsafe trait Contiguous: Copy + 'static { trait
84 /// Contiguous is broadly intended for use with fieldless enums, and for
87 /// *unsound* to implement `Contiguous`!).
111 /// `Contiguous` on your type you **must not** override this method.
115 /// We will not panic for any correct implementation of `Contiguous`, but
[all …]
/external/pytorch/aten/src/ATen/native/cuda/
TensorModeKernel.cpp
57 // contiguous. in mode_kernel_impl()
59 auto contiguous = transposed.contiguous(); in mode_kernel_impl() local
81 values_transposed, indices_transposed, contiguous, slice_size, slices); in mode_kernel_impl()
85 // If transposed is already contiguous, it will return a tensor with the in mode_kernel_impl()
87 if (transposed.is_same(contiguous)) { in mode_kernel_impl()
88 contiguous = contiguous.clone(); in mode_kernel_impl()
92 values_transposed, indices_transposed, contiguous, dim, ndim); in mode_kernel_impl()
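The kernel above moves the reduction dim innermost and forces a contiguous copy so every mode slice is dense in memory; the clone() guards against transposed.contiguous() returning the same tensor. From the user's side this is simply torch.mode (usage sketch):

    import torch

    x = torch.randint(0, 5, (3, 7))
    values, indices = torch.mode(x, dim=1)  # mode of each row
    # values[i] is the most frequent entry of x[i]; indices[i] is one of
    # its positions along dim 1.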
CUDAJitLoops.cuh
86 bool contiguous, in launch_jitted_unrolled_kernel() argument
101 desc, contiguous, dynamic_casting, scalar_pos); in launch_jitted_unrolled_kernel()
144 desc, /*contiguous=*/true, /*dynamic_casting=*/false, in launch_jitted_vectorized_kernel()
196 bool contiguous = iter.is_contiguous(); in jitted_gpu_kernel_generic() local
200 // - Case 1: no dynamic casting and contiguous in jitted_gpu_kernel_generic()
202 // - Case 3: dynamic casting and contiguous in jitted_gpu_kernel_generic()
207 if (contiguous) { in jitted_gpu_kernel_generic()
208 // Case 1: no dynamic casting and contiguous in jitted_gpu_kernel_generic()
223 storer, contiguous, scalar_pos, scalar_val, extra_args); in jitted_gpu_kernel_generic()
236 if (contiguous) { in jitted_gpu_kernel_generic()
[all …]
NaiveDilatedConvolution.cu
428 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_cuda()
429 const Tensor weight_ = weight.contiguous(); in slow_conv_dilated2d_cuda()
430 const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined); in slow_conv_dilated2d_cuda()
474 (is_batch ? grad_output.contiguous() in slow_conv_dilated2d_backward_cuda()
475 : grad_output.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cuda()
477 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cuda()
478 const Tensor weight_ = weight.contiguous(); in slow_conv_dilated2d_backward_cuda()
534 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated3d_cuda()
535 const Tensor weight_ = weight.contiguous(); in slow_conv_dilated3d_cuda()
536 const Tensor bias_ = (bias.defined() ? bias.contiguous() : undefined); in slow_conv_dilated3d_cuda()
[all …]
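slow_conv_dilated2d/3d above normalize their operands first: add a batch dim when the input is unbatched, and take .contiguous() copies of input, weight, and bias. The operator being implemented is ordinary dilated convolution (usage sketch):

    import torch
    import torch.nn.functional as F

    x = torch.rand(1, 3, 16, 16)       # already batched, so no unsqueeze(0)
    w = torch.rand(8, 3, 3, 3)
    b = torch.rand(8)
    # Dilated convolution; the naive CUDA kernel above is one backend for it.
    y = F.conv2d(x, w, b, dilation=2)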
/external/pytorch/docs/source/
tensor_view.rst
31 it interprets the same data. Taking a view of a contiguous tensor could potentially produce a non-con…
41 # View tensors might be non-contiguous.
44 # To get a contiguous tensor, call `.contiguous()` to enforce
45 # copying data when `t` is not contiguous.
46 >>> c = t.contiguous()
97 - :meth:`~torch.Tensor.contiguous` returns **itself** if input tensor is already contiguous, otherw…
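The doc above states the user-facing rule: views share storage and may be non-contiguous, and .contiguous() copies only when needed. A sketch of that round trip:

    import torch

    t = torch.rand(3, 4)
    v = t.t()                      # a view: same storage, permuted strides
    assert not v.is_contiguous()   # view tensors might be non-contiguous

    c = v.contiguous()             # copies, because v is not contiguous
    assert c.is_contiguous()
    assert c.data_ptr() != v.data_ptr()

    assert t.contiguous() is t     # already contiguous: returns itself, no copy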
/external/pytorch/android/pytorch_android/
test_asset.jit
82 r = r.contiguous(memory_format=2)
84 r = r.contiguous()
91 r = r.contiguous(memory_format=2)
93 r = r.contiguous()
96 def contiguous(self, x: Tensor) -> Tensor:
97 return x.contiguous()
101 return x.contiguous(memory_format=2)
105 return x.contiguous(memory_format=3)
generate_test_torchscripts.py
124 r = r.contiguous(memory_format=torch.channels_last)
126 r = r.contiguous()
133 r = r.contiguous(memory_format=torch.channels_last_3d)
135 r = r.contiguous()
139 def contiguous(self, x: Tensor) -> Tensor: member in Test
140 return x.contiguous()
144 return x.contiguous(memory_format=torch.channels_last)
148 return x.contiguous(memory_format=torch.channels_last_3d)
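The two files above are the same test in different forms: generate_test_torchscripts.py writes the source with torch.channels_last / torch.channels_last_3d, and test_asset.jit is the serialized TorchScript, where the memory_format enum appears as its integer value (c10::MemoryFormat: 0 = contiguous_format, 1 = preserve_format, 2 = channels_last, 3 = channels_last_3d). Eager Python takes the enum objects, not the bare ints (sketch):

    import torch

    x4 = torch.rand(1, 3, 4, 4)
    x5 = torch.rand(1, 3, 2, 4, 4)

    # Python equivalents of the serialized calls in test_asset.jit:
    #   r.contiguous(memory_format=2) -> torch.channels_last     (NHWC, 4-D)
    #   r.contiguous(memory_format=3) -> torch.channels_last_3d  (NDHWC, 5-D)
    a = x4.contiguous(memory_format=torch.channels_last)
    b = x5.contiguous(memory_format=torch.channels_last_3d)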
/external/pigweed/pw_allocator/block/
CMakeLists.txt
51 pw_allocator.block.contiguous
75 pw_add_library(pw_allocator.block.contiguous STATIC
77 public/pw_allocator/block/contiguous.h
86 contiguous.cc
97 pw_allocator.block.contiguous
106 pw_allocator.block.contiguous
139 pw_allocator.block.contiguous
BUILD.bazel
54 ":contiguous",
79 name = "contiguous",
80 srcs = ["contiguous.cc"],
81 hdrs = ["public/pw_allocator/block/contiguous.h"],
97 ":contiguous",
108 ":contiguous",
137 ":contiguous",
156 ":contiguous",
264 "public/pw_allocator/block/contiguous.h",
BUILD.gn
54 ":contiguous",
77 pw_source_set("contiguous") {
79 public = [ "public/pw_allocator/block/contiguous.h" ]
86 sources = [ "contiguous.cc" ]
93 public_deps = [ ":contiguous" ]
100 ":contiguous",
128 ":contiguous",
146 ":contiguous",
/external/tensorflow/tensorflow/compiler/xla/
cpu_function_runtime.cc
78 void* contiguous = nullptr; in MallocContiguousBuffers() local
80 contiguous = aligned_malloc(total, Align()); in MallocContiguousBuffers()
84 ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(contiguous, total); in MallocContiguousBuffers()
87 uintptr_t pos = reinterpret_cast<uintptr_t>(contiguous); in MallocContiguousBuffers()
99 return contiguous; in MallocContiguousBuffers()
102 void FreeContiguous(void* contiguous) { in FreeContiguous() argument
103 if (contiguous != nullptr) { in FreeContiguous()
104 aligned_free(contiguous); in FreeContiguous()
/external/pytorch/test/mobile/model_test/
android_api_module.py
110 r = r.contiguous(memory_format=torch.channels_last)
112 r = r.contiguous()
119 r = r.contiguous(memory_format=torch.channels_last_3d)
121 r = r.contiguous()
125 def contiguous(self, x: Tensor) -> Tensor: member in AndroidAPIModule
126 return x.contiguous()
130 return x.contiguous(memory_format=torch.channels_last)
134 return x.contiguous(memory_format=torch.channels_last_3d)
/external/e2fsprogs/tests/f_resize_inode/
expect
15 test_filesys: 11/4096 files (0.0% non-contiguous), 2619/16384 blocks
36 test_filesys: 11/4096 files (0.0% non-contiguous), 2619/16384 blocks
43 test_filesys: 11/4096 files (0.0% non-contiguous), 2619/16384 blocks
64 test_filesys: 11/4096 files (0.0% non-contiguous), 2619/16384 blocks
71 test_filesys: 11/4096 files (0.0% non-contiguous), 2619/16384 blocks
92 test_filesys: 11/4096 files (0.0% non-contiguous), 2619/16384 blocks
99 test_filesys: 11/4096 files (0.0% non-contiguous), 2619/16384 blocks
114 test_filesys: 11/4096 files (0.0% non-contiguous), 2619/16384 blocks
151 test_filesys: 11/4096 files (0.0% non-contiguous), 1275/16384 blocks
158 test_filesys: 11/4096 files (0.0% non-contiguous), 1275/16384 blocks
/external/pytorch/aten/src/ATen/native/cpu/
AdaptiveMaxPoolKernel.cpp
23 auto input = input_.contiguous(); in cpu_adaptive_max_pool2d()
24 auto output = output_.contiguous(); in cpu_adaptive_max_pool2d()
25 auto indices = indices_.contiguous(); in cpu_adaptive_max_pool2d()
94 auto input = input_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
95 auto output = output_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
96 auto indices = indices_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
211 auto input = input_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
212 auto output = output_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
213 auto indices = indices_.contiguous(memory_format); in cpu_adaptive_max_pool2d_channels_last()
346 auto grad_output = grad_output_.contiguous(); in cpu_adaptive_max_pool2d_backward()
[all …]
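cpu_adaptive_max_pool2d above keeps separate paths for default-contiguous and channels-last input, taking .contiguous(memory_format) copies so the inner loops can assume a dense layout. The operator from Python, which routes to the channels-last path here because of x's memory format (usage sketch):

    import torch
    import torch.nn.functional as F

    x = torch.rand(1, 3, 16, 16).contiguous(memory_format=torch.channels_last)
    out, idx = F.adaptive_max_pool2d(x, output_size=(4, 4), return_indices=True)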
PaddingKernel.cpp
136 auto input = input_.contiguous(); in cpu_padding()
137 auto output = output_.contiguous(); in cpu_padding()
243 auto input = input_.contiguous(memory_format); in cpu_padding_channels_last()
244 auto output = output_.contiguous(memory_format); in cpu_padding_channels_last()
317 auto grad_output = grad_output_.contiguous(); in cpu_padding_backward()
318 auto grad_input = grad_input_.contiguous(); in cpu_padding_backward()
405 auto grad_input = grad_input_.contiguous(memory_format); in cpu_padding_backward_channels_last()
406 auto grad_output = grad_output_.contiguous(memory_format); in cpu_padding_backward_channels_last()
476 // non-batch mode 4d input will be considered as Contiguous in format of CDHW
478 return input.dim() == 4 ? at::MemoryFormat::Contiguous : input.suggest_memory_format(); in padding_memory_format_3d()
[all …]
/external/pytorch/benchmarks/sparse/
benchmark_semi_structured_sparsity.py
48 .contiguous()
52 def test_linear(m, k, n, dtype, contiguous, backend): argument
95 "contiguous": sparse_output.is_contiguous(),
99 def test_tensor(m, k, n, dtype, contiguous, backend): argument
143 "contiguous": sparse_output.is_contiguous(),
174 parser.add_argument("-contiguous", action="store_true")
195 eval_fn(m, k, n, dtype, args.contiguous, args.backend)
221 eval_fn(mn, 10240, mn, dtype, args.contiguous, args.backend)
244 eval_fn(10240, k, 10240, dtype, args.contiguous, args.backend)
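The benchmark above records whether the sparse matmul's output is_contiguous() and takes a -contiguous flag for the operand layout. A minimal sketch of the kind of op being timed, assuming the torch.sparse semi-structured API (to_sparse_semi_structured; needs a recent PyTorch CUDA build, and the exact dtype/shape constraints vary by version):

    import torch
    from torch.sparse import to_sparse_semi_structured

    # 2:4 sparsity: two of every four consecutive elements are zero.
    w = torch.rand(128, 128, dtype=torch.float16, device="cuda")
    mask = torch.tensor([1, 1, 0, 0], device="cuda").tile(128, 32).bool()
    w_sparse = to_sparse_semi_structured(w * mask)

    x = torch.rand(128, 128, dtype=torch.float16, device="cuda")
    y = torch.mm(w_sparse, x)  # dispatches to the 2:4 sparse kernel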
