Searched full:unsqueeze (Results 1 – 25 of 451) sorted by relevance

/external/executorch/backends/arm/test/ops/
test_unsqueeze.py
8 # Tests the unsqueeze op which copies the data of the input tensor (possibly with new data format)
24 class Unsqueeze(torch.nn.Module): class in TestSimpleUnsqueeze
29 return x.unsqueeze(dim)
41 .check_count({"torch.ops.aten.unsqueeze.default": 1})
60 .check_count({"torch.ops.aten.unsqueeze.default": 1})
82 .check_count({"torch.ops.aten.unsqueeze.default": 1})
89 @parameterized.expand(Unsqueeze.test_parameters)
92 self._test_unsqueeze_tosa_MI_pipeline(self.Unsqueeze(), (test_tensor, i))
94 @parameterized.expand(Unsqueeze.test_parameters)
96 self._test_unsqueeze_tosa_BI_pipeline(self.Unsqueeze(), (test_tensor, 0))
[all …]
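A minimal sketch (not part of the test file above) of what the comment on line 8 describes: unsqueeze returns a view of the input with an extra size-1 dimension inserted at the given position.

```python
import torch

x = torch.randn(3, 4)                 # shape (3, 4)
y = x.unsqueeze(0)                    # shape (1, 3, 4) -- the usual way to add a batch dim
z = x.unsqueeze(-1)                   # shape (3, 4, 1)
assert y.shape == (1, 3, 4) and z.shape == (3, 4, 1)
assert torch.equal(y.squeeze(0), x)   # same underlying data, only the shape changed
```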
/external/pytorch/aten/src/ATen/test/
wrapdim_test.cpp
14 ASSERT_TRUE(a.unsqueeze(-5).equal(a.unsqueeze(0))); in TestExpressionSpecification()
15 ASSERT_TRUE(a.unsqueeze(4).equal(a.unsqueeze(-1))); in TestExpressionSpecification()
17 // can unsqueeze scalar in TestExpressionSpecification()
19 ASSERT_TRUE(b.unsqueeze(0).equal(b.unsqueeze(-1))); in TestExpressionSpecification()
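For reference, a hedged Python sketch of the dim-wrapping behaviour this C++ test exercises, assuming a 4-d input (the tensor's shape is not visible in the snippet above):

```python
import torch

a = torch.zeros(2, 3, 4, 5)                          # 4-d, so legal unsqueeze dims are [-5, 4]
assert torch.equal(a.unsqueeze(-5), a.unsqueeze(0))  # -5 wraps around to 0
assert torch.equal(a.unsqueeze(4), a.unsqueeze(-1))  # 4 and -1 both name the new last dim

b = torch.tensor(1.0)                                # a 0-d scalar can be unsqueezed too
assert torch.equal(b.unsqueeze(0), b.unsqueeze(-1))
```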
/external/executorch/backends/arm/_passes/
conv1d_unsqueeze_pass.py
29 1) unsqueeze the convolution's input from 3d to 4d
30 2) if the input to unsqueeze is quantized, insert q/dq-pair after unsqueeze
52 data=kernel_param_3d.data.contiguous().unsqueeze(dim=-1),
61 kernel_node.meta["val"] = kernel_node.meta["val"].data.unsqueeze(dim=-1)
67 kernel_node.meta["val"] = kernel_node.meta["val"].data.unsqueeze(dim=-1)
77 kernel_node.meta["val"] = kernel_node.meta["val"].data.unsqueeze(dim=-1)
121 # c. Add unsqueeze to input (3d -> 4d) and squeeze to output (4d -> 3d)
122 # unsqueeze -> conv2d -> squeeze
134 # If Quantized we must insert unsqueeze --> q --> dq --> node
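A minimal sketch of the rewrite this pass describes (unsqueeze -> conv2d -> squeeze); the shapes and the use of the functional API are illustrative, not taken from the pass itself.

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 16)      # (N, C_in, L): the 3d input of a conv1d
w = torch.randn(8, 4, 3)       # (C_out, C_in, K): a conv1d kernel

ref = F.conv1d(x, w)           # direct 1-d convolution, shape (2, 8, 14)

# The rewrite: lift input and kernel to 4d with a trailing size-1 dim,
# run conv2d, then squeeze the extra dim back out of the output.
out = F.conv2d(x.unsqueeze(-1), w.unsqueeze(-1)).squeeze(-1)

assert torch.allclose(ref, out, atol=1e-5)
```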
/external/pytorch/aten/src/ATen/native/vulkan/ops/
Unsqueeze.cpp
17 Tensor unsqueeze(const at::Tensor& self, int64_t dim) { in unsqueeze() function
20 "Vulkan unsqueeze only supports up to 3d tensors as input!"); in unsqueeze()
23 "Vulkan unsqueeze dimension out of range expected to be in range of [", in unsqueeze()
37 // Create the output texture. For unsqueeze, add a dimension. in unsqueeze()
93 // Dimension to unsqueeze in unsqueeze()
105 VK_KERNEL(unsqueeze), in unsqueeze()
129 m.impl(TORCH_SELECTIVE_NAME("aten::unsqueeze"), TORCH_FN(unsqueeze)); in TORCH_LIBRARY_IMPL()
Stack.cpp
7 #include <ATen/ops/unsqueeze.h>
47 // Unsqueeze each tensor in the list in stack()
50 unsqueezed_outputs.push_back(at::unsqueeze(t, dim)); in stack()
/external/pytorch/functorch/
writing_batching_rules.md
18 ### Basic Batching Rule (unsqueeze)
19 …atching rule API. For some reference, the function signature for unsqueeze is `unsqueeze(Tensor(a)…
28 return std::make_tuple(self_.unsqueeze(dim), 0);
45 …unsqueeze(x, dim)`, the strategy for the batching rule is pretty simple. We first move the batchin…
48 return std::make_tuple(self_.unsqueeze(dim), 0);
53 VMAP_SUPPORT(unsqueeze, unsqueeze_batch_rule);
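A hedged illustration of the idea behind the batching rule quoted above: once the vmapped (batch) dimension has been moved to the front, a per-example unsqueeze(dim) is just a single unsqueeze(dim + 1) on the batched tensor. The names and shapes below are illustrative, and the torch.vmap call assumes PyTorch 2.x.

```python
import torch

batched = torch.randn(5, 3, 4)   # batch dim at the front; each example is (3, 4)
dim = 1                          # logical dim, relative to a single example

looped = torch.stack([ex.unsqueeze(dim) for ex in batched])   # per-example reference
rule = batched.unsqueeze(dim + 1)                             # what the batching rule computes

assert torch.equal(looped, rule)
assert torch.equal(torch.vmap(lambda t: t.unsqueeze(dim))(batched), rule)
```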
/external/pytorch/aten/src/ATen/native/cuda/
NaiveDilatedConvolution.cu
425 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated2d_cuda()
428 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_cuda()
432 Tensor output_ = (is_batch ? output : output.unsqueeze(0)); in slow_conv_dilated2d_cuda()
471 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated2d_backward_cuda()
475 : grad_output.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cuda()
477 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cuda()
487 (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0)) in slow_conv_dilated2d_backward_cuda()
531 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated3d_cuda()
534 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated3d_cuda()
538 Tensor output_ = (is_batch ? output : output.unsqueeze(0)); in slow_conv_dilated3d_cuda()
[all …]
ReplicationPadding.cu
277 gradInput_ = gradInput.unsqueeze(0); in replication_pad2d_backward_out_cuda_template()
278 gradOutput_ = gradOutput.unsqueeze(0); in replication_pad2d_backward_out_cuda_template()
391 gradInput_ = gradInput.unsqueeze(0); in replication_pad3d_backward_out_cuda_template()
392 gradOutput_ = gradOutput.unsqueeze(0); in replication_pad3d_backward_out_cuda_template()
445 input_ = input.unsqueeze(0); in TORCH_IMPL_FUNC()
446 output_ = output.unsqueeze(0); in TORCH_IMPL_FUNC()
508 gradInput_ = gradInput.unsqueeze(0); in TORCH_IMPL_FUNC()
509 gradOutput_ = gradOutput.unsqueeze(0); in TORCH_IMPL_FUNC()
551 input_ = input.unsqueeze(0); in TORCH_IMPL_FUNC()
552 output_ = output.unsqueeze(0); in TORCH_IMPL_FUNC()
[all …]
/external/pytorch/test/onnx/
test_onnx_opset.py
257 "op_name": "Unsqueeze",
372 {"op_name": "Unsqueeze"},
373 {"op_name": "Unsqueeze"},
398 {"op_name": "Unsqueeze"},
399 {"op_name": "Unsqueeze"},
499 {"op_name": "Unsqueeze"},
501 {"op_name": "Unsqueeze"},
503 {"op_name": "Unsqueeze"},
505 {"op_name": "Unsqueeze"},
514 {"op_name": "Unsqueeze"},
[all …]
/external/pytorch/torch/csrc/api/include/torch/data/transforms/
tensor.h
63 .unsqueeze(/*dim=*/1) in Normalize()
64 .unsqueeze(/*dim=*/2)), in Normalize()
66 .unsqueeze(/*dim=*/1) in Normalize()
67 .unsqueeze(/*dim=*/2)) {} in Normalize()
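A sketch of why Normalize unsqueezes its mean and stddev twice: a per-channel (C,) vector becomes (C, 1, 1) so it broadcasts against a (C, H, W) image. The values here are illustrative, not the transform's defaults.

```python
import torch

img = torch.rand(3, 8, 8)                     # (C, H, W)
mean = torch.tensor([0.485, 0.456, 0.406])    # (C,)
stddev = torch.tensor([0.229, 0.224, 0.225])  # (C,)

out = (img - mean.unsqueeze(1).unsqueeze(2)) / stddev.unsqueeze(1).unsqueeze(2)
assert out.shape == (3, 8, 8)                 # (C, 1, 1) broadcast over every pixel
```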
/external/pytorch/aten/src/ATen/native/
NaiveDilatedConvolution.cpp
553 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated2d_cpu()
556 (is_batch ? input.contiguous(memory_format) : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_cpu()
560 Tensor output_ = (is_batch ? output : output.unsqueeze(0)); in slow_conv_dilated2d_cpu()
605 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated3d_cpu()
608 (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0)); in slow_conv_dilated3d_cpu()
612 Tensor output_ = (is_batch ? output : output.unsqueeze(0)); in slow_conv_dilated3d_cpu()
654 // template function assumes batched tensors. unsqueeze(0) will in slow_conv_dilated2d_backward_cpu()
658 : grad_output.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cpu()
660 (is_batch ? input.contiguous(memory_format) : input.contiguous().unsqueeze(0)); in slow_conv_dilated2d_backward_cpu()
670 (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0)) in slow_conv_dilated2d_backward_cpu()
[all …]
/external/pytorch/benchmarks/fastrnns/
factory.py
290 inputs[seq_idx].unsqueeze(0), (hy, cy), wih, whh, bih, bhh
294 hx_outs += [hy.unsqueeze(0)]
295 cx_outs += [cy.unsqueeze(0)]
422 return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
448 return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
480 return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
527 return torch.stack(inputs), (hy.unsqueeze(0), cy.unsqueeze(0))
/external/executorch/kernels/test/
op_unsqueeze_copy_test.cpp
60 // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()] in test_dtype()
61 // Here input.dim == 2, so the range of legal dim for unsqueeze is [-3, 2] in test_dtype()
73 // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()] in test_empty_input()
74 // Here input.dim == 4, so the range of legal dim for unsqueeze is [-5, 4] in test_empty_input()
169 // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()] in TEST_F()
170 // Here input.dim == 3, so the range of legal dim for unsqueeze is [-4, 3] in TEST_F()
193 // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()] in TEST_F()
194 // Here input.dim == 2, so the range of legal dim for unsqueeze is [-3, 2] in TEST_F()
226 res = torch.unsqueeze(x, 1)
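A quick check of the legal-dim rule the comments above state: for unsqueeze, dim must lie in [-(input.dim() + 1), input.dim()].

```python
import torch

x = torch.randn(2, 3)                 # input.dim() == 2, so legal dims are [-3, 2]
for d in range(-3, 3):                # -3, -2, -1, 0, 1, 2 all succeed
    assert torch.unsqueeze(x, d).dim() == 3

try:
    torch.unsqueeze(x, 3)             # one past the upper bound
except IndexError:
    pass                              # an out-of-range dim raises IndexError
```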
/external/executorch/backends/xnnpack/_passes/
conv1d_unsqueeze_pass.py
23 1) unsqueeze the convolution's input from 3d to 4d
81 data=kernel_param_3d.data.contiguous().unsqueeze(dim=-1),
150 # c. Add unsqueeze to input (3d -> 4d) and squeeze to output (4d -> 3d)
151 # unsqueeze -> conv2d -> squeeze
163 # If Quantized we must insert unsqueeze --> q --> dq --> node
/external/executorch/backends/vulkan/runtime/graph/ops/impl/
Unsqueeze.cpp
26 t_in->dim() < 4, "Cannot unsqueeze a tensor with more than 3 dimensions"); in add_unsqueeze_node()
44 void unsqueeze(ComputeGraph& graph, const std::vector<ValueRef>& args) { in unsqueeze() function
49 VK_REGISTER_OP(aten.unsqueeze_copy.default, unsqueeze);
/external/pytorch/functorch/notebooks/_src/
plot_per_sample_gradients.py
68 sample = sample.unsqueeze(0)
69 target = target.unsqueeze(0)
106 # ``torch.unsqueeze`` to add a batch dimension.
108 batch = sample.unsqueeze(0)
109 targets = target.unsqueeze(0)
/external/libopus/dnn/torch/osce/utils/layers/
silk_upsampler.py
138 y = torch.cat((y_even.unsqueeze(-1), y_odd.unsqueeze(-1)), dim=-1).flatten(2)
157 (y_01_24.unsqueeze(-1), y_17_24.unsqueeze(-1), y_09_24_sh1.unsqueeze(-1)),
pitch_auto_correlator.py
65 lookup = lookup.unsqueeze(-1) + range
66 lookup = lookup.unsqueeze(1)
70 x_ext = torch.repeat_interleave(x_pad.unsqueeze(-1), 2 * self.radius + 1, -1)
/external/pytorch/torch/ao/nn/quantized/reference/modules/
rnn.py
191 input = input.unsqueeze(0)
198 hx = hx.unsqueeze(0) if not is_batched else hx
280 input = input.unsqueeze(0)
288 hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx
353 input = input.unsqueeze(0)
360 hx = hx.unsqueeze(0) if not is_batched else hx
583 input = input.unsqueeze(batch_dim)
624 hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
757 input = input.unsqueeze(batch_dim)
763 hx = hx.unsqueeze(1)
/external/pytorch/torch/distributions/
lowrank_multivariate_normal.py
21 Wt_Dinv = W.mT / D.unsqueeze(-2)
46 Wt_Dinv = W.mT / D.unsqueeze(-2)
110 loc_ = loc.unsqueeze(-1)
111 cov_diag_ = cov_diag.unsqueeze(-1)
167 cov_diag_sqrt_unsqueeze = self._unbroadcasted_cov_diag.sqrt().unsqueeze(-1)
192 / self._unbroadcasted_cov_diag.unsqueeze(-2)
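A hedged sketch of the broadcasting trick on line 21: unsqueeze(-2) turns the diagonal D of shape (..., n) into (..., 1, n), so W.mT / D.unsqueeze(-2) computes W^T D^{-1} without materialising the diagonal matrix. The shapes below are illustrative.

```python
import torch

n, k = 5, 2
W = torch.randn(n, k)
D = torch.rand(n) + 0.1            # strictly positive diagonal term of the covariance

Wt_Dinv = W.mT / D.unsqueeze(-2)   # shape (k, n)
ref = W.mT @ torch.diag(1.0 / D)   # explicit W^T @ inv(diag(D))

assert torch.allclose(Wt_Dinv, ref, atol=1e-6)
```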
/external/pytorch/test/jit/
test_backend_nnapi.py
76 args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
87 args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
/external/libopus/dnn/torch/osce/models/
nns_base.py
51 f = features.unsqueeze(0).to(device)
52 p = periods.unsqueeze(0).to(device)
53 n = numbits.unsqueeze(0).to(device)
/external/pytorch/test/inductor/
test_scatter_optimization.py
38 y.scatter_(2, x.unsqueeze(2), 2.718)
56 y.scatter_(0, x.unsqueeze(0), 2.718)
69 y.scatter_(-1, x.unsqueeze(1), 2.718)
82 y.scatter_(1, x.unsqueeze(1), 2.718)
96 y.scatter_(1, x.unsqueeze(1), 2.718)
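A sketch of why these tests unsqueeze the index before scatter_: scatter_ requires the index tensor to have the same number of dimensions as self, so a per-row index of shape (N,) is lifted to (N, 1) along the scatter dimension. The shapes and value below are illustrative.

```python
import torch

y = torch.zeros(4, 6)
x = torch.tensor([0, 2, 5, 1])          # one column index per row, shape (4,)

y.scatter_(1, x.unsqueeze(1), 2.718)    # index shape (4, 1) matches y's 2 dims
assert torch.allclose(y[torch.arange(4), x], torch.full((4,), 2.718))
```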
/external/pytorch/torch/csrc/api/src/nn/modules/
adaptive.cpp
102 Tensor input = is_batched ? input_ : input_.unsqueeze(0); in forward()
103 Tensor target = is_batched ? target_ : target_.unsqueeze(0); in forward()
139 cluster_logprob.gather(1, relative_target.unsqueeze(1)); in forward()
160 output += head_logprob.gather(1, gather_inds.unsqueeze(1)).squeeze(); in forward()
187 .unsqueeze(1); in _get_full_log_prob()
/external/pytorch/aten/src/ATen/functorch/
LegacyVmapTransforms.cpp
103 tensor = tensor.unsqueeze(0); in moveDimToFrontAndExpand()
144 // Unsqueeze dim 0, expand it to the correct shape in logicalToPhysical()
163 tensor = tensor.unsqueeze(0); in moveDimToFrontAndUnsqueeze()
167 tensor = tensor.unsqueeze(1); in moveDimToFrontAndUnsqueeze()
199 // Unsqueeze dim 0, expand it to the correct shape in logicalToPhysical()
