| /external/executorch/backends/arm/test/ops/ |
| D | test_unsqueeze.py |
|     8: # Tests the unsqueeze op which copies the data of the input tensor (possibly with new data format)
|    24: class Unsqueeze(torch.nn.Module):  (class in TestSimpleUnsqueeze)
|    29: return x.unsqueeze(dim)
|    41: .check_count({"torch.ops.aten.unsqueeze.default": 1})
|    60: .check_count({"torch.ops.aten.unsqueeze.default": 1})
|    82: .check_count({"torch.ops.aten.unsqueeze.default": 1})
|    89: @parameterized.expand(Unsqueeze.test_parameters)
|    92: self._test_unsqueeze_tosa_MI_pipeline(self.Unsqueeze(), (test_tensor, i))
|    94: @parameterized.expand(Unsqueeze.test_parameters)
|    96: self._test_unsqueeze_tosa_BI_pipeline(self.Unsqueeze(), (test_tensor, 0))
|    [all …]
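A minimal sketch of the module this test parameterizes, in plain PyTorch (the TOSA MI/BI pipelines and test tensors belong to the ExecuTorch harness and are not reproduced here; shapes below are illustrative):

    import torch

    class Unsqueeze(torch.nn.Module):
        # Same idea as the module under test: insert a size-1 dimension at `dim`.
        def forward(self, x: torch.Tensor, dim: int) -> torch.Tensor:
            return x.unsqueeze(dim)

    m = Unsqueeze()
    t = torch.randn(2, 3)
    print(m(t, 0).shape)   # torch.Size([1, 2, 3])
    print(m(t, -1).shape)  # torch.Size([2, 3, 1])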
|
| /external/pytorch/aten/src/ATen/test/ |
| D | wrapdim_test.cpp |
|    14: ASSERT_TRUE(a.unsqueeze(-5).equal(a.unsqueeze(0)));  (in TestExpressionSpecification())
|    15: ASSERT_TRUE(a.unsqueeze(4).equal(a.unsqueeze(-1)));  (in TestExpressionSpecification())
|    17: // can unsqueeze scalar  (in TestExpressionSpecification())
|    19: ASSERT_TRUE(b.unsqueeze(0).equal(b.unsqueeze(-1)));  (in TestExpressionSpecification())
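The same assertions in Python, assuming (as the wrap arithmetic implies) that `a` is a 4-d tensor and `b` is a 0-d scalar; a sketch, not the ATen test itself:

    import torch

    a = torch.randn(2, 3, 4, 5)                    # 4-d: valid unsqueeze dims are [-5, 4]
    assert a.unsqueeze(-5).equal(a.unsqueeze(0))   # -5 wraps around to 0
    assert a.unsqueeze(4).equal(a.unsqueeze(-1))   # 4 wraps around to -1

    b = torch.tensor(1.0)                          # scalars (0-d) can be unsqueezed too
    assert b.unsqueeze(0).equal(b.unsqueeze(-1))   # both produce shape (1,)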
|
| /external/executorch/backends/arm/_passes/ |
| D | conv1d_unsqueeze_pass.py |
|    29: 1) unsqueeze the convolution's input from 3d to 4d
|    30: 2) if the input to unsqueeze is quantized, insert q/dq-pair after unsqueeze
|    52: data=kernel_param_3d.data.contiguous().unsqueeze(dim=-1),
|    61: kernel_node.meta["val"] = kernel_node.meta["val"].data.unsqueeze(dim=-1)
|    67: kernel_node.meta["val"] = kernel_node.meta["val"].data.unsqueeze(dim=-1)
|    77: kernel_node.meta["val"] = kernel_node.meta["val"].data.unsqueeze(dim=-1)
|   121: # c. Add unsqueeze to input (3d -> 4d) and squeeze to output (4d -> 3d)
|   122: # unsqueeze -> conv2d -> squeeze
|   134: # If Quantized we must insert unsqueeze --> q --> dq --> node
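The rewrite this pass performs on the exported graph, sketched as plain eager PyTorch (functional form only; the q/dq insertion and graph-node bookkeeping are not shown, and the shapes are made up):

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 4, 16)   # (N, C, L): conv1d input
    w = torch.randn(8, 4, 3)    # (out_ch, in_ch, kernel): conv1d weight

    ref = F.conv1d(x, w)

    # unsqueeze input and kernel to 4-d, run conv2d, squeeze the result back to 3-d
    out = F.conv2d(x.unsqueeze(-1), w.unsqueeze(-1)).squeeze(-1)

    assert torch.allclose(ref, out, atol=1e-6)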
|
| /external/pytorch/aten/src/ATen/native/vulkan/ops/ |
| D | Unsqueeze.cpp |
|    17: Tensor unsqueeze(const at::Tensor& self, int64_t dim) {  (in unsqueeze() function)
|    20: "Vulkan unsqueeze only supports up to 3d tensors as input!");  (in unsqueeze())
|    23: "Vulkan unsqueeze dimension out of range expected to be in range of [",  (in unsqueeze())
|    37: // Create the output texture. For unsqueeze, add a dimension.  (in unsqueeze())
|    93: // Dimension to unsqueeze  (in unsqueeze())
|   105: VK_KERNEL(unsqueeze),  (in unsqueeze())
|   129: m.impl(TORCH_SELECTIVE_NAME("aten::unsqueeze"), TORCH_FN(unsqueeze));  (in TORCH_LIBRARY_IMPL())
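The shape computation the op boils down to, sketched in plain Python (conceptual only; the actual implementation works on Vulkan textures and restricts the input to at most 3 dimensions):

    def unsqueeze_sizes(sizes, dim):
        # dim may be negative; the valid range is [-(len(sizes) + 1), len(sizes)]
        if dim < 0:
            dim += len(sizes) + 1
        return sizes[:dim] + [1] + sizes[dim:]

    print(unsqueeze_sizes([2, 3, 4], 0))    # [1, 2, 3, 4]
    print(unsqueeze_sizes([2, 3, 4], -1))   # [2, 3, 4, 1]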
|
| D | Stack.cpp |
|     7: #include <ATen/ops/unsqueeze.h>
|    47: // Unsqueeze each tensor in the list  (in stack())
|    50: unsqueezed_outputs.push_back(at::unsqueeze(t, dim));  (in stack())
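The equivalence this kernel relies on, sketched in plain PyTorch (shapes are illustrative):

    import torch

    # stacking along `dim` == unsqueezing every tensor at `dim`, then concatenating there
    ts = [torch.randn(2, 3) for _ in range(4)]
    dim = 1

    assert torch.equal(torch.stack(ts, dim),
                       torch.cat([t.unsqueeze(dim) for t in ts], dim))   # shape (2, 4, 3)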
|
| /external/pytorch/functorch/ |
| D | writing_batching_rules.md |
|    18: ### Basic Batching Rule (unsqueeze)
|    19: …atching rule API. For some reference, the function signature for unsqueeze is `unsqueeze(Tensor(a)…
|    28: return std::make_tuple(self_.unsqueeze(dim), 0);
|    45: …unsqueeze(x, dim)`, the strategy for the batching rule is pretty simple. We first move the batchin…
|    48: return std::make_tuple(self_.unsqueeze(dim), 0);
|    53: VMAP_SUPPORT(unsqueeze, unsqueeze_batch_rule);
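A rough Python rendition of the rule that document describes (the real rule is written in C++ against the functorch batch-rule API; the helper below and the example shapes are illustrative):

    import torch

    def unsqueeze_batch_rule(self, self_bdim, dim):
        self_ = self.movedim(self_bdim, 0)      # move the batch dim to the front
        logical_rank = self_.dim() - 1          # rank the user sees (without the batch dim)
        if dim < 0:
            dim += logical_rank + 1             # wrap negative dims the way unsqueeze does
        return self_.unsqueeze(dim + 1), 0      # +1 steps over the batch dim; output batched at 0

    x = torch.randn(5, 2, 3)                    # a batch of five (2, 3) tensors, batch dim 0
    out, out_bdim = unsqueeze_batch_rule(x, 0, -1)
    assert out.shape == (5, 2, 3, 1) and out_bdim == 0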
|
| /external/pytorch/aten/src/ATen/native/cuda/ |
| D | NaiveDilatedConvolution.cu |
|   425: // template function assumes batched tensors. unsqueeze(0) will  (in slow_conv_dilated2d_cuda())
|   428: (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));  (in slow_conv_dilated2d_cuda())
|   432: Tensor output_ = (is_batch ? output : output.unsqueeze(0));  (in slow_conv_dilated2d_cuda())
|   471: // template function assumes batched tensors. unsqueeze(0) will  (in slow_conv_dilated2d_backward_cuda())
|   475: : grad_output.contiguous().unsqueeze(0));  (in slow_conv_dilated2d_backward_cuda())
|   477: (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));  (in slow_conv_dilated2d_backward_cuda())
|   487: (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))  (in slow_conv_dilated2d_backward_cuda())
|   531: // template function assumes batched tensors. unsqueeze(0) will  (in slow_conv_dilated3d_cuda())
|   534: (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));  (in slow_conv_dilated3d_cuda())
|   538: Tensor output_ = (is_batch ? output : output.unsqueeze(0));  (in slow_conv_dilated3d_cuda())
|   [all …]
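The batched/unbatched handling those comments describe, sketched in Python with an ordinary conv2d standing in for the dilated-convolution template (the helper name is made up):

    import torch
    import torch.nn.functional as F

    def conv2d_maybe_unbatched(input, weight):
        # the underlying template only handles batched (4-d) input, so give an
        # unbatched (3-d) input a temporary batch dimension of size 1
        is_batch = input.dim() == 4
        input_ = input if is_batch else input.unsqueeze(0)
        out = F.conv2d(input_, weight)
        return out if is_batch else out.squeeze(0)

    w = torch.randn(8, 3, 3, 3)
    print(conv2d_maybe_unbatched(torch.randn(3, 16, 16), w).shape)     # (8, 14, 14)
    print(conv2d_maybe_unbatched(torch.randn(2, 3, 16, 16), w).shape)  # (2, 8, 14, 14)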
|
| D | ReplicationPadding.cu |
|   277: gradInput_ = gradInput.unsqueeze(0);  (in replication_pad2d_backward_out_cuda_template())
|   278: gradOutput_ = gradOutput.unsqueeze(0);  (in replication_pad2d_backward_out_cuda_template())
|   391: gradInput_ = gradInput.unsqueeze(0);  (in replication_pad3d_backward_out_cuda_template())
|   392: gradOutput_ = gradOutput.unsqueeze(0);  (in replication_pad3d_backward_out_cuda_template())
|   445: input_ = input.unsqueeze(0);  (in TORCH_IMPL_FUNC())
|   446: output_ = output.unsqueeze(0);  (in TORCH_IMPL_FUNC())
|   508: gradInput_ = gradInput.unsqueeze(0);  (in TORCH_IMPL_FUNC())
|   509: gradOutput_ = gradOutput.unsqueeze(0);  (in TORCH_IMPL_FUNC())
|   551: input_ = input.unsqueeze(0);  (in TORCH_IMPL_FUNC())
|   552: output_ = output.unsqueeze(0);  (in TORCH_IMPL_FUNC())
|   [all …]
|
| /external/pytorch/test/onnx/ |
| D | test_onnx_opset.py |
|   257: "op_name": "Unsqueeze",
|   372: {"op_name": "Unsqueeze"},
|   373: {"op_name": "Unsqueeze"},
|   398: {"op_name": "Unsqueeze"},
|   399: {"op_name": "Unsqueeze"},
|   499: {"op_name": "Unsqueeze"},
|   501: {"op_name": "Unsqueeze"},
|   503: {"op_name": "Unsqueeze"},
|   505: {"op_name": "Unsqueeze"},
|   514: {"op_name": "Unsqueeze"},
|   [all …]
|
| /external/pytorch/torch/csrc/api/include/torch/data/transforms/ |
| D | tensor.h |
|    63: .unsqueeze(/*dim=*/1)  (in Normalize())
|    64: .unsqueeze(/*dim=*/2)),  (in Normalize())
|    66: .unsqueeze(/*dim=*/1)  (in Normalize())
|    67: .unsqueeze(/*dim=*/2)) {}  (in Normalize())
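What the two unsqueezes buy in the Normalize transform, sketched in Python (the C++ version stores the reshaped statistics at construction time; the values below are the usual ImageNet statistics, used only as an example):

    import torch

    img = torch.rand(3, 32, 32)                   # (C, H, W)
    mean = torch.tensor([0.485, 0.456, 0.406])    # shape (C,)
    std = torch.tensor([0.229, 0.224, 0.225])

    # unsqueeze dims 1 and 2 so the (C,) statistics become (C, 1, 1) and
    # broadcast over the spatial dimensions
    normalized = (img - mean.unsqueeze(1).unsqueeze(2)) / std.unsqueeze(1).unsqueeze(2)
    print(normalized.shape)                       # torch.Size([3, 32, 32])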
|
| /external/pytorch/aten/src/ATen/native/ |
| D | NaiveDilatedConvolution.cpp |
|   553: // template function assumes batched tensors. unsqueeze(0) will  (in slow_conv_dilated2d_cpu())
|   556: (is_batch ? input.contiguous(memory_format) : input.contiguous().unsqueeze(0));  (in slow_conv_dilated2d_cpu())
|   560: Tensor output_ = (is_batch ? output : output.unsqueeze(0));  (in slow_conv_dilated2d_cpu())
|   605: // template function assumes batched tensors. unsqueeze(0) will  (in slow_conv_dilated3d_cpu())
|   608: (is_batch ? input.contiguous() : input.contiguous().unsqueeze(0));  (in slow_conv_dilated3d_cpu())
|   612: Tensor output_ = (is_batch ? output : output.unsqueeze(0));  (in slow_conv_dilated3d_cpu())
|   654: // template function assumes batched tensors. unsqueeze(0) will  (in slow_conv_dilated2d_backward_cpu())
|   658: : grad_output.contiguous().unsqueeze(0));  (in slow_conv_dilated2d_backward_cpu())
|   660: (is_batch ? input.contiguous(memory_format) : input.contiguous().unsqueeze(0));  (in slow_conv_dilated2d_backward_cpu())
|   670: (output_mask[0] ? (is_batch ? grad_input : grad_input.unsqueeze(0))  (in slow_conv_dilated2d_backward_cpu())
|   [all …]
|
| /external/pytorch/benchmarks/fastrnns/ |
| D | factory.py |
|   290: inputs[seq_idx].unsqueeze(0), (hy, cy), wih, whh, bih, bhh
|   294: hx_outs += [hy.unsqueeze(0)]
|   295: cx_outs += [cy.unsqueeze(0)]
|   422: return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
|   448: return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
|   480: return torch.stack(outputs), (hy.unsqueeze(0), cy.unsqueeze(0))
|   527: return torch.stack(inputs), (hy.unsqueeze(0), cy.unsqueeze(0))
|
| /external/executorch/kernels/test/ |
| D | op_unsqueeze_copy_test.cpp |
|    60: // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()]  (in test_dtype())
|    61: // Here input.dim == 2, so the range of legal dim for unsqueeze is [-3, 2]  (in test_dtype())
|    73: // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()]  (in test_empty_input())
|    74: // Here input.dim == 4, so the range of legal dim for unsqueeze is [-5, 4]  (in test_empty_input())
|   169: // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()]  (in TEST_F())
|   170: // Here input.dim == 3, so the range of legal dim for unsqueeze is [-4, 3]  (in TEST_F())
|   193: // Legal dim for unsqueeze should be in [-(input.dim()+1), input.dim()]  (in TEST_F())
|   194: // Here input.dim == 2, so the range of legal dim for unsqueeze is [-3, 2]  (in TEST_F())
|   226: res = torch.unsqueeze(x, 1)
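The dim-range rule those comments state, demonstrated in plain PyTorch rather than the ExecuTorch kernel test:

    import torch

    x = torch.rand(3, 4)                      # input.dim() == 2, so legal dims are [-3, 2]
    for d in range(-3, 3):
        print(d, torch.unsqueeze(x, d).shape)

    try:
        torch.unsqueeze(x, 3)                 # outside the legal range
    except IndexError as e:
        print("rejected:", e)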
|
| /external/executorch/backends/xnnpack/_passes/ |
| D | conv1d_unsqueeze_pass.py |
|    23: 1) unsqueeze the convolution's input from 3d to 4d
|    81: data=kernel_param_3d.data.contiguous().unsqueeze(dim=-1),
|   150: # c. Add unsqueeze to input (3d -> 4d) and squeeze to output (4d -> 3d)
|   151: # unsqueeze -> conv2d -> squeeze
|   163: # If Quantized we must insert unsqueeze --> q --> dq --> node
|
| /external/executorch/backends/vulkan/runtime/graph/ops/impl/ |
| D | Unsqueeze.cpp |
|    26: t_in->dim() < 4, "Cannot unsqueeze a tensor with more than 3 dimensions");  (in add_unsqueeze_node())
|    44: void unsqueeze(ComputeGraph& graph, const std::vector<ValueRef>& args) {  (in unsqueeze() function)
|    49: VK_REGISTER_OP(aten.unsqueeze_copy.default, unsqueeze);
|
| /external/pytorch/functorch/notebooks/_src/ |
| D | plot_per_sample_gradients.py |
|    68: sample = sample.unsqueeze(0)
|    69: target = target.unsqueeze(0)
|   106: # ``torch.unsqueeze`` to add a batch dimension.
|   108: batch = sample.unsqueeze(0)
|   109: targets = target.unsqueeze(0)
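The pattern those lines use, sketched with a toy model (the model, loss, and shapes here are stand-ins, not the notebook's):

    import torch
    import torch.nn.functional as F

    model = torch.nn.Linear(10, 5)
    sample, target = torch.randn(10), torch.tensor(2)

    # give the single example a leading batch dimension of 1 so it can be fed
    # through a model that expects batched input
    batch = sample.unsqueeze(0)      # (10,) -> (1, 10)
    targets = target.unsqueeze(0)    # ()    -> (1,)

    loss = F.cross_entropy(model(batch), targets)
    grads = torch.autograd.grad(loss, list(model.parameters()))
    print([g.shape for g in grads])  # gradients w.r.t. weight and bias for this one sample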
|
| /external/libopus/dnn/torch/osce/utils/layers/ |
| D | silk_upsampler.py |
|   138: y = torch.cat((y_even.unsqueeze(-1), y_odd.unsqueeze(-1)), dim=-1).flatten(2)
|   157: (y_01_24.unsqueeze(-1), y_17_24.unsqueeze(-1), y_09_24_sh1.unsqueeze(-1)),
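What the unsqueeze/cat/flatten combination on line 138 does, sketched with tiny hand-made tensors (the real code applies it to filtered even- and odd-phase signals):

    import torch

    y_even = torch.tensor([[[0., 2., 4.]]])   # (B=1, C=1, L=3)
    y_odd  = torch.tensor([[[1., 3., 5.]]])

    # add a trailing singleton dim to each phase, concatenate there, then flatten
    # the last two dims so the samples interleave: even, odd, even, odd, ...
    y = torch.cat((y_even.unsqueeze(-1), y_odd.unsqueeze(-1)), dim=-1).flatten(2)
    print(y)        # tensor([[[0., 1., 2., 3., 4., 5.]]])
    print(y.shape)  # torch.Size([1, 1, 6])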
|
| D | pitch_auto_correlator.py |
|    65: lookup = lookup.unsqueeze(-1) + range
|    66: lookup = lookup.unsqueeze(1)
|    70: x_ext = torch.repeat_interleave(x_pad.unsqueeze(-1), 2 * self.radius + 1, -1)
|
| /external/pytorch/torch/ao/nn/quantized/reference/modules/ |
| D | rnn.py |
|   191: input = input.unsqueeze(0)
|   198: hx = hx.unsqueeze(0) if not is_batched else hx
|   280: input = input.unsqueeze(0)
|   288: hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx
|   353: input = input.unsqueeze(0)
|   360: hx = hx.unsqueeze(0) if not is_batched else hx
|   583: input = input.unsqueeze(batch_dim)
|   624: hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
|   757: input = input.unsqueeze(batch_dim)
|   763: hx = hx.unsqueeze(1)
|
| /external/pytorch/torch/distributions/ |
| D | lowrank_multivariate_normal.py |
|    21: Wt_Dinv = W.mT / D.unsqueeze(-2)
|    46: Wt_Dinv = W.mT / D.unsqueeze(-2)
|   110: loc_ = loc.unsqueeze(-1)
|   111: cov_diag_ = cov_diag.unsqueeze(-1)
|   167: cov_diag_sqrt_unsqueeze = self._unbroadcasted_cov_diag.sqrt().unsqueeze(-1)
|   192: / self._unbroadcasted_cov_diag.unsqueeze(-2)
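What `W.mT / D.unsqueeze(-2)` computes, checked against an explicit diagonal inverse (a small non-batched sketch; the distribution code does the same thing with batched tensors):

    import torch

    n, k = 5, 2
    W = torch.randn(n, k)          # low-rank factor, shape (n, k)
    D = torch.rand(n) + 0.1        # diagonal entries, shape (n,)

    # unsqueeze D at -2 so it broadcasts across the rows of W.mT:
    # this is W^T @ diag(D)^-1 without building the diagonal matrix
    Wt_Dinv = W.mT / D.unsqueeze(-2)                  # shape (k, n)
    ref = W.mT @ torch.linalg.inv(torch.diag(D))

    assert torch.allclose(Wt_Dinv, ref, atol=1e-5)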
|
| /external/pytorch/test/jit/ |
| D | test_backend_nnapi.py |
|    76: args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
|    87: args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
|
| /external/libopus/dnn/torch/osce/models/ |
| D | nns_base.py |
|    51: f = features.unsqueeze(0).to(device)
|    52: p = periods.unsqueeze(0).to(device)
|    53: n = numbits.unsqueeze(0).to(device)
|
| /external/pytorch/test/inductor/ |
| D | test_scatter_optimization.py |
|    38: y.scatter_(2, x.unsqueeze(2), 2.718)
|    56: y.scatter_(0, x.unsqueeze(0), 2.718)
|    69: y.scatter_(-1, x.unsqueeze(1), 2.718)
|    82: y.scatter_(1, x.unsqueeze(1), 2.718)
|    96: y.scatter_(1, x.unsqueeze(1), 2.718)
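Why the index is unsqueezed before `scatter_`, shown on a small example (shapes are made up; the Inductor optimization the test actually checks is not reproduced here):

    import torch

    num_classes = 5
    x = torch.randint(0, num_classes, (4,))   # one class index per row
    y = torch.zeros(4, num_classes)

    # scatter_ requires index.dim() == self.dim(), so the 1-d indices are
    # unsqueezed along the scatter dim to shape (4, 1)
    y.scatter_(1, x.unsqueeze(1), 2.718)      # writes 2.718 at (i, x[i]) for each row
    print(y)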
|
| /external/pytorch/torch/csrc/api/src/nn/modules/ |
| D | adaptive.cpp |
|   102: Tensor input = is_batched ? input_ : input_.unsqueeze(0);  (in forward())
|   103: Tensor target = is_batched ? target_ : target_.unsqueeze(0);  (in forward())
|   139: cluster_logprob.gather(1, relative_target.unsqueeze(1));  (in forward())
|   160: output += head_logprob.gather(1, gather_inds.unsqueeze(1)).squeeze();  (in forward())
|   187: .unsqueeze(1);  (in _get_full_log_prob())
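The gather-with-unsqueezed-target idiom used there, in a small Python sketch (plain log-softmax standing in for the adaptive softmax clusters):

    import torch

    log_prob = torch.log_softmax(torch.randn(4, 10), dim=1)   # (N, C)
    target = torch.tensor([3, 0, 7, 2])                       # (N,)

    # gather needs index.dim() == input.dim(), so the targets are unsqueezed
    # to (N, 1) before picking each row's target log-probability
    picked = log_prob.gather(1, target.unsqueeze(1)).squeeze(1)   # shape (4,)
    print(picked)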
|
| /external/pytorch/aten/src/ATen/functorch/ |
| D | LegacyVmapTransforms.cpp |
|   103: tensor = tensor.unsqueeze(0);  (in moveDimToFrontAndExpand())
|   144: // Unsqueeze dim 0, expand it to the correct shape  (in logicalToPhysical())
|   163: tensor = tensor.unsqueeze(0);  (in moveDimToFrontAndUnsqueeze())
|   167: tensor = tensor.unsqueeze(1);  (in moveDimToFrontAndUnsqueeze())
|   199: // Unsqueeze dim 0, expand it to the correct shape  (in logicalToPhysical())
|