Searched refs: convolution (Results 1 – 25 of 414), sorted by relevance

/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/
convolution.c
71 pytorch_qnnp_operator_t convolution = NULL; in pytorch_qnnp_create_convolution_ndhwc_q8() local
188 convolution = calloc(1, sizeof(struct pytorch_qnnp_operator)); in pytorch_qnnp_create_convolution_ndhwc_q8()
189 if (convolution == NULL) { in pytorch_qnnp_create_convolution_ndhwc_q8()
229 convolution->group_stride = c_stride; in pytorch_qnnp_create_convolution_ndhwc_q8()
232 convolution->packed_weights = malloc(packed_weights_size); in pytorch_qnnp_create_convolution_ndhwc_q8()
233 if (convolution->packed_weights == NULL) { in pytorch_qnnp_create_convolution_ndhwc_q8()
253 convolution->packed_weights); in pytorch_qnnp_create_convolution_ndhwc_q8()
268 convolution->packed_weights, in pytorch_qnnp_create_convolution_ndhwc_q8()
281 (char*)convolution->packed_weights + in pytorch_qnnp_create_convolution_ndhwc_q8()
295 (char*)convolution->packed_weights + in pytorch_qnnp_create_convolution_ndhwc_q8()
[all …]
conv-prepack.cc
11 const pytorch_qnnp_operator_t convolution, in PrePackConvWeights() argument
15 enum pytorch_qnnp_ukernel_type ukernel_type = convolution->ukernel_type; in PrePackConvWeights()
16 const uint32_t kernel_width = convolution->kernel_width; in PrePackConvWeights()
17 const uint32_t kernel_height = convolution->kernel_height; in PrePackConvWeights()
20 convolution->kernel_depth ? convolution->kernel_depth : 1; in PrePackConvWeights()
21 const uint32_t groups = convolution->groups; in PrePackConvWeights()
23 if (convolution->transpose && in PrePackConvWeights()
165 (convolution->group_output_channels + (nr - 1)) & -nr; in PrePackConvWeights()
167 (convolution->group_input_channels + (kr - 1)) & -kr; in PrePackConvWeights()
184 convolution->group_output_channels, in PrePackConvWeights()
[all …]
conv-run.cc
333 const pytorch_qnnp_operator_t convolution, in qnnpackConv() argument
348 const size_t groups = convolution->groups; in qnnpackConv()
349 const size_t input_pixel_stride = convolution->group_input_channels * groups; in qnnpackConv()
351 convolution->group_output_channels * groups; in qnnpackConv()
352 const size_t kernel_width = convolution->kernel_width; in qnnpackConv()
353 const size_t kernel_height = convolution->kernel_height; in qnnpackConv()
354 const size_t kernel_depth = convolution->kernel_depth; in qnnpackConv()
364 if (convolution->ukernel_type == pytorch_qnnp_ukernel_type_xzp_gemm) { in qnnpackConv()
385 if (convolution->input != input || convolution->batch_size != batch_size || in qnnpackConv()
386 convolution->input_depth != input_depth || in qnnpackConv()
[all …]
/external/tensorflow/tensorflow/compiler/xla/service/
convolution_group_converter.cc
52 Status HandleConvolution(HloInstruction* convolution) override;
54 Status HandleBatchGroupCount(HloInstruction* convolution);
204 Status ConvolutionVisitor::HandleBatchGroupCount(HloInstruction* convolution) { in HandleBatchGroupCount() argument
205 auto dim_numbers = convolution->convolution_dimension_numbers(); in HandleBatchGroupCount()
206 auto activation = convolution->mutable_operand(0); in HandleBatchGroupCount()
207 auto filter = convolution->mutable_operand(1); in HandleBatchGroupCount()
208 int64_t batch_group_count = convolution->batch_group_count(); in HandleBatchGroupCount()
211 (should_expand_ && !should_expand_(convolution))) { in HandleBatchGroupCount()
216 << " for convolution " << convolution->ToString() << "\n"; in HandleBatchGroupCount()
297 Window window = convolution->window(); in HandleBatchGroupCount()
[all …]
transpose_folding.cc
42 const HloInstruction& convolution, in CanFoldOperandsIntoConvolution() argument
45 if (HloOpcode::kConvolution != convolution.opcode()) { in CanFoldOperandsIntoConvolution()
50 for (int64_t i = 0; i < convolution.operand_count(); ++i) { in CanFoldOperandsIntoConvolution()
51 auto& operand = *convolution.operand(i); in CanFoldOperandsIntoConvolution()
57 return transposable_conv_operands(convolution, operand_set); in CanFoldOperandsIntoConvolution()
116 auto& convolution = *pair.first; in FoldTransposeIntoConvolution() local
124 convolution.convolution_dimension_numbers(); in FoldTransposeIntoConvolution()
130 HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx); in FoldTransposeIntoConvolution()
147 new_lhs = convolution.mutable_operand(kLhsIdx); in FoldTransposeIntoConvolution()
153 HloInstruction& transpose = *convolution.mutable_operand(kRhsIdx); in FoldTransposeIntoConvolution()
[all …]
space_to_batch_converter.cc
68 Status PerformSpaceToBatchOnConvolution(HloInstruction* convolution);
80 ConvDetails GetConvolutionDetails(HloInstruction* convolution,
88 bool IsForwardWindowDilatedConv(HloInstruction* convolution,
164 Status PropagateOnConv(HloInstruction* convolution);
177 Status PropagateOnBackpropFilterConv(HloInstruction* convolution);
180 bool IsConvSuitableForSpaceToBatch(HloInstruction* convolution);
183 bool IsThisBackPropFilterConv(HloInstruction* convolution);
243 int64_t GetFirstChosenSpatialDim(HloInstruction* convolution) { in GetFirstChosenSpatialDim() argument
245 const int64_t end_point = convolution->convolution_dimension_numbers() in GetFirstChosenSpatialDim()
251 std::vector<int64_t> GetChosenSpatialDims(HloInstruction* convolution) { in GetChosenSpatialDims() argument
[all …]
/external/eigen/bench/tensors/
tensor_benchmarks_cpu.cc
146 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 4);
147 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 8);
148 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 12);
150 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 4);
151 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 8);
152 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 12);
154 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 4);
155 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 8);
156 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 12);
158 BM_FuncWithKernelDimsCPU(convolution, 4, 7, 4);
[all …]
tensor_benchmarks_gpu.cu
70 BM_FuncWithKernelDimsGPU(convolution, 7, 1);
71 BM_FuncWithKernelDimsGPU(convolution, 1, 7);
72 BM_FuncWithKernelDimsGPU(convolution, 7, 4);
73 BM_FuncWithKernelDimsGPU(convolution, 4, 7);
74 BM_FuncWithKernelDimsGPU(convolution, 7, 64);
75 BM_FuncWithKernelDimsGPU(convolution, 64, 7);
tensor_benchmarks_sycl.cc
134 BM_FuncWithKernelDimsGPU(convolution, 7, 1);
135 BM_FuncWithKernelDimsGPU(convolution, 1, 7);
136 BM_FuncWithKernelDimsGPU(convolution, 7, 4);
137 BM_FuncWithKernelDimsGPU(convolution, 4, 7);
138 BM_FuncWithKernelDimsGPU(convolution, 7, 64);
139 BM_FuncWithKernelDimsGPU(convolution, 64, 7);
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
gpu_hlo_cost_analysis.cc
85 const HloInstruction* convolution) { in GetConvolutionFlops() argument
86 auto lhs = convolution->operand(0); in GetConvolutionFlops()
87 auto rhs = convolution->operand(1); in GetConvolutionFlops()
92 const Shape& shape = convolution->shape(); in GetConvolutionFlops()
93 if (IsCustomCallToDnnConvolution(*convolution) && in GetConvolutionFlops()
94 convolution->shape().IsTuple()) { in GetConvolutionFlops()
100 return HloCostAnalysis::GetConvolutionFlops(convolution, lhs_shape, rhs_shape, in GetConvolutionFlops()
/external/pytorch/aten/src/ATen/native/quantized/cpu/
QnnpackUtils.h
200 pytorch_qnnp_operator_t convolution{nullptr}; in PackedConvWeightsQnnp()
202 convolution = static_cast<pytorch_qnnp_operator_t>( in PackedConvWeightsQnnp()
204 if (convolution == nullptr) { in PackedConvWeightsQnnp()
212 convolution); in PackedConvWeightsQnnp()
215 convolution->ukernel_type = ukernel_type; in PackedConvWeightsQnnp()
216 convolution->groups = groups; in PackedConvWeightsQnnp()
217 convolution->group_input_channels = group_input_channels; in PackedConvWeightsQnnp()
218 convolution->group_output_channels = group_output_channels; in PackedConvWeightsQnnp()
219 convolution->kernel_depth = kernel_depth; in PackedConvWeightsQnnp()
220 convolution->kernel_height = kernel_height; in PackedConvWeightsQnnp()
[all …]
/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/
operator.h
140 const struct pytorch_qnnp_operator* convolution) { in pytorch_qnnp_operator_get_log2_output_element_size() argument
141 return (uint32_t)(convolution->format & UINT32_C(0xFF)); in pytorch_qnnp_operator_get_log2_output_element_size()
145 const struct pytorch_qnnp_operator* convolution) { in pytorch_qnnp_operator_get_log2_input_element_size() argument
146 return (uint32_t)((convolution->format >> 8) & UINT32_C(0xFF)); in pytorch_qnnp_operator_get_log2_input_element_size()
150 const struct pytorch_qnnp_operator* convolution) { in pytorch_qnnp_operator_get_log2_kernel_element_size() argument
151 return (uint32_t)((convolution->format >> 16) & UINT32_C(0xFF)); in pytorch_qnnp_operator_get_log2_kernel_element_size()
155 const struct pytorch_qnnp_operator* convolution) { in pytorch_qnnp_operator_get_log2_bias_element_size() argument
156 return (uint32_t)((convolution->format >> 24) & UINT32_C(0xFF)); in pytorch_qnnp_operator_get_log2_bias_element_size()
/external/ComputeLibrary/scripts/
check_bad_style.sh
16 grep -HrnP --exclude-dir=assembly --exclude-dir=convolution --exclude-dir=arm_gemm --exclude-dir=ar…
24 grep -Hnr --exclude-dir=assembly --exclude-dir=convolution --exclude-dir=arm_gemm --exclude-dir=arm…
32 grep -HnRE --exclude-dir=assembly --exclude-dir=convolution --exclude-dir=arm_gemm --exclude-dir=dy…
40 grep -HnR --exclude-dir=assembly --exclude-dir=convolution --exclude-dir=arm_gemm --exclude-dir=arm…
48 grep -Hnir --exclude-dir=assembly --exclude-dir=convolution --exclude-dir=arm_gemm --exclude-dir=ar…
56 grep -Hnir --exclude-dir=assembly --exclude-dir=convolution --exclude-dir=arm_gemm --exclude-dir=ar…
64 grep -Hnir --exclude-dir=assembly --exclude-dir=convolution --exclude-dir=arm_gemm --exclude-dir=ar…
72 grep -Hnir --exclude-dir=assembly --exclude-dir=convolution --exclude-dir=arm_gemm --exclude-dir=ar…
/external/ComputeLibrary/tests/validate_examples/
graph_depthwiseconvolution.cpp
180 common_params.convolution.padding_mode = padding_mode->value(); in consume_parameters()
181 common_params.convolution.padding_top = padding_top->value(); in consume_parameters()
182 common_params.convolution.padding_bottom = padding_bottom->value(); in consume_parameters()
183 common_params.convolution.padding_left = padding_left->value(); in consume_parameters()
184 common_params.convolution.padding_right = padding_right->value(); in consume_parameters()
185 common_params.convolution.padding_stride_x = stride_x->value(); in consume_parameters()
186 common_params.convolution.padding_stride_y = stride_y->value(); in consume_parameters()
187 common_params.convolution.depth_multiplier = depth_multiplier->value(); in consume_parameters()
203 …ht) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.con… in print_parameters()
204 convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_para… in print_parameters()
[all …]
graph_convolution.cpp
181 common_params.convolution.padding_mode = padding_mode->value(); in consume_parameters()
182 common_params.convolution.padding_top = padding_top->value(); in consume_parameters()
183 common_params.convolution.padding_bottom = padding_bottom->value(); in consume_parameters()
184 common_params.convolution.padding_left = padding_left->value(); in consume_parameters()
185 common_params.convolution.padding_right = padding_right->value(); in consume_parameters()
186 common_params.convolution.padding_stride_x = stride_x->value(); in consume_parameters()
187 common_params.convolution.padding_stride_y = stride_y->value(); in consume_parameters()
203 …ht) (stride x, stride y) : (" << common_params.convolution.padding_top << "," << common_params.con… in print_parameters()
204 convolution.padding_left << "," << common_params.convolution.padding_right << ") (" << common_para… in print_parameters()
206 os << "Padding Mode: " << common_params.convolution.padding_mode << std::endl; in print_parameters()
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
ir_emission_utils.cc
43 const HloInstruction& convolution, in PotentiallyImplementedAsEigenConvolution() argument
52 const Shape& input_shape = convolution.operand(0)->shape(); in PotentiallyImplementedAsEigenConvolution()
53 const Shape& kernel_shape = convolution.operand(1)->shape(); in PotentiallyImplementedAsEigenConvolution()
54 const Shape& output_shape = convolution.shape(); in PotentiallyImplementedAsEigenConvolution()
78 if (window_util::HasWindowReversal(convolution.window())) { in PotentiallyImplementedAsEigenConvolution()
83 convolution.convolution_dimension_numbers(); in PotentiallyImplementedAsEigenConvolution()
/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/stablehlo/tests/
verify_conv.mlir
5 // Valid: Generic convolution
9 %result = "stablehlo.convolution"(%arg0, %arg1) {
32 // Valid: Test convolution i8xi8 -> i32.
36 %result = "stablehlo.convolution"(%arg0, %arg1) {
61 // CHECK: stablehlo.convolution
67 %0 = stablehlo.convolution(%arg0, %arg1)
86 %1 = "stablehlo.convolution"(%arg0, %arg1) {
104 …// expected-error@+1 {{expects convolution arguments to have same number of dimensions. Got: 'tens…
105 %0 = stablehlo.convolution(%arg0, %arg1)
122 …// expected-error@+1 {{expects convolution arguments to have >= 2 dimensions. Got: 'tensor<1xf32>'…
[all …]
/external/tensorflow/tensorflow/compiler/xla/mlir_hlo/tests/Dialect/mhlo/
verifier_conv_op.mlir
5 // Valid: Generic convolution
9 %result = "mhlo.convolution"(%arg0, %arg1) {
32 // Valid: Test convolution i8xi8 -> i32.
36 %result = "mhlo.convolution"(%arg0, %arg1) {
61 // CHECK: mhlo.convolution
67 %0 = mhlo.convolution(%arg0, %arg1)
86 %1 = "mhlo.convolution"(%arg0, %arg1) {
104 …// expected-error@+1 {{expects convolution arguments to have same number of dimensions. Got: 'tens…
105 %0 = mhlo.convolution(%arg0, %arg1)
122 …// expected-error@+1 {{expects convolution arguments to have >= 2 dimensions. Got: 'tensor<1xf32>'…
[all …]
/external/tensorflow/tensorflow/compiler/xla/tests/
isolated_convolution.hlo
1 HloModule convolution.167:
3 ENTRY %convolution.167 (parameter.0: f32[16,28,28,128], parameter.1: f32[3,3,128,128]) -> f32[16,28…
6 …ROOT %convolution.167 = f32[16,28,28,128]{3,0,2,1} convolution(f32[16,28,28,128]{3,0,2,1} %paramet…
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/g3doc/
space_to_depth.md
9 convolution in the new MLIR bridge to improve MXU efficiency of low batch size
21 transposed to the shape that the convolution emitter expects. The input also
23 convolution efficient. Although a 2x2 space-to-depth transform works only when
24 the first convolution has a stride of 2, many image models, ResNet-like in
25 particular, have a stride-2 convolution in the first layer.
28 speedup and reduce memory usage in the first convolution.
30 The first convolution in many image models, including ResNet or ResNet-like, is
31 a (kernel=7, stride=2) 2D convolution. The input of the convolution is images,
32 which usually has RGB channels. The input of this first convolution is of shape
35 convolution's input to [batch\_size, height // stride, width // stride, 3 \*
[all …]
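The space_to_depth.md excerpt above describes rearranging a stride-2 first convolution's input from [batch, height, width, 3] to [batch, height // stride, width // stride, 3 * stride * stride]. The following is a minimal NumPy sketch of that 2x2 space-to-depth rearrangement, assuming an NHWC layout; the function name and sample shapes are illustrative, not taken from the TensorFlow sources listed here.

import numpy as np

def space_to_depth_nhwc(x, block=2):
    """Rearrange [N, H, W, C] -> [N, H // block, W // block, C * block * block]."""
    n, h, w, c = x.shape
    assert h % block == 0 and w % block == 0, "spatial dims must be divisible by block"
    # Split each spatial dimension into (outer, block) pairs ...
    x = x.reshape(n, h // block, block, w // block, block, c)
    # ... move the two block axes next to the channel axis ...
    x = x.transpose(0, 1, 3, 2, 4, 5)
    # ... and fold them into channels.
    return x.reshape(n, h // block, w // block, c * block * block)

# Example: an RGB batch shaped like the first-layer input described above.
images = np.zeros((16, 224, 224, 3), dtype=np.float32)
print(space_to_depth_nhwc(images).shape)  # (16, 112, 112, 12)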
/external/mesa3d/src/gallium/drivers/etnaviv/
etnaviv_ml_tp.c
724 struct etna_operation *convolution, in etna_ml_lower_detranspose() argument
732 operation->input_width = convolution->output_width; in etna_ml_lower_detranspose()
733 operation->input_height = convolution->output_height; in etna_ml_lower_detranspose()
734 operation->input_channels = convolution->output_channels; in etna_ml_lower_detranspose()
735 operation->input_zero_point = convolution->output_zero_point; in etna_ml_lower_detranspose()
736 operation->input_scale = convolution->output_scale; in etna_ml_lower_detranspose()
741 operation->output_tensors[0] = convolution->output_tensors[0]; in etna_ml_lower_detranspose()
743 operation->output_width = convolution->output_width; in etna_ml_lower_detranspose()
744 operation->output_height = convolution->output_height; in etna_ml_lower_detranspose()
745 operation->output_channels = convolution->output_channels; in etna_ml_lower_detranspose()
[all …]
/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/
atrous_convolution_test.py
100 y1 = nn_ops.convolution(
102 y2 = nn_ops.convolution(input=x, filter=filters_upsampled, **kwargs)
113 y = nn_ops.convolution(
121 y = nn_ops.convolution(
219 result = nn_ops.convolution(
221 result = nn_ops.convolution(
229 y1 = nn_ops.convolution(
234 y1 = nn_ops.convolution(
255 output = nn_ops.convolution(
/external/pytorch/torch/csrc/jit/passes/
graph_rewrite_helper.cpp
65 std::string convolution = R"( in replaceConvolutionWithAtenConv() local
226 rewriter_conv1d.RegisterRewritePattern(convolution, conv1d); in replaceConvolutionWithAtenConv()
232 rewriter_conv2d.RegisterRewritePattern(convolution, conv2d); in replaceConvolutionWithAtenConv()
238 rewriter_conv3d.RegisterRewritePattern(convolution, conv3d); in replaceConvolutionWithAtenConv()
245 convolution, conv_transpose1d); in replaceConvolutionWithAtenConv()
252 convolution, conv_transpose2d); in replaceConvolutionWithAtenConv()
259 convolution, conv_transpose3d); in replaceConvolutionWithAtenConv()
/external/pytorch/torch/_inductor/kernel/
conv.py
354 torch.convolution,
357 op_overload=aten.convolution.default,
393 output = torch.ops.aten.convolution(
444 @register_lowering(aten.convolution)
445 def convolution( function
485 convolution(L[aten.expand](x, [1, *x.get_size()]), weight, bias, **kwargs),
525 result = convolution(x, weight, None, **kwargs)
666 return convolution(
672 assert fx_node.target == torch.ops.aten.convolution.default
679 add_layout_constraint(aten.convolution, constrain_conv_to_fx_strides)
/external/pytorch/torch/_inductor/fx_passes/
binary_folding.py
49 op="call_function", target=aten.convolution.default
59 for node in graph.find_nodes(op="call_function", target=aten.convolution.default):
84 _computation_ops = [aten.convolution.default]
85 _computation_calls = [CallFunction(aten.convolution.default, *_conv_args, _users=1)]
184 if binary_node.args[0].target == aten.convolution.default:
212 assert conv_node.target == aten.convolution.default
269 assert computation_node.target == aten.convolution.default
