Home
last modified time | relevance | path

Searched refs:convolution (Results 1 – 25 of 86) sorted by relevance

1234

/external/tensorflow/tensorflow/compiler/xla/service/
Dconvolution_group_converter.cc51 Status HandleConvolution(HloInstruction* convolution) override;
53 Status HandleBatchGroupCount(HloInstruction* convolution);
201 Status ConvolutionVisitor::HandleBatchGroupCount(HloInstruction* convolution) { in HandleBatchGroupCount() argument
202 auto dim_numbers = convolution->convolution_dimension_numbers(); in HandleBatchGroupCount()
203 auto activation = convolution->mutable_operand(0); in HandleBatchGroupCount()
204 auto filter = convolution->mutable_operand(1); in HandleBatchGroupCount()
205 int64 batch_group_count = convolution->batch_group_count(); in HandleBatchGroupCount()
212 << " for convolution " << convolution->ToString() << "\n"; in HandleBatchGroupCount()
227 if (!is_cost_viable_(convolution) || filter_expansion_) { in HandleBatchGroupCount()
235 GetExpandedFilterMask(convolution->shape(), output_batch_dimension, in HandleBatchGroupCount()
[all …]
Dtranspose_folding.cc57 const HloInstruction& convolution, in CanFoldOperandsIntoConvolution() argument
60 if (HloOpcode::kConvolution != convolution.opcode()) { in CanFoldOperandsIntoConvolution()
65 for (int64 i = 0; i < convolution.operand_count(); ++i) { in CanFoldOperandsIntoConvolution()
66 auto& operand = *convolution.operand(i); in CanFoldOperandsIntoConvolution()
72 return transposable_conv_operands(convolution, operand_set); in CanFoldOperandsIntoConvolution()
120 auto& convolution = *pair.first; in FoldTransposeIntoConvolution() local
128 convolution.convolution_dimension_numbers(); in FoldTransposeIntoConvolution()
134 HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx); in FoldTransposeIntoConvolution()
151 new_lhs = convolution.mutable_operand(kLhsIdx); in FoldTransposeIntoConvolution()
157 HloInstruction& transpose = *convolution.mutable_operand(kRhsIdx); in FoldTransposeIntoConvolution()
[all …]
Dhlo_cost_analysis.cc434 Status HloCostAnalysis::HandleConvolution(const HloInstruction* convolution) { in HandleConvolution() argument
435 auto lhs = convolution->operand(0); in HandleConvolution()
436 auto rhs = convolution->operand(1); in HandleConvolution()
437 Window window = convolution->window(); in HandleConvolution()
438 const auto& result_shape = convolution->shape(); in HandleConvolution()
442 const auto& dnums = convolution->convolution_dimension_numbers(); in HandleConvolution()
527 const int64 fma_count = (input_feature / convolution->feature_group_count()) * in HandleConvolution()
529 (batch / convolution->batch_group_count()) * in HandleConvolution()
Dalgebraic_simplifier.cc202 Status HandleConvolution(HloInstruction* convolution) override;
419 StatusOr<bool> FoldConvInputPad(HloInstruction* convolution);
420 StatusOr<bool> FoldConvFilterPad(HloInstruction* convolution);
423 StatusOr<bool> SimplifyConvToDot(HloInstruction* convolution);
3394 HloInstruction* convolution) { in FoldConvInputPad() argument
3395 auto* lhs = convolution->mutable_operand(0); in FoldConvInputPad()
3396 auto* rhs = convolution->mutable_operand(1); in FoldConvInputPad()
3397 const auto& window = convolution->window(); in FoldConvInputPad()
3399 convolution->convolution_dimension_numbers(); in FoldConvInputPad()
3452 auto new_conv = convolution->CloneWithNewOperands( in FoldConvInputPad()
[all …]
/external/eigen/bench/tensors/
Dtensor_benchmarks_cpu.cc146 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 4);
147 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 8);
148 BM_FuncWithKernelDimsCPU(convolution, 7, 1, 12);
150 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 4);
151 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 8);
152 BM_FuncWithKernelDimsCPU(convolution, 1, 7, 12);
154 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 4);
155 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 8);
156 BM_FuncWithKernelDimsCPU(convolution, 7, 4, 12);
158 BM_FuncWithKernelDimsCPU(convolution, 4, 7, 4);
[all …]
Dtensor_benchmarks_gpu.cu70 BM_FuncWithKernelDimsGPU(convolution, 7, 1);
71 BM_FuncWithKernelDimsGPU(convolution, 1, 7);
72 BM_FuncWithKernelDimsGPU(convolution, 7, 4);
73 BM_FuncWithKernelDimsGPU(convolution, 4, 7);
74 BM_FuncWithKernelDimsGPU(convolution, 7, 64);
75 BM_FuncWithKernelDimsGPU(convolution, 64, 7);
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
Dir_emission_utils.cc44 const HloInstruction& convolution, in PotentiallyImplementedAsEigenConvolution() argument
53 const Shape& input_shape = convolution.operand(0)->shape(); in PotentiallyImplementedAsEigenConvolution()
54 const Shape& kernel_shape = convolution.operand(1)->shape(); in PotentiallyImplementedAsEigenConvolution()
55 const Shape& output_shape = convolution.shape(); in PotentiallyImplementedAsEigenConvolution()
79 if (window_util::HasWindowReversal(convolution.window())) { in PotentiallyImplementedAsEigenConvolution()
84 convolution.convolution_dimension_numbers(); in PotentiallyImplementedAsEigenConvolution()
Dir_emission_utils.h27 const HloInstruction& convolution,
/external/tensorflow/tensorflow/compiler/xla/tests/
Disolated_convolution.hlo1 HloModule convolution.167:
3 ENTRY %convolution.167 (parameter.0: f32[16,28,28,128], parameter.1: f32[3,3,128,128]) -> f32[16,28…
6 …ROOT %convolution.167 = f32[16,28,28,128]{3,0,2,1} convolution(f32[16,28,28,128]{3,0,2,1} %paramet…
/external/tensorflow/tensorflow/python/kernel_tests/
Datrous_convolution_test.py104 y1 = nn_ops.convolution(
106 y2 = nn_ops.convolution(input=x, filter=filters_upsampled, **kwargs)
117 y = nn_ops.convolution(
125 y = nn_ops.convolution(
223 result = nn_ops.convolution(
225 result = nn_ops.convolution(
233 y1 = nn_ops.convolution(
238 y1 = nn_ops.convolution(
259 output = nn_ops.convolution(
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
Dbackend_configs.proto18 // Backend config for a convolution that runs through cudnn.
28 // The scaling factor multiplied with the convolution result.
31 // Below are the fields related to cuDNN's fused convolution. Refer to
34 // The requested activation (e.g. relu) after the convolution. It is with type
Dgpu_autotuning.proto1 // This is used for convolution logging. Also see
/external/tensorflow/tensorflow/core/api_def/base_api/
Dapi_def_DepthwiseConv2dNativeBackpropInput.pbtxt24 Gradients w.r.t. the output of the convolution.
33 convolution.
40 of the convolution.
69 summary: "Computes the gradients of depthwise convolution with respect to the input."
Dapi_def_DepthwiseConv2dNativeBackpropFilter.pbtxt25 Gradients w.r.t. the output of the convolution.
33 the `filter` input of the convolution.
40 of the convolution.
69 summary: "Computes the gradients of depthwise convolution with respect to the filter."
Dapi_def_Conv2DBackpropFilter.pbtxt21 Gradients w.r.t. the output of the convolution.
29 the `filter` input of the convolution.
36 of the convolution. Must be in the same order as the dimension specified with
75 summary: "Computes the gradients of convolution with respect to the filter."
Dapi_def_Conv2DBackpropInput.pbtxt21 Gradients w.r.t. the output of the convolution.
28 w.r.t. the input of the convolution.
35 of the convolution. Must be in the same order as the dimension specified with
74 summary: "Computes the gradients of convolution with respect to the input."
Dapi_def_SpaceToBatch.pbtxt95 Among others, this operation is useful for reducing atrous convolution into
96 regular convolution.
Dapi_def_FusedResizeAndPadConv2D.pbtxt50 summary: "Performs a resize and padding as a preprocess during a convolution."
53 the packing stage of a convolution, so this op allows for an optimized
Dapi_def_SpaceToBatchND.pbtxt125 Among others, this operation is useful for reducing atrous convolution into
126 regular convolution.
/external/tensorflow/tensorflow/stream_executor/
Ddnn.proto15 // Describes how a convolution input or output layer's data is formatted.
30 // Describes how a convolution filter is laid out in the memory.
63 // referred as convolution. See cuDNN cudnnConvolutionMode_t.
/external/python/cpython3/Modules/_decimal/libmpdec/literature/
Dbignum.txt6 Bignum arithmetic in libmpdec uses the scheme for fast convolution
13 The transform in a finite field can be used for convolution in the same
75 convolute.c -> do the actual fast convolution, using one of
/external/tensorflow/tensorflow/lite/g3doc/models/segmentation/
Doverview.md25 …<li>DeepLabv1: We use atrous convolution to explicitly control the resolution at which feature res…
27 …rameters to facilitate the training. In particular, we apply atrous convolution to extract outp…
28 …trarily control the resolution of extracted encoder features by atrous convolution to trade-off pr…
/external/tensorflow/tensorflow/lite/tools/optimize/testdata/
DREADME.md8 A floating point model with single convolution where all weights are
13 A floating point model with a single convolution where weights of the model
/external/tensorflow/tensorflow/lite/g3doc/guide/
Dops_version.md19 to add dilation parameters to the convolution operation.
25 * Old convolution kernels that don't support dilation are equivalent to
33 For example, the options table of convolution looks like this:
72 The original convolution parameter is as follows:
/external/tensorflow/tensorflow/core/protobuf/
Dconv_autotuning.proto1 // This is used for convolution logging. Also see

1234