/external/tensorflow/tensorflow/compiler/xla/service/

D | convolution_group_converter.cc
     51  Status HandleConvolution(HloInstruction* convolution) override;
     53  Status HandleBatchGroupCount(HloInstruction* convolution);
    201  Status ConvolutionVisitor::HandleBatchGroupCount(HloInstruction* convolution) {  in HandleBatchGroupCount() argument
    202  auto dim_numbers = convolution->convolution_dimension_numbers();  in HandleBatchGroupCount()
    203  auto activation = convolution->mutable_operand(0);  in HandleBatchGroupCount()
    204  auto filter = convolution->mutable_operand(1);  in HandleBatchGroupCount()
    205  int64 batch_group_count = convolution->batch_group_count();  in HandleBatchGroupCount()
    212  << " for convolution " << convolution->ToString() << "\n";  in HandleBatchGroupCount()
    227  if (!is_cost_viable_(convolution) || filter_expansion_) {  in HandleBatchGroupCount()
    235  GetExpandedFilterMask(convolution->shape(), output_batch_dimension,  in HandleBatchGroupCount()
  [all …]

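The pass above rewrites a grouped convolution into a plain one by expanding the filter and zeroing cross-group products (GetExpandedFilterMask). A minimal NumPy sketch of that masking identity, shown for feature groups on a 1-D convolution; the helper names and shapes here are illustrative, not the converter's API:

    import numpy as np

    def conv1d(x, w):
        # x: [length, in_ch]; w: [taps, in_ch, out_ch]; VALID padding, stride 1.
        k = w.shape[0]
        out_len = x.shape[0] - k + 1
        return np.stack([np.einsum('kc,kco->o', x[i:i + k], w)
                         for i in range(out_len)])

    def grouped_conv1d(x, w, groups):
        # w: [taps, in_ch // groups, out_ch]; output group g sees input group g.
        gi, go = x.shape[1] // groups, w.shape[2] // groups
        return np.concatenate(
            [conv1d(x[:, g * gi:(g + 1) * gi], w[:, :, g * go:(g + 1) * go])
             for g in range(groups)], axis=1)

    rng = np.random.default_rng(0)
    groups, k, in_ch, out_ch = 2, 3, 4, 6
    x = rng.normal(size=(10, in_ch))
    w = rng.normal(size=(k, in_ch // groups, out_ch))

    # Expand the grouped filter to full input depth, zeroing cross-group taps:
    # a block-diagonal mask, which is the effect of the expanded-filter mask
    # built by the converter above.
    gi, go = in_ch // groups, out_ch // groups
    w_full = np.zeros((k, in_ch, out_ch))
    for g in range(groups):
        w_full[:, g * gi:(g + 1) * gi, g * go:(g + 1) * go] = \
            w[:, :, g * go:(g + 1) * go]

    np.testing.assert_allclose(grouped_conv1d(x, w, groups), conv1d(x, w_full))

HandleBatchGroupCount applies the same trick along the batch dimension; the is_cost_viable_ check on line 227 guards against cases where the expanded filter would be too expensive.
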
D | transpose_folding.cc
     57  const HloInstruction& convolution,  in CanFoldOperandsIntoConvolution() argument
     60  if (HloOpcode::kConvolution != convolution.opcode()) {  in CanFoldOperandsIntoConvolution()
     65  for (int64 i = 0; i < convolution.operand_count(); ++i) {  in CanFoldOperandsIntoConvolution()
     66  auto& operand = *convolution.operand(i);  in CanFoldOperandsIntoConvolution()
     72  return transposable_conv_operands(convolution, operand_set);  in CanFoldOperandsIntoConvolution()
    120  auto& convolution = *pair.first;  in FoldTransposeIntoConvolution() local
    128  convolution.convolution_dimension_numbers();  in FoldTransposeIntoConvolution()
    134  HloInstruction& transpose = *convolution.mutable_operand(kLhsIdx);  in FoldTransposeIntoConvolution()
    151  new_lhs = convolution.mutable_operand(kLhsIdx);  in FoldTransposeIntoConvolution()
    157  HloInstruction& transpose = *convolution.mutable_operand(kRhsIdx);  in FoldTransposeIntoConvolution()
  [all …]

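Folding a transpose into a convolution works because HLO convolutions carry explicit dimension numbers: instead of materializing the transposed operand, the pass relabels which operand axis plays which role. A hypothetical einsum analogue of that relabeling (a 1x1-convolution-style contraction, not this pass's code):

    import numpy as np

    rng = np.random.default_rng(0)
    x_nhwc = rng.normal(size=(2, 5, 5, 3))       # logical NHWC input
    x_hwcn = np.transpose(x_nhwc, (1, 2, 3, 0))  # a transposed copy of it
    w = rng.normal(size=(3, 4))                  # in_features -> out_features

    # "Folding" the transpose is just relabeling the subscripts, i.e. the
    # dimension numbers; no data movement is needed.
    y_from_nhwc = np.einsum('nhwc,cf->nhwf', x_nhwc, w)
    y_from_hwcn = np.einsum('hwcn,cf->nhwf', x_hwcn, w)

    np.testing.assert_allclose(y_from_nhwc, y_from_hwcn)
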
D | hlo_cost_analysis.cc
    434  Status HloCostAnalysis::HandleConvolution(const HloInstruction* convolution) {  in HandleConvolution() argument
    435  auto lhs = convolution->operand(0);  in HandleConvolution()
    436  auto rhs = convolution->operand(1);  in HandleConvolution()
    437  Window window = convolution->window();  in HandleConvolution()
    438  const auto& result_shape = convolution->shape();  in HandleConvolution()
    442  const auto& dnums = convolution->convolution_dimension_numbers();  in HandleConvolution()
    527  const int64 fma_count = (input_feature / convolution->feature_group_count()) *  in HandleConvolution()
    529  (batch / convolution->batch_group_count()) *  in HandleConvolution()

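Lines 527/529 compute the convolution's FMA count, dividing the input-feature and batch reductions across feature/batch groups. A back-of-the-envelope Python version; the factors elided by the truncated search result (output features, output positions, kernel taps) are filled in from the standard convolution cost model, so treat them as an assumption:

    from math import prod

    def conv_fma_count(batch, out_spatial, kernel_spatial, in_features,
                       out_features, feature_group_count=1,
                       batch_group_count=1):
        # One fused multiply-add per (output element, kernel tap, grouped
        # input feature); the two divisions mirror lines 527/529 above.
        return ((in_features // feature_group_count) * out_features *
                (batch // batch_group_count) *
                prod(out_spatial) * prod(kernel_spatial))

    # The 16x28x28x128 input, 3x3x128x128 filter module listed under
    # isolated_convolution.hlo below:
    fmas = conv_fma_count(batch=16, out_spatial=(28, 28),
                          kernel_spatial=(3, 3), in_features=128,
                          out_features=128)
    print(fmas)      # 1849688064, i.e. ~1.85e9 FMAs
    print(2 * fmas)  # XLA reports flops = kFmaFlops (2) * fma_count
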
D | algebraic_simplifier.cc
     202  Status HandleConvolution(HloInstruction* convolution) override;
     419  StatusOr<bool> FoldConvInputPad(HloInstruction* convolution);
     420  StatusOr<bool> FoldConvFilterPad(HloInstruction* convolution);
     423  StatusOr<bool> SimplifyConvToDot(HloInstruction* convolution);
    3394  HloInstruction* convolution) {  in FoldConvInputPad() argument
    3395  auto* lhs = convolution->mutable_operand(0);  in FoldConvInputPad()
    3396  auto* rhs = convolution->mutable_operand(1);  in FoldConvInputPad()
    3397  const auto& window = convolution->window();  in FoldConvInputPad()
    3399  convolution->convolution_dimension_numbers();  in FoldConvInputPad()
    3452  auto new_conv = convolution->CloneWithNewOperands(  in FoldConvInputPad()
  [all …]

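FoldConvInputPad absorbs an explicit pad feeding a convolution into the convolution's own window padding (skipping the cases the pass checks for, such as negative or interior padding). A TF-level sketch of the same rewrite, assuming NHWC shapes chosen for illustration:

    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.random.default_rng(0).normal(size=(1, 5, 5, 3)),
                    tf.float32)
    w = tf.constant(np.random.default_rng(1).normal(size=(3, 3, 3, 8)),
                    tf.float32)

    # An explicit pad feeding a VALID convolution ...
    padded = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]])
    y1 = tf.nn.conv2d(padded, w, strides=1, padding='VALID')

    # ... equals one convolution with the padding folded into its window.
    y2 = tf.nn.conv2d(x, w, strides=1,
                      padding=[[0, 0], [1, 1], [1, 1], [0, 0]])

    np.testing.assert_allclose(y1.numpy(), y2.numpy(), rtol=1e-5, atol=1e-5)
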
/external/eigen/bench/tensors/

D | tensor_benchmarks_cpu.cc
    146  BM_FuncWithKernelDimsCPU(convolution, 7, 1, 4);
    147  BM_FuncWithKernelDimsCPU(convolution, 7, 1, 8);
    148  BM_FuncWithKernelDimsCPU(convolution, 7, 1, 12);
    150  BM_FuncWithKernelDimsCPU(convolution, 1, 7, 4);
    151  BM_FuncWithKernelDimsCPU(convolution, 1, 7, 8);
    152  BM_FuncWithKernelDimsCPU(convolution, 1, 7, 12);
    154  BM_FuncWithKernelDimsCPU(convolution, 7, 4, 4);
    155  BM_FuncWithKernelDimsCPU(convolution, 7, 4, 8);
    156  BM_FuncWithKernelDimsCPU(convolution, 7, 4, 12);
    158  BM_FuncWithKernelDimsCPU(convolution, 4, 7, 4);
  [all …]

D | tensor_benchmarks_gpu.cu
     70  BM_FuncWithKernelDimsGPU(convolution, 7, 1);
     71  BM_FuncWithKernelDimsGPU(convolution, 1, 7);
     72  BM_FuncWithKernelDimsGPU(convolution, 7, 4);
     73  BM_FuncWithKernelDimsGPU(convolution, 4, 7);
     74  BM_FuncWithKernelDimsGPU(convolution, 7, 64);
     75  BM_FuncWithKernelDimsGPU(convolution, 64, 7);

/external/tensorflow/tensorflow/compiler/xla/service/cpu/

D | ir_emission_utils.cc
     44  const HloInstruction& convolution,  in PotentiallyImplementedAsEigenConvolution() argument
     53  const Shape& input_shape = convolution.operand(0)->shape();  in PotentiallyImplementedAsEigenConvolution()
     54  const Shape& kernel_shape = convolution.operand(1)->shape();  in PotentiallyImplementedAsEigenConvolution()
     55  const Shape& output_shape = convolution.shape();  in PotentiallyImplementedAsEigenConvolution()
     79  if (window_util::HasWindowReversal(convolution.window())) {  in PotentiallyImplementedAsEigenConvolution()
     84  convolution.convolution_dimension_numbers();  in PotentiallyImplementedAsEigenConvolution()

D | ir_emission_utils.h
     27  const HloInstruction& convolution,

/external/tensorflow/tensorflow/compiler/xla/tests/

D | isolated_convolution.hlo
      1  HloModule convolution.167:
      3  ENTRY %convolution.167 (parameter.0: f32[16,28,28,128], parameter.1: f32[3,3,128,128]) -> f32[16,28…
      6  …ROOT %convolution.167 = f32[16,28,28,128]{3,0,2,1} convolution(f32[16,28,28,128]{3,0,2,1} %paramet…

/external/tensorflow/tensorflow/python/kernel_tests/

D | atrous_convolution_test.py
    104  y1 = nn_ops.convolution(
    106  y2 = nn_ops.convolution(input=x, filter=filters_upsampled, **kwargs)
    117  y = nn_ops.convolution(
    125  y = nn_ops.convolution(
    223  result = nn_ops.convolution(
    225  result = nn_ops.convolution(
    233  y1 = nn_ops.convolution(
    238  y1 = nn_ops.convolution(
    259  output = nn_ops.convolution(

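The test exercises the identity behind filters_upsampled on line 106: a dilated (atrous) convolution equals an ordinary convolution with a zero-stuffed filter. A sketch using the TF2 entry point (the test itself uses the TF1 kwargs input=/filter=/dilation_rate=):

    import numpy as np
    import tensorflow as tf

    x = tf.constant(np.random.default_rng(0).normal(size=(1, 9, 9, 1)),
                    tf.float32)
    f = np.random.default_rng(1).normal(size=(3, 3, 1, 1)).astype(np.float32)

    # Zero-stuff the filter: rate-2 dilation inserts one zero between taps,
    # turning a 3x3 kernel into an effective 5x5 one.
    f_up = np.zeros((5, 5, 1, 1), np.float32)
    f_up[::2, ::2] = f

    y1 = tf.nn.convolution(x, f, dilations=[2, 2], padding='VALID')
    y2 = tf.nn.convolution(x, f_up, padding='VALID')
    np.testing.assert_allclose(y1.numpy(), y2.numpy(), rtol=1e-5, atol=1e-5)
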
/external/tensorflow/tensorflow/compiler/xla/service/gpu/

D | backend_configs.proto
     18  // Backend config for a convolution that runs through cudnn.
     28  // The scaling factor multiplied with the convolution result.
     31  // Below are the fields related to cuDNN's fused convolution. Refer to
     34  // The requested activation (e.g. relu) after the convolution. It is with type

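The fused-convolution fields describe cuDNN's epilogue, which (per these comments and cudnnConvolutionBiasActivationForward) is roughly activation(conv_result_scale * conv + side_input_scale * side_input + bias). A NumPy reference of that contract; the arithmetic below is the assumption here, not code from the proto:

    import numpy as np

    def fused_conv_reference(conv_result, side_input, bias,
                             conv_result_scale=1.0, side_input_scale=0.0,
                             activation=lambda t: np.maximum(t, 0.0)):
        # conv_result: the plain convolution output, NHWC; bias: per-channel.
        # The default activation is relu, matching the comment on line 34.
        return activation(conv_result_scale * conv_result +
                          side_input_scale * side_input + bias)

    conv = np.ones((1, 4, 4, 8))
    side = np.zeros_like(conv)
    bias = np.full((8,), 0.5)
    out = fused_conv_reference(conv, side, bias, conv_result_scale=2.0)
    print(out.max())  # 2.5 == relu(2.0 * 1.0 + 0.0 + 0.5)
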
D | gpu_autotuning.proto
      1  // This is used for convolution logging. Also see

/external/tensorflow/tensorflow/core/api_def/base_api/

D | api_def_DepthwiseConv2dNativeBackpropInput.pbtxt
     24  Gradients w.r.t. the output of the convolution.
     33  convolution.
     40  of the convolution.
     69  summary: "Computes the gradients of depthwise convolution with respect to the input."

D | api_def_DepthwiseConv2dNativeBackpropFilter.pbtxt
     25  Gradients w.r.t. the output of the convolution.
     33  the `filter` input of the convolution.
     40  of the convolution.
     69  summary: "Computes the gradients of depthwise convolution with respect to the filter."

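Both backprop ops above are what autodiff emits for tf.nn.depthwise_conv2d; a short GradientTape sketch that bottoms out in DepthwiseConv2dNativeBackpropInput / ...BackpropFilter (shapes chosen for illustration):

    import tensorflow as tf

    x = tf.random.normal([1, 8, 8, 3])
    f = tf.Variable(tf.random.normal([3, 3, 3, 2]))  # channel_multiplier=2

    with tf.GradientTape(persistent=True) as tape:
        tape.watch(x)
        y = tf.nn.depthwise_conv2d(x, f, strides=[1, 1, 1, 1], padding='SAME')
        loss = tf.reduce_sum(y)

    dx = tape.gradient(loss, x)  # DepthwiseConv2dNativeBackpropInput
    df = tape.gradient(loss, f)  # DepthwiseConv2dNativeBackpropFilter
    print(dx.shape, df.shape)    # (1, 8, 8, 3) (3, 3, 3, 2)
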
D | api_def_Conv2DBackpropFilter.pbtxt
     21  Gradients w.r.t. the output of the convolution.
     29  the `filter` input of the convolution.
     36  of the convolution. Must be in the same order as the dimension specified with
     75  summary: "Computes the gradients of convolution with respect to the filter."

D | api_def_Conv2DBackpropInput.pbtxt
     21  Gradients w.r.t. the output of the convolution.
     28  w.r.t. the input of the convolution.
     35  of the convolution. Must be in the same order as the dimension specified with
     74  summary: "Computes the gradients of convolution with respect to the input."

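Conv2DBackpropInput is also the kernel behind tf.nn.conv2d_transpose, so a convolution's input gradient can be checked against a transposed convolution with the same filter. A sketch, assuming stride 1 and SAME padding:

    import numpy as np
    import tensorflow as tf

    x = tf.random.normal([1, 8, 8, 4])
    f = tf.random.normal([3, 3, 4, 6])

    with tf.GradientTape() as tape:
        tape.watch(x)
        y = tf.nn.conv2d(x, f, strides=1, padding='SAME')
        loss = tf.reduce_sum(y * y) / 2.0   # so d(loss)/dy == y

    dx = tape.gradient(loss, x)             # lowers to Conv2DBackpropInput

    # The same quantity via the transposed-convolution identity:
    dx_ref = tf.nn.conv2d_transpose(y, f, output_shape=tf.shape(x),
                                    strides=1, padding='SAME')
    np.testing.assert_allclose(dx.numpy(), dx_ref.numpy(),
                               rtol=1e-4, atol=1e-4)
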
D | api_def_SpaceToBatch.pbtxt
     95  Among others, this operation is useful for reducing atrous convolution into
     96  regular convolution.

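The reduction this doc mentions (repeated under api_def_SpaceToBatchND.pbtxt below) is the identity tf.nn.atrous_conv2d is documented to use: space_to_batch, an ordinary convolution, then batch_to_space. A sketch, assuming the spatial dims divide the rate so the paddings/crops are zero:

    import numpy as np
    import tensorflow as tf

    rate = 2
    x = tf.random.normal([1, 8, 8, 3])
    f = tf.random.normal([3, 3, 3, 5])

    y1 = tf.nn.atrous_conv2d(x, f, rate=rate, padding='VALID')

    # The same computation via SpaceToBatch / BatchToSpace: the rate-strided
    # sub-grids become batch entries, so a plain convolution over them is a
    # dilated convolution over the original image.
    x_b = tf.space_to_batch(x, block_shape=[rate, rate],
                            paddings=[[0, 0], [0, 0]])
    y_b = tf.nn.conv2d(x_b, f, strides=1, padding='VALID')
    y2 = tf.batch_to_space(y_b, block_shape=[rate, rate],
                           crops=[[0, 0], [0, 0]])

    np.testing.assert_allclose(y1.numpy(), y2.numpy(), rtol=1e-4, atol=1e-4)
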
D | api_def_FusedResizeAndPadConv2D.pbtxt
     50  summary: "Performs a resize and padding as a preprocess during a convolution."
     53  the packing stage of a convolution, so this op allows for an optimized

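A sketch of the unfused pipeline this op replaces, built from standard TF ops: resize, mirror-pad, then convolve. The fused kernel computes the same result without materializing the intermediates; the shapes below are illustrative:

    import tensorflow as tf

    x = tf.random.normal([1, 16, 16, 3])
    f = tf.random.normal([3, 3, 3, 8])

    resized = tf.image.resize(x, [32, 32], method='bilinear')
    padded = tf.pad(resized, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')
    y = tf.nn.conv2d(padded, f, strides=1, padding='VALID')
    print(y.shape)  # (1, 32, 32, 8)
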
D | api_def_SpaceToBatchND.pbtxt
    125  Among others, this operation is useful for reducing atrous convolution into
    126  regular convolution.

/external/tensorflow/tensorflow/stream_executor/

D | dnn.proto
     15  // Describes how a convolution input or output layer's data is formatted.
     30  // Describes how a convolution filter is laid out in the memory.
     63  // referred to as convolution. See cuDNN cudnnConvolutionMode_t.

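Line 63 is about cudnnConvolutionMode_t: true convolution flips the kernel, while what most ML frameworks call "convolution" is cross-correlation. In NumPy terms:

    import numpy as np

    def cross_correlate(x, k):
        # Slide the kernel without flipping it (the ML-framework default).
        n = len(x) - len(k) + 1
        return np.array([np.dot(x[i:i + len(k)], k) for i in range(n)])

    x = np.arange(8.0)
    k = np.array([1.0, 2.0, 3.0])

    # True convolution == cross-correlation with a reversed kernel.
    np.testing.assert_allclose(cross_correlate(x, k[::-1]),
                               np.convolve(x, k, mode='valid'))
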
/external/python/cpython3/Modules/_decimal/libmpdec/literature/

D | bignum.txt
      6  Bignum arithmetic in libmpdec uses the scheme for fast convolution
     13  The transform in a finite field can be used for convolution in the same
     75  convolute.c -> do the actual fast convolution, using one of

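The scheme bignum.txt describes treats each operand as a coefficient (digit) vector: multiplication is a convolution of those vectors followed by carry propagation, and the number-theoretic transform in a finite field makes the convolution fast. A plain-Python sketch of the convolution-plus-carries part (base 10 for readability; libmpdec uses a much larger radix and the fast transforms in convolute.c):

    def multiply_via_convolution(a, b, base=10):
        # Least-significant digit first.
        da = [int(d) for d in str(a)][::-1]
        db = [int(d) for d in str(b)][::-1]
        conv = [0] * (len(da) + len(db) - 1)
        for i, u in enumerate(da):          # O(n^2) schoolbook convolution;
            for j, v in enumerate(db):      # the NTT brings this to O(n log n)
                conv[i + j] += u * v
        digits, carry = [], 0
        for c in conv:                      # carry propagation
            carry, d = divmod(c + carry, base)
            digits.append(d)
        while carry:
            carry, d = divmod(carry, base)
            digits.append(d)
        return int(''.join(map(str, digits[::-1])))

    assert multiply_via_convolution(123456789, 987654321) == \
        123456789 * 987654321
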
/external/tensorflow/tensorflow/lite/g3doc/models/segmentation/

D | overview.md
     25  …<li>DeepLabv1: We use atrous convolution to explicitly control the resolution at which feature res…
     27  …rameters to facilitate the training. In particular, we apply atrous convolution to extract outp…
     28  …trarily control the resolution of extracted encoder features by atrous convolution to trade-off pr…

/external/tensorflow/tensorflow/lite/tools/optimize/testdata/

D | README.md
      8  A floating point model with a single convolution where all weights are
     13  A floating point model with a single convolution where weights of the model

/external/tensorflow/tensorflow/lite/g3doc/guide/

D | ops_version.md
     19  to add dilation parameters to the convolution operation.
     25  * Old convolution kernels that don't support dilation are equivalent to
     33  For example, the options table of convolution looks like this:
     72  The original convolution parameter is as follows:

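A sketch of the version-selection rule that doc describes for adding dilation to convolution; the helper below is hypothetical, but it mirrors the stated semantics (the default dilation stays at version 1 so old kernels can still run such models):

    def conv2d_op_version(dilation_w=1, dilation_h=1):
        # Per the versioning scheme above: only the dilated form needs the
        # newer kernel; dilation (1, 1) is readable by pre-dilation kernels.
        return 2 if (dilation_w, dilation_h) != (1, 1) else 1

    assert conv2d_op_version() == 1
    assert conv2d_op_version(dilation_w=2) == 2
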
/external/tensorflow/tensorflow/core/protobuf/

D | conv_autotuning.proto
      1  // This is used for convolution logging. Also see