/external/ComputeLibrary/src/core/CL/kernels/
CLFuseBatchNormalizationKernel.cpp
    206  add_1D_tensor_argument(idx, _input_bias, slice_1d);  in run()
    208  add_1D_tensor_argument(idx, _bn_mean, slice_1d);  in run()
    209  add_1D_tensor_argument(idx, _bn_var, slice_1d);  in run()
    216  add_1D_tensor_argument(idx, _fused_bias, slice_1d);  in run()
    220  add_1D_tensor_argument(idx, _bn_beta, slice_1d);  in run()
    224  add_1D_tensor_argument(idx, _bn_gamma, slice_1d);  in run()
CLBatchNormalizationLayerKernel.cpp
    234  add_1D_tensor_argument(idx, _mean, vector_slice);  in run()
    235  add_1D_tensor_argument(idx, _var, vector_slice);  in run()
    238  add_1D_tensor_argument(idx, _beta, vector_slice);  in run()
    242  add_1D_tensor_argument(idx, _gamma, vector_slice);  in run()
CLGenerateProposalsLayerKernel.cpp
    145  add_1D_tensor_argument(idx, _anchors, collapsed);  in run()
    146  add_1D_tensor_argument(idx, _all_anchors, collapsed);  in run()
CLQLSTMLayerNormalizationKernel.cpp
    167  add_1D_tensor_argument(idx, _weight, weight_slice);  in run()
    168  add_1D_tensor_argument(idx, _bias, weight_slice);  in run()
CLNormalizePlanarYUVLayerKernel.cpp
    188  add_1D_tensor_argument(idx, _mean, slice_in);  in run()
    189  add_1D_tensor_argument(idx, _std, slice_in);  in run()
CLReductionOperationKernel.cpp
    224  add_1D_tensor_argument(idx, _input, in_slice);  in run()
    225  add_1D_tensor_argument(idx, _output, out_slice);  in run()
CLDepthwiseConvolutionLayerNativeKernel.cpp
    408  add_1D_tensor_argument(idx, _output_multipliers, slice);  in run()
    409  add_1D_tensor_argument(idx, _output_shifts, slice);  in run()
    413  add_1D_tensor_argument(idx, _biases, slice);  in run()
CLReverseKernel.cpp
    136  add_1D_tensor_argument(idx, _axis, axis_slice);  in run()
CLRangeKernel.cpp
    135  add_1D_tensor_argument(idx, _output, window);  in run()
CLGatherKernel.cpp
    129  add_1D_tensor_argument(idx, _indices, window_collapsed);  in run()
CLFFTDigitReverseKernel.cpp
    136  add_1D_tensor_argument(idx, _idx, slice);  in run()
CLSelectKernel.cpp
    149  add_1D_tensor_argument(idx, _c, vector_slice);  in run()
CLBoundingBoxTransformKernel.cpp
    170  add_1D_tensor_argument(idx, _boxes, slice);  in run()
CLBatchToSpaceLayerKernel.cpp
    193  add_1D_tensor_argument(idx, _block_shape, vector_slice);  in run()
CLDeconvolutionReshapeOutputKernel.cpp
    199  add_1D_tensor_argument(idx, _bias, collapsed);  in run()
/external/ComputeLibrary/src/core/CL/
ICLKernel.h
    165  void add_1D_tensor_argument(unsigned int &idx, const ICLTensor *tensor, const Window &window)  in add_1D_tensor_argument() function
    180  add_1D_tensor_argument(idx, tensor, window);  in add_1D_tensor_argument_if()
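The call sites above all follow the same pattern inside a kernel's run()/run_op() override: reset an argument index, let add_1D_tensor_argument() push the 1D tensor's buffer plus its stride/offset arguments, then enqueue the slice. A minimal sketch of that idiom, assuming a hypothetical kernel class MyVectorKernel with a single 1D input _vector (the ICLKernel and Window helpers used are the ones referenced in this listing):

// Hedged sketch, not code from the repository: the pattern shared by the
// run()/run_op() call sites listed here. MyVectorKernel and _vector are
// hypothetical names; add_1D_tensor_argument(), enqueue(), lws_hint() and the
// Window slicing helpers are the ComputeLibrary APIs referenced in this listing.
#include "arm_compute/core/Window.h"
#include "src/core/CL/ICLKernel.h"

namespace arm_compute
{
class MyVectorKernel : public ICLKernel
{
public:
    void run(const Window &window, cl::CommandQueue &queue) override
    {
        Window slice = window.first_slice_window_1D();
        do
        {
            unsigned int idx = 0;                        // running OpenCL kernel-argument index
            add_1D_tensor_argument(idx, _vector, slice); // buffer + 1D stride/offset arguments
            enqueue(queue, *this, slice, lws_hint());    // launch this slice
        } while(window.slide_window_slice_1D(slice));
    }

private:
    const ICLTensor *_vector{ nullptr }; // hypothetical 1D input (e.g. a bias, mean or gamma vector)
};
} // namespace arm_compute

The kernels listed above differ only in which member tensors they push and in how the window is sliced or collapsed before the loop (slice_1d, vector_slice, collapsed, and so on).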
/external/ComputeLibrary/src/gpu/cl/kernels/
ClGemmLowpQuantizeDownInt32ScaleByFloatKernel.cpp
    151  add_1D_tensor_argument(idx1, bias, biases_slice);  in run_op()
ClGemmLowpQuantizeDownInt32ScaleByFixedPointKernel.cpp
    150  add_1D_tensor_argument(idx1, bias, biases_slice);  in run_op()
ClWeightsReshapeKernel.cpp
    153  add_1D_tensor_argument(idx, biases, biases_slice);  in run_op()
ClGemmLowpQuantizeDownInt32ScaleKernel.cpp
    148  add_1D_tensor_argument(idx1, bias, biases_slice);  in run_op()
ClDirectConv2dKernel.cpp
    472  add_1D_tensor_argument(idx, biases, slice);  in run_op()
    485  add_1D_tensor_argument(idx1, biases, slice_biases);  in run_op()
ClTransposedConvolutionKernel.cpp
    263  add_1D_tensor_argument(idx, biases, slice);  in run_op()
ClIndirectConv2dKernel.cpp
    299  add_1D_tensor_argument(idx, biases, slice);  in run_op()
ClDirectConv3dKernel.cpp
    246  add_1D_tensor_argument(idx, biases, slice);  in run_op()
/external/ComputeLibrary/src/dynamic_fusion/runtime/gpu/cl/
ClKernelRuntime.cpp
    72  add_1D_tensor_argument(idx, tensor, arg_slice);  in add_tensor_argument()