/packages/modules/NeuralNetworks/common/operations/
D | Reshape.cpp |
    35   const Shape& outputShape) {    in copyData() argument
    44   T* outputData, const Shape& outputShape) {    in depthToSpaceGeneric() argument
    47   outputData, convertShapeToDims(outputShape));    in depthToSpaceGeneric()
    52   const Shape& outputShape);
    55   const Shape& outputShape);
    58   const Shape& outputShape);
    61   const Shape& outputShape);
    65   T* outputData, const Shape& outputShape) {    in spaceToDepthGeneric() argument
    68   outputData, convertShapeToDims(outputShape));    in spaceToDepthGeneric()
    73   const Shape& outputShape);
    [all …]

D | Activation.cpp |
    53   bool reluFloat(const T* inputData, const Shape& inputShape, T* outputData, const Shape& outputShape,    in reluFloat() argument
    64   const Shape& outputShape, float reluMin, float reluMax);
    66   _Float16* outputData, const Shape& outputShape, float reluMin,
    71   const Shape& outputShape) {    in relu1Float() argument
    72   return reluFloat(inputData, inputShape, outputData, outputShape, -1.f, 1.f);    in relu1Float()
    75   const Shape& outputShape);
    77   _Float16* outputData, const Shape& outputShape);
    81   const Shape& outputShape) {    in relu6Float() argument
    82   return reluFloat(inputData, inputShape, outputData, outputShape, 0.f, 6.f);    in relu6Float()
    85   const Shape& outputShape);
    [all …]

D | Pooling.cpp |
    142  float* outputData, const Shape& outputShape) {    in averagePoolNhwc() argument
    144  auto op_params = param.toTfliteParam(outputShape);    in averagePoolNhwc()
    147  convertShapeToTflshape(outputShape), outputData);    in averagePoolNhwc()
    152  _Float16* outputData, const Shape& outputShape) {    in averagePoolNhwc() argument
    155  std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));    in averagePoolNhwc()
    159  outputShape);    in averagePoolNhwc()
    165  uint8_t* outputData, const Shape& outputShape) {    in averagePoolNhwc() argument
    167  auto op_params = param.toTfliteParam(outputShape);    in averagePoolNhwc()
    170  convertShapeToTflshape(outputShape), outputData);    in averagePoolNhwc()
    175  int8_t* outputData, const Shape& outputShape) {    in averagePoolNhwc() argument
    [all …]

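The _Float16 overload of averagePoolNhwc (line 152 onward) shows a pattern that recurs throughout this listing (FullyConnected, L2Normalization, Softmax, LocalResponseNormalization, SimpleMath): widen the half-precision tensors into float32 scratch buffers, run the float32 kernel, then narrow the result back. Below is a minimal, self-contained sketch of that pattern only; the elementwise stand-in kernel and the inline conversions are assumptions for illustration, not the module's actual helpers.

    #include <cstdint>
    #include <vector>

    // Stand-in for the float32 kernel; the real code dispatches to an
    // optimized float32 pooling routine instead.
    static void kernelFloat32(const float* in, uint32_t count, float* out) {
        for (uint32_t i = 0; i < count; ++i) out[i] = in[i] > 0.f ? in[i] : 0.f;
    }

    // Sketch of the fp16 wrapper pattern: widen -> run float32 kernel -> narrow.
    static bool kernelFloat16(const _Float16* inputData, uint32_t count, _Float16* outputData) {
        std::vector<float> inputFloat32(count);
        std::vector<float> outputFloat32(count);
        for (uint32_t i = 0; i < count; ++i) {
            inputFloat32[i] = static_cast<float>(inputData[i]);      // widen fp16 -> fp32
        }
        kernelFloat32(inputFloat32.data(), count, outputFloat32.data());
        for (uint32_t i = 0; i < count; ++i) {
            outputData[i] = static_cast<_Float16>(outputFloat32[i]); // narrow back to fp16
        }
        return true;
    }

The cost is one extra copy of the input and output, which keeps every operation needing only a single float32 kernel.
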
D | MaximumMinimum.cpp |
    36   bool isMinimum, T* outputData, const Shape& outputShape) {    in evalGeneric() argument
    39   IndexedShapeWrapper outputShapeIndexed(outputShape);    in evalGeneric()
    41   std::vector<uint32_t> curIndex(outputShape.dimensions.size(), 0);    in evalGeneric()
    62   bool isMinimum, T* outputData, const Shape& outputShape) {    in evalQuant8() argument
    65   IndexedShapeWrapper outputShapeIndexed(outputShape);    in evalQuant8()
    67   std::vector<uint32_t> curIndex(outputShape.dimensions.size(), 0);    in evalQuant8()
    77   T aValue = requantize<T>(aData[aFlatIndex], aShape, outputShape);    in evalQuant8()
    78   T bValue = requantize<T>(bData[bFlatIndex], bShape, outputShape);    in evalQuant8()
    96   bool isMinimum, void* output, const Shape& outputShape) {    in eval() argument
    102  reinterpret_cast<_Float16*>(output), outputShape);    in eval()
    [all …]

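evalQuant8 compares the two quantized operands only after re-expressing both in the output tensor's scale and zero point (lines 77-78). A plausible reading of that requantize step, written as a self-contained sketch with a simplified QuantParams struct standing in for the scale/offset carried by Shape:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Simplified stand-in for the quantization parameters carried by Shape.
    struct QuantParams {
        float scale;
        int32_t zeroPoint;
    };

    // Sketch of requantize<T>: decode a value with its own parameters, re-encode it
    // with the output's parameters, and saturate to T's representable range.
    template <typename T>
    T requantize(T value, const QuantParams& in, const QuantParams& out) {
        const float real = (static_cast<int32_t>(value) - in.zeroPoint) * in.scale;
        const int32_t q = out.zeroPoint + static_cast<int32_t>(std::round(real / out.scale));
        const int32_t lo = std::numeric_limits<T>::min();
        const int32_t hi = std::numeric_limits<T>::max();
        return static_cast<T>(std::min(hi, std::max(lo, q)));
    }

Once both operands live in the output's quantized domain, the min/max decision can be taken directly on the integer values.
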
D | Conv2D.cpp |
    138  uint32_t outHeight = getSizeOfDimension(outputShape, 1); \
    139  uint32_t outWidth = getSizeOfDimension(outputShape, 2); \
    146  im2colDim.sizes[3] = (int)getSizeOfDimension(outputShape, 0); \
    147  im2colDim.sizes[2] = (int)getSizeOfDimension(outputShape, 1); \
    148  im2colDim.sizes[1] = (int)getSizeOfDimension(outputShape, 2); \
    199  float* outputData, const Shape& outputShape) {    in convNhwc() argument
    219  convertShapeToDims(outputShape), need_im2colData ? im2colData : nullptr, im2colDim);    in convNhwc()
    228  uint8_t* outputData, const Shape& outputShape) {    in convNhwc() argument
    235  int32_t outputOffset = outputShape.offset;    in convNhwc()
    243  NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,    in convNhwc()
    [all …]

D | SimpleMath.cpp |
    35   const Shape& outputShape) {    in meanFloat16() argument
    40   std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));    in meanFloat16()
    42   outputDataFloat32.data(), outputShape);    in meanFloat16()
    49   bool keepDims, T* outputData, const Shape& outputShape) {    in meanGeneric() argument
    59   U* tempSumBuffer = new (std::nothrow) U[getNumberOfElements(outputShape)];    in meanGeneric()
    68   reinterpret_cast<const int*>(outputShape.dimensions.data()),    in meanGeneric()
    69   getNumberOfDimensions(outputShape), axis, axisSize, keepDims, scratchBuffer,    in meanGeneric()
    79   float* outputData, const Shape& outputShape);
    83   const Shape& outputShape);
    87   const Shape& outputShape);

D | FullyConnected.cpp |
    59   float* outputData, const Shape& outputShape) {    in fullyConnectedFloat32() argument
    66   uint32_t batch_size = getSizeOfDimension(outputShape, 0);    in fullyConnectedFloat32()
    74   outputData, convertShapeToDims(outputShape));    in fullyConnectedFloat32()
    81   outputData, convertShapeToDims(outputShape));    in fullyConnectedFloat32()
    89   _Float16* outputData, const Shape& outputShape) {    in fullyConnectedFloat16() argument
    98   std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));    in fullyConnectedFloat16()
    101  outputDataFloat32.data(), outputShape);    in fullyConnectedFloat16()
    110  uint8_t* outputData, const Shape& outputShape) {    in fullyConnectedQuant8() argument
    114  int32_t outputOffset = outputShape.offset;    in fullyConnectedQuant8()
    122  NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, weightsShape, biasShape, outputShape,    in fullyConnectedQuant8()
    [all …]

D | GroupedConv2D.cpp |
    41   uint32_t outputHeight = getSizeOfDimension(outputShape, 1); \
    42   uint32_t outputWidth = getSizeOfDimension(outputShape, 2); \
    43   uint32_t outputDepth = getSizeOfDimension(outputShape, 3); \
    51   const Shape& outputShape) {    in groupedConvFloat32() argument
    109  const Shape& outputShape) {    in groupedConvQuant8() argument
    115  int32_t outputOffset = outputShape.offset;    in groupedConvQuant8()
    120  NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,    in groupedConvQuant8()
    127  CalculateActivationRange<T>(activation, outputShape, &output_activation_min,    in groupedConvQuant8()
    188  const Shape& outputShape);
    197  const Shape& outputShape);
    [all …]

D | DepthwiseConv2D.cpp |
    127  uint32_t outHeight = getSizeOfDimension(outputShape, 1); \
    128  uint32_t outWidth = getSizeOfDimension(outputShape, 2); \
    139  const Shape& outputShape) {    in depthwiseConvNhwc() argument
    162  convertShapeToTflshape(outputShape), outputData);    in depthwiseConvNhwc()
    173  _Float16* outputData, const Shape& outputShape) {    in depthwiseConvNhwc() argument
    182  std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));    in depthwiseConvNhwc()
    187  outputShape);    in depthwiseConvNhwc()
    199  const Shape& outputShape) {    in depthwiseConvNhwc() argument
    210  NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,    in depthwiseConvNhwc()
    215  CalculateActivationRangeUint8(activation, outputShape, &output_activation_min,    in depthwiseConvNhwc()
    [all …]

D | L2Normalization.cpp |
    49   float* outputData, const Shape& outputShape) {    in l2normFloat32Impl() argument
    77   uint8_t* outputData, const Shape& outputShape) {    in l2normQuant8Impl() argument
    109  int8_t* outputData, const Shape& outputShape) {    in l2normQuant8SignedImpl() argument
    140  const Shape& outputShape) {    in l2normFloat32() argument
    148  convertShapeToTflshape(outputShape), outputData);    in l2normFloat32()
    151  return l2normFloat32Impl(inputData, inputShape, axis, outputData, outputShape);    in l2normFloat32()
    156  _Float16* outputData, const Shape& outputShape) {    in l2normFloat16() argument
    160  std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));    in l2normFloat16()
    162  l2normFloat32(inputDataFloat32.data(), inputShape, axis, outputDataFloat32.data(), outputShape);    in l2normFloat16()
    169  uint8_t* outputData, const Shape& outputShape) {    in l2normQuant8() argument
    [all …]

D | TransposeConv2D.cpp |
    118  uint32_t outputHeight = getSizeOfDimension(outputShape, 1); \
    119  uint32_t outputWidth = getSizeOfDimension(outputShape, 2); \
    120  uint32_t outputDepth = getSizeOfDimension(outputShape, 3); \
    129  const Shape& outputShape) {    in transposeConvNhwc() argument
    136  memset(outputData, 0, getNumberOfElements(outputShape) * sizeof(float));    in transposeConvNhwc()
    185  const TransposeConv2dParam& param, T* outputData, const Shape& outputShape) {    in transposeConvNhwc() argument
    191  uint32_t tempBufferByteSize = getNumberOfElements(outputShape) * sizeof(int32_t);    in transposeConvNhwc()
    205  int32_t outputOffset = outputShape.offset;    in transposeConvNhwc()
    210  NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,    in transposeConvNhwc()
    217  CalculateActivationRange<T>(activation, outputShape, &outputActivationMin,    in transposeConvNhwc()
    [all …]

D | Quantize.cpp |
    40   bool quantizeToQuant8(const T* inputData, uint8_t* outputData, const Shape& outputShape) {    in quantizeToQuant8() argument
    42   uint32_t size = getNumberOfElements(outputShape);    in quantizeToQuant8()
    45   0.0f, std::min<float>(255.0f, outputShape.offset + std::round(inputData[i] /    in quantizeToQuant8()
    46   outputShape.scale))));    in quantizeToQuant8()
    52   bool quantizeToQuant8Signed(const T* inputData, int8_t* outputData, const Shape& outputShape) {    in quantizeToQuant8Signed() argument
    54   uint32_t size = getNumberOfElements(outputShape);    in quantizeToQuant8Signed()
    58   std::min<float>(127.0f, outputShape.offset +    in quantizeToQuant8Signed()
    59   std::round(inputData[i] / outputShape.scale))));    in quantizeToQuant8Signed()

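The fragments above spell out the asymmetric quantization rule: divide by the output scale, round, add the output zero point, and clamp to the type's range (0..255 for the unsigned variant, -128..127 for the signed one). A compact sketch of that rule, using a plain scale/zeroPoint pair in place of the Shape struct:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch of the rule visible in quantizeToQuant8 / quantizeToQuant8Signed:
    // q = clamp(zeroPoint + round(x / scale), lo, hi).
    inline uint8_t quantizeUint8(float x, float scale, int32_t zeroPoint) {
        const float q = zeroPoint + std::round(x / scale);
        return static_cast<uint8_t>(std::max(0.0f, std::min(255.0f, q)));
    }

    inline int8_t quantizeInt8(float x, float scale, int32_t zeroPoint) {
        const float q = zeroPoint + std::round(x / scale);
        return static_cast<int8_t>(std::max(-128.0f, std::min(127.0f, q)));
    }
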
D | Softmax.cpp |
    54   int32_t axis, float* outputData, const Shape& outputShape) {    in softmaxSlowFloat32() argument
    86   float* outputData, const Shape& outputShape) {    in softmaxFloat32() argument
    94   convertShapeToTflshape(outputShape), outputData);    in softmaxFloat32()
    97   return softmaxSlowFloat32(inputData, inputShape, beta, axis, outputData, outputShape);    in softmaxFloat32()
    102  int32_t axis, _Float16* outputData, const Shape& outputShape) {    in softmaxFloat16() argument
    106  std::vector<float> outputData_float32(getNumberOfElements(outputShape));    in softmaxFloat16()
    109  outputShape);    in softmaxFloat16()
    118  T* outputData, const Shape& outputShape) {    in softmaxQuant8Impl() argument
    204  T* outputData, const Shape& outputShape) {    in softmaxQuant8() argument
    208  if ((inputShape.type == OperandType::TENSOR_QUANT8_ASYMM && outputShape.offset != 0) ||    in softmaxQuant8()
    [all …]

D | Concatenation.cpp |
    52   const Shape& outputShape) {    in concatenation() argument
    63   getNumberOfDimensions(outputShape) - axis - 1, inputDataPtrs.data(),    in concatenation()
    64   inputDimsPtr.data(), num_inputs, outputData, convertShapeToDims(outputShape));    in concatenation()
    72   uint8_t* outputData, const Shape& outputShape) {    in concatenation() argument
    88   getNumberOfDimensions(outputShape) - axis - 1, inputDataPtrs.data(),    in concatenation()
    90   convertShapeToDims(outputShape), outputShape.offset, outputShape.scale);    in concatenation()
    131  Shape outputShape(context->getOutputShape(kOutputTensor));    in concatenation() local
    132  outputShape.offset += 128;    in concatenation()
    134  output_uint8.data(), outputShape));    in concatenation()

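Lines 131-134 hint at how the signed-quant8 path reuses the uint8 kernel: values and zero points are shifted by +128 into uint8 space, the uint8 concatenation runs, and the result is shifted back. A hedged, self-contained sketch of that zero-point-shift trick; the buffer handling below is illustrative, not the module's actual code.

    #include <cstdint>
    #include <vector>

    // x_uint8 = x_int8 + 128 and zeroPoint_uint8 = zeroPoint_int8 + 128 represent
    // exactly the same real values because the scale is unchanged.
    std::vector<uint8_t> toUint8Space(const std::vector<int8_t>& data, int32_t* zeroPoint) {
        std::vector<uint8_t> shifted(data.size());
        for (size_t i = 0; i < data.size(); ++i) {
            shifted[i] = static_cast<uint8_t>(static_cast<int32_t>(data[i]) + 128);
        }
        *zeroPoint += 128;  // mirrors "outputShape.offset += 128" above
        return shifted;
    }

    std::vector<int8_t> toInt8Space(const std::vector<uint8_t>& data) {
        std::vector<int8_t> shifted(data.size());
        for (size_t i = 0; i < data.size(); ++i) {
            shifted[i] = static_cast<int8_t>(static_cast<int32_t>(data[i]) - 128);
        }
        return shifted;
    }
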
D | ResizeImageOps.cpp |
    69   bool halfPixelCenters, T* outputData, const Shape& outputShape) {    in resizeNearestNeighbor() argument
    74   const int outHeight = getSizeOfDimension(outputShape, 1);    in resizeNearestNeighbor()
    75   const int outWidth = getSizeOfDimension(outputShape, 2);    in resizeNearestNeighbor()
    113  const Shape& outputShape) {    in resizeImageOpNhwc() argument
    115  int32_t height = static_cast<int32_t>(getSizeOfDimension(outputShape, 1));    in resizeImageOpNhwc()
    116  int32_t width = static_cast<int32_t>(getSizeOfDimension(outputShape, 2));    in resizeImageOpNhwc()
    127  outDimData, convertShapeToTflshape(outputShape), outputData);    in resizeImageOpNhwc()
    132  outputShape);    in resizeImageOpNhwc()
    140  _Float16* outputData, const Shape& outputShape) {    in resizeImageOpNhwc() argument
    144  std::vector<float> outputData_float32(getNumberOfElements(outputShape));    in resizeImageOpNhwc()
    [all …]

D | Cast.cpp |
    45   const Shape& outputShape) {    in copyToTensor() argument
    53   switch (outputShape.type) {    in copyToTensor()
    73   const Shape& outputShape) {    in eval() argument
    81   outputShape); \    in eval()
    91   if (inputShape.type == outputShape.type) {    in eval()
    92   return copyData(inputData, inputShape, outputData, outputShape);    in eval()

D | Pow.cpp |
    35   const Shape& exponentShape, T* outputData, const Shape& outputShape) {    in evalGeneric() argument
    38   IndexedShapeWrapper outputShapeIndexed(outputShape);    in evalGeneric()
    40   std::vector<uint32_t> curIndex(outputShape.dimensions.size(), 0);    in evalGeneric()
    70   const Shape& exponentShape, void* outputData, const Shape& outputShape) {    in eval() argument
    75   reinterpret_cast<_Float16*>(outputData), outputShape);    in eval()
    80   reinterpret_cast<float*>(outputData), outputShape);    in eval()

D | Reduce.cpp |
    55   const Shape outputShape = context->getOutputShape(kOutputTensor);    in compute() local
    64   reinterpret_cast<const int32_t*>(outputShape.dimensions.data()),    in compute()
    65   outputShape.dimensions.size(), context->getInputBuffer<int32_t>(kInputAxes), numAxes,    in compute()
    147  Shape outputShape = inputShape;    in prepare() local
    148  outputShape.dimensions.clear();    in prepare()
    153  outputShape.dimensions.push_back(1);    in prepare()
    156  outputShape.dimensions.push_back(getSizeOfDimension(inputShape, axis));    in prepare()
    161  if (outputShape.dimensions.empty()) {    in prepare()
    162  outputShape.dimensions.push_back(1);    in prepare()
    165  return context->setOutputShape(kOutputTensor, outputShape);    in prepare()

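The prepare() fragments (lines 147-165) outline how the reduction output shape is derived: walk the input dimensions, keep the ones that are not reduced, emit a 1 for reduced axes only when keepDims is set, and fall back to a single dimension of size 1 if everything was reduced away. A standalone sketch of that logic, using plain vectors and a set instead of Shape and the operation context:

    #include <cstdint>
    #include <set>
    #include <vector>

    // Sketch of the REDUCE_* output-shape rule suggested by prepare(): reduced axes
    // disappear (or become 1 with keepDims); a fully reduced tensor still gets one
    // dimension of size 1.
    std::vector<uint32_t> reducedOutputDims(const std::vector<uint32_t>& inputDims,
                                            const std::set<int32_t>& axesToReduce,
                                            bool keepDims) {
        std::vector<uint32_t> outputDims;
        for (int32_t axis = 0; axis < static_cast<int32_t>(inputDims.size()); ++axis) {
            if (axesToReduce.count(axis)) {
                if (keepDims) outputDims.push_back(1);
            } else {
                outputDims.push_back(inputDims[axis]);
            }
        }
        if (outputDims.empty()) outputDims.push_back(1);
        return outputDims;
    }
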
D | PRelu.cpp |
    48   const Shape& outputShape) {    in eval() argument
    51   IndexedShapeWrapper outputShapeIndexed(outputShape);    in eval()
    52   std::vector<uint32_t> curIndex(outputShape.dimensions.size(), 0);    in eval()
    71   T* outputData, const Shape& outputShape) {    in evalQuant8() argument
    74   const int32_t output_offset = outputShape.offset;    in evalQuant8()
    76   const double real_multiplier_pos = aShape.scale / outputShape.scale;    in evalQuant8()
    77   const double real_multiplier_neg = input_product_scale / outputShape.scale;    in evalQuant8()
    98   aData, aShape, bData, bShape, outputData, outputShape);    in evalQuant8()

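The two multipliers on lines 76-77 reflect the two branches of PReLU (out = x for x >= 0, out = alpha * x otherwise): the positive branch only rescales the input (aScale / outScale), while the negative branch scales the input-times-alpha product (aScale * bScale / outScale, which is what the input_product_scale name suggests). A float-arithmetic sketch of that idea; the in-tree kernel presumably works with fixed-point multipliers derived from these doubles, and the saturation details here are simplified.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch of quantized PReLU on one element. aQ is the quantized input, alphaQ
    // the quantized slope; the result is encoded with the output scale/offset.
    uint8_t preluQuant8(uint8_t aQ, int32_t aOffset, float aScale,
                        uint8_t alphaQ, int32_t alphaOffset, float alphaScale,
                        int32_t outOffset, float outScale) {
        const int32_t a = static_cast<int32_t>(aQ) - aOffset;
        const double multiplierPos = aScale / outScale;                 // x >= 0 branch
        const double multiplierNeg = (aScale * alphaScale) / outScale;  // x < 0 branch
        double out;
        if (a >= 0) {
            out = a * multiplierPos;
        } else {
            const int32_t alpha = static_cast<int32_t>(alphaQ) - alphaOffset;
            out = static_cast<double>(a) * alpha * multiplierNeg;
        }
        const double q = outOffset + std::round(out);
        return static_cast<uint8_t>(std::max(0.0, std::min(255.0, q)));
    }
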
D | Slice.cpp |
    55   T* outputData, const Shape& outputShape) {    in evalGeneric() argument
    56   const int outputSize = getNumberOfElements(outputShape);    in evalGeneric()
    57   const IndexedShapeWrapper indexedOutput = IndexedShapeWrapper(outputShape);    in evalGeneric()
    59   std::vector<uint32_t> outputIndex(getNumberOfDimensions(outputShape), 0);    in evalGeneric()
    126  Shape outputShape = context->getOutputShape(kOutputTensor);    in prepare() local
    127  outputShape.dimensions.resize(n_dims);    in prepare()
    137  outputShape.dimensions[i] = sliceSize;    in prepare()
    139  return context->setOutputShape(kOutputTensor, outputShape);    in prepare()

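In SLICE's prepare() (lines 126-139) each output dimension is the requested slice size; per the NNAPI SLICE definition, a size of -1 means "everything from begin to the end of that dimension". A small sketch of how that per-dimension size could be resolved; the function and parameter names here are illustrative, not the module's.

    #include <cstdint>
    #include <vector>

    // Sketch: resolve SLICE output dimensions from the begin/size operands.
    // size[i] == -1 is taken to mean "all remaining elements past begin[i]".
    std::vector<uint32_t> sliceOutputDims(const std::vector<uint32_t>& inputDims,
                                          const std::vector<int32_t>& begin,
                                          const std::vector<int32_t>& size) {
        std::vector<uint32_t> outputDims(inputDims.size());
        for (size_t i = 0; i < inputDims.size(); ++i) {
            const int32_t sliceSize =
                    (size[i] == -1) ? static_cast<int32_t>(inputDims[i]) - begin[i] : size[i];
            outputDims[i] = static_cast<uint32_t>(sliceSize);
        }
        return outputDims;
    }
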
D | LocalResponseNormalization.cpp |
    54   const Shape& outputShape) {    in localResponseNormFloat32Impl() argument
    83   T beta, int32_t axis, T* outputData, const Shape& outputShape);
    88   const Shape& outputShape) {    in localResponseNorm() argument
    99   convertShapeToTflshape(outputShape), outputData);    in localResponseNorm()
    103  outputData, outputShape);    in localResponseNorm()
    110  _Float16* outputData, const Shape& outputShape) {    in localResponseNorm() argument
    114  std::vector<float> outputDataFloat32(getNumberOfElements(outputShape));    in localResponseNorm()
    117  outputDataFloat32.data(), outputShape);    in localResponseNorm()

D | LSHProjection.cpp |
    45   Shape* outputShape) {    in Prepare() argument
    69   outputShape->dimensions = {SizeOfDimension(hash, 0)};    in Prepare()
    76   outputShape->dimensions = {SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1)};    in Prepare()
    83   outputShape->type = OperandType::TENSOR_INT32;    in Prepare()
    84   outputShape->offset = 0;    in Prepare()
    85   outputShape->scale = 0.f;    in Prepare()

D | RoiPooling.cpp |
    58   T_Input* outputData, const Shape& outputShape) {    in roiPoolingNhwc() argument
    69   uint32_t outHeight = getSizeOfDimension(outputShape, 1);    in roiPoolingNhwc()
    70   uint32_t outWidth = getSizeOfDimension(outputShape, 2);    in roiPoolingNhwc()
    147  bool useNchw, T_Input* outputData, const Shape& outputShape) {    in roiPooling() argument
    151  NN_RET_CHECK(output.initialize(outputData, outputShape));    in roiPooling()
    165  const Shape& outputShape) {    in roiPooling() argument
    170  outputShape));    in roiPooling()
    180  const Shape& outputShape) {    in roiPooling() argument
    185  outputShape));    in roiPooling()

/packages/modules/NeuralNetworks/common/include/
D | Operations.h |
    58   _Float16* outputData, const Shape& outputShape);
    65   const Shape& outputShape);
    72   uint8_t* outputData, const Shape& outputShape);
    81   const Shape& outputShape);
    85   _Float16* outputData, const Shape& outputShape);
    88   const Shape& outputShape);
    91   const Shape& outputShape);
    95   T* outputData, const Shape& outputShape);
    98   T* outputData, const Shape& outputShape);
    102  T* outputData, const Shape& outputShape);
    [all …]

/packages/modules/NeuralNetworks/common/
D | OperationsUtils.cpp |
    48   void CalculateActivationRangeImpl(int32_t activation, const Shape& outputShape, int32_t qmin,    in CalculateActivationRangeImpl() argument
    50   const auto scale = outputShape.scale;    in CalculateActivationRangeImpl()
    51   const auto zero_point = outputShape.offset;    in CalculateActivationRangeImpl()
    257  const Shape& biasShape, const Shape& outputShape,    in GetQuantizedConvolutionMultipler() argument
    267  *multiplier = input_product_scale / outputShape.scale;    in GetQuantizedConvolutionMultipler()
    271  void CalculateActivationRangeUint8(int32_t activation, const Shape& outputShape, int32_t* act_min,    in CalculateActivationRangeUint8() argument
    276  CalculateActivationRangeImpl(activation, outputShape, qmin, qmax, act_min, act_max);    in CalculateActivationRangeUint8()
    279  void CalculateActivationRangeInt8(int32_t activation, const Shape& outputShape, int32_t* act_min,    in CalculateActivationRangeInt8()
    284  CalculateActivationRangeImpl(activation, outputShape, qmin, qmax, act_min, act_max);    in CalculateActivationRangeInt8()
    460  bool embeddingLookupPrepare(const Shape& valueShape, const Shape& lookupShape, Shape* outputShape) {    in embeddingLookupPrepare() argument
    [all …]

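These utilities tie the quantized kernels above together: GetQuantizedConvolutionMultipler derives a single real multiplier, input_product_scale / output_scale (line 267), and the CalculateActivationRange* helpers clamp the fused activation into the output's quantized domain using its scale and zero point. A hedged sketch of the activation-range computation; the fused-activation constants and exact rounding below are assumptions, not copied from the module.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Assumed fused-activation codes (illustrative; NNAPI defines its own constants).
    enum FusedActivation : int32_t { kNone = 0, kRelu = 1, kRelu1 = 2, kRelu6 = 3 };

    // Quantize a real-valued bound into the output's integer domain.
    static int32_t quantizeBound(float bound, float scale, int32_t zeroPoint) {
        return zeroPoint + static_cast<int32_t>(std::round(bound / scale));
    }

    // Sketch of CalculateActivationRangeImpl: intersect the type's [qmin, qmax]
    // with the activation's real-valued range mapped through scale/zero_point.
    void calculateActivationRange(int32_t activation, float scale, int32_t zeroPoint,
                                  int32_t qmin, int32_t qmax,
                                  int32_t* actMin, int32_t* actMax) {
        *actMin = qmin;
        *actMax = qmax;
        if (activation == kRelu) {
            *actMin = std::max(qmin, quantizeBound(0.f, scale, zeroPoint));
        } else if (activation == kRelu6) {
            *actMin = std::max(qmin, quantizeBound(0.f, scale, zeroPoint));
            *actMax = std::min(qmax, quantizeBound(6.f, scale, zeroPoint));
        } else if (activation == kRelu1) {
            *actMin = std::max(qmin, quantizeBound(-1.f, scale, zeroPoint));
            *actMax = std::min(qmax, quantizeBound(1.f, scale, zeroPoint));
        }  // kNone: keep the full [qmin, qmax] range.
    }

The uint8 and int8 wrappers differ only in the [qmin, qmax] pair they pass in (0..255 versus -128..127), which matches the pair of thin functions visible at lines 271 and 279.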