Lines Matching refs:inputData
53 bool reluFloat(const T* inputData, const Shape& inputShape, T* outputData, const Shape& outputShape, in reluFloat() argument
57 for (int i = 0; i < numElements; i++, inputData++, outputData++) { in reluFloat()
59 std::min(std::max(reluMin, static_cast<float>(*inputData)), reluMax)); in reluFloat()
63 template bool reluFloat<float>(const float* inputData, const Shape& inputShape, float* outputData,
65 template bool reluFloat<_Float16>(const _Float16* inputData, const Shape& inputShape,
70 bool relu1Float(const T* inputData, const Shape& inputShape, T* outputData, in relu1Float() argument
72 return reluFloat(inputData, inputShape, outputData, outputShape, -1.f, 1.f); in relu1Float()
74 template bool relu1Float<float>(const float* inputData, const Shape& inputShape, float* outputData,
76 template bool relu1Float<_Float16>(const _Float16* inputData, const Shape& inputShape,
80 bool relu6Float(const T* inputData, const Shape& inputShape, T* outputData, in relu6Float() argument
82 return reluFloat(inputData, inputShape, outputData, outputShape, 0.f, 6.f); in relu6Float()
84 template bool relu6Float<float>(const float* inputData, const Shape& inputShape, float* outputData,
86 template bool relu6Float<_Float16>(const _Float16* inputData, const Shape& inputShape,
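The three float entry points above share a single clamp: reluFloat bounds every element to [reluMin, reluMax], and relu1Float / relu6Float simply forward to it with (-1, 1) and (0, 6). A minimal standalone sketch of that pattern follows; the NNAPI Shape/element-count plumbing is replaced by a plain count, and the open upper bound for plain ReLU is an assumption, not something shown in the listing.

#include <algorithm>
#include <cstddef>

// Clamp each element to [reluMin, reluMax], widening to float for the
// comparison exactly as the reluFloat loop above does.
template <typename T>
void reluRange(const T* inputData, T* outputData, std::size_t numElements,
               float reluMin, float reluMax) {
    for (std::size_t i = 0; i < numElements; ++i) {
        outputData[i] = static_cast<T>(
                std::min(std::max(reluMin, static_cast<float>(inputData[i])), reluMax));
    }
}

// relu1Float and relu6Float in the listing are thin wrappers with fixed
// bounds; plain ReLU leaves the upper bound effectively unbounded (assumed).
template <typename T>
void relu1(const T* in, T* out, std::size_t n) { reluRange(in, out, n, -1.f, 1.f); }

template <typename T>
void relu6(const T* in, T* out, std::size_t n) { reluRange(in, out, n, 0.f, 6.f); }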
89 bool tanhFloat16(const _Float16* inputData, const Shape& inputShape, _Float16* outputData, in tanhFloat16() argument
93 for (int i = 0; i < numElements; i++, inputData++, outputData++) { in tanhFloat16()
94 *outputData = static_cast<_Float16>(std::tanh(static_cast<float>(*inputData))); in tanhFloat16()
99 bool tanhFloat32(const float* inputData, const Shape& inputShape, float* outputData, in tanhFloat32() argument
103 for (int i = 0; i < numElements; i++, inputData++, outputData++) { in tanhFloat32()
104 *outputData = std::tanh(*inputData); in tanhFloat32()
110 bool logisticFloat(const T* inputData, const Shape& inputShape, T* outputData, in logisticFloat() argument
114 for (int i = 0; i < numElements; i++, inputData++, outputData++) { in logisticFloat()
115 *outputData = static_cast<T>(1.f / (1.f + std::exp(static_cast<float>(-*inputData)))); in logisticFloat()
119 template bool logisticFloat<float>(const float* inputData, const Shape& inputShape,
121 template bool logisticFloat<_Float16>(const _Float16* inputData, const Shape& inputShape,
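tanhFloat16/tanhFloat32 and logisticFloat are plain elementwise loops: tanh via std::tanh (the half-precision path upcasts to float first), logistic via 1 / (1 + e^-x). A self-contained sketch of both, with illustrative names:

#include <cmath>
#include <cstddef>

// Elementwise tanh; mirrors tanhFloat32, while the _Float16 variant only adds
// a widening cast before std::tanh and a narrowing cast after.
void tanhElementwise(const float* inputData, float* outputData, std::size_t numElements) {
    for (std::size_t i = 0; i < numElements; ++i) {
        outputData[i] = std::tanh(inputData[i]);
    }
}

// Logistic (sigmoid): 1 / (1 + exp(-x)), applied per element in float,
// matching the formula visible in logisticFloat above.
template <typename T>
void logisticElementwise(const T* inputData, T* outputData, std::size_t numElements) {
    for (std::size_t i = 0; i < numElements; ++i) {
        outputData[i] = static_cast<T>(1.f / (1.f + std::exp(-static_cast<float>(inputData[i]))));
    }
}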
125 inline bool reluXQuant8(const uint8_t* inputData, const Shape& inputShape, uint8_t* outputData, in reluXQuant8() argument
134 for (int i = 0; i < numElements; i++, inputData++, outputData++) { in reluXQuant8()
136 std::max((uint8_t)output_activation_min, *inputData)); in reluXQuant8()
141 bool reluQuant8(const uint8_t* inputData, const Shape& inputShape, uint8_t* outputData, in reluQuant8() argument
144 return reluXQuant8<kActivationRelu>(inputData, inputShape, outputData, outputShape); in reluQuant8()
147 bool relu1Quant8(const uint8_t* inputData, const Shape& inputShape, uint8_t* outputData, in relu1Quant8() argument
150 return reluXQuant8<kActivationRelu1>(inputData, inputShape, outputData, outputShape); in relu1Quant8()
153 bool relu6Quant8(const uint8_t* inputData, const Shape& inputShape, uint8_t* outputData, in relu6Quant8() argument
156 return reluXQuant8<kActivationRelu6>(inputData, inputShape, outputData, outputShape); in relu6Quant8()
159 bool tanhQuant8(const uint8_t* inputData, const Shape& inputShape, uint8_t* outputData, in tanhQuant8() argument
182 tflite::optimized_ops::Tanh(inputData, convertShapeToTflshape(inputShape), inputShape.offset, in tanhQuant8()
189 bool logisticQuant8(const uint8_t* inputData, const Shape& inputShape, uint8_t* outputData, in logisticQuant8() argument
213 inputData, convertShapeToTflshape(inputShape), inputShape.offset, input_range_radius, in logisticQuant8()
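The quantized uint8 paths split in two: reluXQuant8 clamps raw uint8 values against an activation range already expressed in the quantized domain (output_activation_min/max), while tanhQuant8 and logisticQuant8 delegate to the TFLite optimized kernels with precomputed multiplier/shift parameters. The sketch below covers only the first half, showing how a real-valued range such as [0, 6] would turn into uint8 bounds from the tensor's scale and offset; the helper names here are illustrative, not the NNAPI ones.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>

struct Quant8Range { uint8_t min; uint8_t max; };

// Affine quantization: real r maps to q = zeroPoint + round(r / scale),
// clamped to the uint8 representable range.
uint8_t quantizeU8(float r, float scale, int32_t zeroPoint) {
    int32_t q = zeroPoint + static_cast<int32_t>(std::round(r / scale));
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

// e.g. ReLU6 becomes [quantizeU8(0), quantizeU8(6)]; plain ReLU only raises
// the lower bound and keeps the maximum at 255.
Quant8Range quant8Bounds(float realMin, float realMax, float scale, int32_t zeroPoint) {
    return {quantizeU8(realMin, scale, zeroPoint), quantizeU8(realMax, scale, zeroPoint)};
}

// The per-element clamp then has the same shape as the reluXQuant8 loop above.
void clampQuant8(const uint8_t* inputData, uint8_t* outputData, std::size_t numElements,
                 Quant8Range range) {
    for (std::size_t i = 0; i < numElements; ++i) {
        outputData[i] = std::min(range.max, std::max(range.min, inputData[i]));
    }
}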
220 inline bool reluXQuant8Signed(const int8_t* inputData, const Shape& inputShape, int8_t* outputData, in reluXQuant8Signed() argument
229 for (int i = 0; i < numElements; i++, inputData++, outputData++) { in reluXQuant8Signed()
231 std::max((int8_t)output_activation_min, *inputData)); in reluXQuant8Signed()
236 bool reluQuant8Signed(const int8_t* inputData, const Shape& inputShape, int8_t* outputData, in reluQuant8Signed() argument
239 return reluXQuant8Signed<kActivationRelu>(inputData, inputShape, outputData, outputShape); in reluQuant8Signed()
242 bool relu1Quant8Signed(const int8_t* inputData, const Shape& inputShape, int8_t* outputData, in relu1Quant8Signed() argument
245 return reluXQuant8Signed<kActivationRelu1>(inputData, inputShape, outputData, outputShape); in relu1Quant8Signed()
248 bool relu6Quant8Signed(const int8_t* inputData, const Shape& inputShape, int8_t* outputData, in relu6Quant8Signed() argument
251 return reluXQuant8Signed<kActivationRelu6>(inputData, inputShape, outputData, outputShape); in relu6Quant8Signed()
254 bool tanhQuant8Signed(const int8_t* inputData, const Shape& inputShape, int8_t* outputData, in tanhQuant8Signed() argument
279 inputData, convertShapeToTflshape(outputShape), outputData); in tanhQuant8Signed()
284 bool logisticQuant8Signed(const int8_t* inputData, const Shape& inputShape, int8_t* outputData, in logisticQuant8Signed() argument
308 input_left_shift, numElements, inputData, outputData); in logisticQuant8Signed()
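The int8 signed paths repeat the same structure over the [-128, 127] range, with the TFLite reference integer kernels handling tanh and logistic via input_multiplier/input_left_shift. One detail worth spelling out: real 0 quantizes to the tensor's zero point, so plain ReLU in the signed quantized domain reduces to a clamp at that zero point. A small sketch under that assumption (names are illustrative, not the NNAPI ones):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// ReLU over signed quantized data: the lower bound is the quantized image of
// real 0 (the zero point); the upper bound is already the int8 maximum.
void reluQuant8SignedSketch(const int8_t* inputData, int8_t* outputData,
                            std::size_t numElements, int32_t zeroPoint) {
    const int8_t lower = static_cast<int8_t>(
            std::min<int32_t>(127, std::max<int32_t>(-128, zeroPoint)));
    for (std::size_t i = 0; i < numElements; ++i) {
        outputData[i] = std::max(lower, inputData[i]);
    }
}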
328 bool hardSwishQuant(const T* inputData, const Shape& inputShape, T* outputData, in hardSwishQuant() argument
354 tflite::reference_ops::HardSwish(params, convertShapeToTflshape(inputShape), inputData, in hardSwishQuant()
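hardSwishQuant hands the element loop to tflite::reference_ops::HardSwish with precomputed rescale parameters; the function being evaluated is h-swish(x) = x * relu6(x + 3) / 6. A float reference sketch of that formula, for orientation only (it is not the quantized kernel itself):

#include <algorithm>
#include <cstddef>

void hardSwishReference(const float* inputData, float* outputData, std::size_t numElements) {
    for (std::size_t i = 0; i < numElements; ++i) {
        const float x = inputData[i];
        // relu6(x + 3) clamps the shifted input to [0, 6] before rescaling by 1/6.
        outputData[i] = x * std::min(std::max(x + 3.f, 0.f), 6.f) / 6.f;
    }
}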