/external/tensorflow/tensorflow/lite/experimental/micro/kernels/
D | fully_connected_test.cc |
    109  std::initializer_list<int> output_dims_data, float output_min,  in TestFullyConnectedQuantized() argument
    128  output_min, output_max),  in TestFullyConnectedQuantized()
    272  const float output_min = -127.0f;  in TF_LITE_MICRO_TEST() local
    321  F2Q(24, output_min, output_max),  in TF_LITE_MICRO_TEST()
    322  F2Q(25, output_min, output_max),  in TF_LITE_MICRO_TEST()
    323  F2Q(26, output_min, output_max),  in TF_LITE_MICRO_TEST()
    324  F2Q(58, output_min, output_max),  in TF_LITE_MICRO_TEST()
    325  F2Q(59, output_min, output_max),  in TF_LITE_MICRO_TEST()
    326  F2Q(60, output_min, output_max),  in TF_LITE_MICRO_TEST()
    329  output_min, output_max,  // Output quantization range.  in TF_LITE_MICRO_TEST()
    [all …]
D | depthwise_conv_test.cc |
    116  std::initializer_list<int> output_dims_data, float output_min,  in TestDepthwiseConvQuantized() argument
    135  output_min, output_max),  in TestDepthwiseConvQuantized()
    234  const float output_min = -127.0f;  in TF_LITE_MICRO_TEST() local
    289  F2Q(71, output_min, output_max),  in TF_LITE_MICRO_TEST()
    290  F2Q(-34, output_min, output_max),  in TF_LITE_MICRO_TEST()
    291  F2Q(99, output_min, output_max),  in TF_LITE_MICRO_TEST()
    292  F2Q(-20, output_min, output_max),  in TF_LITE_MICRO_TEST()
    293  F2Q(91, output_min, output_max),  in TF_LITE_MICRO_TEST()
    294  F2Q(-26, output_min, output_max),  in TF_LITE_MICRO_TEST()
    295  F2Q(127, output_min, output_max),  in TF_LITE_MICRO_TEST()
    [all …]
D | softmax_test.cc |
    94   float output_min, float output_max,  in TestSoftmaxQuantized() argument
    107  output_min, output_max),  in TestSoftmaxQuantized()
    193  const float output_min = 0.0f;  in TF_LITE_MICRO_TEST() local
    209  F2Q(0.011656231, output_min, output_max),  in TF_LITE_MICRO_TEST()
    210  F2Q(0.031684921, output_min, output_max),  in TF_LITE_MICRO_TEST()
    211  F2Q(0.086128544, output_min, output_max),  in TF_LITE_MICRO_TEST()
    212  F2Q(0.234121657, output_min, output_max),  in TF_LITE_MICRO_TEST()
    213  F2Q(0.636408647, output_min, output_max),  in TF_LITE_MICRO_TEST()
    216  output_min, output_max,  // Output quantized range.  in TF_LITE_MICRO_TEST()
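The `F2Q(value, output_min, output_max)` calls in these micro-kernel tests map expected float outputs into the quantized domain before comparison. A minimal sketch of such an affine float-to-uint8 helper, assuming the [min, max] range is spread linearly over [0, 255]; the real helper lives in TFLite Micro's testing utilities and also derives a zero point:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative affine quantizer: maps [min, max] linearly onto [0, 255],
// rounding to nearest and clamping. Not the exact TFLite Micro helper.
inline uint8_t F2Q(float value, float min, float max) {
  const float scale = (max - min) / 255.0f;
  const int32_t quantized =
      static_cast<int32_t>(std::round((value - min) / scale));
  return static_cast<uint8_t>(std::min(255, std::max(0, quantized)));
}
```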
/external/tensorflow/tensorflow/core/kernels/
D | quantized_add_op.cc |
    48   float scalar_input_max, float output_min, float output_max,  in ScalarAddition() argument
    51   scalar_input, scalar_input_min, scalar_input_max, output_min, output_max);  in ScalarAddition()
    54   full_input[i], full_input_min, full_input_max, output_min, output_max);  in ScalarAddition()
    66   float output_min, float output_max, qint32* output) {  in ScalarAddition() argument
    68   scalar_input, scalar_input_min, scalar_input_max, output_min, output_max);  in ScalarAddition()
    75   FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max);  in ScalarAddition()
    77   FloatToQuantizedUnclamped<qint32>(input_1_float, output_min, output_max);  in ScalarAddition()
    123  float output_min, float output_max, qint32* output) {  in ScalarAddition() argument
    125  scalar_input, scalar_input_min, scalar_input_max, output_min, output_max);  in ScalarAddition()
    132  FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max);  in ScalarAddition()
    [all …]
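`ScalarAddition()` broadcasts one quantized scalar (carrying its own range) across a quantized tensor and writes 32-bit results in a shared output range. A reference-style sketch of that contract, assuming affine quantization and a symmetric output range; helper names are illustrative, and the real kernel avoids per-element float math by precomputing integer offsets via `FloatToQuantizedUnclamped<qint32>`:

```cpp
#include <cstdint>

// Dequantize a uint8 value from its [min, max] range (illustrative helper).
inline float U8ToFloat(uint8_t q, float min, float max) {
  return min + static_cast<float>(q) * (max - min) / 255.0f;
}

// Quantize a float into a qint32-style range; assumes out_min == -out_max,
// which the quantized-add output range guarantees (illustrative helper).
inline int32_t FloatToI32(float v, float out_min, float out_max) {
  const double scale = (static_cast<double>(out_max) - out_min) / 4294967296.0;
  return static_cast<int32_t>(v / scale);
}

// Reference behavior of ScalarAddition(): add the dequantized scalar to each
// dequantized element, then quantize the sum into [output_min, output_max].
void ScalarAdditionSketch(const uint8_t* full_input, int64_t count,
                          float full_input_min, float full_input_max,
                          uint8_t scalar_input, float scalar_input_min,
                          float scalar_input_max, float output_min,
                          float output_max, int32_t* output) {
  const float scalar_value =
      U8ToFloat(scalar_input, scalar_input_min, scalar_input_max);
  for (int64_t i = 0; i < count; ++i) {
    const float element =
        U8ToFloat(full_input[i], full_input_min, full_input_max);
    output[i] = FloatToI32(element + scalar_value, output_min, output_max);
  }
}
```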
D | quantized_batch_norm_op.cc |
    38   Tensor* output, float* output_min, float* output_max) {  in ReferenceBatchNorm() argument
    49   *output_min = std::numeric_limits<float>::max();  in ReferenceBatchNorm()
    78   *output_min = std::min(output_value, *output_min);  in ReferenceBatchNorm()
    82   FloatToQuantized<T2>(output_value, *output_min, *output_max);  in ReferenceBatchNorm()
    101  Tensor* output, float* output_min, float* output_max) {  in FixedPointBatchNorm() argument
    115  *output_min = -(1 << 20);  in FixedPointBatchNorm()
    139  FloatToQuantized<T2>(scale_value, *output_min, *output_max);  in FixedPointBatchNorm()
    141  FloatToQuantized<T2>(offset_value, *output_min, *output_max);  in FixedPointBatchNorm()
    145  FloatToQuantized<T2>(1.0f, *output_min, *output_max);  in FixedPointBatchNorm()
    151  input_max, *output_min, *output_max);  in FixedPointBatchNorm()
    [all …]
D | quantized_concat_op.cc |
    35   float output_min, float output_max)  in RequantizeCopier()
    36   : output_min(output_min),  in RequantizeCopier()
    43   if (input_min == output_min && input_max == output_max) {  in Copy()
    54   FloatToQuantizedStruct<T> f2q(output_min, output_max);  in Copy()
    64   float output_min;  member
    82   float* output_min, float* output_max) {  in CalculateInputAndOutputRange() argument
    100  *output_min = -largest_value;  in CalculateInputAndOutputRange()
    103  *output_min = overall_min;  in CalculateInputAndOutputRange()
    194  float output_min = std::numeric_limits<float>::max();  in Compute() local
    198  &input_mins_and_maxes, &output_min,  in Compute()
    [all …]
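`CalculateInputAndOutputRange()` must pick a single range for the concatenated output, and `RequantizeCopier` then skips the copy-with-requantize when an input already matches it (the `input_min == output_min` branch). A sketch of the envelope logic for the 8-bit path, where the output range is just the union of the input ranges; for 32-bit inputs the real op instead widens to a symmetric range around the largest magnitude, hence the `*output_min = -largest_value` branch:

```cpp
#include <algorithm>
#include <limits>
#include <utility>
#include <vector>

// Envelope of all input ranges; each pair is (input_min, input_max).
// Illustrative stand-in for the 8-bit branch of the range calculation.
std::pair<float, float> ConcatOutputRange(
    const std::vector<std::pair<float, float>>& input_mins_and_maxes) {
  float overall_min = std::numeric_limits<float>::max();
  float overall_max = std::numeric_limits<float>::lowest();
  for (const auto& range : input_mins_and_maxes) {
    overall_min = std::min(overall_min, range.first);
    overall_max = std::max(overall_max, range.second);
  }
  return {overall_min, overall_max};  // becomes (output_min, output_max)
}
```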
D | quantized_activation_ops.cc |
    51  Tensor* output_min = nullptr;  in Compute() local
    52  OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));  in Compute()
    53  output_min->flat<float>()(0) = min_input;  in Compute()
    89  Tensor* output_min = nullptr;  in Compute() local
    90  OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));  in Compute()
    91  output_min->flat<float>()(0) = min_input;  in Compute()
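Quantized activations such as ReLU clamp values that are already quantized, so the output range is the input range unchanged: both `Compute()` bodies simply allocate the two scalar range outputs and copy `min_input`/`max_input` through. In context, the pattern looks like this (the `output_max` half mirrors the `output_min` lines shown above, which the truncated listing omits):

```cpp
// Inside Compute(OpKernelContext* context), after output 0 (the quantized
// data) has been written; min_input / max_input are the input range scalars.
Tensor* output_min = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
output_min->flat<float>()(0) = min_input;
Tensor* output_max = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
output_max->flat<float>()(0) = max_input;
```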
D | quantized_pooling_ops.cc |
    94   Tensor* output_min = nullptr;  in Compute() local
    95   OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));  in Compute()
    96   output_min->flat<float>()(0) = min_input;  in Compute()
    118  Tensor* output_min = nullptr;  in Compute() local
    119  OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));  in Compute()
    120  output_min->flat<float>()(0) = min_input;  in Compute()
D | quantization_utils_test.cc |
    35   float input_max, float output_min, float output_max,  in TestRequantizeMany() argument
    44   output_min, output_max));  in TestRequantizeMany()
    55   input_max, output_min, output_max,  in TestRequantizeMany()
    59   *eigen_device, i_tensor, input_min, input_max, output_min, output_max,  in TestRequantizeMany()
    71   << ", input_max=" << input_max << ", output_min=" << output_min  in TestRequantizeMany()
    77   float output_min, float output_max,  in TestRequantizeMany8To32Bit() argument
    86   output_min, output_max));  in TestRequantizeMany8To32Bit()
    96   input_max, output_min, output_max,  in TestRequantizeMany8To32Bit()
    107  << ", input_max=" << input_max << ", output_min=" << output_min  in TestRequantizeMany8To32Bit()
    232  const float output_min = ranges[range_index][2];  in TestRequantizeManyInNewRangeEigenVsNonEigen() local
    [all …]
D | mkl_requantize_ops_test.cc |
    163  const float output_min = GetOutput(0)->flat<float>()(0);  in TEST_F() local
    165  EXPECT_NEAR(-14.8217, output_min, 0.002);  in TEST_F()
    224  const float output_min = GetOutput(0)->flat<float>()(0);  in TEST_F() local
    226  EXPECT_NEAR(-6.0, output_min, 0.002);  // Values are aligned with clip_value.  in TEST_F()
    293  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    295  EXPECT_NEAR(range_op_output_min, output_min, 0.002);  in TEST_F()
D | quantized_activation_ops_test.cc |
    62  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    65  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TEST_F()
    93  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    96  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TEST_F()
D | quantized_reshape_op.cc |
    36  Tensor* output_min = nullptr;  in Compute() local
    37  OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min));  in Compute()
    38  output_min->flat<float>()(0) = input_min_float;  in Compute()
D | quantized_pooling_ops_test.cc |
    76   const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    79   QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TEST_F()
    121  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    124  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TEST_F()
D | requantization_range_op.cc |
    51  Tensor* output_min = nullptr;  in Compute() local
    52  OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output_min));  in Compute()
    68  output_min->flat<float>().setConstant(used_min_float);  in Compute()
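`RequantizationRangeOp` scans the 32-bit tensor for the values actually present and emits that tighter range; note `output_min` is output 0 here, unlike the data-producing ops above where the quantized tensor occupies output 0. A sketch of the scan, assuming the usual affine qint32 mapping; helper names are illustrative:

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>

// Find the actually-used float range of a qint32 tensor quantized over
// [input_min, input_max]; results become used_min_float / used_max_float.
void UsedRange(const int32_t* data, int64_t count, float input_min,
               float input_max, float* used_min_float, float* used_max_float) {
  int32_t q_min = std::numeric_limits<int32_t>::max();
  int32_t q_max = std::numeric_limits<int32_t>::lowest();
  for (int64_t i = 0; i < count; ++i) {
    q_min = std::min(q_min, data[i]);
    q_max = std::max(q_max, data[i]);
  }
  // Approximate affine qint32 -> float conversion (cf. QuantizedToFloat).
  const double scale =
      (static_cast<double>(input_max) - input_min) / 4294967296.0;
  const double center = (static_cast<double>(input_min) + input_max) / 2.0;
  *used_min_float = static_cast<float>(center + q_min * scale);
  *used_max_float = static_cast<float>(center + q_max * scale);
}
```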
D | quantized_bias_add_op_test.cc |
    83   const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    86   QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);  in TEST_F()
    165  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    168  QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);  in TEST_F()
D | quantize_op_test.cc |
    292  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    294  EXPECT_NEAR(0.0f, output_min, 1e-5f);  in TEST_F()
    314  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    316  EXPECT_NEAR(0.0f, output_min, 1e-5f);  in TEST_F()
    336  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    338  EXPECT_NEAR(0.0f, output_min, 1e-5f);  in TEST_F()
    358  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    360  EXPECT_NEAR(-0.3f, output_min, 1e-5f);  in TEST_F()
D | requantize.cc |
    49  Tensor* output_min = nullptr;  in Compute() local
    50  OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min));  in Compute()
    92  output_min->flat<float>().setConstant(requested_output_min_float);  in Compute()
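`RequantizeOp` differs from the range-shrinking ops in that the caller supplies the desired output range, and the op echoes it back through the range outputs (hence `requested_output_min_float`) after requantizing the data. A per-value sketch of that 32-bit-to-8-bit requantization, assuming affine ranges; the real kernel works over whole tensors via `RequantizeManyInNewRange`:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Requantize one qint32 value (over [in_min, in_max]) into a uint8 value
// over the caller-requested [requested_out_min, requested_out_max].
uint8_t RequantizeOne(int32_t q, float in_min, float in_max,
                      float requested_out_min, float requested_out_max) {
  const double in_scale =
      (static_cast<double>(in_max) - in_min) / 4294967296.0;
  const double center = (static_cast<double>(in_min) + in_max) / 2.0;
  const double value = center + q * in_scale;  // dequantize
  const double out_scale =
      (static_cast<double>(requested_out_max) - requested_out_min) / 255.0;
  const int32_t out = static_cast<int32_t>(
      std::lround((value - requested_out_min) / out_scale));
  return static_cast<uint8_t>(std::min(255, std::max(0, out)));
}
```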
D | quantized_concat_op_test.cc |
    111  const float output_min = GetOutput(1)->flat<float>()(0);  in TestSmall8Bit() local
    114  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TestSmall8Bit()
    177  const float output_min = GetOutput(1)->flat<float>()(0);  in TestSmall32Bit() local
    180  QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);  in TestSmall32Bit()
    239  const float output_min = GetOutput(1)->flat<float>()(0);  in TestSecondDim8Bit() local
    242  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TestSecondDim8Bit()
D | mkl_quantized_pooling_ops_test.cc |
    125  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    128  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TEST_F()
    193  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    196  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TEST_F()
D | quantization_utils.cc |
    23  float* output_min, float* output_max) {  in GetOutputMinAndMaxForQuantizedAdd() argument
    39  *output_min = -(*output_max);  in GetOutputMinAndMaxForQuantizedAdd()
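The two matches above capture the essence of `GetOutputMinAndMaxForQuantizedAdd()`: the addition output range is symmetric (`*output_min = -(*output_max)`) and wide enough that 32-bit sums keep headroom. A hedged sketch of that choice; the headroom factor here is illustrative, not necessarily the exact constant the real helper uses:

```cpp
#include <algorithm>

// Pick a symmetric output range for quantized addition: take the largest
// input magnitude and scale it up so intermediate sums cannot saturate.
void OutputRangeForQuantizedAdd(float input_min, float input_max,
                                float smaller_input_min,
                                float smaller_input_max, float* output_min,
                                float* output_max) {
  const float magnitude =
      std::max(std::max(input_max, -input_min),
               std::max(smaller_input_max, -smaller_input_min));
  *output_max = magnitude * static_cast<float>(1 << 17);  // headroom, assumed
  *output_min = -(*output_max);  // matches line 39 above
}
```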
D | quantize_down_and_shrink_range.cc |
    47  Tensor* output_min = nullptr;  in Compute() local
    48  OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min));  in Compute()
    96  output_min->flat<float>().setConstant(actual_min_float);  in Compute()
D | mkl_requantization_range_per_channel_op.cc |
    102  Tensor* output_min = nullptr;  in Compute() local
    104  OP_REQUIRES_OK(ctx, ctx->allocate_output(kOutputMinIndex, {}, &output_min));  in Compute()
    106  output_min->flat<float>()(0) = is_non_negative ? 0.0f : -out_min_max;  in Compute()
D | quantized_bias_add_op.cc |
    84  Tensor* output_min = nullptr;  in Compute() local
    85  OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));  in Compute()
    86  output_min->flat<float>()(0) = total_min;  in Compute()
D | mkl_quantized_concat_op_test.cc |
    150  const float output_min = GetOutput(1)->flat<float>()(0);  in TestSmall8Bit() local
    153  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TestSmall8Bit()
    224  const float output_min = GetOutput(1)->flat<float>()(0);  in TestSecondDim8Bit() local
    227  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TestSecondDim8Bit()
D | meta_support.cc |
    257  float input_min, float input_max, float output_min,  in Requantize() argument
    270  params.kernel.output_range_min = output_min;  in Requantize()
    274  CalculateOneOverRangeScale<uint8_t>(output_min, output_max);  in Requantize()
    349  float bias_max, float output_min, float output_max,  in QuantizedBiasAdd() argument
    371  params.kernel.output_range_min = output_min;  in QuantizedBiasAdd()
    373  CalculateOneOverRangeScale<int32_t>(output_min, output_max);  in QuantizedBiasAdd()