/external/tensorflow/tensorflow/lite/micro/kernels/ |
D | pooling_test.cc |
    104  std::initializer_list<int> output_dims_data, float output_min,  in TestAveragePoolingQuantized() argument
    120  output_min, output_max),  in TestAveragePoolingQuantized()
    246  float output_min, float output_max,  in TestMaxPoolQuantized() argument
    263  output_min, output_max),  in TestMaxPoolQuantized()
    344  const float output_min = -15.9375;  in TF_LITE_MICRO_TEST() local
    365  F2Q(0., output_min, output_max),  in TF_LITE_MICRO_TEST()
    366  F2Q(0.75, output_min, output_max),  in TF_LITE_MICRO_TEST()
    369  output_min, output_max,  // output quantization range  in TF_LITE_MICRO_TEST()
    378  const float output_min = -15.9375;  in TF_LITE_MICRO_TEST() local
    392  F2QS(-0.25, output_min, output_max), F2QS(0.75, output_min, output_max)},  in TF_LITE_MICRO_TEST()
    [all …]
|
D | mul_test.cc |
    101  const float output_min, const float output_max,  in TestMulQuantized() argument
    121  output_min, output_max),  in TestMulQuantized()
    180  const float output_min = -1;  in TF_LITE_MICRO_TEST() local
    200  output_min, output_max,  in TF_LITE_MICRO_TEST()
    202  F2QS(-0.48, output_min, output_max),  in TF_LITE_MICRO_TEST()
    203  F2QS(0.08, output_min, output_max),  in TF_LITE_MICRO_TEST()
    204  F2QS(0.81, output_min, output_max),  in TF_LITE_MICRO_TEST()
    205  F2QS(0.56, output_min, output_max),  in TF_LITE_MICRO_TEST()
    214  const float output_min = -10;  in TF_LITE_MICRO_TEST() local
    234  input_min, input_max, {4, 1, 2, 2, 1}, output_min, output_max,  in TF_LITE_MICRO_TEST()
    [all …]
|
D | fully_connected_test.cc |
    104  const float output_min, const float output_max,  in TestFullyConnectedQuantized() argument
    122  output_min, output_max),  in TestFullyConnectedQuantized()
    269  const float output_min = -127.0f;  in TF_LITE_MICRO_TEST() local
    310  F2Q(24, output_min, output_max), F2Q(25, output_min, output_max),  in TF_LITE_MICRO_TEST()
    311  F2Q(26, output_min, output_max), F2Q(58, output_min, output_max),  in TF_LITE_MICRO_TEST()
    312  F2Q(59, output_min, output_max), F2Q(60, output_min, output_max),  in TF_LITE_MICRO_TEST()
    321  bias_scale, expected_output_data, output_dims_data, output_min,  in TF_LITE_MICRO_TEST()
    335  const float output_min = -127.0f;  in TF_LITE_MICRO_TEST() local
    376  F2QS(24, output_min, output_max), F2QS(25, output_min, output_max),  in TF_LITE_MICRO_TEST()
    377  F2QS(26, output_min, output_max), F2QS(58, output_min, output_max),  in TF_LITE_MICRO_TEST()
    [all …]
|
D | softmax_test.cc |
    93   float output_min, float output_max,  in TestSoftmaxQuantized() argument
    106  output_min, output_max),  in TestSoftmaxQuantized()
    159  std::initializer_list<int> output_dims_data, float output_min,  in TestSoftmaxQuantizedSigned() argument
    172  output_min, output_max),  in TestSoftmaxQuantizedSigned()
    258  const float output_min = 0.0f;  in TF_LITE_MICRO_TEST() local
    274  F2Q(0.011656231, output_min, output_max),  in TF_LITE_MICRO_TEST()
    275  F2Q(0.031684921, output_min, output_max),  in TF_LITE_MICRO_TEST()
    276  F2Q(0.086128544, output_min, output_max),  in TF_LITE_MICRO_TEST()
    277  F2Q(0.234121657, output_min, output_max),  in TF_LITE_MICRO_TEST()
    278  F2Q(0.636408647, output_min, output_max),  in TF_LITE_MICRO_TEST()
    [all …]
|
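The F2Q/F2QS helpers used throughout these TFLite Micro tests map a float reference value into the quantized output range given by output_min/output_max (F2Q onto uint8, F2QS onto int8). A minimal C++ sketch of that affine mapping, assuming the usual 256-level scheme; the real helpers live in the TFLite Micro test utilities and may differ in rounding and zero-point details:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // F2Q-style sketch: quantize `value` into [output_min, output_max] as uint8.
    uint8_t FloatToQuantizedU8(float value, float output_min, float output_max) {
      const float scale = (output_max - output_min) / 255.0f;
      const float zero_point = std::round(-output_min / scale);
      const float q = std::round(value / scale) + zero_point;
      return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, q)));
    }

    // F2QS-style sketch: the same mapping onto the signed int8 grid.
    int8_t FloatToQuantizedS8(float value, float output_min, float output_max) {
      const float scale = (output_max - output_min) / 255.0f;
      const float zero_point = std::round(-128.0f - output_min / scale);
      const float q = std::round(value / scale) + zero_point;
      return static_cast<int8_t>(std::min(127.0f, std::max(-128.0f, q)));
    }

For example, the pooling tests above use output_min = -15.9375 and output_max = 15.9375, i.e. a step size of 31.875 / 255 = 0.125, so expected values such as 0.75 and -0.25 are exact multiples of the step and quantization introduces no error.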
/external/XNNPACK/src/xnnpack/ |
D | params-init.h |
    34   uint8_t output_min,  in xnn_init_scalar_q8_gemm_params() argument
    61   (int32_t) (uint32_t) output_min - (int32_t) (uint32_t) output_zero_point;  in xnn_init_scalar_q8_gemm_params()
    73   uint8_t output_min,  in xnn_init_q8_gemm_params() argument
    118  params.sse2.output_min[i] = output_min;  in xnn_init_q8_gemm_params()
    127  params.neon.output_min = output_min;  in xnn_init_q8_gemm_params()
    138  (int32_t) (uint32_t) output_min - (int32_t) (uint32_t) output_zero_point;  in xnn_init_q8_gemm_params()
    150  uint8_t output_min,  in xnn_init_q8_avgpool_params() argument
    189  params.sse2.output_min[i] = output_min;  in xnn_init_q8_avgpool_params()
    197  params.neon.output_min = output_min;  in xnn_init_q8_avgpool_params()
    206  (int32_t) (uint32_t) output_min - (int32_t) (uint32_t) output_zero_point;  in xnn_init_q8_avgpool_params()
    [all …]
|
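The init helpers in params-init.h bake the clamp bounds into per-architecture parameter structs: the SSE2 variants replicate output_min across all vector lanes, the NEON variants store it once, and the scalar variants pre-subtract the output zero point so the clamp can be applied before the zero point is added back. A rough scalar sketch of that idea (illustrative struct and function names, not the actual XNNPACK structs or microkernel code):

    #include <algorithm>
    #include <cstdint>

    // Illustrative scalar Q8 output-stage parameters: clamp bounds stored
    // relative to the output zero point, mirroring xnn_init_scalar_q8_*_params.
    struct ScalarQ8OutputParams {
      int32_t output_min_less_zero_point;
      int32_t output_max_less_zero_point;
      int32_t output_zero_point;
    };

    ScalarQ8OutputParams InitScalarQ8OutputParams(uint8_t output_min, uint8_t output_max,
                                                  uint8_t output_zero_point) {
      return ScalarQ8OutputParams{
          (int32_t) output_min - (int32_t) output_zero_point,
          (int32_t) output_max - (int32_t) output_zero_point,
          (int32_t) output_zero_point};
    }

    // Output stage of a scalar Q8 kernel: clamp the rescaled accumulator in the
    // zero-point-relative domain, then add the zero point back and store as uint8.
    uint8_t ApplyScalarQ8OutputStage(int32_t rescaled_acc, const ScalarQ8OutputParams& p) {
      const int32_t clamped = std::min(p.output_max_less_zero_point,
                                       std::max(p.output_min_less_zero_point, rescaled_acc));
      return (uint8_t) (clamped + p.output_zero_point);
    }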
/external/tensorflow/tensorflow/core/kernels/ |
D | quantized_add_op.cc |
    48   float scalar_input_max, float output_min, float output_max,  in ScalarAddition() argument
    51   scalar_input, scalar_input_min, scalar_input_max, output_min, output_max);  in ScalarAddition()
    54   full_input[i], full_input_min, full_input_max, output_min, output_max);  in ScalarAddition()
    66   float output_min, float output_max, qint32* output) {  in ScalarAddition() argument
    68   scalar_input, scalar_input_min, scalar_input_max, output_min, output_max);  in ScalarAddition()
    75   FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max);  in ScalarAddition()
    77   FloatToQuantizedUnclamped<qint32>(input_1_float, output_min, output_max);  in ScalarAddition()
    123  float output_min, float output_max, qint32* output) {  in ScalarAddition() argument
    125  scalar_input, scalar_input_min, scalar_input_max, output_min, output_max);  in ScalarAddition()
    132  FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max);  in ScalarAddition()
    [all …]
|
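ScalarAddition in quantized_add_op.cc maps the scalar operand and each element of the tensor operand into the common 32-bit output range before adding, so the sum is computed in a single quantization domain. A simplified sketch of why that works, using a hypothetical FloatToCode32 in place of TensorFlow's FloatToQuantizedUnclamped<qint32>:

    #include <cstdint>
    #include <vector>

    // Hypothetical helper: express `value` as a step count above range_min,
    // where the full range [range_min, range_max] spans 2^32 - 1 steps.
    int64_t FloatToCode32(float value, float range_min, float range_max) {
      const double step = (static_cast<double>(range_max) - range_min) / 4294967295.0;
      return static_cast<int64_t>((value - range_min) / step);
    }

    // Add a scalar to every element once both are expressed in the output range:
    // code(v) + code(s) - code(0) == code(v + s), so the sum is pure integer math.
    std::vector<int64_t> AddScalarInQuantizedDomain(const std::vector<float>& values, float scalar,
                                                    float output_min, float output_max) {
      const int64_t scalar_code = FloatToCode32(scalar, output_min, output_max);
      const int64_t zero_code = FloatToCode32(0.0f, output_min, output_max);
      std::vector<int64_t> output_codes;
      output_codes.reserve(values.size());
      for (float v : values) {
        output_codes.push_back(FloatToCode32(v, output_min, output_max) + scalar_code - zero_code);
      }
      return output_codes;
    }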
D | quantized_batch_norm_op.cc |
    38   Tensor* output, float* output_min, float* output_max) {  in ReferenceBatchNorm() argument
    49   *output_min = std::numeric_limits<float>::max();  in ReferenceBatchNorm()
    78   *output_min = std::min(output_value, *output_min);  in ReferenceBatchNorm()
    82   FloatToQuantized<T2>(output_value, *output_min, *output_max);  in ReferenceBatchNorm()
    101  Tensor* output, float* output_min, float* output_max) {  in FixedPointBatchNorm() argument
    115  *output_min = -(1 << 20);  in FixedPointBatchNorm()
    139  FloatToQuantized<T2>(scale_value, *output_min, *output_max);  in FixedPointBatchNorm()
    141  FloatToQuantized<T2>(offset_value, *output_min, *output_max);  in FixedPointBatchNorm()
    145  FloatToQuantized<T2>(1.0f, *output_min, *output_max);  in FixedPointBatchNorm()
    151  input_max, *output_min, *output_max);  in FixedPointBatchNorm()
    [all …]
|
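The two implementations above handle the output range differently: ReferenceBatchNorm starts *output_min at FLT_MAX and narrows it from the computed float results before quantizing with FloatToQuantized, while FixedPointBatchNorm pins the range to +/-(1 << 20) so the quantization parameters are fixed up front. A two-pass sketch of the dynamic-range flavour, with a hypothetical QuantizeToU8 standing in for FloatToQuantized<T2> (the real kernel's pass structure may differ):

    #include <algorithm>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // Hypothetical stand-in for FloatToQuantized<quint8>: affine map of `value`
    // from [range_min, range_max] onto 0..255.
    uint8_t QuantizeToU8(float value, float range_min, float range_max) {
      const float scale = (range_max - range_min) / 255.0f;
      if (scale == 0.0f) return 0;  // Degenerate range: everything maps to one code.
      const float q = (value - range_min) / scale + 0.5f;
      return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, q)));
    }

    // Two-pass pattern: first derive output_min/output_max from the float results,
    // then quantize every result into that range.
    void QuantizeWithDynamicRange(const std::vector<float>& float_outputs,
                                  std::vector<uint8_t>* quantized,
                                  float* output_min, float* output_max) {
      *output_min = std::numeric_limits<float>::max();
      *output_max = std::numeric_limits<float>::lowest();
      for (float v : float_outputs) {
        *output_min = std::min(v, *output_min);
        *output_max = std::max(v, *output_max);
      }
      quantized->clear();
      for (float v : float_outputs) {
        quantized->push_back(QuantizeToU8(v, *output_min, *output_max));
      }
    }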
D | quantized_concat_op.cc |
    35   float output_min, float output_max)  in RequantizeCopier()
    36   : output_min(output_min),  in RequantizeCopier()
    43   if (input_min == output_min && input_max == output_max) {  in Copy()
    54   FloatToQuantizedStruct<T> f2q(output_min, output_max);  in Copy()
    64   float output_min;  member
    82   float* output_min, float* output_max) {  in CalculateInputAndOutputRange() argument
    100  *output_min = -largest_value;  in CalculateInputAndOutputRange()
    103  *output_min = overall_min;  in CalculateInputAndOutputRange()
    192  float output_min = std::numeric_limits<float>::max();  in Compute() local
    196  &input_mins_and_maxes, &output_min,  in Compute()
    [all …]
|
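For quantized concat, all inputs must end up in one shared range: CalculateInputAndOutputRange scans the per-input (min, max) pairs, and RequantizeCopier::Copy can then copy inputs whose range already equals the output range verbatim and requantize the rest. A sketch of the range-merging step, assuming the simple policy of taking the overall min/max plus the symmetric branch visible above (the real kernel's choice of when to widen the range is not shown in this excerpt):

    #include <algorithm>
    #include <cmath>
    #include <limits>
    #include <utility>
    #include <vector>

    // Merge per-input quantization ranges into one output range for concat.
    // `symmetric` mimics the branch that widens the range to +/- the largest
    // magnitude seen, which keeps zero exactly representable.
    void CalculateConcatOutputRange(const std::vector<std::pair<float, float>>& input_ranges,
                                    bool symmetric, float* output_min, float* output_max) {
      float overall_min = std::numeric_limits<float>::max();
      float overall_max = std::numeric_limits<float>::lowest();
      for (const auto& range : input_ranges) {
        overall_min = std::min(overall_min, range.first);
        overall_max = std::max(overall_max, range.second);
      }
      if (symmetric) {
        const float largest_value = std::max(std::fabs(overall_min), std::fabs(overall_max));
        *output_min = -largest_value;
        *output_max = largest_value;
      } else {
        *output_min = overall_min;
        *output_max = overall_max;
      }
    }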
D | quantize_op_test.cc |
    123  auto output_min = *GetOutput(1);  in TEST_P() local
    127  EXPECT_EQ(output_min.flat<float>()(slice_idx), 0);  in TEST_P()
    203  auto output_min = *GetOutput(1);  in TEST_P() local
    207  EXPECT_EQ(output_min.flat<float>()(slice_idx), -128.0 * (slice_idx + 1));  in TEST_P()
    253  auto output_min = *GetOutput(1);  in TEST_P() local
    257  EXPECT_EQ(output_min.flat<float>()(slice_idx), -128.0 * (slice_idx + 1));  in TEST_P()
    415  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    417  EXPECT_NEAR(0.0f, output_min, 1e-5f);  in TEST_F()
    437  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    439  EXPECT_NEAR(0.0f, output_min, 1e-5f);  in TEST_F()
    [all …]
|
D | quantized_activation_ops.cc |
    51   Tensor* output_min = nullptr;  in Compute() local
    52   OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));  in Compute()
    53   output_min->flat<float>()(0) = min_input;  in Compute()
    89   Tensor* output_min = nullptr;  in Compute() local
    90   OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));  in Compute()
    91   output_min->flat<float>()(0) = min_input;  in Compute()
|
D | quantized_pooling_ops.cc |
    94   Tensor* output_min = nullptr;  in Compute() local
    95   OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));  in Compute()
    96   output_min->flat<float>()(0) = min_input;  in Compute()
    118  Tensor* output_min = nullptr;  in Compute() local
    119  OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));  in Compute()
    120  output_min->flat<float>()(0) = min_input;  in Compute()
|
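The quantized activation and pooling kernels above share one contract: the arithmetic runs directly on the quantized values, and because these ops never produce a value outside the input's float range, they simply allocate two scalar outputs and forward min_input/max_input as output_min/output_max. A standalone sketch of that idea for a uint8 ReLU (not the TensorFlow kernel itself; the zero-code computation is an assumption about the quantization scheme and expects max_input > min_input):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Quantized ReLU on uint8 data: clamp at the quantized code of 0.0 and
    // pass the quantization range through unchanged.
    void QuantizedReluU8(const std::vector<uint8_t>& input, float min_input, float max_input,
                         std::vector<uint8_t>* output, float* output_min, float* output_max) {
      // Quantized code that represents the float value 0.0 in [min_input, max_input].
      const float scale = (max_input - min_input) / 255.0f;
      const uint8_t zero_code = static_cast<uint8_t>(
          std::min(255.0f, std::max(0.0f, -min_input / scale + 0.5f)));
      output->resize(input.size());
      for (size_t i = 0; i < input.size(); ++i) {
        (*output)[i] = std::max(input[i], zero_code);
      }
      // Same range in, same range out, mirroring output_min->flat<float>()(0) = min_input.
      *output_min = min_input;
      *output_max = max_input;
    }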
D | mkl_requantize_ops_test.cc |
    163  const float output_min = GetOutput(0)->flat<float>()(0);  in TEST_F() local
    165  EXPECT_NEAR(-14.8217, output_min, 0.002);  in TEST_F()
    224  const float output_min = GetOutput(0)->flat<float>()(0);  in TEST_F() local
    226  EXPECT_NEAR(-6.0, output_min, 0.002);  // Values are aligned with clip_value.  in TEST_F()
    293  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    295  EXPECT_NEAR(range_op_output_min, output_min, 0.002);  in TEST_F()
|
D | quantization_utils_test.cc |
    35   float input_max, float output_min, float output_max,  in TestRequantizeMany() argument
    44   output_min, output_max));  in TestRequantizeMany()
    55   input_max, output_min, output_max,  in TestRequantizeMany()
    59   *eigen_device, i_tensor, input_min, input_max, output_min, output_max,  in TestRequantizeMany()
    71   << ", input_max=" << input_max << ", output_min=" << output_min  in TestRequantizeMany()
    77   float output_min, float output_max,  in TestRequantizeMany8To32Bit() argument
    86   output_min, output_max));  in TestRequantizeMany8To32Bit()
    96   input_max, output_min, output_max,  in TestRequantizeMany8To32Bit()
    107  << ", input_max=" << input_max << ", output_min=" << output_min  in TestRequantizeMany8To32Bit()
    232  const float output_min = ranges[range_index][2];  in TestRequantizeManyInNewRangeEigenVsNonEigen() local
    [all …]
|
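TestRequantizeMany and TestRequantizeMany8To32Bit exercise requantization: re-expressing values quantized against [input_min, input_max] in a different [output_min, output_max] range. The reference behaviour is just a dequantize/requantize round trip through float, sketched here with hypothetical helpers (TensorFlow's RequantizeManyInNewRange uses integer-only arithmetic for speed, which is what these tests compare against):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Dequantize a uint8 code from [range_min, range_max] back to float.
    float U8ToFloat(uint8_t code, float range_min, float range_max) {
      return range_min + code * (range_max - range_min) / 255.0f;
    }

    // Quantize a float into [range_min, range_max] as uint8, clamping at the edges.
    uint8_t FloatToU8(float value, float range_min, float range_max) {
      const float q = std::round((value - range_min) * 255.0f / (range_max - range_min));
      return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, q)));
    }

    // Reference requantization: values quantized against the input range are
    // re-expressed in the output range by going through float.
    std::vector<uint8_t> RequantizeReference(const std::vector<uint8_t>& input, float input_min,
                                             float input_max, float output_min, float output_max) {
      std::vector<uint8_t> output;
      output.reserve(input.size());
      for (uint8_t code : input) {
        output.push_back(FloatToU8(U8ToFloat(code, input_min, input_max), output_min, output_max));
      }
      return output;
    }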
D | quantized_activation_ops_test.cc |
    62   const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    65   QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TEST_F()
    93   const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    96   QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  in TEST_F()
|
D | quantized_reshape_op.cc |
    36   Tensor* output_min = nullptr;  in Compute() local
    37   OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min));  in Compute()
    38   output_min->flat<float>()(0) = input_min_float;  in Compute()
|
D | mkl_quantize_op_test.cc |
    123  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    125  EXPECT_NEAR(0.0f, output_min, 1e-5f);  in TEST_F()
    153  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    155  EXPECT_NEAR(0.0f, output_min, 1e-5f);  in TEST_F()
    183  const float output_min = GetOutput(1)->flat<float>()(0);  in TEST_F() local
    185  EXPECT_NEAR(-0.8f, output_min, 1e-5f);  in TEST_F()
|
/external/XNNPACK/src/ |
D | subgraph.c |
    128  float output_min,  in xnn_define_convolution_2d() argument
    186  if (isnan(output_min)) {  in xnn_define_convolution_2d()
    198  if (output_min >= output_max) {  in xnn_define_convolution_2d()
    202  output_min, output_max);  in xnn_define_convolution_2d()
    253  node->activation.output_min = output_min;  in xnn_define_convolution_2d()
    280  float output_min,  in xnn_define_depthwise_convolution_2d() argument
    332  if (isnan(output_min)) {  in xnn_define_depthwise_convolution_2d()
    344  if (output_min >= output_max) {  in xnn_define_depthwise_convolution_2d()
    348  output_min, output_max);  in xnn_define_depthwise_convolution_2d()
    398  node->activation.output_min = output_min;  in xnn_define_depthwise_convolution_2d()
    [all …]
|
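xnn_define_convolution_2d and xnn_define_depthwise_convolution_2d perform the same front-door checks before recording the clamp on the node: output_min must not be NaN, and output_min must be strictly below output_max. A condensed sketch of that validation logic (illustrative struct and function names, not the XNNPACK internals):

    #include <cmath>
    #include <cstdio>

    // Illustrative node activation record, mirroring node->activation.output_min/max.
    struct ActivationRange {
      float output_min;
      float output_max;
    };

    // Validate and record the requested output clamp; returns false on bad input.
    bool DefineActivationRange(float output_min, float output_max, ActivationRange* activation) {
      if (std::isnan(output_min) || std::isnan(output_max)) {
        std::fprintf(stderr, "failed to define operator: NaN output bound\n");
        return false;
      }
      if (output_min >= output_max) {
        std::fprintf(stderr,
                     "failed to define operator: output_min %.7g must be below output_max %.7g\n",
                     output_min, output_max);
        return false;
      }
      activation->output_min = output_min;
      activation->output_max = output_max;
      return true;
    }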
D | global-average-pooling-nwc.c |
    31   uint8_t output_min,  in xnn_create_global_average_pooling_nwc_q8() argument
    85   if (output_min >= output_max) {  in xnn_create_global_average_pooling_nwc_q8()
    89   output_min, output_max);  in xnn_create_global_average_pooling_nwc_q8()
    127  global_average_pooling_op->output_min = output_min;  in xnn_create_global_average_pooling_nwc_q8()
    147  float output_min,  in xnn_create_global_average_pooling_nwc_f32() argument
    185  if (isnan(output_min)) {  in xnn_create_global_average_pooling_nwc_f32()
    197  if (output_min >= output_max) {  in xnn_create_global_average_pooling_nwc_f32()
    201  output_min, output_max);  in xnn_create_global_average_pooling_nwc_f32()
    224  …global_average_pooling_op->f32_avgpool_params = xnn_init_f32_avgpool_params(nanf(""), output_min, …  in xnn_create_global_average_pooling_nwc_f32()
    278  global_average_pooling_op->output_min,  in xnn_setup_global_average_pooling_nwc_q8()
|
D | binary-elementwise-nd.c |
    21   float output_min,  in create_binary_elementwise_nd_f32() argument
    37   if (isnan(output_min)) {  in create_binary_elementwise_nd_f32()
    49   if (output_min >= output_max) {  in create_binary_elementwise_nd_f32()
    52   output_min, output_max);  in create_binary_elementwise_nd_f32()
    64   binary_elementwise_op->f32_output_params = xnn_init_f32_output_params(output_min, output_max);  in create_binary_elementwise_nd_f32()
    80   float output_min,  in xnn_create_add_nd_f32() argument
    86   output_min, output_max, flags, xnn_operator_type_add_nd_f32, add_op_out);  in xnn_create_add_nd_f32()
    90   float output_min,  in xnn_create_divide_nd_f32() argument
    96   output_min, output_max, flags, xnn_operator_type_divide_nd_f32, divide_op_out);  in xnn_create_divide_nd_f32()
    118  float output_min,  in xnn_create_multiply_nd_f32() argument
    [all …]
|
D | clamp-nc.c |
    26   uint8_t output_min,  in xnn_create_clamp_nc_u8() argument
    63   if (output_min >= output_max) {  in xnn_create_clamp_nc_u8()
    66   output_min, output_max);  in xnn_create_clamp_nc_u8()
    81   clamp_op->u8_output_params = xnn_init_u8_output_params(output_min, output_max);  in xnn_create_clamp_nc_u8()
    100  float output_min,  in xnn_create_clamp_nc_f32() argument
    137  if (isnan(output_min)) {  in xnn_create_clamp_nc_f32()
    149  if (output_min >= output_max) {  in xnn_create_clamp_nc_f32()
    152  output_min, output_max);  in xnn_create_clamp_nc_f32()
    167  clamp_op->f32_output_params = xnn_init_f32_output_params(output_min, output_max);  in xnn_create_clamp_nc_f32()
|
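The clamp operator is the distilled version of what every other f32 operator's output stage does: at create time it validates and stores [output_min, output_max], and at run time each element is clamped into that interval. A minimal sketch of the run-time step (a plain loop; the real microkernels are vectorized and driven by the params initialized above):

    #include <algorithm>
    #include <cstddef>

    // Clamp `size` contiguous float elements into [output_min, output_max].
    void ClampF32(const float* input, float* output, size_t size,
                  float output_min, float output_max) {
      for (size_t i = 0; i < size; ++i) {
        output[i] = std::min(std::max(input[i], output_min), output_max);
      }
    }

Callers that want no clamping typically pass -INFINITY/+INFINITY as the bounds, which the create-time checks above accept: neither value is NaN and -INFINITY is strictly below +INFINITY.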
D | global-average-pooling-ncw.c |
    22   float output_min,  in xnn_create_global_average_pooling_ncw_f32() argument
    44   if (isnan(output_min)) {  in xnn_create_global_average_pooling_ncw_f32()
    56   if (output_min >= output_max) {  in xnn_create_global_average_pooling_ncw_f32()
    60   output_min, output_max);  in xnn_create_global_average_pooling_ncw_f32()
    81   …ooling_op->f32_gavgpool_params = xnn_init_f32_gavgpool_params(nanf(""), output_min, output_max, 0);  in xnn_create_global_average_pooling_ncw_f32()
|
D | fully-connected-nc.c |
    39   uint8_t output_min,  in xnn_create_fully_connected_nc_q8() argument
    105  if (output_min >= output_max) {  in xnn_create_fully_connected_nc_q8()
    109  output_min, output_max);  in xnn_create_fully_connected_nc_q8()
    172  requantization_scale, output_zero_point, output_min, output_max);  in xnn_create_fully_connected_nc_q8()
    201  float output_min,  in xnn_create_fully_connected_nc_f32() argument
    246  if (isnan(output_min)) {  in xnn_create_fully_connected_nc_f32()
    258  if (output_min >= output_max) {  in xnn_create_fully_connected_nc_f32()
    261  output_min, output_max);  in xnn_create_fully_connected_nc_f32()
    307  fully_connected_op->f32_output_params = xnn_init_f32_output_params(output_min, output_max);  in xnn_create_fully_connected_nc_f32()
|
D | max-pooling-nhwc.c |
    52   uint8_t output_min,  in xnn_create_max_pooling2d_nhwc_u8() argument
    121  if (output_min >= output_max) {  in xnn_create_max_pooling2d_nhwc_u8()
    125  output_min, output_max);  in xnn_create_max_pooling2d_nhwc_u8()
    152  max_pooling_op->u8_output_params = xnn_init_u8_output_params(output_min, output_max);  in xnn_create_max_pooling2d_nhwc_u8()
    181  float output_min,  in xnn_create_max_pooling2d_nhwc_f32() argument
    251  if (isnan(output_min)) {  in xnn_create_max_pooling2d_nhwc_f32()
    263  if (output_min >= output_max) {  in xnn_create_max_pooling2d_nhwc_f32()
    266  output_min, output_max);  in xnn_create_max_pooling2d_nhwc_f32()
    293  max_pooling_op->f32_output_params = xnn_init_f32_output_params(output_min, output_max);  in xnn_create_max_pooling2d_nhwc_f32()
|
/external/XNNPACK/include/ |
D | xnnpack.h |
    239  float output_min,
    288  float output_min,
    317  float output_min,
    345  float output_min,
    380  float output_min,
    508  float output_min,
    534  float output_min,
    561  float output_min,
    579  float output_min,
    609  float output_min,
    [all …]
|
/external/XNNPACK/test/ |
D | argmaxpool-microkernel-tester.h |
    208  const float output_min = accumulated_min + float(qmin()) / 255.0f * accumulated_range;  variable
    215  output_params = xnn_init_f32_output_params(output_min, output_max);
    218  output_params = xnn_init_scalar_f32_output_params(output_min, output_max);
    224  output_value = std::max(std::min(output_value, output_max), output_min);
    237  ASSERT_GE(output[x * output_stride() + c], output_min)
    311  const float output_min = accumulated_min + float(qmin()) / 255.0f * accumulated_range;  variable
    318  output_params = xnn_init_f32_output_params(output_min, output_max);
    321  output_params = xnn_init_scalar_f32_output_params(output_min, output_max);
    327  output_value = std::max(std::min(output_value, output_max), output_min);
    342  ASSERT_GE(output[x * output_stride() + c], output_min)
|
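The micro-kernel tester derives its clamp bounds from the data rather than fixing them: qmin in 0..255 selects what fraction of the accumulated value range is cut off from the bottom, so output_min = accumulated_min + qmin / 255 * accumulated_range (lines 208 and 311 above), and the reference output is clamped into the same interval before being compared against the kernel under test. A small sketch of that derivation; the output_max formula mirrors output_min from the top of the range and is an assumption, since it is not shown in the excerpt above:

    #include <cstdint>

    // Derive f32 clamp bounds from the accumulated min/max, parameterized by
    // qmin/qmax in 0..255, in the style of the argmaxpool micro-kernel tester.
    void DeriveClampBounds(float accumulated_min, float accumulated_max,
                           uint8_t qmin, uint8_t qmax,
                           float* output_min, float* output_max) {
      const float accumulated_range = accumulated_max - accumulated_min;
      *output_min = accumulated_min + float(qmin) / 255.0f * accumulated_range;
      *output_max = accumulated_max - float(255 - qmax) / 255.0f * accumulated_range;
      // The reference result is then clamped into the same interval, e.g.:
      //   output_value = std::max(std::min(output_value, *output_max), *output_min);
    }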