| /external/XNNPACK/src/ |
| D | microparams-init.c |
|   | 21 int8_t output_min, in xnn_init_qc8_conv_minmax_fp32_scalar_fmagic_params() |
|   | 34 int8_t output_min, in xnn_init_qc8_conv_minmax_fp32_scalar_imagic_params() |
|   | 49 int8_t output_min, in xnn_init_qc8_conv_minmax_fp32_scalar_lrintf_params() |
|   | 62 int8_t output_min, in xnn_init_qc8_conv_minmax_fp32_sse2_params() |
|   | 79 int8_t output_min, in xnn_init_qc8_conv_minmax_fp32_sse4_params() |
|   | 98 int8_t output_min, in xnn_init_qc8_conv_minmax_fp32_avx2_params() |
|   | 117 int8_t output_min, in xnn_init_qc8_conv_minmax_fp32_avx512_params() |
|   | 138 int8_t output_min, in xnn_init_qc8_conv_minmax_fp32_armsimd32_params() |
|   | 153 int8_t output_min, in xnn_init_qc8_conv_minmax_fp32_neon_params() |
|   | 166 int8_t output_min, in xnn_init_qc8_conv_minmax_fp32_neonv8_params() |
|   | [all …] |
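
Every one of these xnn_init_qc8_conv_minmax_fp32_*_params() variants takes the clamping bounds as int8_t output_min/output_max and pre-converts them into the form its fp32 requantization microkernel wants. Below is a minimal sketch of that idea; the struct layout, field names, and the magic-bias constant are illustrative assumptions, not XNNPACK's actual parameter structures.

```cpp
#include <cstdint>

// Hypothetical, simplified stand-in for a *_conv_minmax_fp32_*_params
// initializer: the int8 clamping bounds are stored as floats so the fp32
// requantization path can clamp before the final round-and-narrow step.
struct Qc8MinmaxFp32ParamsSketch {
  float output_min_f;  // lower clamp bound, as float
  float output_max_f;  // upper clamp bound, as float
  float magic_bias;    // extra precomputed field used by an "fmagic"-style
                       // float-to-int rounding trick (value is illustrative)
};

inline Qc8MinmaxFp32ParamsSketch InitQc8MinmaxFp32ParamsSketch(
    int8_t output_min, int8_t output_max) {
  Qc8MinmaxFp32ParamsSketch params;
  params.output_min_f = static_cast<float>(output_min);
  params.output_max_f = static_cast<float>(output_max);
  params.magic_bias = 12582912.0f;  // 0x1.8p+23: assumed rounding constant
  return params;
}
```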
|
| /external/XNNPACK/src/subgraph/ |
| D | global-average-pooling.c |
|   | 77 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_global_average_pooling_operator() local |
|   | 95 …const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero… in create_global_average_pooling_operator() local |
|   | 213 float output_min, in define_global_average_pooling_nd() |
|   | 320 float output_min, in xnn_define_global_average_pooling_1d() |
|   | 332 float output_min, in xnn_define_global_average_pooling_2d() |
|
| D | clamp.c |
|   | 65 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_clamp_operator() local |
|   | 81 …const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero… in create_clamp_operator() local |
|   | 168 float output_min, in xnn_define_clamp() |
|
| D | add2.c |
|   | 64 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_add_operator() local |
|   | 82 …const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero… in create_add_operator() local |
|   | 208 float output_min, in xnn_define_add2() |
|
| D | subtract.c |
|   | 64 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_subtract_operator() local |
|   | 82 …const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero… in create_subtract_operator() local |
|   | 208 float output_min, in xnn_define_subtract() |
|
| D | multiply2.c |
|   | 64 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_multiply_operator() local |
|   | 82 …const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero… in create_multiply_operator() local |
|   | 212 float output_min, in xnn_define_multiply2() |
|
| D | convolution-2d.c |
|   | 141 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_convolution_operator() local |
|   | 175 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_convolution_operator() local |
|   | 211 …const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero… in create_convolution_operator() local |
|   | 451 float output_min, in xnn_define_convolution_2d() |
|
| D | depthwise-convolution-2d.c |
|   | 142 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_convolution_operator() local |
|   | 176 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_convolution_operator() local |
|   | 212 …const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero… in create_convolution_operator() local |
|   | 451 float output_min, in xnn_define_depthwise_convolution_2d() |
|
| D | max-pooling-2d.c |
|   | 85 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_max_pooling_operator() local |
|   | 111 …const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero… in create_max_pooling_operator() local |
|   | 227 float output_min, in xnn_define_max_pooling_2d() |
|
| D | fully-connected.c |
|   | 101 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_fully_connected_operator() local |
|   | 126 …const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero… in create_fully_connected_operator() local |
|   | 294 float output_min, in xnn_define_fully_connected() |
|
| D | deconvolution-2d.c |
|   | 115 …const int8_t output_min = xnn_qs8_quantize(node->activation.output_min, output_scale, output_zero_… in create_deconvolution_operator() local |
|   | 153 …const uint8_t output_min = xnn_qu8_quantize(node->activation.output_min, output_scale, output_zero… in create_deconvolution_operator() local |
|   | 373 float output_min, in xnn_define_deconvolution_2d() |
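
All of the create_*_operator() hits in this directory share one step: the node's float activation bound (node->activation.output_min) is quantized into the output's int8 or uint8 domain before the operator is created. The helper below is a self-contained sketch of that quantize-and-clamp step; its body is an assumption standing in for xnn_qs8_quantize(), whose implementation is not part of this listing.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Map a float activation bound into the signed 8-bit quantized domain,
// saturating to the representable range. Sketch only.
inline int8_t QuantizeQs8Sketch(float value, float scale, int32_t zero_point) {
  const float scaled = value / scale + static_cast<float>(zero_point);
  const float clamped = std::min(std::max(scaled, -128.0f), 127.0f);
  return static_cast<int8_t>(std::lrintf(clamped));
}

// Usage, mirroring the listed call sites (node field flattened into a plain
// variable here):
//   const int8_t output_min =
//       QuantizeQs8Sketch(activation_output_min, output_scale, output_zero_point);
```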
|
| /external/tensorflow/tensorflow/core/kernels/ |
| D | quantize_op_test.cc |
|   | 123 auto output_min = *GetOutput(1); in TEST_P() local |
|   | 203 auto output_min = *GetOutput(1); in TEST_P() local |
|   | 253 auto output_min = *GetOutput(1); in TEST_P() local |
|   | 415 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|   | 437 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|   | 459 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|   | 481 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|   | 525 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|
| D | quantized_activation_ops_test.cc |
|   | 62 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|   | 93 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|
| D | quantized_activation_ops.cc |
|   | 64 Tensor* output_min = nullptr; in Compute() local |
|   | 115 Tensor* output_min = nullptr; in Compute() local |
|
| D | quantized_pooling_ops.cc |
|   | 113 Tensor* output_min = nullptr; in Compute() local |
|   | 149 Tensor* output_min = nullptr; in Compute() local |
|
| D | quantized_add_op.cc |
|   | 50 float output_min, float output_max, Toutput* output) { in ScalarAddition() |
|   | 67 float output_min, float output_max, qint32* output) { in ScalarAddition() |
|   | 124 float output_min, float output_max, qint32* output) { in ScalarAddition() |
|   | 162 int64_t num_elements, float output_min, float output_max, in VectorAddition() |
|   | 178 int64 num_elements, float output_min, float output_max, in VectorAddition() |
|   | 247 int64_t num_elements, float output_min, float output_max, in VectorAddition() |
|   | 294 float output_min, float output_max, Toutput* output) { in VectorTensorAddition() |
|   | 312 float output_min, float output_max, qint32* output) { in VectorTensorAddition() |
|   | 397 float output_min, float output_max, qint32* output) { in VectorTensorAddition() |
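
The ScalarAddition()/VectorAddition()/VectorTensorAddition() overloads above take output_min/output_max because the computed sum has to be re-expressed in the output's quantized range. The template below sketches that float-to-quantized mapping in simplified form, with a plain integer type standing in for TensorFlow's quantized types; it is not the library's quantization_utils.h code and ignores its rounding and vectorization details.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

// Convert a real value into the quantized type T, where T's full integer
// range is stretched linearly over [output_min, output_max]. Sketch only.
template <typename T>
T FloatToQuantizedSketch(float value, float output_min, float output_max) {
  const double lowest = static_cast<double>(std::numeric_limits<T>::lowest());
  const double highest = static_cast<double>(std::numeric_limits<T>::max());
  const double scale =
      (highest - lowest) / (static_cast<double>(output_max) - output_min);
  double q = (static_cast<double>(value) - output_min) * scale + lowest;
  q = std::min(std::max(q, lowest), highest);  // saturate to T's range
  return static_cast<T>(std::lround(q));
}

// e.g. requantizing an accumulated real-valued sum into the output range:
//   const int32_t q = FloatToQuantizedSketch<int32_t>(sum, output_min, output_max);
```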
|
| D | quantized_bias_add_op_test.cc |
|   | 83 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|   | 165 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|
| D | quantized_pooling_ops_test.cc |
|   | 76 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|   | 121 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|
| /external/XNNPACK/test/ |
| D | clamp.cc |
|   | 34 const float output_min = (quantized_output_min - input_zero_point) * input_scale; in TEST_F() local |
|   | 78 const float output_min = (quantized_output_min - input_zero_point) * input_scale; in TEST_F() local |
|   | 116 const float output_min = std::uniform_real_distribution<float>(-128.0f, 0.0f)(rng); in TEST_F() local |
|   | 160 const float output_min = (quantized_output_min - input_zero_point) * input_scale; in TEST_F() local |
|   | 225 const float output_min = (quantized_output_min - input_zero_point) * input_scale; in TEST_F() local |
|   | 284 const float output_min = std::uniform_real_distribution<float>(-128.0f, 0.0f)(rng); in TEST_F() local |
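
These clamp tests derive their float clamp bound in one of two ways visible verbatim in the hits: either drawing it directly from a uniform real distribution, or picking a quantized bound and converting it back to real units with the input's quantization parameters. The snippet below reproduces that second conversion in isolation; the concrete scale, zero point, and bound range are made up for illustration.

```cpp
#include <cstdint>
#include <random>

int main() {
  std::mt19937 rng(7);
  const float input_scale = 0.5f;      // assumed quantization scale
  const int32_t input_zero_point = 3;  // assumed zero point
  // Pick a quantized lower bound, then express it in real units, exactly as
  // the listed test lines do.
  const int32_t quantized_output_min =
      std::uniform_int_distribution<int32_t>(-128, 0)(rng);
  const float output_min =
      (quantized_output_min - input_zero_point) * input_scale;
  (void)output_min;  // a real test would feed this into the clamp definition
  return 0;
}
```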
|
| D | fusion.cc |
|   | 16 float output_min = -0.5f; in TEST() local |
|   | 46 float output_min = -0.5f; in TEST() local |
|   | 74 float output_min = -0.5f; in TEST() local |
|   | 106 float output_min = -0.5f; in TEST() local |
|   | 147 float output_min = -0.5f; in TEST() local |
|   | 177 float output_min = -0.5f; in TEST() local |
|   | 219 float output_min = -0.5f; in TEST() local |
|   | 259 float output_min = -0.5f; in TEST() local |
|   | 291 float output_min = -0.5f; in TEST() local |
|   | 321 float output_min = -0.5f; in TEST() local |
|   | [all …] |
|
| /external/tensorflow/tensorflow/core/kernels/mkl/ |
| D | mkl_quantize_op_test.cc |
|   | 103 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|   | 127 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|   | 151 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
|
| D | mkl_requantize_ops_test.cc |
|   | 162 const float output_min = GetOutput(0)->flat<float>()(0); in TEST_F() local |
|   | 223 const float output_min = GetOutput(0)->flat<float>()(0); in TEST_F() local |
|   | 292 const float output_min = GetOutput(1)->flat<float>()(0); in TEST_F() local |
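
These MKL requantization tests read the computed range back from the op's min/max scalar outputs. The sketch below shows the kind of range computation being verified: scan a qint32 tensor, take its extreme values, and map them back to real units using the input's float range. The function name, the qint32-to-float mapping, and the lack of rounding care are all assumptions, not the op's actual code.

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>
#include <vector>

// Compute the real-valued range actually used by a qint32 tensor whose full
// int32 span represents [input_min, input_max]. Sketch only; assumes a
// non-empty input.
void RequantizationRangeSketch(const std::vector<int32_t>& input,
                               float input_min, float input_max,
                               float* output_min, float* output_max) {
  const auto [lo_it, hi_it] = std::minmax_element(input.begin(), input.end());
  const double int_lowest =
      static_cast<double>(std::numeric_limits<int32_t>::lowest());
  const double int_highest =
      static_cast<double>(std::numeric_limits<int32_t>::max());
  const double scale =
      (static_cast<double>(input_max) - input_min) / (int_highest - int_lowest);
  *output_min = static_cast<float>(input_min + (*lo_it - int_lowest) * scale);
  *output_max = static_cast<float>(input_min + (*hi_it - int_lowest) * scale);
}
```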
|
| /external/XNNPACK/src/operators/ |
| D | lut-elementwise-nc.c |
|   | 29 long output_min, in create_lut_elementwise_nc() |
|   | 152 int8_t output_min, in xnn_create_elu_nc_qs8() |
|   | 186 int8_t output_min, in xnn_create_sigmoid_nc_qs8() |
|   | 223 uint8_t output_min, in xnn_create_sigmoid_nc_qu8() |
|   | 264 int8_t output_min, in xnn_create_tanh_nc_qs8() |
|   | 301 uint8_t output_min, in xnn_create_tanh_nc_qu8() |
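
The QS8/QU8 ELU, sigmoid, and tanh creators above all funnel into create_lut_elementwise_nc(), which needs the clamping bounds because the whole elementwise op is precomputed as a 256-entry lookup table with clamping baked into every entry. The function below illustrates that idea for a signed 8-bit sigmoid table; the name, rounding, and table layout are illustrative, not XNNPACK's implementation.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Fill a 256-entry table mapping every possible int8 input (indexed by its
// raw byte value) to a clamped, requantized sigmoid output. Sketch only.
void BuildSigmoidLutQs8Sketch(int8_t input_zero_point, float input_scale,
                              int8_t output_zero_point, float output_scale,
                              int8_t output_min, int8_t output_max,
                              int8_t table[256]) {
  for (int32_t i = -128; i <= 127; ++i) {
    const float x = (i - input_zero_point) * input_scale;  // dequantize input
    const float y = 1.0f / (1.0f + std::exp(-x));          // sigmoid
    int32_t q = static_cast<int32_t>(std::lrintf(y / output_scale)) +
                output_zero_point;                          // requantize
    q = std::min<int32_t>(std::max<int32_t>(q, output_min), output_max);
    table[static_cast<uint8_t>(i)] = static_cast<int8_t>(q);
  }
}
```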
|
| D | binary-elementwise-nd.c |
|   | 69 float output_min, in create_binary_elementwise_nd_f16() |
|   | 115 float output_min, in create_binary_elementwise_nd_f32() |
|   | 176 int8_t output_min, in xnn_create_add_nd_qs8() |
|   | 254 uint8_t output_min, in xnn_create_add_nd_qu8() |
|   | 326 float output_min, in xnn_create_add_nd_f16() |
|   | 341 float output_min, in xnn_create_add_nd_f32() |
|   | 356 float output_min, in xnn_create_divide_nd_f16() |
|   | 371 float output_min, in xnn_create_divide_nd_f32() |
|   | 450 float output_min, in xnn_create_multiply_nd_f16() |
|   | 465 float output_min, in xnn_create_multiply_nd_f32() |
|   | [all …] |
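
For the quantized add variants (xnn_create_add_nd_qs8(), xnn_create_add_nd_qu8()) the clamping bounds arrive already quantized, and a creation routine's first job is to reject a degenerate clamp range before it builds microkernel params. The check below is a hedged sketch of that validation step; the status enum and message wording are illustrative, not XNNPACK's API.

```cpp
#include <cstdint>
#include <cstdio>

enum class CreateStatus { kSuccess, kInvalidParameter };

// Reject clamp ranges whose lower bound is not strictly below the upper bound.
CreateStatus ValidateClampRangeSketch(int8_t output_min, int8_t output_max) {
  if (output_min >= output_max) {
    std::fprintf(stderr,
                 "failed to create operator: output_min (%d) must be below "
                 "output_max (%d)\n",
                 output_min, output_max);
    return CreateStatus::kInvalidParameter;
  }
  return CreateStatus::kSuccess;
}
```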
|
| /external/tensorflow/tensorflow/lite/kernels/ |
| D | table_test.cc |
|   | 68 inline float GetLUTTolerance(float input_min, float input_max, float output_min, in GetLUTTolerance() |
|   | 95 float output_min = 0.0f; in TableWithExpLUTToInt8Test() local |
|   | 142 float output_min = 0.0f; in TableWithExpLUTToInt16Test() local |
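
GetLUTTolerance() presumably turns the input and output quantization ranges into an absolute error bound for comparing table-lookup results against the float reference. The version below is a guess at that calculation, assuming 8-bit quantization with 255 steps and one quantization step of slack in each domain; the real helper may weight or combine the steps differently.

```cpp
// Guess at a LUT comparison tolerance from the quantization step sizes.
inline float GetLutToleranceSketch(float input_min, float input_max,
                                   float output_min, float output_max) {
  const float input_step = (input_max - input_min) / 255.0f;    // 8-bit steps
  const float output_step = (output_max - output_min) / 255.0f;
  return input_step + output_step;  // one step of error allowed in each domain
}
```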
|