/external/tensorflow/tensorflow/core/kernels/

quantized_batch_norm_op_test.cc
   72  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
   78  FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max);  in TEST_F()
   83  Tensor variance_quantized = FloatTensorToQuantized<quint8>(  in TEST_F()
   90  FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max);  in TEST_F()
   96  FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max);  in TEST_F()
  169  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
  175  FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max);  in TEST_F()
  180  Tensor variance_quantized = FloatTensorToQuantized<quint8>(  in TEST_F()
  187  FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max);  in TEST_F()
  193  FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max);  in TEST_F()

quantized_bias_add_op_test.cc
   59  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
   67  FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);  in TEST_F()
  119  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
  139  FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);  in TEST_F()

quantized_concat_op_test.cc
   83  FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  in TestSmall8Bit()
   92  FloatTensorToQuantized<quint8>(second_float, second_min, second_max);  in TestSmall8Bit()
  148  FloatTensorToQuantized<qint32>(first_float, first_min, first_max);  in TestSmall32Bit()
  157  FloatTensorToQuantized<qint32>(second_float, second_min, second_max);  in TestSmall32Bit()
  211  FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  in TestSecondDim8Bit()
  220  FloatTensorToQuantized<quint8>(second_float, second_min, second_max);  in TestSecondDim8Bit()

mkl_quantized_concat_op_test.cc
  115  FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  in TestSmall8Bit()
  126  FloatTensorToQuantized<quint8>(second_float, second_min, second_max);  in TestSmall8Bit()
  188  FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  in TestSecondDim8Bit()
  200  FloatTensorToQuantized<quint8>(second_float, second_min, second_max);  in TestSecondDim8Bit()

quantized_conv_ops_test.cc
   74  FloatTensorToQuantized<quint8>(image_float, image_min, image_max);  in TEST_F()
   88  FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max);  in TEST_F()
  291  FloatTensorToQuantized<quint8>(image_float, image_min, image_max);  in TEST_F()
  300  FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max);  in TEST_F()

quantized_activation_ops_test.cc
   52  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
   83  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()

quantized_pooling_ops_test.cc
   62  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
  107  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()

mkl_quantized_pooling_ops_test.cc
   92  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
  161  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()

quantized_matmul_op_test.cc
  305  Tensor a_quantized = FloatTensorToQuantized<quint8>(a_float, a_min, a_max);  in TEST_F()
  325  Tensor b_quantized = FloatTensorToQuantized<quint8>(b_float, b_min, b_max);  in TEST_F()

mkl_quantized_conv_ops_test.cc
  124  FloatTensorToQuantized<quint8>(image_float, image_min, image_max);  in TEST_F()
  141  FloatTensorToQuantized<qint8>(filter_float, filter_min, filter_max);  in TEST_F()

quantization_utils_test.cc
  632  Tensor output = FloatTensorToQuantized<quint8>(input, input_min, input_max);  in TestFloatTensorToQuantized()

quantization_utils.h
  754  Tensor FloatTensorToQuantized(const Tensor& input, float min, float max) {  in FloatTensorToQuantized() (function definition)
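The call sites above all follow the same pattern: build a float Tensor, pick an explicit [min, max] range, and convert it to a quantized tensor before feeding the op under test. A minimal sketch of that pattern follows; it assumes the TensorFlow core headers and the test::FillValues helper from tensor_testutil.h are available, and the wrapping function name is illustrative only, not part of any test.

// Sketch only: mirrors the TEST_F() call sites listed above; not a
// definitive recipe from any one test file.
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/quantization_utils.h"

namespace tensorflow {

// Hypothetical helper showing how the tests prepare a quantized input.
Tensor QuantizeInputForTest() {
  // Build a small float tensor, as the tests do before quantizing it.
  Tensor input_float(DT_FLOAT, TensorShape({1, 4}));
  test::FillValues<float>(&input_float, {-1.0f, 0.0f, 0.5f, 1.0f});

  // The range must cover the float values; the tests choose min/max
  // explicitly so the quantization error stays predictable.
  const float input_min = -1.0f;
  const float input_max = 1.0f;

  // Convert to the 8-bit unsigned quantized representation (quint8),
  // exactly as in the call sites above; qint8/qint32 are used the same way.
  Tensor input_quantized =
      FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
  return input_quantized;
}

}  // namespace tensorflow

In the tests, the returned quantized tensor is then typically fed to the op under test together with the same min/max values as scalar inputs, and the op's quantized output is converted back to float and compared against a float reference within a small tolerance.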