Searched refs: FloatTensorToQuantized (results 1 – 13 of 13), sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
quantized_batch_norm_op_test.cc
    71   FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
    77   FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max);  in TEST_F()
    82   Tensor variance_quantized = FloatTensorToQuantized<quint8>(  in TEST_F()
    89   FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max);  in TEST_F()
    95   FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max);  in TEST_F()
   168   FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
   174   FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max);  in TEST_F()
   179   Tensor variance_quantized = FloatTensorToQuantized<quint8>(  in TEST_F()
   186   FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max);  in TEST_F()
   192   FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max);  in TEST_F()

quantized_bias_add_op_test.cc
    59   FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
    67   FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);  in TEST_F()
   119   FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
   139   FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);  in TEST_F()

quantized_concat_op_test.cc
    83   FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  in TestSmall8Bit()
    92   FloatTensorToQuantized<quint8>(second_float, second_min, second_max);  in TestSmall8Bit()
   148   FloatTensorToQuantized<qint32>(first_float, first_min, first_max);  in TestSmall32Bit()
   157   FloatTensorToQuantized<qint32>(second_float, second_min, second_max);  in TestSmall32Bit()
   211   FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  in TestSecondDim8Bit()
   220   FloatTensorToQuantized<quint8>(second_float, second_min, second_max);  in TestSecondDim8Bit()

quantized_activation_ops_test.cc
    52   FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
    83   FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()

quantized_conv_ops_test.cc
    74   FloatTensorToQuantized<quint8>(image_float, image_min, image_max);  in TEST_F()
    88   FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max);  in TEST_F()
   291   FloatTensorToQuantized<quint8>(image_float, image_min, image_max);  in TEST_F()
   300   FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max);  in TEST_F()

quantized_pooling_ops_test.cc
    62   FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
   107   FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()

quantized_matmul_op_test.cc
   305   Tensor a_quantized = FloatTensorToQuantized<quint8>(a_float, a_min, a_max);  in TEST_F()
   325   Tensor b_quantized = FloatTensorToQuantized<quint8>(b_float, b_min, b_max);  in TEST_F()

quantization_utils.h
   763   Tensor FloatTensorToQuantized(const Tensor& input, float min, float max) {  in FloatTensorToQuantized() function  (see the usage sketch after these results)

quantization_utils_test.cc
   632   Tensor output = FloatTensorToQuantized<quint8>(input, input_min, input_max);  in TestFloatTensorToQuantized()

/external/tensorflow/tensorflow/core/kernels/mkl/
mkl_quantized_concat_op_test.cc
   119   FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  in TestSmall8Bit()
   130   FloatTensorToQuantized<quint8>(second_float, second_min, second_max);  in TestSmall8Bit()
   192   FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  in TestSecondDim8Bit()
   204   FloatTensorToQuantized<quint8>(second_float, second_min, second_max);  in TestSecondDim8Bit()

mkl_quantized_conv_ops_perchannel_test.cc
   120   FloatTensorToQuantized<quint8>(image_float, image_min, image_max);  in TEST_F()
   143   FloatTensorToQuantized<qint8>(filter_float, filter_min, filter_max);  in TEST_F()

mkl_quantized_pooling_ops_test.cc
    92   FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()
   161   FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  in TEST_F()

mkl_quantized_conv_ops_test.cc
   206   FloatTensorToQuantized<quint8>(image_float, image_min, image_max);  in TEST_F()
   223   FloatTensorToQuantized<qint8>(filter_float, filter_min, filter_max);  in TEST_F()
   331   FloatTensorToQuantized<qint8>(image_float, image_min, image_max);  in TEST_F()
   348   FloatTensorToQuantized<qint8>(filter_float, filter_min, filter_max);  in TEST_F()
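
Usage sketch: quantization_utils.h:763 above defines FloatTensorToQuantized as a function template, and every call site in these results instantiates it as FloatTensorToQuantized<T>(tensor, min, max), where the template argument (quint8, qint8, or qint32 above) selects the quantized output type. The snippet below is a minimal, hypothetical illustration of that call pattern, not code from any file listed; the tensor contents, shape, and [min, max] range are placeholders.

    // Hypothetical sketch (not taken from the files above): quantize a float
    // Tensor into quint8 over an explicit [min, max] range, mirroring the
    // call sites listed in these results.
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_testutil.h"
    #include "tensorflow/core/kernels/quantization_utils.h"

    namespace tensorflow {

    Tensor QuantizeExample() {
      // Placeholder input: a 1x4 float tensor with values in [-1.0f, 1.0f].
      Tensor input_float(DT_FLOAT, TensorShape({1, 4}));
      test::FillValues<float>(&input_float, {-1.0f, -0.5f, 0.5f, 1.0f});

      // Range chosen purely for illustration; the tests above pass the
      // per-tensor min/max appropriate to their own inputs.
      const float input_min = -1.0f;
      const float input_max = 1.0f;

      // Signature from quantization_utils.h:763.
      return FloatTensorToQuantized<quint8>(input_float, input_min, input_max);
    }

    }  // namespace tensorflow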