Occurrences of the quantized 8-bit type quint8 under external/tensorflow, grouped by file (source line numbers and enclosing function shown for each match; truncated listings keep their [all …] marker):

/external/tensorflow/tensorflow/core/kernels/
quantized_batch_norm_op_test.cc
      71  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  // in TEST_F()
      77  FloatTensorToQuantized<quint8>(mean_float, mean_min, mean_max);  // in TEST_F()
      82  Tensor variance_quantized = FloatTensorToQuantized<quint8>(  // in TEST_F()
      89  FloatTensorToQuantized<quint8>(beta_float, beta_min, beta_max);  // in TEST_F()
      95  FloatTensorToQuantized<quint8>(gamma_float, gamma_min, gamma_max);  // in TEST_F()
      97  AddInputFromArray<quint8>(input_quantized.shape(),  // in TEST_F()
      98  input_quantized.flat<quint8>());  // in TEST_F()
     101  AddInputFromArray<quint8>(mean_quantized.shape(),  // in TEST_F()
     102  mean_quantized.flat<quint8>());  // in TEST_F()
     105  AddInputFromArray<quint8>(variance_quantized.shape(),  // in TEST_F()
     [all …]
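Nearly all of the *_test.cc hits in this index follow the round-trip pattern this batch-norm test shows: quantize float fixtures into quint8 codes, feed the codes plus explicit min/max range tensors to the op, then dequantize the output and compare against a float reference. A condensed, hedged sketch of that pattern, assuming TF's OpsTestBase fixture and quantization_utils.h as in the real tests (it paraphrases rather than copies the file; the NodeDefBuilder setup is omitted):

// Sketch of the shared quantized-kernel test pattern (inside a TEST_F body
// of an OpsTestBase-derived fixture; op construction omitted).
Tensor input_float(DT_FLOAT, TensorShape({1, 1, 6, 2}));
Tensor expected_float(DT_FLOAT, input_float.shape());
// ... fill input_float and the float reference expected_float ...
const float input_min = -128.0f;
const float input_max = 127.0f;

// Encode the float fixture into quint8 codes plus an explicit range.
Tensor input_quantized =
    FloatTensorToQuantized<quint8>(input_float, input_min, input_max);

// Feed codes and range to the quantized op.
AddInputFromArray<quint8>(input_quantized.shape(),
                          input_quantized.flat<quint8>());
AddInputFromArray<float>(TensorShape({1}), {input_min});
AddInputFromArray<float>(TensorShape({1}), {input_max});
// ... mean/variance/beta/gamma inputs are quantized and added the same way ...

TF_ASSERT_OK(RunOpKernel());

// Decode the output using the range the op reports, then compare in float.
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
Tensor output_float =
    QuantizedTensorToFloat<quint8>(*GetOutput(0), output_min, output_max);
test::ExpectTensorNear<float>(expected_float, output_float, 0.1);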
quantization_utils_test.cc
      39  std::vector<quint8> expected_values;  // in TestRequantizeMany()
      42  expected_values.push_back(FloatToQuantized<quint8>(  // in TestRequantizeMany()
      50  auto output_values = o_tensor.flat<quint8>();  // in TestRequantizeMany()
      58  RequantizeManyInNewRangeUsingEigen<qint32, quint8>(  // in TestRequantizeMany()
      78  const std::vector<quint8>& values_quantized,  // in TestRequantizeMany8To32Bit()
      90  tensorflow::test::AsTensor(gtl::ArraySlice<quint8>(values_quantized));  // in TestRequantizeMany8To32Bit()
      94  const auto input_array = i_tensor.flat<quint8>();  // in TestRequantizeMany8To32Bit()
     190  std::vector<quint8> values_quantized;  // in TestRequantizeManyInNewRange8To32Bit()
     193  values_quantized.push_back(FloatToQuantized<quint8>(v, r[0], r[1]));  // in TestRequantizeManyInNewRange8To32Bit()
     199  int low = Eigen::NumTraits<quint8>::lowest();  // in TestRequantizeManyInNewRange8To32Bit()
     [all …]
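FloatToQuantized<quint8> and the requantize helpers tested here all exercise one core mapping: an affine correspondence between a float interval [range_min, range_max] and the 0..255 code space. A minimal standalone sketch of that mapping, using plain uint8_t in place of TF's quint8 wrapper (this is the textbook math, not a copy of quantization_utils.h):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Affine quantization sketch: map [range_min, range_max] onto codes 0..255.
uint8_t FloatToQuantizedSketch(float v, float range_min, float range_max) {
  const float scale = 255.0f / (range_max - range_min);
  const float scaled = std::round((v - range_min) * scale);
  return static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, scaled)));
}

// Inverse map: recover the float value a code represents.
float QuantizedToFloatSketch(uint8_t q, float range_min, float range_max) {
  const float step = (range_max - range_min) / 255.0f;
  return range_min + q * step;
}

int main() {
  // Round-trip 1.0f through the range [-1, 2].
  const uint8_t q = FloatToQuantizedSketch(1.0f, -1.0f, 2.0f);  // code 170
  std::printf("%d -> %f\n", q, QuantizedToFloatSketch(q, -1.0f, 2.0f));
}

With range [-1.0f, 2.0f] the value 1.0f lands exactly on code 170 and round-trips with no error; values between codes pick up at most half a quantization step (0.5 * 3/255 ≈ 0.006) of error.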
meta_support.h
      71  const quint8* a_data, const quint8* b_data, qint32* c_data,
      81  float output_max, quint8* output);
      85  void Dequantize(OpKernelContext* context, const quint8* input, int count,
      91  float range_max, quint8* output);
      98  void QuantizedBiasAdd(OpKernelContext* context, const quint8* input,
      99  int input_count, const quint8* bias, int bias_count,
     106  void Clamp(OpKernelContext* context, const quint8* input, int input_count,
     107  quint8 clamp_min, quint8 clamp_max, quint8* output);
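meta_support.h declares the ARM-optimized "meta" fast paths the kernels below dispatch to when meta::IsSupportedAndEnabled() holds: QuantizedGemm, Requantize, Dequantize, Quantize, QuantizedBiasAdd, and Clamp. What Requantize has to compute is two chained affine maps, taking qint32 codes in one float range down to quint8 codes in another; a hedged scalar sketch of that arithmetic (the real kernel is vectorized and works in fixed point):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

// Scalar sketch of requantization: decode a 32-bit code from its float
// range, then re-encode the value as an 8-bit code in the output range.
uint8_t RequantizeSketch(int32_t q, float in_min, float in_max,
                         float out_min, float out_max) {
  // qint32 codes span the whole int32 range over [in_min, in_max].
  const double lowest = std::numeric_limits<int32_t>::min();
  const double span = 4294967295.0;  // int32 highest - lowest
  const double value =
      in_min + (q - lowest) * (static_cast<double>(in_max) - in_min) / span;
  // Re-encode into 0..255 over the output range, clamping at the ends.
  const double code = std::round((value - out_min) * 255.0 /
                                 (static_cast<double>(out_max) - out_min));
  return static_cast<uint8_t>(std::min(255.0, std::max(0.0, code)));
}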
quantized_activation_ops.cc
      42  if (meta::IsSupportedAndEnabled() && std::is_same<T, quint8>()) {  // in Compute()
      43  auto input_ui8_array = input.flat<quint8>();  // in Compute()
      45  min_as_quantized, 255, output->flat<quint8>().data());  // in Compute()
      76  if (meta::IsSupportedAndEnabled() && std::is_same<T, quint8>()) {  // in Compute()
      77  auto input_ui8_array = input.flat<quint8>();  // in Compute()
      80  output->flat<quint8>().data());  // in Compute()
     105  .TypeConstraint<quint8>("Tinput")
     106  .TypeConstraint<quint8>("out_type"),
     107  QuantizedReluOp<quint8>);
     116  .TypeConstraint<quint8>("Tinput")
     [all …]
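The Relu hits show the trick the quantized activations use: nothing is dequantized. max(x, 0) becomes a clamp of the codes against min_as_quantized, the code that represents 0.0f in the input range (Relu6 additionally clamps against the code for 6.0f), and the output keeps the input's range. A standalone sketch, assuming the affine coding from the earlier sketch:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>

// Sketch of quantized ReLU: max(x, 0) on codes is a clamp against the
// quantized code for 0.0f; the codes themselves are never decoded.
void QuantizedReluSketch(const uint8_t* in, size_t n, float range_min,
                         float range_max, uint8_t* out) {
  const float scale = 255.0f / (range_max - range_min);
  const float zero = std::round((0.0f - range_min) * scale);
  const uint8_t zero_code =
      static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, zero)));
  for (size_t i = 0; i < n; ++i) out[i] = std::max(in[i], zero_code);
}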
quantized_resize_bilinear_op.cc
     137  inline uint8x8_t ToUint8x8(const quint8* v0, const quint8* v1, const quint8* v2,  // in ToUint8x8()
     138  const quint8* v3, const quint8* v4, const quint8* v5,  // in ToUint8x8()
     139  const quint8* v6, const quint8* v7) {  // in ToUint8x8()
     200  const quint8* tl0, const quint8* tr0, const quint8* bl0, const quint8* br0,  // in ComputeLerpx8()
     201  const int16* xlp0, const quint8* tl1, const quint8* tr1, const quint8* bl1,  // in ComputeLerpx8()
     202  const quint8* br1, const int16* xlp1, const quint8* tl2, const quint8* tr2,  // in ComputeLerpx8()
     203  const quint8* bl2, const quint8* br2, const int16* xlp2, const quint8* tl3,  // in ComputeLerpx8()
     204  const quint8* tr3, const quint8* bl3, const quint8* br3, const int16* xlp3,  // in ComputeLerpx8()
     205  const quint8* tl4, const quint8* tr4, const quint8* bl4, const quint8* br4,  // in ComputeLerpx8()
     206  const int16* xlp4, const quint8* tl5, const quint8* tr5, const quint8* bl5,  // in ComputeLerpx8()
     [all …]
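ComputeLerpx8 bilinearly interpolates eight output pixels per NEON call, taking top-left/top-right/bottom-left/bottom-right neighbors and per-lane int16 fixed-point x-weights. The scalar math it parallelizes, sketched with float weights since the file's fixed-point resolution is not visible in these matches:

#include <cstdint>

// Scalar reference for one bilinear-interpolated output pixel; the NEON
// ComputeLerpx8 in the file computes eight of these per call.
uint8_t LerpSketch(uint8_t tl, uint8_t tr, uint8_t bl, uint8_t br,
                   float x_lerp, float y_lerp) {
  const float top = tl + (tr - tl) * x_lerp;     // lerp along x, top row
  const float bottom = bl + (br - bl) * x_lerp;  // lerp along x, bottom row
  return static_cast<uint8_t>(top + (bottom - top) * y_lerp + 0.5f);
}

Because the result is a convex combination of the four neighbors, it stays inside the input's range, so the interpolated codes never need requantizing.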
quantized_bias_add_op_test.cc
      59  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  // in TEST_F()
      67  FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);  // in TEST_F()
      73  AddInputFromArray<quint8>(input_quantized.shape(),  // in TEST_F()
      74  input_quantized.flat<quint8>());  // in TEST_F()
      75  AddInputFromArray<quint8>(bias_quantized.shape(),  // in TEST_F()
      76  bias_quantized.flat<quint8>());  // in TEST_F()
     119  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  // in TEST_F()
     139  FloatTensorToQuantized<quint8>(bias_float, bias_min, bias_max);  // in TEST_F()
     155  AddInputFromArray<quint8>(input_quantized.shape(),  // in TEST_F()
     156  input_quantized.flat<quint8>());  // in TEST_F()
     [all …]
quantized_conv_ops_test.cc
      74  FloatTensorToQuantized<quint8>(image_float, image_min, image_max);  // in TEST_F()
      88  FloatTensorToQuantized<quint8>(filter_float, filter_min, filter_max);  // in TEST_F()
      90  AddInputFromArray<quint8>(image_quantized.shape(),  // in TEST_F()
      91  image_quantized.flat<quint8>());  // in TEST_F()
      92  AddInputFromArray<quint8>(filter_quantized.shape(),  // in TEST_F()
      93  filter_quantized.flat<quint8>());  // in TEST_F()
     153  AddInputFromArray<quint8>(  // in TEST_F()
     158  AddInputFromArray<quint8>(  // in TEST_F()
     196  AddInputFromArray<quint8>(  // in TEST_F()
     201  AddInputFromArray<quint8>(  // in TEST_F()
     [all …]
quantized_pooling_ops_test.cc
      45  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
      62  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  // in TEST_F()
      70  AddInputFromArray<quint8>(input_quantized.shape(),  // in TEST_F()
      71  input_quantized.flat<quint8>());  // in TEST_F()
      79  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  // in TEST_F()
      90  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
     107  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  // in TEST_F()
     115  AddInputFromArray<quint8>(input_quantized.shape(),  // in TEST_F()
     116  input_quantized.flat<quint8>());  // in TEST_F()
     124  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  // in TEST_F()
quantized_reshape_op_test.cc
      55  input.flat<quint8>()(i) = quint8(i);  // in TEST_F()
      56  expected.flat<quint8>()(i) = quint8(i);  // in TEST_F()
      58  AddInputFromArray<quint8>(input.shape(), input.flat<quint8>());  // in TEST_F()
      66  test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));  // in TEST_F()
quantized_add_op.cc
      62  void ScalarAddition(OpKernelContext* context, const quint8* full_input,  // in ScalarAddition()
      64  int64 num_elements, quint8 scalar_input,  // in ScalarAddition()
      67  const int32 scalar_in_output_range = RequantizeInNewRange<quint8, qint32>(  // in ScalarAddition()
      71  QuantizedToFloat<quint8>(0, full_input_min, full_input_max);  // in ScalarAddition()
      73  QuantizedToFloat<quint8>(1, full_input_min, full_input_max);  // in ScalarAddition()
     119  void ScalarAddition(OpKernelContext* context, const quint8* full_input,  // in ScalarAddition()
     121  int64 num_elements, quint8 scalar_input,  // in ScalarAddition()
     124  const int32 scalar_in_output_range = RequantizeInNewRange<quint8, qint32>(  // in ScalarAddition()
     128  QuantizedToFloat<quint8>(0, full_input_min, full_input_max);  // in ScalarAddition()
     130  QuantizedToFloat<quint8>(1, full_input_min, full_input_max);  // in ScalarAddition()
     [all …]
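ScalarAddition is the broadcast fast path of QuantizedAdd: the lines at 71/73 and 128/130 recover the input's step size from the float values of codes 0 and 1, and RequantizeInNewRange re-expresses the scalar in the wider qint32 output coding so the loop can operate on codes alone. A hedged sketch that keeps the arithmetic in float for clarity (the kernel folds it into integer multiplies; clamping is omitted here):

#include <cstdint>
#include <limits>

// Sketch of quantized scalar addition: decode each 8-bit code, add the
// decoded scalar, and encode the sum as a 32-bit code in the output range.
void ScalarAdditionSketch(const uint8_t* input, float in_min, float in_max,
                          int64_t n, uint8_t scalar_code, float scalar_min,
                          float scalar_max, float out_min, float out_max,
                          int32_t* out) {
  const double in_step = (static_cast<double>(in_max) - in_min) / 255.0;
  const double scalar_value =
      scalar_min +
      scalar_code * (static_cast<double>(scalar_max) - scalar_min) / 255.0;
  // qint32 codes span the full int32 range over [out_min, out_max].
  const double out_step =
      (static_cast<double>(out_max) - out_min) / 4294967295.0;
  const double out_lowest = std::numeric_limits<int32_t>::min();
  for (int64_t i = 0; i < n; ++i) {
    const double sum = (in_min + input[i] * in_step) + scalar_value;
    out[i] = static_cast<int32_t>((sum - out_min) / out_step + out_lowest);
  }
}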
quantized_matmul_op_test.cc
      58  AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});  // in TEST_F()
      63  AddInputFromArray<quint8>(TensorShape({3, 4}),  // in TEST_F()
     116  AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), {11});  // in TEST_F()
     120  AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {0});  // in TEST_F()
     160  AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), {11});  // in TEST_F()
     164  AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {0});  // in TEST_F()
     209  AddInputFromArray<quint8>(TensorShape({a_rows, a_cols}), {  // in TEST_F()
     228  AddInputFromArray<quint8>(TensorShape({b_rows, b_cols}), {  // in TEST_F()
     305  Tensor a_quantized = FloatTensorToQuantized<quint8>(a_float, a_min, a_max);  // in TEST_F()
     325  Tensor b_quantized = FloatTensorToQuantized<quint8>(b_float, b_min, b_max);  // in TEST_F()
     [all …]
quantized_activation_ops_test.cc
      52  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  // in TEST_F()
      56  AddInputFromArray<quint8>(input_quantized.shape(),  // in TEST_F()
      57  input_quantized.flat<quint8>());  // in TEST_F()
      65  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  // in TEST_F()
      83  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  // in TEST_F()
      87  AddInputFromArray<quint8>(input_quantized.shape(),  // in TEST_F()
      88  input_quantized.flat<quint8>());  // in TEST_F()
      96  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  // in TEST_F()
quantized_instance_norm_test.cc
      28  void ReferenceImpl(const quint8* inp, float inp_min, float inp_max,  // in ReferenceImpl()
     107  ReferenceImpl(input.flat<quint8>().data(), x_min, x_max, input.shape(),  // in Expect()
     110  auto out = outputs[0].flat<quint8>();  // in Expect()
     126  auto input = input_tensor.flat<quint8>();  // in TestBasic()
     128  input = input.random(Eigen::internal::UniformRandomGenerator<quint8>());  // in TestBasic()
     135  auto input = input_tensor.flat<quint8>();  // in TestZeroInput()
     145  auto input = input_tensor.flat<quint8>();  // in TestMaxInput()
     156  auto input = input_tensor.flat<quint8>();  // in TestOutputRangeGiven()
     157  input = input.random(Eigen::internal::UniformRandomGenerator<quint8>());  // in TestOutputRangeGiven()
     164  auto input = input_tensor.flat<quint8>();  // in TestClamp()
     [all …]
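ReferenceImpl receives raw quint8 codes plus their float range, which fixes the shape of any reference implementation: decode, normalize each instance to zero mean and unit variance over its spatial extent, and re-encode into the output range. A hedged sketch for a single instance/channel slice (the epsilon value is an assumption, not taken from the file):

#include <cmath>
#include <cstdint>
#include <vector>

// Hedged reference sketch of instance normalization on quantized codes:
// decode, normalize to zero mean / unit variance, re-encode with clamping.
std::vector<uint8_t> InstanceNormSliceSketch(const std::vector<uint8_t>& in,
                                             float in_min, float in_max,
                                             float out_min, float out_max,
                                             double eps = 1e-5) {
  const double step = (static_cast<double>(in_max) - in_min) / 255.0;
  double mean = 0.0;
  for (uint8_t q : in) mean += in_min + q * step;
  mean /= in.size();
  double var = 0.0;
  for (uint8_t q : in) {
    const double d = (in_min + q * step) - mean;
    var += d * d;
  }
  var /= in.size();
  const double inv_stddev = 1.0 / std::sqrt(var + eps);
  const double out_scale = 255.0 / (static_cast<double>(out_max) - out_min);
  std::vector<uint8_t> out(in.size());
  for (size_t i = 0; i < in.size(); ++i) {
    const double v = ((in_min + in[i] * step) - mean) * inv_stddev;
    double code = std::round((v - out_min) * out_scale);
    if (code < 0.0) code = 0.0;
    if (code > 255.0) code = 255.0;
    out[i] = static_cast<uint8_t>(code);
  }
  return out;
}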
quantize_op_test.cc
      42  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
      56  test::FillValues<quint8>(&expected, {0, 1, 1, 2, 127, 255, 255});  // in TEST_F()
      57  test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));  // in TEST_F()
      91  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_P()
     119  test::FillValues<quint8>(  // in TEST_P()
     121  ScalePerSliceAlongAxis<quint8>(dims, -1, {0, 0, 2, 3, 4, 129, 255, 255}));  // in TEST_P()
     132  test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));  // in TEST_P()
     140  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
     153  test::FillValues<quint8>(&expected, {0, 0, 255});  // in TEST_F()
     154  test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));  // in TEST_F()
     [all …]
quantized_bias_add_op.cc
      67  if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() &&  // in Compute()
      68  std::is_same<T2, quint8>() && std::is_same<T3, qint32>()) {  // in Compute()
      69  auto input_ui8_array = input.flat<quint8>();  // in Compute()
      70  auto bias_ui8_array = bias.flat<quint8>();  // in Compute()
      96  .TypeConstraint<quint8>("T1")
      97  .TypeConstraint<quint8>("T2")
      99  QuantizedBiasAddOp<quint8, quint8, qint32>);
quantized_concat_op_test.cc
      73  .Attr("T", DataTypeToEnum<quint8>::v())  // in TestSmall8Bit()
      83  FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  // in TestSmall8Bit()
      92  FloatTensorToQuantized<quint8>(second_float, second_min, second_max);  // in TestSmall8Bit()
     101  AddInputFromArray<quint8>(first_quantized.shape(),  // in TestSmall8Bit()
     102  first_quantized.flat<quint8>());  // in TestSmall8Bit()
     103  AddInputFromArray<quint8>(second_quantized.shape(),  // in TestSmall8Bit()
     104  second_quantized.flat<quint8>());  // in TestSmall8Bit()
     114  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  // in TestSmall8Bit()
     201  .Attr("T", DataTypeToEnum<quint8>::v())  // in TestSecondDim8Bit()
     211  FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  // in TestSecondDim8Bit()
     [all …]
quantized_matmul_op.cc
      35  void GemmlowpMultiply(OpKernelContext* op_context, const quint8* a_data,  // in GemmlowpMultiply()
      36  const quint8* b_data, qint32* c_data, int m, int n, int k,  // in GemmlowpMultiply()
     135  if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() &&  // in Compute()
     136  std::is_same<T2, quint8>() && std::is_same<Toutput, qint32>() &&  // in Compute()
     143  } else if (std::is_same<T1, quint8>() && std::is_same<T2, quint8>() &&  // in Compute()
     196  .TypeConstraint<quint8>("T1")
     197  .TypeConstraint<quint8>("T2")
     199  QuantizedMatMulOp<quint8, quint8, qint32>);
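GemmlowpMultiply hands gemmlowp uint8 operands together with zero-point offsets and collects qint32 accumulators. The contract it relies on, as a naive standalone sketch (gemmlowp tiles and vectorizes this, and its offset sign convention differs; here the offset is simply the zero point being subtracted):

#include <cstdint>

// Naive sketch of the uint8 x uint8 -> int32 GEMM contract: subtract each
// operand's zero-point offset, multiply, and accumulate in 32 bits.
// C is m x n, A is m x k, B is k x n, all row-major.
void QuantizedGemmSketch(const uint8_t* a, int32_t a_offset, const uint8_t* b,
                         int32_t b_offset, int32_t* c, int m, int n, int k) {
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      int32_t acc = 0;
      for (int l = 0; l < k; ++l) {
        acc += (static_cast<int32_t>(a[i * k + l]) - a_offset) *
               (static_cast<int32_t>(b[l * n + j]) - b_offset);
      }
      c[i * n + j] = acc;
    }
  }
}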
meta_support.cc
     118  void QuantizedGemmImpl(OpKernelContext* tf_context, const quint8* a_data,  // in QuantizedGemmImpl()
     119  const quint8* b_data, qint32* c_data, int m, int n,  // in QuantizedGemmImpl()
     221  bool transpose_b, const quint8* a_data, const quint8* b_data,  // in QuantizedGemm()
     258  float output_max, quint8* output) {  // in Requantize()
     292  void Dequantize(OpKernelContext* tf_context, const quint8* input, int count,  // in Dequantize()
     317  float range_min, float range_max, quint8* output) {  // in Quantize()
     346  void QuantizedBiasAdd(OpKernelContext* tf_context, const quint8* input,  // in QuantizedBiasAdd()
     347  int input_count, const quint8* bias, int bias_count,  // in QuantizedBiasAdd()
     385  void Clamp(OpKernelContext* tf_context, const quint8* input, int count,  // in Clamp()
     386  quint8 clamp_min, quint8 clamp_max, quint8* output) {  // in Clamp()
/external/tensorflow/tensorflow/core/kernels/mkl/ |
mkl_qmatmul_op.cc
     308  if (std::is_same<Toutput, quint8>::value ||  // in Compute()
     319  if (std::is_same<Toutput, quint8>::value ||  // in Compute()
     344  MklQuantizationRangeForMultiplication<quint8, qint8, qint32>(  // in ComputeOutputRangeForInt32()
     359  if (std::is_same<Toutput, quint8>::value ||  // in ExtendMklDnnMatMulFwdParams()
     372  if (std::is_same<Toutput, quint8>::value) {  // in ExtendMklDnnMatMulFwdParams()
     524  MklDnnQuantizedMatMulOp<Device, quint8, qint8, Tbias,  // in ExtendMklDnnMatMulFwdParams()
     536  .TypeConstraint<quint8>("T1")
     544  .TypeConstraint<quint8>("T1")
     549  MklDnnQuantizedMatMulOp<CPUDevice, quint8, qint8, float, qint32>);
     553  .TypeConstraint<quint8>("T1")
     [all …]
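MklQuantizationRangeForMultiplication<quint8, qint8, qint32> at line 344 answers the question every quint8 x qint8 kernel in this index faces: which float range does the int32 accumulator represent? One level of the product is the product of one level of each operand, so the qint32 range is that combined step times the int32 extremes; the 255.0 * 127.0 constants in mkl_conv_ops.cc below are the same two level counts. A hedged sketch of that arithmetic, assuming a symmetric qint8 weight range:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

// Sketch of the output-range computation for quint8 x qint8 -> qint32:
// one step of a quint8 coding of [min_a, max_a] is (max_a - min_a) / 255;
// one step of a symmetric qint8 coding is max(|min_b|, |max_b|) / 127.
// The int32 accumulator then represents step_a * step_b per level.
void RangeForMultiplySketch(float min_a, float max_a, float min_b, float max_b,
                            float* min_c, float* max_c) {
  const float a_step = (max_a - min_a) / 255.0f;
  const float b_step = std::max(std::abs(min_b), std::abs(max_b)) / 127.0f;
  const float c_step = a_step * b_step;
  *min_c = c_step * static_cast<float>(std::numeric_limits<int32_t>::min());
  *max_c = c_step * static_cast<float>(std::numeric_limits<int32_t>::max());
}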
mkl_quantized_concat_op_test.cc
     105  .Attr("T", DataTypeToEnum<quint8>::v())  // in TestSmall8Bit()
     119  FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  // in TestSmall8Bit()
     130  FloatTensorToQuantized<quint8>(second_float, second_min, second_max);  // in TestSmall8Bit()
     139  AddInputFromArray<quint8>(first_quantized.shape(),  // in TestSmall8Bit()
     140  first_quantized.flat<quint8>());  // in TestSmall8Bit()
     141  AddInputFromArray<quint8>(second_quantized.shape(),  // in TestSmall8Bit()
     142  second_quantized.flat<quint8>());  // in TestSmall8Bit()
     157  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  // in TestSmall8Bit()
     178  .Attr("T", DataTypeToEnum<quint8>::v())  // in TestSecondDim8Bit()
     192  FloatTensorToQuantized<quint8>(first_float, first_min, first_max);  // in TestSecondDim8Bit()
     [all …]
mkl_quantized_pooling_ops_test.cc
      74  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
      92  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  // in TEST_F()
     108  AddInputFromArray<quint8>(input_quantized.shape(),  // in TEST_F()
     109  input_quantized.flat<quint8>());  // in TEST_F()
     122  conv_comp.ConvertMKL2TF<quint8>(DT_QUINT8, output, mkl_shape_tensor,  // in TEST_F()
     128  QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);  // in TEST_F()
     143  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
     161  FloatTensorToQuantized<quint8>(input_float, input_min, input_max);  // in TEST_F()
     176  AddInputFromArray<quint8>(input_quantized.shape(),  // in TEST_F()
     177  input_quantized.flat<quint8>());  // in TEST_F()
     [all …]
mkl_conv_ops.cc
    1395  if (std::is_same<Toutput, quint8>::value ||  // in Compute()
    1445  if (std::is_same<Toutput, quint8>::value ||  // in ExtendConvFwdParams()
    1465  std::is_same<Toutput, quint8>::value ? 255.0f : 127.0f;  // in ExtendConvFwdParams()
    1475  (std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;  // in ExtendConvFwdParams()
    1524  (std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;  // in GetBiasHandle()
    1695  if (std::is_same<Toutput, quint8>::value) {  // in ExtendConvFwdParams()
    1745  if (std::is_same<Toutput, quint8>::value) {  // in AllocateOutputTensor()
    1798  (std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;  // in AllocateOutputTensor()
    1843  .TypeConstraint<quint8>("Tinput")
    1850  .TypeConstraint<quint8>("Tinput")
     [all …]
mkl_quantize_op_test.cc
      40  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
      60  test::FillValues<quint8>(&expected, {0, 1, 1, 2, 127, 255, 255, 2});  // in TEST_F()
      63  test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));  // in TEST_F()
     107  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
     121  test::FillValues<quint8>(&expected, {1, 1, 2, 2, 3, 127, 255, 255});  // in TEST_F()
     122  test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));  // in TEST_F()
     137  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
     151  test::FillValues<quint8>(&expected, {32, 64, 96, 128, 159, 191, 223, 255});  // in TEST_F()
     152  test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));  // in TEST_F()
     167  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
     [all …]
mkl_qmatmul_op_test.cc
      95  AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});  // in TEST_F()
     168  AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});  // in TEST_F()
     249  AddInputFromArray<quint8>(TensorShape({4, 3}),  // in TEST_F()
     326  .Attr("Toutput", DataTypeToEnum<quint8>::v())  // in TEST_F()
     327  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
     334  AddInputFromArray<quint8>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});  // in TEST_F()
     384  test::FillValues<quint8>(&expected, {84, 60, 116, 52, 184, 169, 234, 179});  // in TEST_F()
     390  conv_comp.ConvertMKL2TF<quint8>(DT_QUINT8, output, mkl_shape_tensor,  // in TEST_F()
     393  test::ExpectTensorEqual<quint8>(expected, output_quantized);  // in TEST_F()
     420  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
     [all …]
mkl_quantized_conv_ops_test.cc
      87  .Attr("Tinput", DataTypeToEnum<quint8>::v())  // in ConfigureQuantizedConv2D()
      89  .Attr("T", DataTypeToEnum<quint8>::v())  // in ConfigureQuantizedConv2D()
     107  AddInputFromArray<quint8>(  // in RunQuantizedDepthwiseConv2DOp()
     206  FloatTensorToQuantized<quint8>(image_float, image_min, image_max);  // in TEST_F()
     225  AddInputFromArray<quint8>(image_quantized.shape(),  // in TEST_F()
     226  image_quantized.flat<quint8>());  // in TEST_F()
     316  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
     402  AddInputFromArray<quint8>(  // in TEST_F()
     467  .Attr("Tinput", DataTypeToEnum<quint8>::v())  // in TEST_F()
     469  .Attr("T", DataTypeToEnum<quint8>::v())  // in TEST_F()
     [all …]