/external/tensorflow/tensorflow/core/kernels/ |
D | relu_op.cc |
  147  void Relu<GPUDevice, qint8>::operator()(
  148  const GPUDevice& d, typename TTypes<qint8>::ConstTensor features,
  149  typename TTypes<qint8>::Tensor activations);
  150  extern template struct Relu<GPUDevice, qint8>;
  192  class ReluOp<Device, qint8>
  193  : public UnaryElementWiseOp<qint8, ReluOp<Device, qint8>> {
  195  using UnaryElementWiseOp<qint8, ReluOp<Device, qint8>>::UnaryElementWiseOp;
  198  auto flat_input = input.flat<qint8>();  in Operate()
  203  functor::Relu<Device, qint8> func;  in Operate()
  204  func(context->eigen_device<Device>(), flat_input, output->flat<qint8>());  in Operate()
  [all …]
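For reference, the qint8 ReLU is an element-wise max(x, 0) over the raw int8 codes (this path relies on the quantized representation of 0.0f being the integer 0). A minimal plain-C++ sketch of that behaviour, not the TensorFlow functor above; the real GPU kernel in relu_op_gpu.cu.cc additionally works on packed int8x4 words, which this sketch ignores.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Element-wise ReLU over raw qint8 storage: negative codes clamp to 0.
    std::vector<int8_t> ReluQint8(const std::vector<int8_t>& in) {
      std::vector<int8_t> out(in.size());
      std::transform(in.begin(), in.end(), out.begin(),
                     [](int8_t x) { return std::max<int8_t>(x, 0); });
      return out;
    }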
|
D | quantize_op_test.cc |
  121  .Attr("T", DataTypeToEnum<qint8>::v())  in TEST_F()
  134  test::FillValues<qint8>(&expected, {-128, 0, 1, 1, 2, 64, 127});  in TEST_F()
  135  test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));  in TEST_F()
  151  .Attr("T", DataTypeToEnum<qint8>::v())  in TEST_F()
  162  test::FillValues<qint8>(&expected, {-64, 0, 127});  in TEST_F()
  163  test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));  in TEST_F()
  179  .Attr("T", DataTypeToEnum<qint8>::v())  in TEST_F()
  192  test::FillValues<qint8>(&expected, {-126, 0, 1, 2, 4, 64, 127});  in TEST_F()
  193  test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));  in TEST_F()
  209  .Attr("T", DataTypeToEnum<qint8>::v())  in TEST_F()
  [all …]
|
D | mkl_conv_ops.cc |
  1506  : public MklConvOp<Device, quint8, qint8, Tbias, Toutput, Ttemp_output,
  1522  : MklConvOp<Device, quint8, qint8, Tbias, Toutput, Ttemp_output, int32,  in MklQuantizedConv2DOp()
  1533  MklConvOp<Device, quint8, qint8, Tbias, Toutput, Ttemp_output, int32,  in Compute()
  1552  std::is_same<Toutput, qint8>::value) {  in Compute()
  1559  MklQuantizationRangeForMultiplication<quint8, qint8, qint32>(  in Compute()
  1580  MklConvOp<Device, quint8, qint8, Tbias, Toutput, Ttemp_output, int32,  in ExtendConvFwdParams()
  1586  std::is_same<Toutput, qint8>::value) {  in ExtendConvFwdParams()
  1605  MklQuantizationRangeForMultiplication<quint8, qint8, qint32>(  in ExtendConvFwdParams()
  1791  MklConvOp<Device, quint8, qint8, Tbias, Toutput, Ttemp_output, int32,  in AllocateOutputTensor()
  1850  .TypeConstraint<qint8>("Tfilter")
  [all …]
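The MklQuantizationRangeForMultiplication<quint8, qint8, qint32> calls above derive the float range represented by the qint32 accumulator from the activation and filter ranges. A hedged plain-C++ sketch of the usual convention (one quantized step of the product equals the product of one step of each operand); the names below are illustrative, not the MKL-DNN code itself.

    #include <cstdint>
    #include <limits>
    #include <utility>

    // Float width of one quantized step, assuming [min_f, max_f] is mapped
    // linearly onto the full integer range of Q.
    template <typename Q>
    float StepSize(float min_f, float max_f) {
      const float lowest = std::numeric_limits<Q>::lowest();
      const float highest = std::numeric_limits<Q>::max();
      return (max_f - min_f) / (highest - lowest);
    }

    // Float range covered by an int32 accumulator holding products of a
    // quint8 activation (uint8_t) and a qint8 filter (int8_t).
    std::pair<float, float> RangeForMultiplication(float min_a, float max_a,
                                                   float min_b, float max_b) {
      const float step_c =
          StepSize<uint8_t>(min_a, max_a) * StepSize<int8_t>(min_b, max_b);
      const float lowest_c = std::numeric_limits<int32_t>::lowest();
      const float highest_c = std::numeric_limits<int32_t>::max();
      return {step_c * lowest_c, step_c * highest_c};
    }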
|
D | dequantize_op_test.cc |
  118  RunDequantizeMinCombinedTest<qint8>(0, 255.0f);  in TEST_F()
  141  RunDequantizeScaledTest<qint8>(-255.0f, 127.0f, 0, 0.0);  in TEST_F()
  144  RunDequantizeScaledTest<qint8>(-10.0f, 127.0f, -127, -127.0);  in TEST_F()
  147  RunDequantizeScaledTest<qint8>(-2.0f, 1.0f, -128, -2.0);  in TEST_F()
  150  RunDequantizeScaledTest<qint8>(-1.0f, 300.0f, 42, 99.212601);  in TEST_F()
  185  BM_DequantizeMinCombinedCpu<qint8>(iters);  in BM_DequantizeMinCombinedCpuQint8()
|
D | mkl_quantized_conv_ops_test.cc |
  90  .Attr("Tfilter", DataTypeToEnum<qint8>::v())  in ConfigureQuantizedConv2D()
  141  FloatTensorToQuantized<qint8>(filter_float, filter_min, filter_max);  in TEST_F()
  145  AddInputFromArray<qint8>(filter_quantized.shape(),  in TEST_F()
  146  filter_quantized.flat<qint8>());  in TEST_F()
  223  AddInputFromArray<qint8>(  in TEST_F()
  283  .Attr("Tfilter", DataTypeToEnum<qint8>::v())  in TEST_F()
  305  AddInputFromArray<qint8>(  in TEST_F()
  361  AddInputFromArray<qint8>(  in TEST_F()
  417  AddInputFromArray<qint8>(  in TEST_F()
|
D | save_v2_op_test.cc |
  88  AddInput<qint8>(TensorShape({3, 2}),  in TEST_F()
  89  [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); });  in TEST_F()
  93  return *reinterpret_cast<qint32*>(&x) * qint8(2);  in TEST_F()
  205  EXPECT_EQ(*reinterpret_cast<qint8*>(&i), val.template flat<qint8>()(i));  in TEST_F()
  221  EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2),  in TEST_F()
|
D | maxpooling_op.h |
  43  struct SpatialMaxPooling<Device, qint8> {
  44  void operator()(const Device& d, typename TTypes<qint8, 4>::Tensor output,
  45  typename TTypes<qint8, 4>::ConstTensor input, int window_rows,
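SpatialMaxPooling<Device, qint8> can pool directly on the int8 codes because quantization is monotonic, so the quantized max coincides with the float max. A small illustrative sketch of VALID-padded 2D max pooling over a single int8 plane (shapes and names here are made up for the example, not the TensorFlow functor):

    #include <algorithm>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // VALID-padded 2D max pool over a rows x cols int8 plane.
    std::vector<int8_t> MaxPool2D(const std::vector<int8_t>& in, int rows,
                                  int cols, int window, int stride) {
      const int out_rows = (rows - window) / stride + 1;
      const int out_cols = (cols - window) / stride + 1;
      std::vector<int8_t> out(out_rows * out_cols);
      for (int r = 0; r < out_rows; ++r) {
        for (int c = 0; c < out_cols; ++c) {
          int8_t best = std::numeric_limits<int8_t>::lowest();
          for (int i = 0; i < window; ++i)
            for (int j = 0; j < window; ++j)
              best = std::max(best,
                              in[(r * stride + i) * cols + (c * stride + j)]);
          out[r * out_cols + c] = best;
        }
      }
      return out;
    }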
|
D | quantized_bias_add_op.cc |
  102  .TypeConstraint<qint8>("T1")
  103  .TypeConstraint<qint8>("T2")
  105  QuantizedBiasAddOp<qint8, qint8, qint32>);
|
D | save_op_test.cc |
  90  AddInput<qint8>(TensorShape({3, 2}),  in TEST_F()
  91  [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); });  in TEST_F()
  95  return *reinterpret_cast<qint32*>(&x) * qint8(2);  in TEST_F()
  223  qint8 data[6];  in TEST_F()
  226  EXPECT_EQ(*reinterpret_cast<qint8*>(&i), data[i]);  in TEST_F()
  244  EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2), data[i]);  in TEST_F()
  452  AddInput<qint8>(TensorShape({3, 2}),  in TEST_F()
  453  [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); });  in TEST_F()
  457  return *reinterpret_cast<qint32*>(&x) * qint8(2);  in TEST_F()
  530  qint8 data[6];  in TEST_F()
|
D | mkl_requantize_per_channel_op.cc |
  102  ? memory::desc(dims_mkl_order, MklDnnType<qint8>(),  in Compute()
  117  const_cast<qint8*>(output->flat<qint8>().data()));  in Compute()
  168  .TypeConstraint<qint8>("out_type"),
  169  MklRequantizePerChannelOp<CPUDevice, qint8>);
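MklRequantizePerChannelOp narrows qint32 accumulators to qint8 with one scale per output channel. A rough sketch of that arithmetic under a symmetric-scale convention (real value = code * scale); the rounding, clamping, and layout choices below are assumptions for illustration, not the MKL-DNN primitive.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Requantize int32 accumulators to int8 with a per-channel input scale
    // and a single output scale. Assumes the channel is the innermost
    // dimension of the flattened buffer.
    std::vector<int8_t> RequantizePerChannel(const std::vector<int32_t>& acc,
                                             const std::vector<float>& in_scale,
                                             float out_scale, int channels) {
      std::vector<int8_t> out(acc.size());
      for (size_t i = 0; i < acc.size(); ++i) {
        const float real = acc[i] * in_scale[i % channels];
        const float code = std::round(real / out_scale);
        out[i] = static_cast<int8_t>(std::min(127.0f, std::max(-128.0f, code)));
      }
      return out;
    }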
|
D | restore_v2_op_test.cc |
  143  Tensor input_6 = MakeInput<qint8>(  in RunTest()
  145  [](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); });  in RunTest()
  150  return *reinterpret_cast<qint32*>(&x) * qint8(2);  in RunTest()
  258  EXPECT_EQ(*reinterpret_cast<qint8*>(&i), output->flat<qint8>()(i));  in RunTest()
  270  EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2),  in RunTest()
|
D | relu_op_gpu.cu.cc |
  124  struct Relu<Device, qint8> {
  129  void operator()(const Device& d, typename TTypes<qint8>::ConstTensor input,  in operator ()()
  130  typename TTypes<qint8>::Tensor output) {  in operator ()()
  162  template struct functor::Relu<GPUDevice, qint8>;
|
D | restore_op_test.cc |
  125  Tensor input_6 = MakeInput<qint8>(TensorShape({3, 2}), [](int x) -> qint8 {  in TEST_F()
  126  return *reinterpret_cast<qint8*>(&x);  in TEST_F()
  132  return *reinterpret_cast<qint32*>(&x) * qint8(2);  in TEST_F()
  248  EXPECT_EQ(*reinterpret_cast<qint8*>(&i), output->flat<qint8>()(i));  in TEST_F()
  260  EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2),  in TEST_F()
|
D | depthtospace_op.cc |
  70  constexpr bool is_int8x4 = std::is_same<T, qint8>::value;  in Compute()
  194  Name("DepthToSpace").Device(DEVICE_GPU).TypeConstraint<qint8>("T"),
  195  DepthToSpaceOp<GPUDevice, qint8>);
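For qint8 the GPU DepthToSpace kernel takes the vectorized int8x4 path (hence the is_int8x4 check above), but the underlying permutation is the same as in the plain layout. An illustrative plain-C++ sketch of the NHWC index mapping for block size B, ignoring the int8x4 packing:

    #include <cstdint>
    #include <vector>

    // NHWC DepthToSpace: depth blocks of size B*B become spatial B x B tiles.
    // Output shape: [N, H*B, W*B, C/(B*B)].
    std::vector<int8_t> DepthToSpaceNHWC(const std::vector<int8_t>& in, int N,
                                         int H, int W, int C, int B) {
      const int out_c = C / (B * B);
      std::vector<int8_t> out(in.size());
      for (int n = 0; n < N; ++n)
        for (int h = 0; h < H * B; ++h)
          for (int w = 0; w < W * B; ++w)
            for (int c = 0; c < out_c; ++c) {
              const int in_c = ((h % B) * B + (w % B)) * out_c + c;
              const int in_idx = ((n * H + h / B) * W + w / B) * C + in_c;
              const int out_idx = ((n * H * B + h) * W * B + w) * out_c + c;
              out[out_idx] = in[in_idx];
            }
      return out;
    }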
|
D | spacetodepth_op.cc |
  70  constexpr bool is_int8x4 = std::is_same<T, qint8>::value;  in Compute()
  194  Name("SpaceToDepth").Device(DEVICE_GPU).TypeConstraint<qint8>("T"),
  195  SpaceToDepthOp<GPUDevice, qint8>);
|
D | dequantize_op.cc |
  122  Name("Dequantize").Device(DEVICE_CPU).TypeConstraint<qint8>("T"),
  123  DequantizeOp<CPUDevice, qint8>);
|
D | mkl_concat_op.cc |
  232  if (std::is_same<T, qint8>::value || std::is_same<T, quint8>::value) {  in Compute()
  271  (!std::is_same<T, qint8>::value && !std::is_same<T, quint8>::value),  in Compute()
  423  if (std::is_same<T, qint8>::value || std::is_same<T, quint8>::value) {  in Compute()
  563  .TypeConstraint<qint8>("T")
  566  MklConcatOp<CPUDevice, qint8, NAME_IS_AXIS>)
|
D | quantize_op.cc |
  233  Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<qint8>("T"),
  234  QuantizeV2Op<CPUDevice, qint8>);
|
/external/tensorflow/tensorflow/python/ops/ |
D | dequantize_op_test.py |
  43  dtypes.qint8: np.int8,
  69  self._testDequantizeOp(np.array([-128, 0, 127]), -1.0, 2.0, dtypes.qint8)
  70  self._testDequantizeOp(np.array([-2, 4, -17]), -5.0, -3.0, dtypes.qint8)
  71  self._testDequantizeOp(np.array([0, -4, 42, -108]), 5.0, 40.0, dtypes.qint8)
|
/external/tensorflow/tensorflow/core/grappler/ |
D | utils_test.cc |
  504  TestSetTensorValue<qint8>(DT_QINT8, -8, /*success=*/true, /*error_msg=*/"");  in TEST()
  505  TestSetTensorValue<qint8>(DT_QINT8, 0, /*success=*/true, /*error_msg=*/"");  in TEST()
  506  TestSetTensorValue<qint8>(DT_QINT8, 8, /*success=*/true, /*error_msg=*/"");  in TEST()
  507  TestSetTensorValue<qint8>(DT_QINT8, std::numeric_limits<qint8>::min(),  in TEST()
  509  TestSetTensorValue<qint8>(DT_QINT8, std::numeric_limits<qint8>::max(),  in TEST()
  511  TestSetTensorValue<qint8>(DT_QINT8, kMinInt, /*success=*/false,  in TEST()
  513  TestSetTensorValue<qint8>(DT_QINT8, kMaxInt, /*success=*/false,  in TEST()
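The tests above exercise a value-range guard: values that fit in qint8 are accepted, while kMinInt and kMaxInt are rejected. A minimal sketch of such a guard in plain C++ (FitsInQint8 is a made-up name for illustration, not the grappler helper):

    #include <cstdint>
    #include <limits>

    // True iff `value` can be stored losslessly in an int8-backed quantized
    // tensor element.
    bool FitsInQint8(int64_t value) {
      return value >= std::numeric_limits<int8_t>::min() &&
             value <= std::numeric_limits<int8_t>::max();
    }
    // FitsInQint8(-8) and FitsInQint8(8) are true;
    // FitsInQint8(std::numeric_limits<int32_t>::max()) is false.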
|
/external/tensorflow/tensorflow/core/framework/ |
D | type_traits.h |
  41  struct is_quantized<qint8> : true_type {};
  80  class numeric_limits<tensorflow::qint8>
  97  struct is_signed<tensorflow::qint8> : public is_signed<tensorflow::int8> {};
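type_traits.h marks qint8 as quantized and forwards numeric_limits (and, in the real header, is_signed) to the underlying int8. The same pattern in a self-contained sketch, with MyQint8 standing in for tensorflow::qint8, which wraps a single int8 value:

    #include <cstdint>
    #include <limits>
    #include <type_traits>

    // Stand-in for tensorflow::qint8: a thin wrapper around one int8 value.
    struct MyQint8 {
      int8_t value;
    };

    // Library-style trait: false for everything, specialized to true for the
    // quantized wrapper type.
    template <typename T>
    struct is_quantized : std::false_type {};
    template <>
    struct is_quantized<MyQint8> : std::true_type {};

    // Forward numeric properties to the underlying storage type.
    namespace std {
    template <>
    class numeric_limits<MyQint8> : public numeric_limits<int8_t> {};
    }  // namespace std

    static_assert(is_quantized<MyQint8>::value, "qint8-like types are quantized");
    static_assert(std::numeric_limits<MyQint8>::max() == 127, "int8 upper bound");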
|
D | register_types.h |
  78  #define TF_CALL_qint8(m) m(::tensorflow::qint8)
  109  #define TF_CALL_qint8(m) m(::tensorflow::qint8)
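TF_CALL_qint8 belongs to the X-macro family used to stamp out per-dtype instantiations and registrations. A hedged sketch of how that pattern expands; the macro and function names below are illustrative, not the real registration machinery:

    #include <cstdint>
    #include <iostream>

    // X-macro style type dispatch: each CALL_* macro applies `m` to one type,
    // and aggregate macros chain the per-type macros together.
    #define CALL_qint8(m) m(int8_t)   /* stand-in for ::tensorflow::qint8 */
    #define CALL_quint8(m) m(uint8_t) /* stand-in for ::tensorflow::quint8 */
    #define CALL_QUANTIZED_TYPES(m) CALL_qint8(m) CALL_quint8(m)

    // Example "kernel": a function template instantiated once per type.
    template <typename T>
    void PrintSize() { std::cout << sizeof(T) << "\n"; }

    #define INSTANTIATE(T) template void PrintSize<T>();
    CALL_QUANTIZED_TYPES(INSTANTIATE)  // expands to two explicit instantiations
    #undef INSTANTIATE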
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_QuantizeV2.pbtxt |
  45  if T == qint8: out[i] -= (range(T) + 1) / 2.0
  57  If the output type was qint8 ([-128, 127]), the operation will additionally
  59  with the range of qint8.
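In MIN_COMBINED mode the op maps [min_range, max_range] onto [0, range(T)] and, for qint8, then subtracts (range(T) + 1) / 2 to recenter the result onto [-128, 127]. A plain-C++ sketch of that formula for one value (rounding and saturation details here are assumptions; SCALED mode differs):

    #include <cmath>
    #include <cstdint>

    // MIN_COMBINED quantization of one float to a qint8 code, following the
    // api_def formula. Inputs outside [min_range, max_range] would need
    // clamping, which is omitted here.
    int8_t QuantizeV2MinCombined(float in, float min_range, float max_range) {
      const float range_t = 255.0f;  // steps of an 8-bit type: 127 - (-128)
      float out = (in - min_range) * range_t / (max_range - min_range);
      out -= (range_t + 1.0f) / 2.0f;  // qint8-only recentering step
      return static_cast<int8_t>(std::lround(out));
    }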
|
D | api_def_Dequantize.pbtxt |
  24  if T == qint8: in[i] += (range(T) + 1)/ 2.0
  36  Note that if quantizedtype is qint8, the operation will additionally add
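Dequantize in MIN_COMBINED mode is the inverse of the mapping above: undo the qint8 recentering, then scale back into [min_range, max_range]. A plain-C++ sketch for one value (MIN_COMBINED mode only):

    #include <cstdint>

    // MIN_COMBINED dequantization of one qint8 code back to float.
    float DequantizeMinCombined(int8_t code, float min_range, float max_range) {
      const float range_t = 255.0f;
      const float shifted =
          static_cast<float>(code) + (range_t + 1.0f) / 2.0f;  // qint8-only shift
      return min_range + shifted * (max_range - min_range) / range_t;
    }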
|
/external/tensorflow/tensorflow/python/framework/ |
D | tensor_util.py |
  111  dtypes.qint8.as_numpy_dtype:
  177  dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
  240  dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8, dtypes.qint16,
  337  dtypes.qint8: [_FilterInt, _FilterTuple],
  434  dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16,
  625  dtypes.qint32, dtypes.quint8, dtypes.qint8, dtypes.qint16, dtypes.quint16
|