Searched refs: QuantizationType (Results 1 – 13 of 13), sorted by relevance
/external/libtextclassifier/native/lang_id/common/
embedding-network-params.h
    28 enum class QuantizationType {
    47 QuantizationType ParseQuantizationType(const std::string &s);
    87 QuantizationType quant_type = QuantizationType::NONE;
    138 matrix.quant_type = QuantizationType::NONE;  in GetHiddenLayerBias()
    174 matrix.quant_type = QuantizationType::NONE;  in GetSoftmaxBias()
    212 virtual QuantizationType embeddings_quant_type(int i) const {  in embeddings_quant_type()
    213 return QuantizationType::NONE;  in embeddings_quant_type()
    233 virtual QuantizationType hidden_weights_quant_type(int i) const {  in hidden_weights_quant_type()
    234 return QuantizationType::NONE;  in hidden_weights_quant_type()
    267 virtual QuantizationType softmax_weights_quant_type(int i) const {  in softmax_weights_quant_type()
    [all …]
embedding-network-params.cc
    25 QuantizationType ParseQuantizationType(const std::string &s) {  in ParseQuantizationType()
    27 return QuantizationType::NONE;  in ParseQuantizationType()
    30 return QuantizationType::UINT8;  in ParseQuantizationType()
    33 return QuantizationType::UINT4;  in ParseQuantizationType()
    36 return QuantizationType::FLOAT16;  in ParseQuantizationType()
    43 return QuantizationType::NONE;  in ParseQuantizationType()
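Taken together, the embedding-network-params hits describe a small string-to-enum parser over the four quantization schemes NONE, UINT8, UINT4 and FLOAT16. A minimal sketch of that shape, assuming the accepted spellings are the lowercase enumerator names (only the returned enumerators and the trailing NONE fallback are visible in the hits):

    #include <string>

    // Stand-in for the enum declared at embedding-network-params.h:28; the hits
    // show the four enumerators used below but not their declared order.
    enum class QuantizationType { NONE, UINT8, UINT4, FLOAT16 };

    // Sketch of ParseQuantizationType() (embedding-network-params.cc:25). The
    // accepted spellings are assumptions; unknown input falls back to NONE, as
    // the final return at line 43 suggests.
    QuantizationType ParseQuantizationType(const std::string &s) {
      if (s == "none") return QuantizationType::NONE;
      if (s == "uint8") return QuantizationType::UINT8;
      if (s == "uint4") return QuantizationType::UINT4;
      if (s == "float16") return QuantizationType::FLOAT16;
      return QuantizationType::NONE;
    }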
embedding-network.cc
    27 SAFTM_CHECK_EQ(static_cast<int>(QuantizationType::NONE),  in CheckNoQuantization()
    34 QuantizationType quant_type = matrix.quant_type;  in GetMatrixRowSizeInBytes()
    36 case QuantizationType::NONE:  in GetMatrixRowSizeInBytes()
    38 case QuantizationType::UINT8:  in GetMatrixRowSizeInBytes()
    40 case QuantizationType::UINT4:  in GetMatrixRowSizeInBytes()
    43 case QuantizationType::FLOAT16:  in GetMatrixRowSizeInBytes()
    93 case QuantizationType::NONE: {  in SparseReluProductPlusBias()
    121 case QuantizationType::FLOAT16: {  in SparseReluProductPlusBias()
    198 case QuantizationType::NONE: {  in ConcatEmbeddings()
    206 case QuantizationType::UINT8: {  in ConcatEmbeddings()
    [all …]
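The embedding-network.cc hits show GetMatrixRowSizeInBytes() switching over the same four schemes to pick a per-row storage size. A hedged sketch of what such a switch could compute, with the byte counts inferred from the usual storage widths (the hits confirm only the branch structure, not the formulas):

    #include <cstdint>

    enum class QuantizationType { NONE, UINT8, UINT4, FLOAT16 };  // as sketched above

    // Illustrative row-size computation; the real GetMatrixRowSizeInBytes() in
    // embedding-network.cc is only partially visible in the hits.
    int GetMatrixRowSizeInBytes(int cols, QuantizationType quant_type) {
      switch (quant_type) {
        case QuantizationType::NONE:
          return cols * static_cast<int>(sizeof(float));          // unquantized floats
        case QuantizationType::UINT8:
          return cols * static_cast<int>(sizeof(std::uint8_t));   // 1 byte per value
        case QuantizationType::UINT4:
          return (cols + 1) / 2;                                  // two 4-bit values per byte (assumed)
        case QuantizationType::FLOAT16:
          return cols * 2;                                        // half precision, 2 bytes per value
      }
      return 0;
    }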
/external/libtextclassifier/native/lang_id/common/flatbuffers/
embedding-network-params-from-flatbuffer.cc
    86 case QuantizationType::NONE:  in VerifyMatrix()
    89 case QuantizationType::UINT8: {  in VerifyMatrix()
    96 case QuantizationType::UINT4: {  in VerifyMatrix()
    107 case QuantizationType::FLOAT16: {  in VerifyMatrix()
    401 QuantizationType EmbeddingNetworkParamsFromFlatbuffer::SafeGetQuantizationType(  in SafeGetQuantizationType()
    404 return QuantizationType::NONE;  in SafeGetQuantizationType()
    406 saft_fbs::QuantizationType quantization_type = matrix->quantization_type();  in SafeGetQuantizationType()
    412 return QuantizationType::NONE;  in SafeGetQuantizationType()
    414 return QuantizationType::UINT8;  in SafeGetQuantizationType()
    416 return QuantizationType::UINT4;  in SafeGetQuantizationType()
    [all …]
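SafeGetQuantizationType() is the bridge from the flatbuffer-serialized enum (saft_fbs::QuantizationType) back to the C++ one, degrading to NONE when the matrix pointer or the stored value is unusable. A sketch of that defensive mapping, using minimal hypothetical stand-ins for the generated flatbuffer types:

    #include <cstdint>

    namespace saft_fbs {
    // Hypothetical stand-in for the schema enum from embedding-network.fbs; the
    // generated flatbuffer accessors are reduced to a plain struct here.
    enum class QuantizationType : std::int8_t { NONE, UINT8, UINT4, FLOAT16 };
    struct Matrix {
      QuantizationType quantization_type() const { return quantization_type_; }
      QuantizationType quantization_type_ = QuantizationType::NONE;
    };
    }  // namespace saft_fbs

    enum class QuantizationType { NONE, UINT8, UINT4, FLOAT16 };  // C++ side

    // Mirrors the structure suggested by the hits at lines 401-416: a null
    // matrix or an unrecognized schema value maps to NONE.
    QuantizationType SafeGetQuantizationType(const saft_fbs::Matrix *matrix) {
      if (matrix == nullptr) return QuantizationType::NONE;
      switch (matrix->quantization_type()) {
        case saft_fbs::QuantizationType::UINT8:
          return QuantizationType::UINT8;
        case saft_fbs::QuantizationType::UINT4:
          return QuantizationType::UINT4;
        case saft_fbs::QuantizationType::FLOAT16:
          return QuantizationType::FLOAT16;
        default:
          return QuantizationType::NONE;
      }
    }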
embedding-network-params-from-flatbuffer.h
    77 QuantizationType embeddings_quant_type(int i) const override {  in embeddings_quant_type()
    102 QuantizationType hidden_weights_quant_type(int i) const override {  in hidden_weights_quant_type()
    141 QuantizationType softmax_weights_quant_type(int i) const override {  in softmax_weights_quant_type()
    259 QuantizationType SafeGetQuantizationType(
embedding-network.fbs
    27 // Should be kept in sync with the C++ enum nlp_saft::QuantizationType.
    28 enum QuantizationType : byte {
    48 quantization_type:QuantizationType = NONE;
/external/tensorflow/tensorflow/lite/kernels/internal/
depthwiseconv_quantized_test.cc
    48 using optimized_ops::depthwise_conv::QuantizationType;
    78 typedef std::tuple<DepthwiseConvImplementation, int, QuantizationType, bool,
    109 QuantizationType quantization_type = QuantizationType::kNonPerChannelUint8;
    119 template <QuantizationType quantization_type>
    140 inline void DispatchDepthwiseConvGeneral<QuantizationType::kPerChannelInt8>(  in DispatchDepthwiseConvGeneral()
    154 template <QuantizationType quantization_type>
    358 inline void DispatchDepthwiseConvImpl<QuantizationType::kPerChannelInt8>(  in DispatchDepthwiseConvImpl()
    362 QuantizationType::kPerChannelInt8>::ExternalType* input_data,  in DispatchDepthwiseConvImpl()
    365 QuantizationType::kPerChannelInt8>::ExternalType* filter_data,  in DispatchDepthwiseConvImpl()
    369 QuantizationType::kPerChannelInt8>::ExternalType* output_data) {  in DispatchDepthwiseConvImpl()
    [all …]
depthwiseconv_per_channel_quantized_test.cc
    220 optimized_ops::depthwise_conv::QuantizationType::kNonPerChannelUint8>(  in GenerateValidShapeConfigurations()
    304 optimized_ops::depthwise_conv::QuantizationType::kPerChannelInt8>(  in TryTestOneDepthwiseConv3x3Filter()
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/
depthwiseconv_3x3_filter_common.h
    224 enum class QuantizationType {
    229 template <QuantizationType quantization_type>
    233 struct QuantizationTypeImpl<QuantizationType::kNonPerChannelUint8> {
    241 struct QuantizationTypeImpl<QuantizationType::kPerChannelInt8> {
    249 QuantizationType quantization_type = QuantizationType::kNonPerChannelUint8>
    279 if (quantization_type == QuantizationType::kPerChannelInt8) {
    442 QuantizationType quantization_type = QuantizationType::kNonPerChannelUint8>
    471 if (quantization_type == QuantizationType::kPerChannelInt8) {
    520 QuantizationType quantization_type>
    539 QuantizationType quantization_type,
    [all …]
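On the TensorFlow Lite side, QuantizationType is a different, two-valued enum that selects between the legacy uint8 path and the per-channel int8 path, and QuantizationTypeImpl is a traits template whose ::ExternalType member the tests above dereference. A minimal sketch of that pattern, with the uint8/int8 typedefs inferred from the enumerator names rather than quoted from the header:

    #include <cstdint>

    namespace optimized_ops {
    namespace depthwise_conv {

    // Kernel-selection enum from depthwiseconv_3x3_filter_common.h:224; only
    // these two enumerators appear in the hits.
    enum class QuantizationType {
      kNonPerChannelUint8,
      kPerChannelInt8,
    };

    // Traits template specialized per scheme. ExternalType is the member the
    // tests use; any other members of the real struct are not shown here.
    template <QuantizationType quantization_type>
    struct QuantizationTypeImpl {};

    template <>
    struct QuantizationTypeImpl<QuantizationType::kNonPerChannelUint8> {
      typedef std::uint8_t ExternalType;  // asymmetric uint8 data (assumed)
    };

    template <>
    struct QuantizationTypeImpl<QuantizationType::kPerChannelInt8> {
      typedef std::int8_t ExternalType;   // per-channel int8 data (assumed)
    };

    }  // namespace depthwise_conv
    }  // namespace optimized_ops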
depthwiseconv_uint8_transitional.h
    83 template <QuantizationType quantization_type>
    186 template <QuantizationType quantization_type>
    302 template <QuantizationType quantization_type>
    320 if (quantization_type == QuantizationType::kNonPerChannelUint8) {
    381 if (quantization_type == QuantizationType::kNonPerChannelUint8) {
    447 template <QuantizationType quantization_type, int32 max_padding>
    648 template <QuantizationType quantization_type, int32 max_padding>
    767 template <QuantizationType quantization_type>
    922 template <QuantizationType quantization_type>
    944 template <QuantizationType quantization_type, int32 max_padding>
    [all …]
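depthwiseconv_uint8_transitional.h parameterizes its helpers on the same enum and occasionally branches on it at run time (lines 320 and 381). Building on the trait sketched after the previous entry, a hypothetical helper showing how such a template consumes the scheme-specific ExternalType and guards uint8-only work; what the real kernels do inside those branches is not visible in the hits:

    #include <cstdint>

    // Hypothetical and illustrative only; reuses QuantizationType and
    // QuantizationTypeImpl from the sketch above.
    template <optimized_ops::depthwise_conv::QuantizationType quantization_type>
    void AccumulateRow(
        const typename optimized_ops::depthwise_conv::QuantizationTypeImpl<
            quantization_type>::ExternalType* input_data,
        int width, std::int32_t input_offset, std::int32_t* acc) {
      using optimized_ops::depthwise_conv::QuantizationType;
      for (int x = 0; x < width; ++x) {
        std::int32_t value = input_data[x];
        if (quantization_type == QuantizationType::kNonPerChannelUint8) {
          value += input_offset;  // offset applied only on the uint8 path in this sketch
        }
        acc[x] += value;
      }
    }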
depthwiseconv_uint8_3x3_filter.h
    5795 QuantizationType::kNonPerChannelUint8> {
    5912 QuantizationType::kNonPerChannelUint8,
    6133 QuantizationType::kNonPerChannelUint8,
    6551 QuantizationType::kNonPerChannelUint8,
    6938 QuantizationType::kNonPerChannelUint8,
    7172 QuantizationType::kNonPerChannelUint8,
    7986 QuantizationType::kNonPerChannelUint8,
    8687 QuantizationType::kNonPerChannelUint8,
    9372 QuantizationType::kNonPerChannelUint8,
    9878 QuantizationType quantization_type>
    [all …]
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/
depthwise_conv_hybrid.h
    293 optimized_ops::depthwise_conv::QuantizationType::kNonPerChannelUint8>(  in DepthwiseConvHybridWithRounding()
depthwise_conv.h
    1818 optimized_ops::depthwise_conv::QuantizationType::kPerChannelInt8>(