Searched refs:activations_type (Results 1 – 8 of 8) sorted by relevance
/external/tensorflow/tensorflow/lite/tools/optimize/
D | quantize_model.cc
      57   const TensorType& activations_type) {   in GetOperatorProperty() argument
      64   if (activations_type == TensorType_INT16 && !property.quantizable_int16) {   in GetOperatorProperty()
      87   const TensorType& activations_type) {   in PopulateRealValueOpSet() argument
      97   operator_name, activations_type);   in PopulateRealValueOpSet()
     144   const TensorType& activations_type,   in QuantizeBias() argument
     176   if (activations_type == tflite::TensorType_INT16) {   in QuantizeBias()
     193   if (activations_type == tflite::TensorType_INT16) {   in QuantizeBias()
     261   const TensorType& activations_type) {   in SetInputType() argument
     268   activations_type == TensorType_INT16 ? "int16" : "int8";   in SetInputType()
     325   const TensorType& activations_type) {   in SetOutputType() argument
     [all …]
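The hits above show quantize_model.cc branching on activations_type: QuantizeBias and SetInputType/SetOutputType treat TensorType_INT16 (the 16x8 scheme) differently from the default int8 path. As a rough illustration of that idea only (not the actual TFLite code), the Python sketch below assumes the usual TFLite convention that the bias scale is input_scale * weight_scale, with int32 bias storage for int8 activations and int64 for int16 activations; quantize_bias_sketch is an invented name.

    import numpy as np

    # Hypothetical helper sketching the idea behind QuantizeBias: the bias scale is
    # input_scale * weight_scale, and the bias storage type follows activations_type
    # (int32 for int8 activations, int64 for the 16x8 scheme). Not the TFLite code.
    def quantize_bias_sketch(bias_fp32, input_scale, weight_scale, activations_type="int8"):
        scale = input_scale * weight_scale
        if activations_type == "int16":
            qmin, qmax, dtype = -(2**63), 2**63 - 1, np.int64
        else:
            qmin, qmax, dtype = -(2**31), 2**31 - 1, np.int32
        quantized = np.clip(np.round(bias_fp32 / scale), qmin, qmax).astype(dtype)
        return quantized, scale

    q, s = quantize_bias_sketch(np.array([0.25, -0.5]), 0.02, 0.005, activations_type="int16")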
D | quantize_model.h
      77   const TensorType& activations_type,
      87   const TensorType& activations_type,
D | quantization_utils.h
     150   TfLiteStatus GetQuantizationParams(TensorT* tensor, TensorType activations_type,
     155   TfLiteStatus QuantizeActivation(TensorT* tensor, TensorType activations_type,
D | quantization_utils.cc
     102   TfLiteStatus GetQuantizationParams(TensorT* tensor, TensorType activations_type,   in GetQuantizationParams() argument
     105   if (activations_type == TensorType_INT8) {   in GetQuantizationParams()
     110   } else if (activations_type == TensorType_INT16) {   in GetQuantizationParams()
     119   activations_type);   in GetQuantizationParams()
     738   TfLiteStatus QuantizeActivation(TensorT* tensor, TensorType activations_type,   in QuantizeActivation() argument
     741   tensor, activations_type, tensor->quantization.get(), error_reporter));   in QuantizeActivation()
     742   tensor->type = activations_type;   in QuantizeActivation()
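GetQuantizationParams picks different quantization parameters per activations_type: int8 activations get an asymmetric [-128, 127] range, while int16 activations are quantized symmetrically with the zero point fixed at 0. The sketch below is a hedged Python approximation of that min/max -> (scale, zero_point) step, not the actual implementation; activation_qparams_sketch is an invented name.

    # Hedged approximation of GetQuantizationParams: derive (scale, zero_point) for an
    # activation tensor from its calibrated min/max, depending on activations_type.
    def activation_qparams_sketch(min_val, max_val, activations_type="int8"):
        # The representable range must include 0 so that zero is exactly quantizable.
        min_val, max_val = min(min_val, 0.0), max(max_val, 0.0)
        if activations_type == "int16":
            # int16 activations are symmetric: zero_point stays 0, range [-32767, 32767].
            scale = max(abs(min_val), abs(max_val)) / 32767.0
            return scale, 0
        # int8 activations use the asymmetric [-128, 127] range.
        scale = (max_val - min_val) / 255.0
        zero_point = int(round(-128 - min_val / scale)) if scale else 0
        return scale, zero_point

    print(activation_qparams_sketch(-1.0, 3.0, "int8"))    # roughly (0.0157, -64)
    print(activation_qparams_sketch(-1.0, 3.0, "int16"))   # roughly (9.16e-05, 0)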
/external/tensorflow/tensorflow/lite/python/optimize/
D | calibrator_test.py
      40   def test_calibration_with_quantization(self, activations_type):   argument
      55   activations_type)
      63   def test_calibration_with_quantization_allow_float(self, activations_type):   argument
      78   activations_type)
     117   self, activations_type):   argument
     134   activations_type)
D | calibrator.py
      69   activations_type=dtypes.int8,   argument
     104   np.dtype(activations_type.as_numpy_dtype()).num)
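calibrator.py defaults activations_type to dtypes.int8 and hands the C++ calibration wrapper a numpy type number via np.dtype(...).num (line 104 above). The snippet below only demonstrates that conversion for the two supported activation types; the printed numbers are numpy internals and may vary by platform.

    import numpy as np
    from tensorflow.python.framework import dtypes

    # calibrator.py passes activations_type to the C++ wrapper as a numpy type
    # number; np.dtype(...).num is what produces it.
    for act_type in (dtypes.int8, dtypes.int16):
        print(act_type.name, np.dtype(act_type.as_numpy_dtype).num)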
D | calibration_wrapper.cc
     337   TfLiteType activations_type =   in QuantizeModel() local
     353   TfLiteTypeToSchemaType(activations_type), error_reporter_.get());   in QuantizeModel()
/external/tensorflow/tensorflow/lite/python/
D | lite.py
     283   def activations_type(self):   member in QuantizationMode
     293   self.activations_type(),
     333   "activations_type": self.activations_type(),
     341   "activations_type": self.activations_type(),
     473   inference_output_type, activations_type,   argument
     490   activations_type != _dtypes.int16):
     497   inference_output_type, allow_float, activations_type)
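In lite.py, QuantizationMode.activations_type() feeds the calibration path and resolves to int16 only when the 16x8 ops set is requested. The sketch below shows the public converter configuration that is believed to exercise this path; the saved-model path, input shape, and representative dataset are placeholders.

    import tensorflow as tf

    def representative_dataset():
        # Placeholder calibration data; replace with real input samples.
        for _ in range(100):
            yield [tf.random.uniform([1, 224, 224, 3])]

    converter = tf.lite.TFLiteConverter.from_saved_model("/path/to/saved_model")  # placeholder path
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    # Requesting the 16x8 scheme is what makes QuantizationMode.activations_type()
    # resolve to int16 rather than the default int8.
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
    ]
    tflite_model = converter.convert()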