/external/tensorflow/tensorflow/lite/tools/optimize/ |
D | modify_model_interface_test.cc |
      93   tensor_1->quantization = absl::make_unique<QuantizationParametersT>();  in CreateQuantizedModelSingleInputOutput()
      94   tensor_1->quantization->scale.push_back(0.35);  in CreateQuantizedModelSingleInputOutput()
      95   tensor_1->quantization->zero_point.push_back(28);  in CreateQuantizedModelSingleInputOutput()
      101  tensor_2->quantization = absl::make_unique<QuantizationParametersT>();  in CreateQuantizedModelSingleInputOutput()
      102  tensor_2->quantization->scale.push_back(0.12);  in CreateQuantizedModelSingleInputOutput()
      103  tensor_2->quantization->zero_point.push_back(50);  in CreateQuantizedModelSingleInputOutput()
      203  tensor_2->quantization = absl::make_unique<QuantizationParametersT>();  in CreateQuantizedModelMultipleInputOutput()
      204  tensor_2->quantization->scale.push_back(0.35);  in CreateQuantizedModelMultipleInputOutput()
      205  tensor_2->quantization->zero_point.push_back(28);  in CreateQuantizedModelMultipleInputOutput()
      211  tensor_3->quantization = absl::make_unique<QuantizationParametersT>();  in CreateQuantizedModelMultipleInputOutput()
      [all …]
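The construction pattern this test repeats can be sketched as follows. A minimal sketch using the flatbuffer object-API types from schema_generated.h (TensorT, QuantizationParametersT); the function name and the INT8 type choice are illustrative, not part of the test:

```cpp
#include <memory>

#include "absl/memory/memory.h"
#include "tensorflow/lite/schema/schema_generated.h"

// Minimal sketch of the pattern at lines 93-95 above: attach per-tensor
// quantization parameters to an object-API tensor.
std::unique_ptr<tflite::TensorT> MakeQuantizedTensor() {
  auto tensor = absl::make_unique<tflite::TensorT>();
  tensor->type = tflite::TensorType_INT8;  // illustrative type choice
  tensor->quantization = absl::make_unique<tflite::QuantizationParametersT>();
  tensor->quantization->scale.push_back(0.35f);    // real = scale * (q - zero_point)
  tensor->quantization->zero_point.push_back(28);
  return tensor;
}
```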
|
D | quantize_model_test.cc |
      99   const auto quantization_params = tensor->quantization.get();  in ExpectSameModels()
      101  expected_tensor->quantization.get();  in ExpectSameModels()
      363  EXPECT_EQ(subgraph->tensors[input_idx]->quantization->scale.size(), 1);  in TEST_P()
      364  EXPECT_FLOAT_EQ(subgraph->tensors[input_idx]->quantization->scale[0],  in TEST_P()
      366  EXPECT_EQ(subgraph->tensors[input_idx]->quantization->zero_point.size(), 1);  in TEST_P()
      367  EXPECT_EQ(subgraph->tensors[input_idx]->quantization->zero_point[0], 0);  in TEST_P()
      370  EXPECT_EQ(subgraph->tensors[output_idx]->quantization->scale.size(), 1);  in TEST_P()
      371  EXPECT_FLOAT_EQ(subgraph->tensors[output_idx]->quantization->scale[0],  in TEST_P()
      373  EXPECT_EQ(subgraph->tensors[output_idx]->quantization->zero_point.size(),  in TEST_P()
      375  EXPECT_EQ(subgraph->tensors[output_idx]->quantization->zero_point[0], 0);  in TEST_P()
      [all …]
|
D | model_utils_test.cc |
      35  tensor.quantization = absl::make_unique<QuantizationParametersT>();  in TEST()
      36  tensor.quantization->scale.push_back(0.5);  in TEST()
      37  tensor.quantization->scale.push_back(1.5);  in TEST()
      39  tensor.quantization->zero_point.push_back(1);  in TEST()
      40  tensor.quantization->zero_point.push_back(-1);  in TEST()
      60  tensor.quantization = absl::make_unique<QuantizationParametersT>();  in TEST()
      61  tensor.quantization->min.push_back(0.5);  in TEST()
      63  tensor.quantization->max.push_back(1.5);  in TEST()
|
D | quantize_model.cc |
      152  TF_LITE_ENSURE(error_reporter, weight_tensor->quantization);  in QuantizeBias()
      153  std::vector<float> weight_scales = weight_tensor->quantization->scale;  in QuantizeBias()
      163  if (!input_tensor->quantization ||  in QuantizeBias()
      164  input_tensor->quantization->scale.size() != 1) {  in QuantizeBias()
      178  model, bias_tensor, input_tensor->quantization->scale[0],  in QuantizeBias()
      182  model, bias_tensor, input_tensor->quantization->scale[0],  in QuantizeBias()
      196  input_tensor->quantization->scale[0] * weight_scales[0],  in QuantizeBias()
      201  input_tensor->quantization->scale[0] * weight_scales[0],  in QuantizeBias()
      214  !tensor->quantization->scale.empty();  in TensorTypeChangeRequired()
      217  !tensor->quantization->scale.empty();  in TensorTypeChangeRequired()
      [all …]
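The QuantizeBias matches at lines 196 and 201 reflect the standard bias constraint: an int32 bias feeding a quantized op uses scale = input_scale * weight_scale and zero point 0. A sketch of that rule; the helper name and rounding mode are assumptions, not the real QuantizeBias implementation:

```cpp
#include <cmath>
#include <cstdint>
#include <vector>

// Sketch of the bias-scale rule: per channel when the weights are
// per-channel, per tensor when weight_scales has a single entry.
std::vector<int32_t> QuantizeBiasSketch(const std::vector<float>& bias,
                                        float input_scale,
                                        const std::vector<float>& weight_scales) {
  std::vector<int32_t> quantized(bias.size());
  for (size_t i = 0; i < bias.size(); ++i) {
    const float weight_scale =
        weight_scales.size() == 1 ? weight_scales[0] : weight_scales[i];
    quantized[i] = static_cast<int32_t>(
        std::round(bias[i] / (input_scale * weight_scale)));
  }
  return quantized;
}
```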
|
D | quantization_utils.cc |
      107  tensor->quantization->min[0], tensor->quantization->max[0],  in GetQuantizationParams()
      112  GetSymmetricQuantizationParams(tensor->quantization->min[0],  in GetQuantizationParams()
      113  tensor->quantization->max[0],  in GetQuantizationParams()
      303  if (tensor->quantization == nullptr) {  in SymmetricPerChannelQuantization()
      304  tensor->quantization = absl::make_unique<QuantizationParametersT>();  in SymmetricPerChannelQuantization()
      309  tensor->quantization.get(), error_reporter));  in SymmetricPerChannelQuantization()
      317  std::max(std::abs(tensor->quantization->min[channel_idx]),  in SymmetricPerChannelQuantization()
      318  std::abs(tensor->quantization->max[channel_idx]));  in SymmetricPerChannelQuantization()
      417  if (tensor->quantization->min.size() != 1 ||  in SymmetricQuantizeTensorFromMinMax()
      418  if (tensor->quantization->max.size() != 1) {  in SymmetricQuantizeTensorFromMinMax()
      [all …]
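Lines 317-318 take the larger magnitude of min and max per channel; for symmetric quantization that magnitude is spread over the signed integer range. A sketch, where the divisor 127 assumes symmetric int8 with the -128 bucket left unused; treat that constant as an assumption here:

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// Sketch of a symmetric per-channel scale computation: each channel's scale
// covers the larger magnitude of its recorded min/max.
std::vector<float> SymmetricScalesSketch(const std::vector<float>& mins,
                                         const std::vector<float>& maxs) {
  std::vector<float> scales(mins.size());
  for (size_t i = 0; i < mins.size(); ++i) {
    scales[i] = std::max(std::abs(mins[i]), std::abs(maxs[i])) / 127.0f;
  }
  return scales;
}
```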
|
D | model_utils.cc |
      106  (*tensor)->quantization = absl::make_unique<QuantizationParametersT>();  in MakeTensorWithQuantParam()
      107  (*tensor)->quantization->scale.push_back(scale);  in MakeTensorWithQuantParam()
      108  (*tensor)->quantization->zero_point.push_back(zero_point);  in MakeTensorWithQuantParam()
      112  return tensor->quantization != nullptr &&  in QuantizationParametersExist()
      113  !tensor->quantization->scale.empty() &&  in QuantizationParametersExist()
      114  !tensor->quantization->zero_point.empty();  in QuantizationParametersExist()
      128  return tensor->quantization && !tensor->quantization->min.empty() &&  in HasMinMax()
      129  !tensor->quantization->max.empty();  in HasMinMax()
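QuantizationParametersExist and HasMinMax are small predicates that gate later passes. An illustrative caller; only the two checks mirror lines 112-114 and 128-129, the branching pass itself is invented:

```cpp
#include "tensorflow/lite/schema/schema_generated.h"

// Hypothetical pass showing how the two predicates partition tensors.
void HandleTensorSketch(tflite::TensorT* tensor) {
  const bool has_params = tensor->quantization != nullptr &&
                          !tensor->quantization->scale.empty() &&
                          !tensor->quantization->zero_point.empty();
  const bool has_min_max = tensor->quantization &&
                           !tensor->quantization->min.empty() &&
                           !tensor->quantization->max.empty();
  if (has_params) {
    // Already quantized: reuse the recorded scale/zero_point.
  } else if (has_min_max) {
    // Only calibration stats: derive scale/zero_point from min/max.
  }
}
```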
|
D | quantization_utils_test.cc |
      204  tensor.quantization = nullptr;  in TEST_F()
      235  tensor.quantization = absl::make_unique<QuantizationParametersT>();  in TEST_F()
      238  tensor.quantization.get(), &error_reporter_);  in TEST_F()
      243  EXPECT_THAT(tensor.quantization->min, ElementsAreArray(expected_mins));  in TEST_F()
      244  EXPECT_THAT(tensor.quantization->max, ElementsAreArray(expected_maxs));  in TEST_F()
      359  auto quantization = absl::make_unique<QuantizationParametersT>();  in TEST_F() local
      360  quantization->min = {-0.00001, -7.0, -2.0};  in TEST_F()
      361  quantization->max = {0.00001, 1.0, -1.0};  in TEST_F()
      362  std::vector<float> scales = std::vector<float>(quantization->min.size());  in TEST_F()
      364  GetSymmetricScalesFromMaxMin(quantization.get(), &scales, &error_reporter_);  in TEST_F()
      [all …]
|
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/ |
D | lstm.mlir |
      16  // CHECK-NEXT: quantization: {
      23  // CHECK-NEXT: quantization: {
      30  // CHECK-NEXT: quantization: {
      37  // CHECK-NEXT: quantization: {
      44  // CHECK-NEXT: quantization: {
      51  // CHECK-NEXT: quantization: {
      58  // CHECK-NEXT: quantization: {
      65  // CHECK-NEXT: quantization: {
      72  // CHECK-NEXT: quantization: {
      79  // CHECK-NEXT: quantization: {
      [all …]
|
D | unidirectional_sequence_lstm.mlir |
      16  // CHECK-NEXT: quantization: {
      23  // CHECK-NEXT: quantization: {
      30  // CHECK-NEXT: quantization: {
      37  // CHECK-NEXT: quantization: {
      44  // CHECK-NEXT: quantization: {
      51  // CHECK-NEXT: quantization: {
      58  // CHECK-NEXT: quantization: {
      65  // CHECK-NEXT: quantization: {
      72  // CHECK-NEXT: quantization: {
      79  // CHECK-NEXT: quantization: {
      [all …]
|
D | lstm_quantized.mlir |
      20   // CHECK-NEXT: quantization: {
      29   // CHECK-NEXT: quantization: {
      38   // CHECK-NEXT: quantization: {
      47   // CHECK-NEXT: quantization: {
      56   // CHECK-NEXT: quantization: {
      65   // CHECK-NEXT: quantization: {
      74   // CHECK-NEXT: quantization: {
      83   // CHECK-NEXT: quantization: {
      92   // CHECK-NEXT: quantization: {
      101  // CHECK-NEXT: quantization: {
      [all …]
|
D | basic_lstm.mlir |
      16  // CHECK-NEXT: quantization: {
      23  // CHECK-NEXT: quantization: {
      30  // CHECK-NEXT: quantization: {
      37  // CHECK-NEXT: quantization: {
      44  // CHECK-NEXT: quantization: {
      51  // CHECK-NEXT: quantization: {
      58  // CHECK-NEXT: quantization: {
      65  // CHECK-NEXT: quantization: {
      72  // CHECK-NEXT: quantization: {
|
D | while_op.mlir |
      26   // CHECK-NEXT: quantization: {
      33   // CHECK-NEXT: quantization: {
      41   // CHECK-NEXT: quantization: {
      48   // CHECK-NEXT: quantization: {
      70   // CHECK-NEXT: quantization: {
      77   // CHECK-NEXT: quantization: {
      85   // CHECK-NEXT: quantization: {
      93   // CHECK-NEXT: quantization: {
      111  // CHECK-NEXT: quantization: {
      118  // CHECK-NEXT: quantization: {
      [all …]
|
D | tfl_while_op.mlir |
      26   // CHECK-NEXT: quantization: {
      33   // CHECK-NEXT: quantization: {
      41   // CHECK-NEXT: quantization: {
      48   // CHECK-NEXT: quantization: {
      70   // CHECK-NEXT: quantization: {
      77   // CHECK-NEXT: quantization: {
      85   // CHECK-NEXT: quantization: {
      93   // CHECK-NEXT: quantization: {
      111  // CHECK-NEXT: quantization: {
      118  // CHECK-NEXT: quantization: {
      [all …]
|
D | if_op.mlir |
      26   // CHECK-NEXT: quantization: {
      33   // CHECK-NEXT: quantization: {
      41   // CHECK-NEXT: quantization: {
      48   // CHECK-NEXT: quantization: {
      73   // CHECK-NEXT: quantization: {
      80   // CHECK-NEXT: quantization: {
      87   // CHECK-NEXT: quantization: {
      108  // CHECK-NEXT: quantization: {
      115  // CHECK-NEXT: quantization: {
      122  // CHECK-NEXT: quantization: {
|
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/ |
D | importer_test_min_max.cc |
      123  input_tensor->quantization->scale.clear();  in InjectStatsToFullyConnected()
      124  input_tensor->quantization->zero_point.clear();  in InjectStatsToFullyConnected()
      125  input_tensor->quantization->min.push_back(-1.0);  in InjectStatsToFullyConnected()
      126  input_tensor->quantization->max.push_back(1.0);  in InjectStatsToFullyConnected()
      130  output_tensor->quantization->scale.clear();  in InjectStatsToFullyConnected()
      131  output_tensor->quantization->zero_point.clear();  in InjectStatsToFullyConnected()
      133  output_tensor->quantization->min.push_back(-1.0 * i);  in InjectStatsToFullyConnected()
      134  output_tensor->quantization->max.push_back(1.0 * i);  in InjectStatsToFullyConnected()
      136  output_tensor->quantization->quantized_dimension = shape.size() - 1;  in InjectStatsToFullyConnected()
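The injection pattern at lines 123-136 can be sketched as a standalone helper: drop any concrete scale/zero_point so only calibration stats remain, push one min/max pair per channel, and record the quantized axis. The free function is invented for illustration:

```cpp
#include "tensorflow/lite/schema/schema_generated.h"

// Hypothetical helper mirroring InjectStatsToFullyConnected's per-channel
// stats injection; the min/max values follow the test's -1.0*i / 1.0*i shape.
void InjectPerChannelStats(tflite::TensorT* tensor, int num_channels,
                           int quantized_axis) {
  tensor->quantization->scale.clear();
  tensor->quantization->zero_point.clear();
  for (int i = 0; i < num_channels; ++i) {
    tensor->quantization->min.push_back(-1.0 * i);
    tensor->quantization->max.push_back(1.0 * i);
  }
  tensor->quantization->quantized_dimension = quantized_axis;
}
```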
|
/external/tensorflow/tensorflow/lite/toco/tflite/ |
D | import.cc |
      79  auto quantization = input_tensor->quantization();  in ImportTensors() local
      80  if (quantization) {  in ImportTensors()
      83  if (quantization->min() && quantization->max()) {  in ImportTensors()
      84  CHECK_EQ(1, quantization->min()->Length());  in ImportTensors()
      85  CHECK_EQ(1, quantization->max()->Length());  in ImportTensors()
      87  minmax.min = quantization->min()->Get(0);  in ImportTensors()
      88  minmax.max = quantization->max()->Get(0);  in ImportTensors()
      90  if (quantization->scale() && quantization->zero_point()) {  in ImportTensors()
      91  CHECK_EQ(1, quantization->scale()->Length());  in ImportTensors()
      92  CHECK_EQ(1, quantization->zero_point()->Length());  in ImportTensors()
      [all …]
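The read path at lines 79-92 works on the raw flatbuffer accessors, which return nullable vectors, so each field is tested before Get(0). A sketch; the struct and wrapper function are illustrative, not toco's API:

```cpp
#include <cstdint>

#include "tensorflow/lite/schema/schema_generated.h"

// Hypothetical wrapper around the null-checked flatbuffer read path.
struct ScaleZeroPoint {
  float scale;
  int64_t zero_point;
};

bool ReadScaleZeroPoint(const tflite::Tensor& tensor, ScaleZeroPoint* out) {
  const auto* quantization = tensor.quantization();
  if (!quantization || !quantization->scale() || !quantization->zero_point()) {
    return false;  // field absent in the serialized model
  }
  out->scale = quantization->scale()->Get(0);
  out->zero_point = quantization->zero_point()->Get(0);
  return true;
}
```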
|
/external/tensorflow/tensorflow/lite/g3doc/performance/ |
D | post_training_quantization.md |
      1   # Post-training quantization
      3   Post-training quantization is a conversion technique that can reduce model size
      13  There are several post-training quantization options to choose from. Here is a
      19  : quantization                :                          :                  :
      21  : quantization                :                          : Microcontrollers :
      22  | Float16 quantization        | 2x smaller, GPU          | CPU, GPU         |
      25  The following decision tree can help determine which post-training quantization
      30  ### Dynamic range quantization
      32  The simplest form of post-training quantization statically quantizes only the
      53  ### Full integer quantization
      [all …]
|
D | model_optimization.md |
      46  Currently, quantization can be used to reduce latency by simplifying the
      73  TensorFlow Lite currently supports optimization via quantization, pruning and
      83  [Quantization](https://www.tensorflow.org/model_optimization/guide/quantization/post_training)
      88  The following types of quantization are available in TensorFlow Lite:
      92  [Post-training float16 quantization](post_training_float16_quant.ipynb) …
      93  [Post-training dynamic range quantization](post_training_quant.ipynb) …
      94  [Post-training integer quantization](post_training_integer_quant.ipynb) …
      95  [Quantization-aware training](http://www.tensorflow.org/model_optimization/guide/quantization/train…
      97  Below are the latency and accuracy results for post-training quantization and
      98  quantization-aware training on a few models. All latency numbers are measured on
      [all …]
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | kernel_util_test.cc |
      279  input.quantization.type = kTfLiteAffineQuantization;  in TEST_F()
      286  input.quantization.params = reinterpret_cast<void*>(input_params);  in TEST_F()
      299  filter.quantization.type = kTfLiteAffineQuantization;  in TEST_F()
      311  filter.quantization.params = reinterpret_cast<void*>(filter_params);  in TEST_F()
      320  bias.quantization.type = kTfLiteAffineQuantization;  in TEST_F()
      331  bias.quantization.params = reinterpret_cast<void*>(bias_params);  in TEST_F()
      340  output.quantization.type = kTfLiteAffineQuantization;  in TEST_F()
      347  output.quantization.params = reinterpret_cast<void*>(output_params);  in TEST_F()
      385  input.quantization.type = kTfLiteAffineQuantization;  in TEST_F()
      392  input.quantization.params = reinterpret_cast<void*>(input_params);  in TEST_F()
      [all …]
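The setup this test repeats for input, filter, bias, and output hangs a heap-allocated TfLiteAffineQuantization off quantization.params. A sketch of that pattern using the C API's array helpers; the scale and zero-point values are illustrative:

```cpp
#include <cstdlib>

#include "tensorflow/lite/c/common.h"

// Sketch: attach single-channel affine parameters to a C-API tensor, as the
// test does per tensor at e.g. lines 279/286 above.
void AttachAffineParams(TfLiteTensor* tensor) {
  auto* params = static_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  params->scale = TfLiteFloatArrayCreate(1);
  params->scale->data[0] = 0.5f;
  params->zero_point = TfLiteIntArrayCreate(1);
  params->zero_point->data[0] = 0;
  params->quantized_dimension = 0;
  tensor->quantization.type = kTfLiteAffineQuantization;
  tensor->quantization.params = params;  // released by TfLiteQuantizationFree
}
```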
|
/external/tensorflow/tensorflow/lite/c/ |
D | common.c |
      91   void TfLiteQuantizationFree(TfLiteQuantization* quantization) {  in TfLiteQuantizationFree() argument
      92   if (quantization->type == kTfLiteAffineQuantization) {  in TfLiteQuantizationFree()
      94   (TfLiteAffineQuantization*)(quantization->params);  in TfLiteQuantizationFree()
      105  quantization->params = NULL;  in TfLiteQuantizationFree()
      106  quantization->type = kTfLiteNoQuantization;  in TfLiteQuantizationFree()
      152  TfLiteQuantizationFree(&t->quantization);  in TfLiteTensorFree()
      158  TfLiteQuantizationParams quantization, char* buffer,  in TfLiteTensorReset() argument
      166  tensor->params = quantization;  in TfLiteTensorReset()
      173  tensor->quantization.type = kTfLiteNoQuantization;  in TfLiteTensorReset()
      174  tensor->quantization.params = NULL;  in TfLiteTensorReset()
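A minimal lifecycle sketch pairing the calls above; the wrapper function is illustrative:

```cpp
#include "tensorflow/lite/c/common.h"

// After TfLiteQuantizationFree the struct is safe to reuse: lines 105-106
// above show it NULLs params and resets type to kTfLiteNoQuantization.
void ResetTensorQuantization(TfLiteTensor* tensor) {
  TfLiteQuantizationFree(&tensor->quantization);
  // Here tensor->quantization.type == kTfLiteNoQuantization and
  // tensor->quantization.params == NULL.
}
```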
|
/external/tensorflow/tensorflow/lite/delegates/gpu/common/ |
D | model_builder_test.cc |
      238  TfLiteQuantization quantization;  in InterpreterFp16() local
      239  quantization.type = kTfLiteNoQuantization;  in InterpreterFp16()
      242  0, TfLiteType::kTfLiteFloat16, "t0", dims, quantization, false),  in InterpreterFp16()
      246  2, TfLiteType::kTfLiteFloat16, "t2", dims, quantization, false),  in InterpreterFp16()
      259  1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),  in InterpreterFp16()
      263  3, TfLiteType::kTfLiteFloat32, "t3", dims, quantization, false),  in InterpreterFp16()
      504  TfLiteQuantization quantization;  in InterpreterFp32() local
      505  quantization.type = kTfLiteNoQuantization;  in InterpreterFp32()
      507  0, TfLiteType::kTfLiteUInt8, "t0", dims, quantization, false),  in InterpreterFp32()
      511  1, TfLiteType::kTfLiteFloat32, "t1", dims, quantization, false),  in InterpreterFp32()
      [all …]
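The registration pattern at lines 238-246 marks tensors with an explicit "no quantization" value before handing them to the interpreter. A sketch; the index, name, and shape are illustrative:

```cpp
#include <vector>

#include "tensorflow/lite/interpreter.h"

// Sketch: register a float tensor with an explicit kTfLiteNoQuantization
// marker, as the test setup above does for each tensor.
void AddFloatTensor(tflite::Interpreter* interpreter, int index) {
  TfLiteQuantization quantization;
  quantization.type = kTfLiteNoQuantization;
  quantization.params = nullptr;  // no payload for kTfLiteNoQuantization
  interpreter->SetTensorParametersReadWrite(
      index, TfLiteType::kTfLiteFloat32, "t", std::vector<int>{1, 4},
      quantization, /*is_variable=*/false);
}
```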
|
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/ |
D | quantization_info.proto |
      7   // Represents the quantization parameters for a list of named tensors.
      11  // quantization specification.
      39  // The quantized axis index if it is per-axis quantization.
      48  // The quantization parameters for a named tensor.
      62  // The quantization parameters for the tensor. If it is for per-axis, the
      67  // Metadata about the quantization parameters.
      71  // List of quantization parameters for tensors.
|
D | quantization.td |
      16   // This is the quantization definition file for TensorFlow.
      60   // TFL native op traits (for quantization).
      84   // are used to generate the op quantization specs.
      101  [{Returns quantization dim for the affine operand.}],
      141  // and also the quantization dimension if per-axis quantization is support.
      142  // If the quantization dimension is -1, per-axis quantization isn't supported.
      149  // apply quantization on this op.
|
/external/tensorflow/tensorflow/lite/ |
D | interpreter.cc |
      62   TfLiteQuantization quantization;  in GetQuantizationFromLegacy() local
      63   quantization.type = kTfLiteAffineQuantization;  in GetQuantizationFromLegacy()
      70   quantization.params = affine_quantization;  in GetQuantizationFromLegacy()
      72   return quantization;  in GetQuantizationFromLegacy()
      309  const std::vector<int>& dims, TfLiteQuantization quantization,  in SetTensorParametersReadOnly() argument
      312  tensor_index, type, name, dims.size(), dims.data(), quantization, buffer,  in SetTensorParametersReadOnly()
      318  const std::vector<int>& dims, TfLiteQuantization quantization,  in SetTensorParametersReadWrite() argument
      321  tensor_index, type, name, dims.size(), dims.data(), quantization,  in SetTensorParametersReadWrite()
      327  const int* dims, TfLiteQuantizationParams quantization, const char* buffer,  in SetTensorParametersReadOnly() argument
      329  TfLiteQuantization new_quantization = GetQuantizationFromLegacy(quantization);  in SetTensorParametersReadOnly()
      [all …]
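GetQuantizationFromLegacy (lines 62-72) bridges the legacy flat TfLiteQuantizationParams {scale, zero_point} into a one-entry affine quantization. A sketch of that conversion; allocation details are illustrative and the real implementation may differ:

```cpp
#include <cstdlib>

#include "tensorflow/lite/c/common.h"

// Sketch: wrap legacy per-tensor params into TfLiteAffineQuantization, as
// the legacy SetTensorParametersReadOnly overload does via
// GetQuantizationFromLegacy.
TfLiteQuantization FromLegacySketch(const TfLiteQuantizationParams& legacy) {
  TfLiteQuantization quantization;
  quantization.type = kTfLiteAffineQuantization;
  auto* affine = static_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  affine->scale = TfLiteFloatArrayCreate(1);
  affine->scale->data[0] = legacy.scale;
  affine->zero_point = TfLiteIntArrayCreate(1);
  affine->zero_point->data[0] = legacy.zero_point;
  affine->quantized_dimension = 0;
  quantization.params = affine;
  return quantization;
}
```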
|
/external/tensorflow/tensorflow/lite/tools/optimize/calibration/ |
D | calibration_reader.cc |
      48  if (tensor->quantization) {  in AddCalibrationToModel()
      49  const float existing_min = tensor->quantization->min[0];  in AddCalibrationToModel()
      50  const float existing_max = tensor->quantization->max[0];  in AddCalibrationToModel()
      58  subgraph->tensors[tensorid_stat.first]->quantization =  in AddCalibrationToModel()
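Lines 48-50 read the min/max a tensor already carries before applying new calibration statistics. A sketch of one plausible merge policy (widening the recorded range rather than overwriting it); whether the real pass widens or overwrites is an assumption here:

```cpp
#include <algorithm>

// Hypothetical merge of freshly calibrated stats into an existing range.
void MergeMinMaxSketch(float calibrated_min, float calibrated_max,
                       float* existing_min, float* existing_max) {
  *existing_min = std::min(*existing_min, calibrated_min);
  *existing_max = std::max(*existing_max, calibrated_max);
}
```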
|