Searched refs:quant_specs (Results 1 – 12 of 12) sorted by relevance
/external/tensorflow/tensorflow/compiler/mlir/lite/
tf_tfl_passes.cc
  in AddQuantizationPasses() [quant_specs is an argument]:
     38  void AddQuantizationPasses(const mlir::TFL::QuantizationSpecs& quant_specs,
     40    pass_manager->addPass(mlir::TFL::CreatePrepareQuantizePass(quant_specs));
     43    quant_specs.inference_type != quant_specs.inference_input_type;
     47    if (quant_specs.default_ranges.first.hasValue() ||
     48        quant_specs.default_ranges.second.hasValue()) {
     50        quant_specs.default_ranges.first.getValueOr(0.0),
     51        quant_specs.default_ranges.second.getValueOr(0.0)));
  in AddTFToTFLConversionPasses():
     76    if (!pass_config.quant_specs.serialized_quant_stats.empty()) {
     79        pass_config.quant_specs.serialized_quant_stats));
    146    if (pass_config.quant_specs.RunPropagationAndRewriteQuantizationPasses()) {
  [all …]
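Read together, these hits show the wiring pattern: a single QuantizationSpecs value decides which quantization passes get scheduled. A minimal sketch of a caller, assuming the converter headers shown in these results are on the include path; the function BuildQuantPipeline and the default-range values are illustrative, not from the TF source:

  #include "mlir/Pass/PassManager.h"
  #include "tensorflow/compiler/mlir/lite/quantization/quantization_config.h"
  #include "tensorflow/compiler/mlir/lite/tf_tfl_passes.h"

  // Request int8 inference and supply fallback min/max so the
  // default-range branch at lines 47-51 fires for ops without stats.
  void BuildQuantPipeline(mlir::OpPassManager* pass_manager) {
    mlir::TFL::QuantizationSpecs quant_specs;
    quant_specs.inference_type = tensorflow::DT_QINT8;
    quant_specs.default_ranges = {-1.0, 1.0};  // assumed pair-of-Optional shape
    mlir::TFL::AddQuantizationPasses(quant_specs, pass_manager);
  }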
tf_tfl_translate.cc
  in main() [quant_specs is a local]:
    147  mlir::TFL::QuantizationSpecs quant_specs;
    149      inference_type, &quant_specs)) {
    154    quant_specs.weight_quantization = true;
    156      quant_specs.inference_type = tensorflow::DT_QINT8;
    158      quant_specs.inference_type = tensorflow::DT_HALF;
    165    quant_specs.inference_input_type = quant_specs.inference_type;
    176    quant_specs.serialized_quant_stats = file->getBuffer().str();
    179  mlir::TFL::PassConfig pass_config(quant_specs);
    189      emit_select_tf_ops, emit_custom_ops, quant_specs, &result, &pm);
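The stand-alone translate driver is the clearest end-to-end example: parse per-input quant specs, optionally switch on weight quantization, attach serialized calibration stats, then wrap everything in a PassConfig. A condensed sketch of main()'s flow, assuming ParseInputNodeQuantSpecs returns true on failure (as the error branch at line 149 suggests); the variables use_weight_quant, use_int8, and quant_stats_path are illustrative stand-ins for the tool's real command-line options:

  mlir::TFL::QuantizationSpecs quant_specs;
  if (mlir::TFL::ParseInputNodeQuantSpecs(input_arrays, min_values, max_values,
                                          inference_type, &quant_specs)) {
    llvm::errs() << "failed to parse input quantization specs\n";
    return 1;
  }
  if (use_weight_quant) {
    quant_specs.weight_quantization = true;
    quant_specs.inference_type =
        use_int8 ? tensorflow::DT_QINT8 : tensorflow::DT_HALF;
    quant_specs.inference_input_type = quant_specs.inference_type;  // line 165
  }
  std::string error;
  if (auto file = mlir::openInputFile(quant_stats_path, &error))
    quant_specs.serialized_quant_stats = file->getBuffer().str();  // line 176
  mlir::TFL::PassConfig pass_config(quant_specs);                  // line 179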
tf_to_tfl_flatbuffer.cc
  in ConvertTFExecutorToTFLOrFlatbuffer() [quant_specs is an argument]:
    103      const mlir::TFL::QuantizationSpecs& quant_specs, std::string* result,
    118    if (!quant_specs.RunWeightQuantization()) {
    139    if (quant_specs.inference_type == tensorflow::DT_QINT8) {
    141    } else if (quant_specs.inference_type == tensorflow::DT_HALF) {
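On the export side the specs are only read: the converter skips weight quantization unless RunWeightQuantization() is set, then branches on the inference type to pick int8 versus float16 weights. A hypothetical helper restating that branch; the enum and function name are illustrative, not part of the TF API:

  enum class WeightQuantMode { kNone, kInt8, kFloat16 };

  WeightQuantMode GetWeightQuantMode(const mlir::TFL::QuantizationSpecs& specs) {
    if (!specs.RunWeightQuantization()) return WeightQuantMode::kNone;  // line 118
    if (specs.inference_type == tensorflow::DT_QINT8)
      return WeightQuantMode::kInt8;     // line 139
    if (specs.inference_type == tensorflow::DT_HALF)
      return WeightQuantMode::kFloat16;  // line 141
    return WeightQuantMode::kNone;
  }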
tf_tfl_passes.h
     32  void AddQuantizationPasses(const mlir::TFL::QuantizationSpecs& quant_specs,
tf_to_tfl_flatbuffer.h
     54      const mlir::TFL::QuantizationSpecs& quant_specs, std::string* result,
/external/tensorflow/tensorflow/compiler/mlir/lite/python/
graphdef_to_tfl_flatbuffer.cc
  in ConvertGraphDefToTFLiteFlatBuffer() [quant_specs is a local]:
    173  mlir::TFL::QuantizationSpecs quant_specs;
    181    quant_specs.inference_input_type =
    187    if (quant_specs.inference_input_type != tensorflow::DT_FLOAT) {
    188      inference_type = quant_specs.inference_input_type;
    217        inference_type, &quant_specs)) {
    225    quant_specs.weight_quantization = true;
    227      quant_specs.inference_type = tensorflow::DT_HALF;
    228      quant_specs.inference_input_type = tensorflow::DT_HALF;
    230      quant_specs.inference_type = tensorflow::DT_QINT8;
    231      quant_specs.inference_input_type = tensorflow::DT_QINT8;
  [all …]
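The GraphDef entry point adds one wrinkle over the stand-alone driver: a quantized inference_input_type overrides the inference type before the specs are parsed, and the weight-quantization switch sets input and inference types together. A condensed sketch of that flow; the quantize_weights and use_float16 flags are illustrative:

  tensorflow::DataType inference_type = tensorflow::DT_FLOAT;
  mlir::TFL::QuantizationSpecs quant_specs;
  quant_specs.inference_input_type = tensorflow::DT_QINT8;  // e.g. from converter flags
  if (quant_specs.inference_input_type != tensorflow::DT_FLOAT)
    inference_type = quant_specs.inference_input_type;  // lines 187-188
  if (quantize_weights) {
    quant_specs.weight_quantization = true;  // line 225
    quant_specs.inference_type =
        use_float16 ? tensorflow::DT_HALF : tensorflow::DT_QINT8;
    quant_specs.inference_input_type = quant_specs.inference_type;  // lines 227-231
  }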
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/
quantize_model.cc
  in QuantizeModel() [quant_specs is a local]:
     76  TFL::QuantizationSpecs quant_specs;
     77  quant_specs.inference_type = tensorflow::DT_QINT8;
     78  quant_specs.post_training_quantization = true;
     85    quant_specs.inference_type = tensorflow::DT_QUINT8;
     88  pm.addPass(TFL::CreatePrepareQuantizePass(quant_specs));
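For post-training quantization of an already-converted model the setup is much smaller: int8 by default, uint8 on request, post_training_quantization set, and PrepareQuantize scheduled directly. A sketch, assuming a ModuleOp named module is in scope and want_uint8 stands in for whatever condition guards the line-85 branch:

  mlir::TFL::QuantizationSpecs quant_specs;
  quant_specs.inference_type = tensorflow::DT_QINT8;
  quant_specs.post_training_quantization = true;
  if (want_uint8)  // illustrative guard
    quant_specs.inference_type = tensorflow::DT_QUINT8;
  mlir::PassManager pm(module.getContext());
  pm.addPass(mlir::TFL::CreatePrepareQuantizePass(quant_specs));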
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/
quantization_config.cc
  in ParseInputNodeQuantSpecs() [quant_specs is an argument]:
     46      QuantizationSpecs* quant_specs) {
     79      quant_specs);
  in GetInputNodeQuantSpecs() [quant_specs is an argument]:
     86      QuantizationSpecs* quant_specs) {
     87    quant_specs->inference_type = inference_type;
    100    quant_specs->input_ranges.push_back({node_mins[i], node_maxs[i]});
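GetInputNodeQuantSpecs is where per-input calibration ranges enter the struct: it records the inference type and zips the parallel node_mins/node_maxs vectors into input_ranges. A hypothetical re-statement of that core loop (the helper name FillInputRanges is mine, not TF's):

  void FillInputRanges(tensorflow::DataType inference_type,
                       const std::vector<double>& node_mins,
                       const std::vector<double>& node_maxs,
                       mlir::TFL::QuantizationSpecs* quant_specs) {
    quant_specs->inference_type = inference_type;  // line 87
    for (size_t i = 0; i < node_mins.size(); ++i)  // assumes equal lengths
      quant_specs->input_ranges.push_back({node_mins[i], node_maxs[i]});
  }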
quantization_config.h
    122      QuantizationSpecs* quant_specs);
    131      QuantizationSpecs* quant_specs);
/external/tensorflow/tensorflow/compiler/mlir/lite/transforms/
prepare_quantize.cc
  in PrepareQuantizePass() [quant_specs is an argument]:
     78  explicit PrepareQuantizePass(const QuantizationSpecs& quant_specs)
     79      : quant_specs_(quant_specs) {}
  in CreatePrepareQuantizePass() [quant_specs is an argument]:
    227      const QuantizationSpecs& quant_specs) {
    228    return std::make_unique<PrepareQuantizePass>(quant_specs);
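This is the standard MLIR pattern of a config-carrying pass: an explicit constructor copies the specs into a quant_specs_ member, and a free factory wraps construction in a unique_ptr, so client code only ever writes the one-liner below (as quantize_model.cc line 88 does). Storing the specs by value keeps the pass safe to run after the caller's copy goes away.

  pm.addPass(mlir::TFL::CreatePrepareQuantizePass(quant_specs));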
passes.h
     51      const QuantizationSpecs& quant_specs);
/external/tensorflow/tensorflow/compiler/mlir/lite/common/
tfl_pass_config.h
     34    quant_specs(std::move(specs)),
     49  QuantizationSpecs quant_specs;  [member]
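PassConfig closes the loop: it owns its QuantizationSpecs member by value and move-initializes it in the constructor, so the specs can be built once and threaded through the conversion entry points. Typical construction, mirroring tf_tfl_translate.cc line 179; the weight_quantization setting is illustrative:

  mlir::TFL::QuantizationSpecs specs;
  specs.weight_quantization = true;  // illustrative
  mlir::TFL::PassConfig pass_config(std::move(specs));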