
Searched refs:quant_specs (Results 1 – 12 of 12) sorted by relevance

/external/tensorflow/tensorflow/compiler/mlir/lite/
tf_tfl_passes.cc
38   void AddQuantizationPasses(const mlir::TFL::QuantizationSpecs& quant_specs, in AddQuantizationPasses() argument
40 pass_manager->addPass(mlir::TFL::CreatePrepareQuantizePass(quant_specs)); in AddQuantizationPasses()
43 quant_specs.inference_type != quant_specs.inference_input_type; in AddQuantizationPasses()
47 if (quant_specs.default_ranges.first.hasValue() || in AddQuantizationPasses()
48 quant_specs.default_ranges.second.hasValue()) { in AddQuantizationPasses()
50 quant_specs.default_ranges.first.getValueOr(0.0), in AddQuantizationPasses()
51 quant_specs.default_ranges.second.getValueOr(0.0))); in AddQuantizationPasses()
76 if (!pass_config.quant_specs.serialized_quant_stats.empty()) { in AddTFToTFLConversionPasses()
79 pass_config.quant_specs.serialized_quant_stats)); in AddTFToTFLConversionPasses()
146 if (pass_config.quant_specs.RunPropagationAndRewriteQuantizationPasses()) { in AddTFToTFLConversionPasses()
[all …]
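
Taken together, the tf_tfl_passes.cc hits show how quantization is wired into the converter pipeline. A minimal sketch of a caller, assuming (per the addPass calls above) that the second parameter of AddQuantizationPasses is an mlir::OpPassManager* and that the function lives in the tensorflow namespace; the includes are approximate:

    #include "mlir/Pass/PassManager.h"
    #include "tensorflow/compiler/mlir/lite/quantization/quantization_config.h"
    #include "tensorflow/compiler/mlir/lite/tf_tfl_passes.h"

    void BuildInt8Pipeline(mlir::OpPassManager* pass_manager) {
      mlir::TFL::QuantizationSpecs quant_specs;
      quant_specs.inference_type = tensorflow::DT_QINT8;
      // Fallback (min, max) for ops with no recorded statistics; both ends
      // are optional, matching the hasValue()/getValueOr() checks above.
      quant_specs.default_ranges = {-1.0, 1.0};
      tensorflow::AddQuantizationPasses(quant_specs, pass_manager);
    }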
tf_tfl_translate.cc
147  mlir::TFL::QuantizationSpecs quant_specs; in main() local
149 inference_type, &quant_specs)) { in main()
154 quant_specs.weight_quantization = true; in main()
156 quant_specs.inference_type = tensorflow::DT_QINT8; in main()
158 quant_specs.inference_type = tensorflow::DT_HALF; in main()
165 quant_specs.inference_input_type = quant_specs.inference_type; in main()
176 quant_specs.serialized_quant_stats = file->getBuffer().str(); in main()
179 mlir::TFL::PassConfig pass_config(quant_specs); in main()
189 emit_select_tf_ops, emit_custom_ops, quant_specs, &result, &pm); in main()
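
The translator's main() above selects weight-only quantization (int8 or fp16 storage with float compute) from command-line options. A condensed sketch of that selection, using only the fields visible in the hits; the flag names are hypothetical:

    // Hypothetical flags standing in for the tool's real command-line options.
    bool quantize_weights = true;
    bool use_fp16 = false;

    mlir::TFL::QuantizationSpecs quant_specs;
    if (quantize_weights) {
      quant_specs.weight_quantization = true;
      quant_specs.inference_type =
          use_fp16 ? tensorflow::DT_HALF : tensorflow::DT_QINT8;
    }
    // Line 165 above: the input type follows the chosen inference type.
    quant_specs.inference_input_type = quant_specs.inference_type;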
tf_to_tfl_flatbuffer.cc
103  const mlir::TFL::QuantizationSpecs& quant_specs, std::string* result, in ConvertTFExecutorToTFLOrFlatbuffer() argument
118 if (!quant_specs.RunWeightQuantization()) { in ConvertTFExecutorToTFLOrFlatbuffer()
139 if (quant_specs.inference_type == tensorflow::DT_QINT8) { in ConvertTFExecutorToTFLOrFlatbuffer()
141 } else if (quant_specs.inference_type == tensorflow::DT_HALF) { in ConvertTFExecutorToTFLOrFlatbuffer()
tf_tfl_passes.h
32   void AddQuantizationPasses(const mlir::TFL::QuantizationSpecs& quant_specs,
tf_to_tfl_flatbuffer.h
54   const mlir::TFL::QuantizationSpecs& quant_specs, std::string* result,
/external/tensorflow/tensorflow/compiler/mlir/lite/python/
graphdef_to_tfl_flatbuffer.cc
173  mlir::TFL::QuantizationSpecs quant_specs; in ConvertGraphDefToTFLiteFlatBuffer() local
181 quant_specs.inference_input_type = in ConvertGraphDefToTFLiteFlatBuffer()
187 if (quant_specs.inference_input_type != tensorflow::DT_FLOAT) { in ConvertGraphDefToTFLiteFlatBuffer()
188 inference_type = quant_specs.inference_input_type; in ConvertGraphDefToTFLiteFlatBuffer()
217 inference_type, &quant_specs)) { in ConvertGraphDefToTFLiteFlatBuffer()
225 quant_specs.weight_quantization = true; in ConvertGraphDefToTFLiteFlatBuffer()
227 quant_specs.inference_type = tensorflow::DT_HALF; in ConvertGraphDefToTFLiteFlatBuffer()
228 quant_specs.inference_input_type = tensorflow::DT_HALF; in ConvertGraphDefToTFLiteFlatBuffer()
230 quant_specs.inference_type = tensorflow::DT_QINT8; in ConvertGraphDefToTFLiteFlatBuffer()
231 quant_specs.inference_input_type = tensorflow::DT_QINT8; in ConvertGraphDefToTFLiteFlatBuffer()
[all …]
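
The Python-facing converter follows the same pattern as the translator, but first derives defaults from the requested input type (lines 181–188) before choosing between fp16 and int8 weight quantization. A sketch of that branch, with `quantize_to_fp16` as a hypothetical stand-in for the converter's actual condition:

    mlir::TFL::QuantizationSpecs quant_specs;
    quant_specs.weight_quantization = true;
    if (quantize_to_fp16) {
      quant_specs.inference_type = tensorflow::DT_HALF;
      quant_specs.inference_input_type = tensorflow::DT_HALF;
    } else {
      quant_specs.inference_type = tensorflow::DT_QINT8;
      quant_specs.inference_input_type = tensorflow::DT_QINT8;
    }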
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/
quantize_model.cc
76   TFL::QuantizationSpecs quant_specs; in QuantizeModel() local
77 quant_specs.inference_type = tensorflow::DT_QINT8; in QuantizeModel()
78 quant_specs.post_training_quantization = true; in QuantizeModel()
85 quant_specs.inference_type = tensorflow::DT_QUINT8; in QuantizeModel()
88 pm.addPass(TFL::CreatePrepareQuantizePass(quant_specs)); in QuantizeModel()
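
QuantizeModel() above is the post-training quantization entry point: it requests int8 math (falling back to DT_QUINT8 when unsigned output is requested) and schedules the prepare pass. A sketch of that setup; constructing the PassManager this way is an assumption:

    mlir::MLIRContext context;
    mlir::PassManager pm(&context);

    mlir::TFL::QuantizationSpecs quant_specs;
    quant_specs.inference_type = tensorflow::DT_QINT8;
    quant_specs.post_training_quantization = true;
    pm.addPass(mlir::TFL::CreatePrepareQuantizePass(quant_specs));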
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/
quantization_config.cc
46   QuantizationSpecs* quant_specs) { in ParseInputNodeQuantSpecs() argument
79 quant_specs); in ParseInputNodeQuantSpecs()
86 QuantizationSpecs* quant_specs) { in GetInputNodeQuantSpecs() argument
87 quant_specs->inference_type = inference_type; in GetInputNodeQuantSpecs()
100 quant_specs->input_ranges.push_back({node_mins[i], node_maxs[i]}); in GetInputNodeQuantSpecs()
quantization_config.h
122  QuantizationSpecs* quant_specs);
131 QuantizationSpecs* quant_specs);
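
The two helpers above populate a QuantizationSpecs from user-supplied node names and calibration ranges; line 100 shows the per-input loop. A hand-rolled sketch of the same idea (the exact container and element types are assumptions):

    mlir::TFL::QuantizationSpecs specs;
    specs.inference_type = tensorflow::DT_QINT8;

    // One calibration (min, max) per input node, in input order.
    std::vector<double> node_mins = {0.0};
    std::vector<double> node_maxs = {6.0};
    for (size_t i = 0; i < node_mins.size(); ++i) {
      specs.input_ranges.push_back({node_mins[i], node_maxs[i]});
    }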
/external/tensorflow/tensorflow/compiler/mlir/lite/transforms/
prepare_quantize.cc
78   explicit PrepareQuantizePass(const QuantizationSpecs& quant_specs) in PrepareQuantizePass() argument
79 : quant_specs_(quant_specs) {} in PrepareQuantizePass()
227 const QuantizationSpecs& quant_specs) { in CreatePrepareQuantizePass() argument
228 return std::make_unique<PrepareQuantizePass>(quant_specs); in CreatePrepareQuantizePass()
passes.h
51   const QuantizationSpecs& quant_specs);
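
prepare_quantize.cc shows the pass itself: the constructor copies the specs in, and CreatePrepareQuantizePass() is a thin std::make_unique wrapper around it. The overall shape, sketched against the MLIR PassWrapper idiom; the base class and run method here are assumptions, not the file's actual declaration:

    #include "mlir/Pass/Pass.h"

    class PrepareQuantizePassSketch
        : public mlir::PassWrapper<PrepareQuantizePassSketch,
                                   mlir::FunctionPass> {
     public:
      explicit PrepareQuantizePassSketch(const mlir::TFL::QuantizationSpecs& qs)
          : quant_specs_(qs) {}
      void runOnFunction() override {
        // Would propagate quantization parameters per quant_specs_.
      }
     private:
      mlir::TFL::QuantizationSpecs quant_specs_;
    };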
/external/tensorflow/tensorflow/compiler/mlir/lite/common/
tfl_pass_config.h
34   quant_specs(std::move(specs)),
49 QuantizationSpecs quant_specs; member
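
Finally, tfl_pass_config.h shows where the specs end up: PassConfig moves them into a public quant_specs member, which the pipeline-building hits in tf_tfl_passes.cc then read back (e.g. pass_config.quant_specs.serialized_quant_stats). A brief usage sketch:

    mlir::TFL::QuantizationSpecs quant_specs;
    quant_specs.inference_type = tensorflow::DT_QINT8;

    // PassConfig moves the specs into its quant_specs member.
    mlir::TFL::PassConfig pass_config(quant_specs);
    if (!pass_config.quant_specs.serialized_quant_stats.empty()) {
      // ... import the serialized statistics, as AddTFToTFLConversionPasses()
      // does above.
    }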