
Searched refs:inference_type (Results 1 – 25 of 36) sorted by relevance


/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/
quantization_config.h
70 tensorflow::DataType inference_type = tensorflow::DT_FLOAT; member
98 return inference_type != tensorflow::DT_FLOAT && !weight_quantization; in RunPropagationAndRewriteQuantizationPasses()
106 switch (inference_type) { in IsSignedInferenceType()
118 switch (inference_type) { in GetQuantizationTypeWidth()
143 absl::string_view inference_type,
153 tensorflow::DataType inference_type, QuantizationSpecs* quant_specs);
quantization_config.cc
45 absl::string_view inference_type, in ParseInputNodeQuantSpecs() argument
74 if (!inference_type.empty() && in ParseInputNodeQuantSpecs()
75 !DataType_Parse(std::string(inference_type), &final_type)) { in ParseInputNodeQuantSpecs()
86 tensorflow::DataType inference_type, QuantizationSpecs* quant_specs) { in GetInputNodeQuantSpecs() argument
87 quant_specs->inference_type = inference_type; in GetInputNodeQuantSpecs()
93 if (IsQuantizationType(inference_type)) { in GetInputNodeQuantSpecs()
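
The two switch statements above (lines 106 and 118) dispatch on the parsed type to decide signedness and bit width. A rough Python paraphrase of that dispatch, using TensorFlow's quantized dtypes; the authoritative case lists are the C++ switches themselves:

    import tensorflow as tf

    # Illustrative mirror of IsSignedInferenceType / GetQuantizationTypeWidth.
    # Maps each quantized inference type to (is_signed, bit_width).
    _QUANT_TYPE_INFO = {
        tf.quint8: (False, 8),
        tf.qint8:  (True, 8),
        tf.qint16: (True, 16),
        tf.qint32: (True, 32),
    }

    def is_quantization_type(dtype):
        return dtype in _QUANT_TYPE_INFO

    def quantization_type_width(dtype):
        return _QUANT_TYPE_INFO[dtype][1]
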
/external/tensorflow/tensorflow/lite/testing/
tflite_model_test.bzl
28 inference_type = "float",
43 inference_type: The data type for inference and output.
61 inference_type = inference_type,
101 inference_type):
108 inference_type: The data type for inference and output.
113 if inference_type == "float":
115 "--inference_type=FLOAT",
118 elif inference_type == "quantized":
120 "--inference_type=QUANTIZED_UINT8",
124 fail("Invalid inference type (%s). Expected 'float' or 'quantized'" % inference_type)
toco_convert.py
52 inference_type = "FLOAT"
56 inference_type = "QUANTIZED_UINT8"
58 " --inference_type=%s" % inference_type +
/external/tensorflow/tensorflow/compiler/mlir/lite/python/
tf_tfl_flatbuffer_helpers.cc
203 tensorflow::DataType inference_type = in PopulateQuantizationSpecs() local
204 ConvertIODataTypeToDataType(toco_flags.inference_type()); in PopulateQuantizationSpecs()
208 inference_type = quant_specs->inference_input_type; in PopulateQuantizationSpecs()
230 if (inference_type == DT_QINT8 || inference_type == DT_QUINT8) { in PopulateQuantizationSpecs()
234 flag.std_value(), inference_type)); in PopulateQuantizationSpecs()
245 inference_type, quant_specs)) { in PopulateQuantizationSpecs()
255 quant_specs->inference_type = tensorflow::DT_HALF; in PopulateQuantizationSpecs()
258 quant_specs->inference_type = tensorflow::DT_QINT8; in PopulateQuantizationSpecs()
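
PopulateQuantizationSpecs is where converter options become a QuantizationSpecs: the branches at lines 255 and 258 select DT_HALF or DT_QINT8. From the public API, the DT_HALF path corresponds to float16 post-training quantization; a hedged sketch (the saved-model path is illustrative):

    import tensorflow as tf

    # Float16 post-training quantization; this is the converter-level setting
    # that maps onto the quant_specs->inference_type = DT_HALF branch above.
    converter = tf.lite.TFLiteConverter.from_saved_model("/tmp/saved_model")
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_types = [tf.float16]
    tflite_fp16_model = converter.convert()
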
graphdef_to_tfl_flatbuffer.cc
94 if (toco_flags.inference_type() == toco::IODataType::QUANTIZED_INT16) { in ConvertGraphDefToTFLiteFlatBuffer()
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/
quantize_model.cc
43 const tflite::TensorType& inference_type, in QuantizeModel() argument
82 quant_specs.inference_type = tflite::TflTypeToTfType(inference_type); in QuantizeModel()
93 quant_specs.inference_type = input_tf_type; in QuantizeModel()
quantize_model.h
40 const tflite::TensorType& inference_type,
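
QuantizeModel is reached from Python through the wrap_toco shim and convert.py's mlir_quantize (whose inference_type argument appears at convert.py line 129 below). A hedged sketch of a direct call, assuming a flatbuffer with calibration statistics already exists at the illustrative path:

    from tensorflow.lite.python import convert
    from tensorflow.lite.toco import types_pb2

    # Assumes /tmp/calibrated.tflite already carries calibration statistics;
    # the path is illustrative.
    with open("/tmp/calibrated.tflite", "rb") as f:
        calibrated_model = f.read()

    quantized_model = convert.mlir_quantize(
        calibrated_model, inference_type=types_pb2.INT8)
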
/external/tensorflow/tensorflow/lite/python/
convert.py
51 return ((toco_flags.inference_type in _quantized_inference_types or
129 inference_type=_types_pb2.INT8, argument
151 inference_type,
309 def build_toco_flags(inference_type=dtypes.float32, argument
331 toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
336 toco.inference_input_type = toco.inference_type
365 inference_type=dtypes.float32, argument
482 toco = build_toco_flags(inference_type, inference_input_type, input_format,
wrap_toco.py
47 fully_quantize, inference_type, argument
53 inference_type,
tflite_convert.py
141 if flags.inference_type:
142 converter.inference_type = _parse_inference_type(flags.inference_type,
157 if converter.inference_type == dtypes.float32:
209 if converter.inference_type != dtypes.float32:
212 converter.inference_type = dtypes.float32
convert_test.py
61 inference_type=dtypes.uint8,
75 inference_type=dtypes.float32)
113 inference_type=dtypes.uint8,
160 inference_type=dtypes.uint8)
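
In the TF1 converter API these options are plain attributes, which is what tflite_convert.py and convert_test.py above exercise. A minimal quantized-inference sketch matching the dtypes.uint8 cases; graph path, array names, and the (mean, std_dev) stats are illustrative:

    import tensorflow as tf

    # TF1-style uint8 quantized inference, mirroring convert_test.py above.
    converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
        "/tmp/frozen_graph.pb",
        input_arrays=["input"],
        output_arrays=["output"])
    converter.inference_type = tf.uint8
    converter.quantized_input_stats = {"input": (128.0, 127.0)}  # (mean, std_dev)
    tflite_model = converter.convert()
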
/external/tensorflow/tensorflow/compiler/mlir/lite/
tf_tfl_translate.cc
196 inference_type, &quant_specs)) { in main()
203 quant_specs.inference_type = tensorflow::DT_QINT8; in main()
205 quant_specs.inference_type = tensorflow::DT_HALF; in main()
212 quant_specs.inference_input_type = quant_specs.inference_type; in main()
tf_to_tfl_flatbuffer.cc
206 if (quant_specs.inference_type == tensorflow::DT_QINT8) { in ConvertTFExecutorToTFLOrFlatbuffer()
208 } else if (quant_specs.inference_type == tensorflow::DT_HALF) { in ConvertTFExecutorToTFLOrFlatbuffer()
/external/tensorflow/tensorflow/python/lite/
toco_python_api_wrapper.cc
60 bool fully_quantize, int inference_type, bool enable_numeric_verify) { in PYBIND11_MODULE() argument
63 inference_type, enable_numeric_verify)); in PYBIND11_MODULE()
/external/tensorflow/tensorflow/lite/toco/
toco_cmdline_flags.cc
79 Flag("inference_type", parsed_flags.inference_type.bind(), in ParseTocoFlagsFromCommandLineFlags()
80 parsed_flags.inference_type.default_value(), in ParseTocoFlagsFromCommandLineFlags()
268 PARSE_TOCO_FLAG(IODataType, inference_type, FlagRequirement::kNone); in ReadTocoFlagsFromCommandLineFlags()
338 if (toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) { in ReadTocoFlagsFromCommandLineFlags()
toco_tooling.cc
168 type = ConvertIODataTypeToArrayDataType(toco_flags.inference_type()); in SetFinalDataTypeOnInputs()
247 const IODataType inference_type = toco_flags.inference_type(); in TransformWithStatus() local
251 (inference_type == QUANTIZED_UINT8 || inference_type == QUANTIZED_INT16); in TransformWithStatus()
toco_flags.proto
49 // Similar to inference_type, but allows to control specifically the
52 // If not set, then the value of inference_type is implicitly used, i.e.
55 // Like inference_type, this only affects real-number arrays. By "real-number"
90 optional IODataType inference_type = 4; field
/external/tensorflow/tensorflow/lite/toco/python/
toco_python_api.h
47 bool fully_quantize, int inference_type,
toco_python_api.cc
239 bool fully_quantize, int inference_type, in MlirQuantizeModel() argument
261 switch (inference_type) { in MlirQuantizeModel()
toco_from_protos_test.py
52 toco_flags.inference_type = types_pb2.FLOAT
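
toco_from_protos_test.py drives the converter with explicit protos, and toco_flags.proto above declares inference_type as field 4. A hedged sketch of building such a flags proto; the generated-module import paths assume a TensorFlow source checkout:

    from tensorflow.lite.toco import toco_flags_pb2
    from tensorflow.lite.toco import types_pb2

    toco_flags = toco_flags_pb2.TocoFlags()
    # IODataType field 4 in toco_flags.proto, shown above.
    toco_flags.inference_type = types_pb2.QUANTIZED_UINT8
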
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/translate/
tf_mlir_translate_cl.h
34 extern llvm::cl::opt<std::string> inference_type;
tf_mlir_translate_cl.cc
57 opt<std::string> inference_type( variable
/external/tensorflow/tensorflow/lite/g3doc/r1/convert/
cmdline_reference.md
78 * When performing float inference (`--inference_type=FLOAT`) on a
82 * When performing quantized inference (`inference_type` is `INT8` or
95 * `--inference_type`. Type: string. Default: `FLOAT`. Data type of all
114 array in the output file. By default the `--inference_type` is used as type
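
Putting the documented flags together, a representative r1 tflite_convert invocation for quantized inference (file and array names are illustrative; the mean/std values must match how the model was trained):

    tflite_convert \
      --graph_def_file=/tmp/frozen_graph.pb \
      --output_file=/tmp/model.tflite \
      --input_arrays=input \
      --output_arrays=output \
      --inference_type=QUANTIZED_UINT8 \
      --mean_values=128 \
      --std_dev_values=127
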
/external/tensorflow/tensorflow/lite/python/testdata/
BUILD
37 "--inference_type=QUANTIZED_UINT8",
