/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/

quantization_config.h
     70: tensorflow::DataType inference_type = tensorflow::DT_FLOAT;  [member]
     98: return inference_type != tensorflow::DT_FLOAT && !weight_quantization;  [in RunPropagationAndRewriteQuantizationPasses()]
    106: switch (inference_type) {  [in IsSignedInferenceType()]
    118: switch (inference_type) {  [in GetQuantizationTypeWidth()]
    143: absl::string_view inference_type,
    153: tensorflow::DataType inference_type, QuantizationSpecs* quant_specs);
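
A minimal Python sketch of the two helpers hit at lines 106 and 118, for
orientation only; the exact case lists are assumptions read off the
tensorflow::DataType names, not a verbatim port of the C++ switches.

    # Hedged mirror of IsSignedInferenceType() / GetQuantizationTypeWidth(),
    # keyed by DataType name strings instead of the C++ enum.
    _UNSIGNED = {"DT_QUINT8", "DT_UINT8"}
    _WIDTH = {"DT_QINT8": 8, "DT_QUINT8": 8, "DT_INT8": 8, "DT_UINT8": 8,
              "DT_QINT16": 16, "DT_INT16": 16}

    def is_signed_inference_type(dtype_name):
        return dtype_name not in _UNSIGNED

    def quantization_type_width(dtype_name):
        return _WIDTH.get(dtype_name, -1)  # -1: not a quantization type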

quantization_config.cc
     45: absl::string_view inference_type,  [argument, in ParseInputNodeQuantSpecs()]
     74: if (!inference_type.empty() &&  [in ParseInputNodeQuantSpecs()]
     75:     !DataType_Parse(std::string(inference_type), &final_type)) {  [in ParseInputNodeQuantSpecs()]
     86: tensorflow::DataType inference_type, QuantizationSpecs* quant_specs) {  [argument, in GetInputNodeQuantSpecs()]
     87: quant_specs->inference_type = inference_type;  [in GetInputNodeQuantSpecs()]
     93: if (IsQuantizationType(inference_type)) {  [in GetInputNodeQuantSpecs()]

/external/tensorflow/tensorflow/lite/testing/

tflite_model_test.bzl
     28: inference_type = "float",
     43: inference_type: The data type for inference and output.
     61: inference_type = inference_type,
    101: inference_type):
    108: inference_type: The data type for inference and output.
    113: if inference_type == "float":
    115:     "--inference_type=FLOAT",
    118: elif inference_type == "quantized":
    120:     "--inference_type=QUANTIZED_UINT8",
    124: fail("Invalid inference type (%s). Expected 'float' or 'quantized'" % inference_type)
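
The branch at lines 113-124 is the whole contract of the macro's
inference_type attribute. A hedged Python mirror (Starlark shares this
syntax; the original aborts via fail() where this sketch raises):

    # Maps the .bzl-level inference_type attribute to toco command-line flags.
    def toco_inference_flags(inference_type):
        if inference_type == "float":
            return ["--inference_type=FLOAT"]
        elif inference_type == "quantized":
            return ["--inference_type=QUANTIZED_UINT8"]
        raise ValueError(
            "Invalid inference type (%s). Expected 'float' or 'quantized'"
            % inference_type)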

toco_convert.py
     52: inference_type = "FLOAT"
     56: inference_type = "QUANTIZED_UINT8"
     58: " --inference_type=%s" % inference_type +

/external/tensorflow/tensorflow/compiler/mlir/lite/python/

tf_tfl_flatbuffer_helpers.cc
    203: tensorflow::DataType inference_type =  [local, in PopulateQuantizationSpecs()]
    204:     ConvertIODataTypeToDataType(toco_flags.inference_type());  [in PopulateQuantizationSpecs()]
    208: inference_type = quant_specs->inference_input_type;  [in PopulateQuantizationSpecs()]
    230: if (inference_type == DT_QINT8 || inference_type == DT_QUINT8) {  [in PopulateQuantizationSpecs()]
    234:     flag.std_value(), inference_type));  [in PopulateQuantizationSpecs()]
    245:     inference_type, quant_specs)) {  [in PopulateQuantizationSpecs()]
    255: quant_specs->inference_type = tensorflow::DT_HALF;  [in PopulateQuantizationSpecs()]
    258: quant_specs->inference_type = tensorflow::DT_QINT8;  [in PopulateQuantizationSpecs()]
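
Around lines 230-234, quantized inference types (DT_QINT8/DT_QUINT8) make the
converter derive each input's real-valued (min, max) range from the supplied
mean/std stats. A hedged sketch of that arithmetic, assuming TOCO's
convention real_value = (quantized_value - mean_value) / std_value:

    # Derive the float range implied by (mean, std) input stats.
    def input_range_from_stats(mean_value, std_value, inference_type):
        qmin, qmax = {"DT_QUINT8": (0, 255), "DT_QINT8": (-128, 127)}[inference_type]
        return (qmin - mean_value) / std_value, (qmax - mean_value) / std_value

    # mean=128, std=127 maps uint8 [0, 255] onto roughly [-1.0, 1.0].
    print(input_range_from_stats(128.0, 127.0, "DT_QUINT8"))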

graphdef_to_tfl_flatbuffer.cc
     94: if (toco_flags.inference_type() == toco::IODataType::QUANTIZED_INT16) {  [in ConvertGraphDefToTFLiteFlatBuffer()]

/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/

quantize_model.cc
     43: const tflite::TensorType& inference_type,  [argument, in QuantizeModel()]
     82: quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);  [in QuantizeModel()]
     93: quant_specs.inference_type = input_tf_type;  [in QuantizeModel()]

quantize_model.h
     40: const tflite::TensorType& inference_type,

/external/tensorflow/tensorflow/lite/python/

convert.py
     51: return ((toco_flags.inference_type in _quantized_inference_types or
    129: inference_type=_types_pb2.INT8,  [argument]
    151: inference_type,
    309: def build_toco_flags(inference_type=dtypes.float32,  [argument]
    331: toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
    336: toco.inference_input_type = toco.inference_type
    365: inference_type=dtypes.float32,  [argument]
    482: toco = build_toco_flags(inference_type, inference_input_type, input_format,
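
build_toco_flags() defaults inference_type to dtypes.float32 and, per line
336, reuses it for inference_input_type when no separate input type is given.
In practice both values usually arrive through the TF1 converter attributes;
a hedged usage sketch (file and tensor names are placeholders):

    import tensorflow.compat.v1 as tf

    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        "frozen.pb", input_arrays=["input"], output_arrays=["output"])
    converter.inference_type = tf.uint8  # ends up in toco.inference_type
    converter.quantized_input_stats = {"input": (128.0, 127.0)}  # (mean, std)
    tflite_model = converter.convert()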

wrap_toco.py
     47: fully_quantize, inference_type,  [argument]
     53: inference_type,

tflite_convert.py
    141: if flags.inference_type:
    142:     converter.inference_type = _parse_inference_type(flags.inference_type,
    157: if converter.inference_type == dtypes.float32:
    209: if converter.inference_type != dtypes.float32:
    212:     converter.inference_type = dtypes.float32
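
Line 142 funnels the command-line string through _parse_inference_type before
assigning converter.inference_type. A hedged sketch of that mapping; the
accepted spellings are inferred from flag values seen elsewhere in this index:

    import tensorflow as tf

    _INFERENCE_TYPES = {
        "FLOAT": tf.float32,
        "INT8": tf.int8,
        "QUANTIZED_UINT8": tf.uint8,
    }

    def parse_inference_type(value):
        if value not in _INFERENCE_TYPES:
            raise ValueError("Unsupported value %r for --inference_type" % value)
        return _INFERENCE_TYPES[value]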

convert_test.py
     61: inference_type=dtypes.uint8,
     75: inference_type=dtypes.float32)
    113: inference_type=dtypes.uint8,
    160: inference_type=dtypes.uint8)

/external/tensorflow/tensorflow/compiler/mlir/lite/

tf_tfl_translate.cc
    196:     inference_type, &quant_specs)) {  [in main()]
    203: quant_specs.inference_type = tensorflow::DT_QINT8;  [in main()]
    205: quant_specs.inference_type = tensorflow::DT_HALF;  [in main()]
    212: quant_specs.inference_input_type = quant_specs.inference_type;  [in main()]

tf_to_tfl_flatbuffer.cc
    206: if (quant_specs.inference_type == tensorflow::DT_QINT8) {  [in ConvertTFExecutorToTFLOrFlatbuffer()]
    208: } else if (quant_specs.inference_type == tensorflow::DT_HALF) {  [in ConvertTFExecutorToTFLOrFlatbuffer()]

/external/tensorflow/tensorflow/python/lite/

toco_python_api_wrapper.cc
     60: bool fully_quantize, int inference_type, bool enable_numeric_verify) {  [argument, in PYBIND11_MODULE()]
     63:     inference_type, enable_numeric_verify));  [in PYBIND11_MODULE()]

/external/tensorflow/tensorflow/lite/toco/

toco_cmdline_flags.cc
     79: Flag("inference_type", parsed_flags.inference_type.bind(),  [in ParseTocoFlagsFromCommandLineFlags()]
     80:     parsed_flags.inference_type.default_value(),  [in ParseTocoFlagsFromCommandLineFlags()]
    268: PARSE_TOCO_FLAG(IODataType, inference_type, FlagRequirement::kNone);  [in ReadTocoFlagsFromCommandLineFlags()]
    338: if (toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) {  [in ReadTocoFlagsFromCommandLineFlags()]

toco_tooling.cc
    168: type = ConvertIODataTypeToArrayDataType(toco_flags.inference_type());  [in SetFinalDataTypeOnInputs()]
    247: const IODataType inference_type = toco_flags.inference_type();  [local, in TransformWithStatus()]
    251:     (inference_type == QUANTIZED_UINT8 || inference_type == QUANTIZED_INT16);  [in TransformWithStatus()]
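
Line 251 is the switch that puts classic TOCO into quantized-transform mode.
A one-line hedged restatement:

    # Only these two IODataType values trigger the quantization transforms.
    def requires_quantization(inference_type):
        return inference_type in ("QUANTIZED_UINT8", "QUANTIZED_INT16")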

toco_flags.proto
     49: // Similar to inference_type, but allows to control specifically the
     52: // If not set, then the value of inference_type is implicitly used, i.e.
     55: // Like inference_type, this only affects real-number arrays. By "real-number"
     90: optional IODataType inference_type = 4;  [field]
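
The field at line 90 is what callers set when driving the converter from
protos directly, as toco_from_protos_test.py does below. A hedged sketch;
the module paths are the in-tree ones and may differ elsewhere:

    from tensorflow.lite.toco import toco_flags_pb2, types_pb2

    toco_flags = toco_flags_pb2.TocoFlags()
    toco_flags.inference_type = types_pb2.QUANTIZED_UINT8
    # Left unset, inference_input_type implicitly follows inference_type,
    # as the proto comment at line 52 notes.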

/external/tensorflow/tensorflow/lite/toco/python/

toco_python_api.h
     47: bool fully_quantize, int inference_type,

toco_python_api.cc
    239: bool fully_quantize, int inference_type,  [argument, in MlirQuantizeModel()]
    261: switch (inference_type) {  [in MlirQuantizeModel()]
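
MlirQuantizeModel() receives inference_type as a plain int (an IODataType
enum value) through the pybind wrapper above; on the Python side it is
reached via convert.py's mlir_quantize, whose default is _types_pb2.INT8
(hit at line 129). A hedged call sketch with the signature abbreviated:

    from tensorflow.lite.python import convert
    from tensorflow.lite.toco import types_pb2

    with open("model.tflite", "rb") as f:
        quantized = convert.mlir_quantize(
            f.read(), fully_quantize=True, inference_type=types_pb2.INT8)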

toco_from_protos_test.py
     52: toco_flags.inference_type = types_pb2.FLOAT

/external/tensorflow/tensorflow/compiler/mlir/tensorflow/translate/

tf_mlir_translate_cl.h
     34: extern llvm::cl::opt<std::string> inference_type;

tf_mlir_translate_cl.cc
     57: opt<std::string> inference_type(  [variable]

/external/tensorflow/tensorflow/lite/g3doc/r1/convert/

cmdline_reference.md
     78: * When performing float inference (`--inference_type=FLOAT`) on a
     82: * When performing quantized inference (`inference_type` is `INT8` or
     95: * `--inference_type`. Type: string. Default: `FLOAT`. Data type of all
    114:     array in the output file. By default the `--inference_type` is used as type

/external/tensorflow/tensorflow/lite/python/testdata/

BUILD
     37: "--inference_type=QUANTIZED_UINT8",