/external/tensorflow/tensorflow/compiler/mlir/lite/experimental/tac/common/ |
D | targets.h |
    in GetInferenceString():
       63: inline std::string GetInferenceString(InferenceType inference_type) {
       64:   if (inference_type == FLOAT) {
       66:   } else if (inference_type == QUANTIZED_INT8) {
       68:   } else if (inference_type == QUANTIZED_UINT8) {
       70:   } else if (inference_type == HYBRID) {
    in GetInferenceTypeAnnotation():
       98:   auto inference_type = op->getAttrOfType<StringAttr>(kInferenceType);
       99:   if (inference_type == nullptr) return llvm::None;
      101:   llvm::StringRef device_name_str = inference_type.getValue();
    member:
      108:   InferenceType inference_type;
      112:   (inference_type == other.inference_type);
    [all …]
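The hits above are a straight if/else mapping from the TAC InferenceType enum to its string form, plus the attribute lookup used to read it back off an op. A minimal self-contained sketch of the mapping follows; the enum members come from the snippet, while the exact returned literals are an assumption based on the tac.inference_type strings in the tests below:

```cpp
#include <string>

// Stand-in for the TAC enum; members taken from the snippet.
enum InferenceType { FLOAT, QUANTIZED_INT8, QUANTIZED_UINT8, HYBRID };

// Mirrors the if/else chain at targets.h:63-70. The returned literals are
// an assumption based on the tac.inference_type strings in the .mlir tests.
inline std::string GetInferenceString(InferenceType inference_type) {
  if (inference_type == FLOAT) return "FLOAT";
  if (inference_type == QUANTIZED_INT8) return "QUANTIZED_INT8";
  if (inference_type == QUANTIZED_UINT8) return "QUANTIZED_UINT8";
  if (inference_type == HYBRID) return "HYBRID";
  return "UNKNOWN";
}
```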
|
/external/tensorflow/tensorflow/compiler/mlir/lite/experimental/tac/tests/ |
D | get-alternative-subgraph.mlir |
    11: …ensor<1xf32>) -> tensor<2x1xf32> attributes {tac.device = "CPU", tac.inference_type = "FLOAT", tac…
    12: …%0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, tac.device = "CPU", tac.inference_type = "FLOAT", v…
    16: … tensor<1xf32>) -> tensor<1xf32> attributes {tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    17: … %arg1 {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : t…
    18: … %arg2 {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : t…
    22: … tensor<1xf32>) -> tensor<1xf32> attributes {tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    23: … %arg1 {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : t…
    34: …ensor<1xf32>) -> tensor<2x1xf32> attributes {tac.device = "CPU", tac.inference_type = "FLOAT", tac…
    35: …ck"(%[[VAL_0]], %[[VAL_1]]) {axis = 0 : i32, tac.device = "CPU", tac.inference_type = "FLOAT", val…
    39: … tensor<1xf32>) -> tensor<1xf32> attributes {tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    [all …]
|
D | pick-subgraphs.mlir |
     5: …%0 = func.call @func_0_GPU_FLOAT(%arg0, %arg1, %arg2) {tac.device = "GPU", tac.inference_type = "F…
     6: …%1 = func.call @func_1_GPU_FLOAT(%arg0, %arg3) {tac.device = "GPU", tac.inference_type = "FLOAT", …
     7: …%2 = func.call @func_2_CPU_FLOAT(%0, %1) {tac.device = "CPU", tac.inference_type = "FLOAT", tac.in…
    10: …> attributes {tac.cost = 2.000000e+01 : f32, tac.device = "CPU", tac.inference_type = "FLOAT", tac…
    14: …> attributes {tac.cost = 4.000000e+01 : f32, tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    19: …> attributes {tac.cost = 2.000000e+01 : f32, tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    23: …> attributes {tac.cost = 8.040000e+01 : f32, tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    29: …> attributes {tac.cost = 2.000000e+02 : f32, tac.device = "CPU", tac.inference_type = "FLOAT", tac…
    34: …> attributes {tac.cost = 1.000000e+02 : f32, tac.device = "CPU", tac.inference_type = "FLOAT", tac…
    40: …_GPU_FLOAT([[VAL_0]], [[VAL_1]], [[VAL_2]]) {tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    [all …]
|
D | raise-target-subgraphs.mlir |
     5: …%arg1) {tac.device = "GPU", fused_activation_function = "RELU6", tac.inference_type = "FLOAT"} : (…
     6: …%arg2) {tac.device = "GPU", fused_activation_function = "RELU6", tac.inference_type = "FLOAT"} : (…
     7: …%arg3) {tac.device = "GPU", fused_activation_function = "RELU6", tac.inference_type = "FLOAT"} : (…
     8: …%3 = "tfl.pack"(%1, %2) {tac.device = "CPU", tac.inference_type = "FLOAT", axis = 0 : i32, values_…
    14: …AL_1]], %[[VAL_2]], %[[VAL_0]], %[[VAL_3]]) {tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    15: …unc_1_CPU_FLOAT(%[[VAL_4]]#0, %[[VAL_4]]#1) {tac.device = "CPU", tac.inference_type = "FLOAT", tac…
    19: …> (tensor<1xf32>, tensor<1xf32>) attributes {tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    20: …AL_1]] {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : t…
    21: …AL_2]] {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : t…
    22: …AL_4]] {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : t…
    [all …]
|
D | target-annotation.mlir |
     4: // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
    12: // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
    20: // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
    28: // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
    36: // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
    38: // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
    40: // CHECK: tac.device = "GPU", tac.inference_type = "FLOAT"
    42: // CHECK: tac.device = "CPU", tac.inference_type = "FLOAT"
    48: // CHECK-NOT: tac.device tac.inference_type
    50: // CHECK-NOT: tac.device tac.inference_type
    [all …]
|
D | compute-cost.mlir |
    43: …!quant.uniform<i8:f32, 0.3:-3>> attributes {tac.device = "CPU", tac.inference_type = "QUANTIZED_I…
    45: …%1 = "tfl.fully_connected"(%arg0, %arg1, %0) {tac.device = "CPU", tac.inference_type = "QUANTIZED_…
    47: …%3 = "tfl.reshape"(%1, %2) {tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8"} : (tensor<3…
    48: …%4 = "tfl.mul"(%3, %arg2) {tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8", fused_activa…
    49: …%5 = "tfl.add"(%4, %arg3) {tac.device = "CPU", tac.inference_type = "QUANTIZED_INT8", fused_activa…
|
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/ |
D | quantize_weights.cc |
    in QuantizeWeights():
       80:   const tflite::TensorType& inference_type, const StringSet& denylisted_ops,
      111:   quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
      127:   if (quant_specs.inference_type == tensorflow::DT_INT8)
      128:     quant_specs.inference_type = tensorflow::DT_QINT8;
      130:   if (!(quant_specs.inference_type == tensorflow::DT_HALF ||
      131:         quant_specs.inference_type == tensorflow::DT_QINT8)) {
      142:       << ", inference_type: " << quant_specs.inference_type << "\n";
      192:   tflite::TensorType inference_type;
      195:     inference_type = tflite::TensorType_FLOAT16;
      198:     inference_type = tflite::TensorType_INT8;
    [all …]
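The quantize_weights.cc hits show how the requested tensor type is normalized before weight quantization: plain int8 is promoted to the quantized qint8 type, and anything other than fp16 or qint8 is rejected. A minimal sketch of that check, using a stand-in DataType enum rather than the real tensorflow::DataType:

```cpp
#include <optional>

// Stand-in for tensorflow::DataType; only the members the check touches.
enum class DataType { DT_FLOAT, DT_HALF, DT_INT8, DT_QINT8 };

// Mirrors quantize_weights.cc:127-131: promote plain int8 to the quantized
// qint8 variant, then accept only fp16 or qint8 as weight-quantization targets.
std::optional<DataType> NormalizeWeightQuantType(DataType inference_type) {
  if (inference_type == DataType::DT_INT8) {
    inference_type = DataType::DT_QINT8;
  }
  if (!(inference_type == DataType::DT_HALF ||
        inference_type == DataType::DT_QINT8)) {
    return std::nullopt;  // unsupported weight-quantization target
  }
  return inference_type;
}
```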
|
D | quantize_model.cc |
    in QuantizeModel():
       53:   const tflite::TensorType& inference_type,
       92:   quant_specs.inference_type = tflite::TflTypeToTfType(inference_type);
      102:       << ", inference_type: " << quant_specs.inference_type
      114:   input_mlir_type = tflite::ConvertElementType(inference_type, mlir_builder);
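Line 114 turns the flatbuffer tensor type into an MLIR element type for the quantized model's inputs. A hypothetical, heavily reduced analogue of such a mapping using the standard mlir::Builder type factories; the real helper is tflite::ConvertElementType and covers the whole TensorType enum:

```cpp
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"

// Reduced stand-in for an element-type mapping like quantize_model.cc:114:
// fp16 weight quantization yields an f16 element type, int8 quantization an
// 8-bit integer type. Two cases only; the real mapping handles many more.
mlir::Type ElementTypeFor(bool use_float16, mlir::Builder& builder) {
  return use_float16 ? static_cast<mlir::Type>(builder.getF16Type())
                     : static_cast<mlir::Type>(builder.getIntegerType(8));
}
```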
|
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/ |
D | quantization_config.h |
     99: tensorflow::DataType inference_type = tensorflow::DT_FLOAT;  (member)
    in RunPropagationAndRewriteQuantizationPasses():
      134:   return inference_type != tensorflow::DT_FLOAT && !weight_quantization;
    in RunAndRewriteDynamicRangeQuantizationPasses():
      143:   (inference_type != tensorflow::DT_FLOAT) && weight_quantization &&
    in IsSignedInferenceType():
      151:   switch (inference_type) {
    in GetQuantizationTypeWidth():
      163:   switch (inference_type) {
    214: absl::string_view inference_type,
    224: tensorflow::DataType inference_type, QuantizationSpecs* quant_specs);
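quantization_config.h keeps inference_type on QuantizationSpecs and derives everything else from it: whether full quantization runs (line 134), whether the type is signed (line 151), and its bit width (line 163). A plausible reconstruction of the two switches with stand-in enum values; the exact case lists in the real header may differ:

```cpp
// Stand-ins for the tensorflow::DataType members used by QuantizationSpecs.
enum class DataType { DT_FLOAT, DT_QINT8, DT_QUINT8, DT_QINT16 };

// Reconstruction of IsSignedInferenceType (quantization_config.h:151):
// among the quantized types, only quint8 is unsigned.
bool IsSignedInferenceType(DataType inference_type) {
  switch (inference_type) {
    case DataType::DT_QUINT8:
      return false;
    default:
      return true;
  }
}

// Reconstruction of GetQuantizationTypeWidth (quantization_config.h:163):
// the activation bit width follows the storage type.
int GetQuantizationTypeWidth(DataType inference_type) {
  switch (inference_type) {
    case DataType::DT_QINT8:
    case DataType::DT_QUINT8:
      return 8;
    case DataType::DT_QINT16:
      return 16;
    default:
      return 32;  // assumption: treat float as full width
  }
}
```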
|
D | quantization_config.cc |
    in ParseInputNodeQuantSpecs():
       93:   absl::string_view inference_type,
      122:   if (!inference_type.empty() &&
      123:       !DataType_Parse(std::string(inference_type), &final_type)) {
    in GetInputNodeQuantSpecs():
      134:   tensorflow::DataType inference_type, QuantizationSpecs* quant_specs) {
      135:   quant_specs->inference_type = inference_type;
      141:   if (IsQuantizationType(inference_type)) {
|
/external/tensorflow/tensorflow/lite/testing/ |
D | tflite_model_test.bzl |
     28: inference_type = "float",
     43: inference_type: The data type for inference and output.
     61: inference_type = inference_type,
    101: inference_type):
    108: inference_type: The data type for inference and output.
    113: if inference_type == "float":
    115:   "--inference_type=FLOAT",
    118: elif inference_type == "quantized":
    120:   "--inference_type=QUANTIZED_UINT8",
    124: fail("Invalid inference type (%s). Expected 'float' or 'quantized'" % inference_type)
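The .bzl macro validates the inference_type attribute and turns it into a converter flag: "float" selects --inference_type=FLOAT, "quantized" selects --inference_type=QUANTIZED_UINT8, anything else fails. The same selection logic, sketched in C++ for consistency with the other examples here (the helper name is hypothetical):

```cpp
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical C++ rendering of the branch at tflite_model_test.bzl:113-124:
// only "float" and "quantized" are accepted, each mapping to one
// --inference_type converter flag.
std::vector<std::string> InferenceTypeFlags(const std::string& inference_type) {
  if (inference_type == "float") {
    return {"--inference_type=FLOAT"};
  }
  if (inference_type == "quantized") {
    return {"--inference_type=QUANTIZED_UINT8"};
  }
  throw std::invalid_argument("Invalid inference type (" + inference_type +
                              "). Expected 'float' or 'quantized'");
}
```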
|
/external/tensorflow/tensorflow/compiler/mlir/lite/experimental/tac/transforms/ |
D | get_alternative_subgraph.cc |
    in GetFunctionImplName():
       61:   GetInferenceString(device_inference_type.inference_type));
    in GetAllAlternativeInferenceDeviceType():
       68:   InferenceType inference_type, ArrayRef<std::string> devices) {
       71:   if (inference_type == QUANTIZED_INT8) {
       73:   } else if (inference_type == QUANTIZED_UINT8) {
    127: const InferenceDeviceType& inference_type);
    in GetAlternativeViewForSpec():
      239:   target_device_inference_type.inference_type)));
      246:   if ((current_device_inference_type.inference_type == QUANTIZED_UINT8 ||
      247:        current_device_inference_type.inference_type == QUANTIZED_INT8) &&
      248:       target_device_inference_type.inference_type == FLOAT) {
      264:   target_device_inference_type.inference_type)));
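GetAllAlternativeInferenceDeviceType (line 68) enumerates the (device, inference type) pairs a subgraph could be lowered to, and lines 246-248 show the special case of falling back from a quantized view to a FLOAT one. A speculative reconstruction of that enumeration; the branch bodies at lines 71-73 are elided in the snippet, so the CPU-only treatment of quantized types is an assumption:

```cpp
#include <string>
#include <vector>

enum InferenceType { FLOAT, QUANTIZED_INT8, QUANTIZED_UINT8, HYBRID };

// Matches the member pair seen in targets.h:108.
struct InferenceDeviceType {
  std::string device;
  InferenceType inference_type;
};

// Speculative sketch: a quantized subgraph keeps one quantized alternative
// (assumed CPU-only here), and every registered device gets a FLOAT
// alternative, which is what makes the QUANTIZED -> FLOAT fallback at
// get_alternative_subgraph.cc:246-248 possible.
std::vector<InferenceDeviceType> GetAllAlternatives(
    InferenceType inference_type, const std::vector<std::string>& devices) {
  std::vector<InferenceDeviceType> alternatives;
  if (inference_type == QUANTIZED_INT8 || inference_type == QUANTIZED_UINT8) {
    alternatives.push_back({"CPU", inference_type});
  }
  for (const std::string& device : devices) {
    alternatives.push_back({device, FLOAT});
  }
  return alternatives;
}
```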
|
/external/tensorflow/tensorflow/compiler/mlir/lite/experimental/tac/ |
D | README.md |
    181: …%arg1) {tac.device = "GPU", fused_activation_function = "RELU6", tac.inference_type = "FLOAT"} : (…
    182: …%arg2) {tac.device = "GPU", fused_activation_function = "RELU6", tac.inference_type = "FLOAT"} : (…
    183: …%arg3) {tac.device = "GPU", fused_activation_function = "RELU6", tac.inference_type = "FLOAT"} : (…
    184: …%3 = "tfl.pack"(%1, %2) {tac.device = "CPU", tac.inference_type = "FLOAT", axis = 0 : i32, values_…
    193: … tensor<1xf32>) -> tensor<1xf32> attributes {tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    194: … %arg1 {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : t…
    202: …ensor<1xf32>) -> tensor<2x1xf32> attributes {tac.device = "CPU", tac.inference_type = "FLOAT", tac…
    203: …%0 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, tac.device = "CPU", tac.inference_type = "FLOAT", v…
    207: … tensor<1xf32>) -> tensor<1xf32> attributes {tac.device = "GPU", tac.inference_type = "FLOAT", tac…
    208: … %arg1 {fused_activation_function = "RELU6", tac.device = "GPU", tac.inference_type = "FLOAT"} : t…
    [all …]
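The README's IR dumps show each op and each raised function carrying tac.device and tac.inference_type string attributes. A sketch of how such annotations are attached using the standard MLIR attribute API; the helper itself is hypothetical, but the attribute names match the dumps:

```cpp
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Operation.h"

// Hypothetical helper: tag an op with the target annotations seen in the
// TAC IR dumps, e.g. AnnotateForTarget(op, "GPU", "FLOAT").
void AnnotateForTarget(mlir::Operation* op, llvm::StringRef device,
                       llvm::StringRef inference_type) {
  mlir::MLIRContext* ctx = op->getContext();
  op->setAttr("tac.device", mlir::StringAttr::get(ctx, device));
  op->setAttr("tac.inference_type",
              mlir::StringAttr::get(ctx, inference_type));
}
```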
|
/external/tensorflow/tensorflow/compiler/mlir/lite/experimental/tac/hardwares/ |
D | gpu_hardware.cc |
    in IsOpSupported() (the same guard repeated across four overloads):
       86:   InferenceType inference_type = GetInferenceType(op);
       87:   if (inference_type != FLOAT) {
      108:   InferenceType inference_type = GetInferenceType(op);
      109:   if (inference_type != FLOAT) {
      130:   InferenceType inference_type = GetInferenceType(op);
      131:   if (inference_type != FLOAT) {
      154:   InferenceType inference_type = GetInferenceType(op);
      155:   if (inference_type != FLOAT) {
|
D | cpu_hardware.cc |
    in InferenceTypeEfficiency():
       40: inline float InferenceTypeEfficiency(InferenceType inference_type) {
       41:   if (inference_type == QUANTIZED_INT8 || inference_type == QUANTIZED_UINT8) {
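Read together, the two hardware backends show the division of labor: every GPU IsOpSupported overload opens with the same FLOAT-only guard, while the CPU cost model scales quantized inference by an efficiency factor. A condensed sketch; only the control flow is taken from the snippets, and the 2.0 factor is an illustrative stand-in, not the constant in cpu_hardware.cc:

```cpp
enum InferenceType { FLOAT, QUANTIZED_INT8, QUANTIZED_UINT8, HYBRID };

// The guard that opens each GPU IsOpSupported overload
// (gpu_hardware.cc:86-87, :108-109, :130-131, :154-155).
bool GpuSupportsInferenceType(InferenceType inference_type) {
  if (inference_type != FLOAT) {
    return false;
  }
  return true;  // op-specific checks follow in the real overloads
}

// cpu_hardware.cc:40-41: quantized inference is cheaper on CPU, so the cost
// model scales its efficiency. 2.0 is an illustrative stand-in value.
inline float InferenceTypeEfficiency(InferenceType inference_type) {
  if (inference_type == QUANTIZED_INT8 || inference_type == QUANTIZED_UINT8) {
    return 2.0f;
  }
  return 1.0f;
}
```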
|
/external/tensorflow/tensorflow/compiler/mlir/lite/python/ |
D | tf_tfl_flatbuffer_helpers.cc |
    in PopulateQuantizationSpecs():
      216:   tensorflow::DataType inference_type =
      217:       ConvertIODataTypeToDataType(toco_flags.inference_type());
      221:   inference_type = quant_specs->inference_input_type;
      243:   if (inference_type == DT_QINT8 || inference_type == DT_QUINT8) {
      247:   flag.std_value(), inference_type));
      258:   inference_type, quant_specs)) {
      270:   quant_specs->inference_type = tensorflow::DT_HALF;
      273:   quant_specs->inference_type = tensorflow::DT_QINT8;
|
/external/tensorflow/tensorflow/compiler/mlir/lite/transforms/ |
D | prepare_quantize_dynamic_range.cc |
    in PrepareDynamicRangeQuantizePass():
       64:   quant_specs_.inference_type = tensorflow::DT_QINT8;
    in matchAndRewrite():
      119:   if (quant_specs_.inference_type == tensorflow::DT_QINT8 &&
      123:   if (quant_specs_.inference_type == tensorflow::DT_HALF &&
    in getQuantizableOps():
      296:   if (quant_specs_.inference_type == tensorflow::DT_QINT8 &&
      299:   } else if (quant_specs_.inference_type == tensorflow::DT_HALF) {
    in quantizeOps():
      313:   if (quant_specs_.inference_type == tensorflow::DT_QINT8) {
      315:   } else if (quant_specs_.inference_type == tensorflow::DT_HALF) {
    in runOnOperation():
      413:   quant_specs_.inference_type = tensorflow::DT_HALF;
|
/external/tensorflow/tensorflow/compiler/mlir/lite/experimental/tac/tests/e2e/ |
D | simple-graph.mlir |
    13: // CHECK: [[VAL_0:%.*]] = "tfl.reshape"(%1, %[[CST]]) {tac.device = "GPU", tac.inference_type = "…
    14: // CHECK: [[VAL_1:%.*]] = "tfl.reshape"(%2, %[[CST]]) {tac.device = "GPU", tac.inference_type = "…
    15: …3 : i32, fused_activation_function = "NONE", tac.device = "GPU", tac.inference_type = "FLOAT"} : (…
|
/external/tensorflow/tensorflow/lite/python/ |
D | wrap_toco.py |
    37: fully_quantize, inference_type,
    44: input_data_str, disable_per_channel, fully_quantize, inference_type,
|
D | tflite_convert.py |
    166: if flags.inference_type:
    167:   converter.inference_type = _parse_inference_type(flags.inference_type,
    182: if converter.inference_type == dtypes.float32:
    233: if converter.inference_type != dtypes.float32:
    236:   converter.inference_type = dtypes.float32
|
D | convert.py |
     58: return ((conversion_flags.inference_type in quantized_inference_types or
    202: inference_type=_types_pb2.QUANTIZED_INT8,
    236: input_data_str, disable_per_channel, fully_quantize, inference_type,
    471: def build_conversion_flags(inference_type=dtypes.float32,
    594: conversion_flags.inference_type = convert_inference_tf_type_to_tflite_type(
    595:     inference_type, usage="inference_type flag")
    601: conversion_flags.inference_input_type = conversion_flags.inference_type
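convert.py's build_conversion_flags defaults inference_type to float32, converts it to its TFLite counterpart (lines 594-595), and, when no explicit input type is given, lets inference_input_type inherit from it (line 601). That defaulting rule in miniature, sketched in C++ with stand-in types:

```cpp
#include <optional>

// Stand-in for the TFLite tensor types involved.
enum class TfLiteType { FLOAT32, INT8, UINT8, INT16 };

struct ConversionFlags {
  TfLiteType inference_type = TfLiteType::FLOAT32;
  TfLiteType inference_input_type = TfLiteType::FLOAT32;
};

// Mirrors the defaulting at convert.py:594-601: the inference type is always
// set, and the input type falls back to it when unspecified.
ConversionFlags BuildConversionFlags(
    TfLiteType inference_type = TfLiteType::FLOAT32,
    std::optional<TfLiteType> inference_input_type = std::nullopt) {
  ConversionFlags flags;
  flags.inference_type = inference_type;
  flags.inference_input_type = inference_input_type.value_or(inference_type);
  return flags;
}
```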
|
/external/tensorflow/tensorflow/compiler/mlir/lite/ |
D | tf_tfl_translate.cc |
    in main():
      237:   input_arrays, min_values, max_values, inference_type, &quant_specs)) {
      244:   quant_specs.inference_type = tensorflow::DT_QINT8;
      246:   quant_specs.inference_type = tensorflow::DT_HALF;
      253:   quant_specs.inference_input_type = quant_specs.inference_type;
|
/external/tensorflow/tensorflow/lite/toco/ |
D | toco_cmdline_flags.cc |
    in ParseTocoFlagsFromCommandLineFlags():
       81:   Flag("inference_type", parsed_flags.inference_type.bind(),
       82:        parsed_flags.inference_type.default_value(),
    in ReadTocoFlagsFromCommandLineFlags():
      280:   PARSE_TOCO_FLAG(IODataType, inference_type, FlagRequirement::kNone);
      353:   if (toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) {
|
D | toco_tooling.cc |
    in SetFinalDataTypeOnInputs():
      169:   type = ConvertIODataTypeToArrayDataType(toco_flags.inference_type());
    in TransformWithStatus():
      248:   const IODataType inference_type = toco_flags.inference_type();
      252:   (inference_type == QUANTIZED_UINT8 || inference_type == QUANTIZED_INT16);
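Lines 248-252 distill TOCO's decision to quantize: its quantization transformations run only when the requested inference type is one of the two quantized formats. As a tiny predicate (enum values copied from the snippet; the full IODataType has more members):

```cpp
// Only the members named in the snippet; the real IODataType is larger.
enum IODataType { FLOAT, QUANTIZED_UINT8, QUANTIZED_INT16 };

// toco_tooling.cc:248-252 in one predicate: quantization applies only to
// the two quantized inference types.
bool RequiresQuantization(IODataType inference_type) {
  return inference_type == QUANTIZED_UINT8 ||
         inference_type == QUANTIZED_INT16;
}
```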
|
/external/tensorflow/tensorflow/compiler/mlir/quantization/tensorflow/passes/ |
D | prepare_quantize_drq.cc |
    in PrepareQuantizeDRQPass():
       62:   quant_specs_.inference_type = tensorflow::DT_QINT8;
    in getQuantizableOps():
      135:   if (quant_specs_.inference_type == tensorflow::DT_QINT8 &&
    in quantizeOps():
      203:   if (quant_specs_.inference_type == tensorflow::DT_QINT8) {
|