/external/tensorflow/tensorflow/lite/toco/
D | toco_convert_test.cc
    25  ParsedTocoFlags toco_flags;  in TEST() local
    27  EXPECT_DEATH(Convert(toco_flags, model_flags).ok(),  in TEST()
    32  TocoFlags toco_flags;  in TEST() local
    38  EXPECT_DEATH(Convert(input, toco_flags, model_flags, &output).ok(),  in TEST()
    43  TocoFlags toco_flags;  in TEST() local
    46  toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);  in TEST()
    50  EXPECT_DEATH(Convert(input, toco_flags, model_flags, &output).ok(),  in TEST()
    56  TocoFlags toco_flags;  in TEST() local
    59  toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);  in TEST()
    64  EXPECT_DEATH(Convert(input, toco_flags, model_flags, &output).ok(),  in TEST()
    [all …]
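The toco_convert_test.cc matches are all EXPECT_DEATH checks that Convert() aborts when the flags describe no usable input. Below is a minimal sketch of one such death test, assuming the four-argument Convert(graph_def_contents, toco_flags, model_flags, output) overload from toco_convert.h; the test name and the empty death-message regex are placeholders, since the listing does not show the matched text.

```cpp
#include <string>

#include <gtest/gtest.h>
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_convert.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"

namespace toco {
namespace {

TEST(TocoConvertSketchTest, EmptyGraphDefDies) {
  TocoFlags toco_flags;
  ModelFlags model_flags;
  toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);

  std::string input;   // deliberately empty GraphDef contents
  std::string output;
  // "" matches any death message; the real tests assert on specific text.
  EXPECT_DEATH(Convert(input, toco_flags, model_flags, &output).ok(), "");
}

}  // namespace
}  // namespace toco
```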
D | toco_tooling.cc
    159  void SetFinalDataTypeOnInputs(const TocoFlags& toco_flags, Model* model) {  in SetFinalDataTypeOnInputs() argument
    160  const FileFormat output_format = toco_flags.output_format();  in SetFinalDataTypeOnInputs()
    165  } else if (toco_flags.has_inference_input_type()) {  in SetFinalDataTypeOnInputs()
    166  type = ConvertIODataTypeToArrayDataType(toco_flags.inference_input_type());  in SetFinalDataTypeOnInputs()
    167  } else if (toco_flags.has_inference_type()) {  in SetFinalDataTypeOnInputs()
    168  type = ConvertIODataTypeToArrayDataType(toco_flags.inference_type());  in SetFinalDataTypeOnInputs()
    210  std::unique_ptr<Model> Import(const TocoFlags& toco_flags,  in Import() argument
    214  switch (toco_flags.input_format()) {  in Import()
    218  toco_flags.has_drop_control_dependency()  in Import()
    219  ? toco_flags.drop_control_dependency()  in Import()
    [all …]
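The first group of matches (lines 159-168) shows the precedence SetFinalDataTypeOnInputs() applies: an explicit inference_input_type wins over inference_type. Here is a condensed sketch of just that selection, using only the proto accessors visible above; the function name and the UNKNOWN fallback are illustrative, and the elided branches (such as the output_format special case) are not reproduced.

```cpp
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/types.pb.h"

namespace toco {

// Mirrors the if/else-if chain visible in toco_tooling.cc above:
// prefer inference_input_type, then fall back to inference_type.
IODataType FinalInputIODataType(const TocoFlags& toco_flags) {
  if (toco_flags.has_inference_input_type()) {
    return toco_flags.inference_input_type();
  }
  if (toco_flags.has_inference_type()) {
    return toco_flags.inference_type();
  }
  return IODataType::IO_DATA_TYPE_UNKNOWN;  // assumed default when neither flag is set
}

}  // namespace toco
```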
D | toco_convert.cc
    57   TocoFlags* toco_flags, ModelFlags* model_flags,  in ReadInputData() argument
    75   const TocoFlags& toco_flags,  in Convert() argument
    80   Import(toco_flags, model_flags, graph_def_contents);  in Convert()
    81   TF_RETURN_IF_ERROR(TransformWithStatus(toco_flags, model.get()));  in Convert()
    82   TF_RETURN_IF_ERROR(Export(toco_flags, *model, toco_flags.allow_custom_ops(),  in Convert()
    95   TocoFlags toco_flags;  in Convert() local
    96   ReadTocoFlagsFromCommandLineFlags(parsed_toco_flags, &toco_flags);  in Convert()
    99   ReadInputData(parsed_toco_flags, parsed_model_flags, &toco_flags,  in Convert()
    104  TF_RETURN_IF_ERROR(Convert(graph_def_contents, toco_flags, model_flags,  in Convert()
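Lines 80-82 above contain the whole conversion pipeline: Import the GraphDef into a Model, run the flag-driven transformations, then Export. The following is a condensed sketch of that flow, assuming the Import / TransformWithStatus / Export declarations from toco_tooling.h listed below; the wrapper name ConvertSketch is illustrative and the include paths are approximate.

```cpp
#include <memory>
#include <string>

#include "tensorflow/core/platform/errors.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/toco_tooling.h"

namespace toco {

// Import -> Transform -> Export, as seen at toco_convert.cc:80-82.
tensorflow::Status ConvertSketch(const std::string& graph_def_contents,
                                 const TocoFlags& toco_flags,
                                 const ModelFlags& model_flags,
                                 std::string* output_file_contents) {
  std::unique_ptr<Model> model =
      Import(toco_flags, model_flags, graph_def_contents);
  TF_RETURN_IF_ERROR(TransformWithStatus(toco_flags, model.get()));
  return Export(toco_flags, *model, toco_flags.allow_custom_ops(),
                output_file_contents);
}

}  // namespace toco
```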
D | toco_tooling.h
    28  std::unique_ptr<Model> Import(const TocoFlags& toco_flags,
    34  tensorflow::Status TransformWithStatus(const TocoFlags& toco_flags,
    36  inline void Transform(const TocoFlags& toco_flags, Model* model) {  in Transform() argument
    37  auto s = TransformWithStatus(toco_flags, model);  in Transform()
    44  tensorflow::Status Export(const TocoFlags& toco_flags, const Model& model,
    48  inline void Export(const TocoFlags& toco_flags, const Model& model,  in Export() argument
    50  auto status = Export(toco_flags, model, true, output_file_contents);  in Export()
D | toco_cmdline_flags.cc
    240  TocoFlags* toco_flags) {  in ReadTocoFlagsFromCommandLineFlags() argument
    249  toco_flags->set_##name(flag_value.value()); \  in ReadTocoFlagsFromCommandLineFlags()
    262  toco_flags->set_##name(x); \  in ReadTocoFlagsFromCommandLineFlags()
    310  toco_flags->set_inference_input_type(input_type);  in ReadTocoFlagsFromCommandLineFlags()
    328  toco_flags->set_inference_input_type(input_type);  in ReadTocoFlagsFromCommandLineFlags()
    334  toco_flags->set_post_training_quantize(  in ReadTocoFlagsFromCommandLineFlags()
    338  if (toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) {  in ReadTocoFlagsFromCommandLineFlags()
    342  toco_flags->set_inference_type(IODataType::FLOAT);  in ReadTocoFlagsFromCommandLineFlags()
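The last three matches (lines 334-342) show a flag fix-up applied while reading the command line: when post-training quantization is requested and inference_type is QUANTIZED_UINT8, the inference type is reset to FLOAT. A hedged condensation of that logic follows; the exact gating condition and the warning the real code logs are not visible in the listing.

```cpp
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/types.pb.h"

namespace toco {

// Sketch of the fix-up at toco_cmdline_flags.cc:334-342: post-training
// quantization operates on a float model, so QUANTIZED_UINT8 is demoted.
void ApplyPostTrainingQuantizeSketch(bool post_training_quantize,
                                     TocoFlags* toco_flags) {
  toco_flags->set_post_training_quantize(post_training_quantize);
  if (post_training_quantize &&
      toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) {
    toco_flags->set_inference_type(IODataType::FLOAT);
  }
}

}  // namespace toco
```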
D | toco_cmdline_flags.h
    32  TocoFlags* toco_flags);
D | toco_convert.h
    26  const TocoFlags& toco_flags,
D | BUILD
    27  srcs = ["toco_flags.proto"],
    52  "toco_flags.proto",
/external/tensorflow/tensorflow/compiler/mlir/lite/python/
D | graphdef_to_tfl_flatbuffer.cc
    106  const toco::TocoFlags& toco_flags) {  in WarningUnusedFlags() argument
    107  if (toco_flags.output_format()) {  in WarningUnusedFlags()
    110  if (toco_flags.drop_control_dependency()) {  in WarningUnusedFlags()
    113  if (toco_flags.reorder_across_fake_quant()) {  in WarningUnusedFlags()
    119  if (toco_flags.dump_graphviz_include_video()) {  in WarningUnusedFlags()
    167  const toco::TocoFlags& toco_flags,  in ConvertGraphDefToTFLiteFlatBuffer() argument
    182  ConvertIODataTypeToDataType(toco_flags.inference_input_type());  in ConvertGraphDefToTFLiteFlatBuffer()
    184  ConvertIODataTypeToDataType(toco_flags.inference_type());  in ConvertGraphDefToTFLiteFlatBuffer()
    224  if (toco_flags.post_training_quantize()) {  in ConvertGraphDefToTFLiteFlatBuffer()
    226  if (toco_flags.quantize_to_float16()) {  in ConvertGraphDefToTFLiteFlatBuffer()
    [all …]
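WarningUnusedFlags() (lines 106-119) is how the MLIR-based converter surfaces legacy TOCO flags it does not honor. A reduced sketch of that pattern is below; the warning strings are illustrative rather than the ones the real function logs.

```cpp
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"

namespace tensorflow {

// Warn (but do not fail) when flags the MLIR converter ignores are set.
void WarningUnusedFlagsSketch(const toco::TocoFlags& toco_flags) {
  if (toco_flags.output_format()) {
    LOG(WARNING) << "Ignored output_format.";
  }
  if (toco_flags.drop_control_dependency()) {
    LOG(WARNING) << "Ignored drop_control_dependency.";
  }
  if (toco_flags.reorder_across_fake_quant()) {
    LOG(WARNING) << "Ignored reorder_across_fake_quant.";
  }
  if (toco_flags.dump_graphviz_include_video()) {
    LOG(WARNING) << "Ignored dump_graphviz_include_video.";
  }
}

}  // namespace tensorflow
```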
D | graphdef_to_tfl_flatbuffer.h
    30  const toco::TocoFlags& toco_flags,
/external/tensorflow/tensorflow/lite/toco/python/
D | toco_python_api.cc
    41   toco::TocoFlags* toco_flags,  in PopulateConversionLogHelper() argument
    47   dump_options->dump_graphviz = toco_flags->conversion_summary_dir();  in PopulateConversionLogHelper()
    52   toco::Import(*toco_flags, model_flags, input_contents_txt);  in PopulateConversionLogHelper()
    56   std::ofstream osstream_before(toco_flags->conversion_summary_dir() +  in PopulateConversionLogHelper()
    64   toco_flags->set_input_format(toco::FileFormat::TFLITE);  in PopulateConversionLogHelper()
    66   toco::Import(*toco_flags, model_flags, output_file_contents_txt);  in PopulateConversionLogHelper()
    71   std::ofstream ostream_after(toco_flags->conversion_summary_dir() +  in PopulateConversionLogHelper()
    125  toco::TocoFlags toco_flags;  in TocoConvert() local
    126  if (!toco_flags.ParseFromString(toco_flags_proto_txt)) {  in TocoConvert()
    154  if (toco_flags.has_dump_graphviz_dir()) {  in TocoConvert()
    [all …]
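TocoConvert() (lines 125-126) receives its configuration as serialized protos from the Python layer and re-parses them with ParseFromString(); toco_from_protos_test.py below builds and serializes the same TocoFlags message from Python. A small sketch of that hand-off on the C++ side; the helper name and the model-flags argument are illustrative.

```cpp
#include <string>

#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"

// Parse the serialized flag protos handed over from Python, as TocoConvert()
// does before running the conversion; returns false on a malformed proto.
bool ParseFlagProtosSketch(const std::string& toco_flags_proto_txt,
                           const std::string& model_flags_proto_txt,
                           toco::TocoFlags* toco_flags,
                           toco::ModelFlags* model_flags) {
  return toco_flags->ParseFromString(toco_flags_proto_txt) &&
         model_flags->ParseFromString(model_flags_proto_txt);
}
```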
D | toco_from_protos_test.py
    48  toco_flags = toco_flags_pb2.TocoFlags()
    49  toco_flags.input_format = toco_flags_pb2.TENSORFLOW_GRAPHDEF
    50  toco_flags.output_format = toco_flags_pb2.TFLITE
    51  toco_flags.inference_input_type = types_pb2.FLOAT
    52  toco_flags.inference_type = types_pb2.FLOAT
    53  toco_flags.allow_custom_ops = True
    65  fp_toco.write(toco_flags.SerializeToString())
/external/tensorflow/tensorflow/lite/python/
D | convert.py
    49   def _requires_input_stats(toco_flags):  argument
    50   return ((toco_flags.inference_type in _quantized_inference_types or
    51   toco_flags.inference_input_type in _quantized_inference_types) and
    52   not toco_flags.post_training_quantize)
    437  model_flags, toco_flags, _ = build_toco_convert_protos(
    442  if _requires_input_stats(toco_flags):
    457  toco_flags.SerializeToString(),
    488  model_flags, toco_flags, debug_info = build_toco_convert_protos(
    493  toco_flags.SerializeToString(),