/external/tensorflow/tensorflow/lite/toco/ |
toco_convert_test.cc
    25  ParsedTocoFlags toco_flags;  in TEST() local
    27  EXPECT_DEATH(Convert(toco_flags, model_flags).ok(),  in TEST()
    32  TocoFlags toco_flags;  in TEST() local
    38  EXPECT_DEATH(Convert(input, toco_flags, model_flags, &output).ok(),  in TEST()
    43  TocoFlags toco_flags;  in TEST() local
    46  toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);  in TEST()
    50  EXPECT_DEATH(Convert(input, toco_flags, model_flags, &output).ok(),  in TEST()
    56  TocoFlags toco_flags;  in TEST() local
    59  toco_flags.set_input_format(TENSORFLOW_GRAPHDEF);  in TEST()
    64  EXPECT_DEATH(Convert(input, toco_flags, model_flags, &output).ok(),  in TEST()
    [all …]
|
toco_tooling.cc
    154  void SetFinalDataTypeOnInputs(const TocoFlags& toco_flags, Model* model) {  in SetFinalDataTypeOnInputs() argument
    155  const FileFormat output_format = toco_flags.output_format();  in SetFinalDataTypeOnInputs()
    160  } else if (toco_flags.has_inference_input_type()) {  in SetFinalDataTypeOnInputs()
    161  type = ConvertIODataTypeToArrayDataType(toco_flags.inference_input_type());  in SetFinalDataTypeOnInputs()
    162  } else if (toco_flags.has_inference_type()) {  in SetFinalDataTypeOnInputs()
    163  type = ConvertIODataTypeToArrayDataType(toco_flags.inference_type());  in SetFinalDataTypeOnInputs()
    205  std::unique_ptr<Model> Import(const TocoFlags& toco_flags,  in Import() argument
    209  switch (toco_flags.input_format()) {  in Import()
    213  toco_flags.has_drop_control_dependency()  in Import()
    214  ? toco_flags.drop_control_dependency()  in Import()
    [all …]
|
toco_convert.cc
    57  TocoFlags* toco_flags, ModelFlags* model_flags,  in ReadInputData() argument
    75  const TocoFlags& toco_flags,  in Convert() argument
    79  Import(toco_flags, model_flags, graph_def_contents);  in Convert()
    80  TF_RETURN_IF_ERROR(TransformWithStatus(toco_flags, model.get()));  in Convert()
    81  return Export(toco_flags, *model, toco_flags.allow_custom_ops(),  in Convert()
    90  TocoFlags toco_flags;  in Convert() local
    91  ReadTocoFlagsFromCommandLineFlags(parsed_toco_flags, &toco_flags);  in Convert()
    94  ReadInputData(parsed_toco_flags, parsed_model_flags, &toco_flags,  in Convert()
    99  TF_RETURN_IF_ERROR(Convert(graph_def_contents, toco_flags, model_flags,  in Convert()
|
toco_tooling.h
    28  std::unique_ptr<Model> Import(const TocoFlags& toco_flags,
    34  tensorflow::Status TransformWithStatus(const TocoFlags& toco_flags,
    36  inline void Transform(const TocoFlags& toco_flags, Model* model) {  in Transform() argument
    37  auto s = TransformWithStatus(toco_flags, model);  in Transform()
    44  tensorflow::Status Export(const TocoFlags& toco_flags, const Model& model,
    48  inline void Export(const TocoFlags& toco_flags, const Model& model,  in Export() argument
    50  auto status = Export(toco_flags, model, true, output_file_contents);  in Export()
|
toco_cmdline_flags.cc
    224  TocoFlags* toco_flags) {  in ReadTocoFlagsFromCommandLineFlags() argument
    233  toco_flags->set_##name(flag_value.value()); \  in ReadTocoFlagsFromCommandLineFlags()
    246  toco_flags->set_##name(x); \  in ReadTocoFlagsFromCommandLineFlags()
    293  toco_flags->set_inference_input_type(input_type);  in ReadTocoFlagsFromCommandLineFlags()
    311  toco_flags->set_inference_input_type(input_type);  in ReadTocoFlagsFromCommandLineFlags()
    317  toco_flags->set_post_training_quantize(  in ReadTocoFlagsFromCommandLineFlags()
    321  if (toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) {  in ReadTocoFlagsFromCommandLineFlags()
    325  toco_flags->set_inference_type(IODataType::FLOAT);  in ReadTocoFlagsFromCommandLineFlags()
|
toco_cmdline_flags.h
    32  TocoFlags* toco_flags);
|
toco_convert.h
    26  const TocoFlags& toco_flags,
|
BUILD
    25  srcs = ["toco_flags.proto"],
    48  "toco_flags.proto",
|
/external/tensorflow/tensorflow/lite/toco/python/ |
toco_python_api.cc
    64  toco::TocoFlags toco_flags;  in TocoConvert() local
    65  if (!toco_flags.ParseFromString(toco_flags_proto_txt)) {  in TocoConvert()
    70  if (toco_flags.has_dump_graphviz_dir()) {  in TocoConvert()
    71  dump_options.dump_graphviz = toco_flags.dump_graphviz_dir();  in TocoConvert()
    73  if (toco_flags.has_dump_graphviz_include_video()) {  in TocoConvert()
    74  dump_options.dump_graphviz_video = toco_flags.dump_graphviz_include_video();  in TocoConvert()
    79  toco::Import(toco_flags, model_flags, input_contents_txt);  in TocoConvert()
    80  toco::Transform(toco_flags, model.get());  in TocoConvert()
    82  auto status = Export(toco_flags, *model, toco_flags.allow_custom_ops(),  in TocoConvert()
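TocoConvert() above parses a serialized TocoFlags out of toco_flags_proto_txt, copies the optional Graphviz fields into its dump options, and then runs Import/Transform/Export. A minimal sketch of producing such a serialized string from Python, assuming the generated module for toco_flags.proto is importable as tensorflow.lite.toco.toco_flags_pb2 (the dump directory is a placeholder):

    # Sketch: build the serialized TocoFlags bytes that TocoConvert() parses
    # with ParseFromString(). Module path assumed from toco_flags.proto's location.
    from tensorflow.lite.toco import toco_flags_pb2

    flags = toco_flags_pb2.TocoFlags()
    flags.input_format = toco_flags_pb2.TENSORFLOW_GRAPHDEF
    flags.output_format = toco_flags_pb2.TFLITE
    # Optional debug fields that TocoConvert() copies into its dump options.
    flags.dump_graphviz_dir = "/tmp/toco_graphviz"    # placeholder directory
    flags.dump_graphviz_include_video = True

    toco_flags_proto_txt = flags.SerializeToString()  # bytes handed to TocoConvert()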
|
toco_from_protos_test.py
    48  toco_flags = toco_flags_pb2.TocoFlags()
    49  toco_flags.input_format = toco_flags_pb2.TENSORFLOW_GRAPHDEF
    50  toco_flags.output_format = toco_flags_pb2.TFLITE
    51  toco_flags.inference_input_type = types_pb2.FLOAT
    52  toco_flags.inference_type = types_pb2.FLOAT
    53  toco_flags.allow_custom_ops = True
    65  fp_toco.write(toco_flags.SerializeToString())
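The test above populates a TocoFlags message field by field and writes the serialized bytes out for the toco_from_protos tool. A condensed sketch of the same setup, assuming model_flags_pb2 and types_pb2 are generated next to toco_flags_pb2 in the toco directory; the empty ModelFlags and the temp-file handling are illustrative rather than the test's exact code:

    import tempfile

    from tensorflow.lite.toco import model_flags_pb2
    from tensorflow.lite.toco import toco_flags_pb2
    from tensorflow.lite.toco import types_pb2

    toco_flags = toco_flags_pb2.TocoFlags()
    toco_flags.input_format = toco_flags_pb2.TENSORFLOW_GRAPHDEF
    toco_flags.output_format = toco_flags_pb2.TFLITE
    toco_flags.inference_input_type = types_pb2.FLOAT
    toco_flags.inference_type = types_pb2.FLOAT
    toco_flags.allow_custom_ops = True

    model_flags = model_flags_pb2.ModelFlags()  # input/output arrays omitted here

    # Serialize both protos to files, as the test does before running the tool.
    with tempfile.NamedTemporaryFile(suffix=".tocoflags", delete=False) as fp_toco:
      fp_toco.write(toco_flags.SerializeToString())
    with tempfile.NamedTemporaryFile(suffix=".modelflags", delete=False) as fp_model:
      fp_model.write(model_flags.SerializeToString())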
|
/external/tensorflow/tensorflow/lite/python/ |
convert.py
    401  model_flags, toco_flags = build_toco_convert_protos(
    406  if toco_flags.inference_input_type == _types_pb2.QUANTIZED_UINT8:
    420  toco_flags.SerializeToString(),
    448  model_flags, toco_flags = build_toco_convert_protos(
    451  toco_flags.SerializeToString(),
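convert.py above builds the ModelFlags/TocoFlags pair with build_toco_convert_protos() and hands toco_flags.SerializeToString() to the conversion backend, with a special case when inference_input_type is QUANTIZED_UINT8. A hedged sketch of the public path that drives this code, assuming a TensorFlow build where the TF 1.x-style TFLiteConverter.from_session API is available; which backend ultimately runs the serialized flags depends on the TensorFlow version, and the shapes, tensor names, and output path below are placeholders:

    # Sketch of the public converter path that ends up in convert.py's code
    # shown above (build_toco_convert_protos() plus SerializeToString()).
    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()

    with tf.Graph().as_default(), tf.Session() as sess:
      inp = tf.placeholder(tf.float32, shape=[1, 4], name="input")
      out = tf.nn.relu(inp, name="output")

      converter = tf.lite.TFLiteConverter.from_session(sess, [inp], [out])
      converter.allow_custom_ops = True  # carried into TocoFlags.allow_custom_ops
      tflite_model = converter.convert()

    with open("/tmp/model.tflite", "wb") as f:  # placeholder output path
      f.write(tflite_model)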
|