
Searched refs: input_model (Results 1 – 14 of 14) sorted by relevance

/external/tensorflow/tensorflow/lite/toco/tflite/
import.cc:30 void LoadTensorsTable(const ::tflite::Model& input_model, in LoadTensorsTable() argument
33 auto tensors = (*input_model.subgraphs())[0]->tensors(); in LoadTensorsTable()
40 void LoadOperatorsTable(const ::tflite::Model& input_model, in LoadOperatorsTable() argument
42 auto opcodes = input_model.operator_codes(); in LoadOperatorsTable()
55 void ImportTensors(const ::tflite::Model& input_model, Model* model) { in ImportTensors() argument
56 auto tensors = (*input_model.subgraphs())[0]->tensors(); in ImportTensors()
57 auto* buffers = input_model.buffers(); in ImportTensors()
101 const ::tflite::Model& input_model, in ImportOperators()
106 auto ops = (*input_model.subgraphs())[0]->operators(); in ImportOperators()
169 const ::tflite::Model& input_model, in ImportIOTensors()
[all …]
import.h:39 void LoadTensorsTable(const ::tflite::Model &input_model,
41 void LoadOperatorsTable(const ::tflite::Model &input_model,
export.cc:655 const ::tflite::Model* input_model = ::tflite::GetModel(buffer); in Export() local
665 if (::tflite::optimize::QuantizeWeights(&q_builder, input_model, in Export()
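
The export.cc hit above shows the exporter reinterpreting its serialized buffer as a ::tflite::Model and handing it to the weight quantizer. A minimal sketch of that pattern, assuming an already-serialized model and an illustrative weights_min_num_elements threshold of 1024:

    #include <string>

    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/lite/schema/schema_generated.h"
    #include "tensorflow/lite/tools/optimize/quantize_weights.h"

    // Sketch only: `serialized_model` stands in for the buffer produced by Export().
    TfLiteStatus QuantizeExportedModel(const std::string& serialized_model,
                                       std::string* quantized_out) {
      // Reinterpret the raw bytes as the generated flatbuffer root type.
      const ::tflite::Model* input_model =
          ::tflite::GetModel(serialized_model.data());

      // Rebuild the model with large weight tensors quantized; 1024 is an assumed
      // threshold for the minimum number of elements a tensor must have.
      flatbuffers::FlatBufferBuilder q_builder;
      TfLiteStatus status = ::tflite::optimize::QuantizeWeights(
          &q_builder, input_model, /*weights_min_num_elements=*/1024);
      if (status != kTfLiteOk) return status;

      quantized_out->assign(
          reinterpret_cast<const char*>(q_builder.GetBufferPointer()),
          q_builder.GetSize());
      return kTfLiteOk;
    }
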
/external/tensorflow/tensorflow/lite/tools/
gen_op_registration_main.cc:38 void ParseFlagAndInit(int* argc, char** argv, string* input_model, in ParseFlagAndInit() argument
42 Flag(kInputModelFlag, input_model, "path to the tflite model"), in ParseFlagAndInit()
135 void AddOpsFromModel(const string& input_model, in AddOpsFromModel() argument
138 std::ifstream fin(input_model); in AddOpsFromModel()
150 string input_model; in main() local
155 ParseFlagAndInit(&argc, argv, &input_model, &output_registration, in main()
160 if (!input_model.empty()) { in main()
161 AddOpsFromModel(input_model, &builtin_ops, &custom_ops); in main()
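
For context, AddOpsFromModel() reads the flag-supplied .tflite file and walks its operator codes to decide which kernels need registration. A rough sketch of that idea, using hypothetical helper and container names (CollectOps, std::set) rather than the tool's actual internals:

    #include <fstream>
    #include <set>
    #include <sstream>
    #include <string>

    #include "tensorflow/lite/schema/schema_generated.h"

    // Illustrative sketch: gather builtin and custom op names from a .tflite file,
    // roughly what AddOpsFromModel() does in gen_op_registration_main.cc.
    void CollectOps(const std::string& input_model,
                    std::set<std::string>* builtin_ops,
                    std::set<std::string>* custom_ops) {
      std::ifstream fin(input_model, std::ios::binary);
      std::stringstream content;
      content << fin.rdbuf();
      const std::string buffer = content.str();

      const ::tflite::Model* model = ::tflite::GetModel(buffer.data());
      if (model->operator_codes() == nullptr) return;
      for (const auto* opcode : *model->operator_codes()) {
        if (opcode->builtin_code() == ::tflite::BuiltinOperator_CUSTOM) {
          if (opcode->custom_code() != nullptr) {
            custom_ops->insert(opcode->custom_code()->str());
          }
        } else {
          builtin_ops->insert(
              ::tflite::EnumNameBuiltinOperator(opcode->builtin_code()));
        }
      }
    }
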
/external/tensorflow/tensorflow/lite/tools/optimize/
quantize_model.h:38 ModelT* input_model, ErrorReporter* error_reporter);
45 ModelT* input_model, const TensorType& input_type,
54 ModelT* input_model, const TensorType& input_type,
63 ModelT* input_model, const TensorType& input_type,
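
The overloads above differ only in how the model's input and output tensor types are pinned. A minimal usage sketch, assuming int8 inputs and outputs and the stock StderrReporter:

    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/lite/schema/schema_generated.h"
    #include "tensorflow/lite/stderr_reporter.h"
    #include "tensorflow/lite/tools/optimize/quantize_model.h"

    // Sketch: quantize an unpacked model, requesting int8 model inputs/outputs.
    // The TensorType choices here are illustrative assumptions.
    TfLiteStatus QuantizeUnpackedModel(tflite::ModelT* input_model,
                                       flatbuffers::FlatBufferBuilder* builder) {
      tflite::StderrReporter error_reporter;
      return tflite::optimize::QuantizeModel(builder, input_model,
                                             tflite::TensorType_INT8,
                                             tflite::TensorType_INT8,
                                             &error_reporter);
    }
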
quantize_weights.h:40 flatbuffers::FlatBufferBuilder* builder, const Model* input_model,
46 const Model* input_model,
60 const Model* input_model,
71 const Model* input_model,
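
quantize_weights.h declares several overloads; the last one takes a BufferType selecting int8 or float16 weight storage. A minimal sketch of the float16 path (treat the exact enumerator spelling as an assumption):

    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/lite/schema/schema_generated.h"
    #include "tensorflow/lite/tools/optimize/quantize_weights.h"

    // Sketch: produce a copy of `input_model` with weights stored as float16.
    TfLiteStatus QuantizeWeightsToFloat16(const ::tflite::Model* input_model,
                                          flatbuffers::FlatBufferBuilder* builder) {
      return ::tflite::optimize::QuantizeWeights(
          builder, input_model,
          ::tflite::optimize::BufferType::QUANTIZED_FLOAT16);
    }
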
quantize_weights.cc:342 const Model* input_model, in QuantizeWeightsInt8() argument
347 model.reset(input_model->UnPack()); in QuantizeWeightsInt8()
468 const Model* input_model) { in QuantizeWeightsFloat16() argument
470 model.reset(input_model->UnPack()); in QuantizeWeightsFloat16()
550 const Model* input_model, in QuantizeWeights() argument
556 return QuantizeWeightsInt8(builder, input_model, use_hybrid_evaluation, in QuantizeWeights()
562 const Model* input_model, in QuantizeWeights() argument
565 return QuantizeWeightsInt8(builder, input_model, true, in QuantizeWeights()
570 const Model* input_model, BufferType quant_type) { in QuantizeWeights() argument
576 return QuantizeWeightsInt8(builder, input_model, true, in QuantizeWeights()
[all …]
quantization_wrapper_utils.cc:57 auto input_model = FlatBufferModel::BuildFromFile(path.c_str()); in LoadModel() local
58 if (!input_model) { in LoadModel()
61 auto readonly_model = input_model->GetModel(); in LoadModel()
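
The LoadModel() hits show the usual file-to-ModelT round trip: load the file, grab the read-only root table, then UnPack() it into a mutable object tree. A sketch of that flow, assuming the path points at a valid .tflite file:

    #include <memory>
    #include <string>

    #include "tensorflow/lite/model_builder.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    // Sketch: load a .tflite file and unpack it into a mutable ModelT,
    // mirroring the LoadModel() flow in quantization_wrapper_utils.cc.
    std::unique_ptr<tflite::ModelT> LoadMutableModel(const std::string& path) {
      auto input_model = tflite::FlatBufferModel::BuildFromFile(path.c_str());
      if (!input_model) return nullptr;  // Missing file or invalid flatbuffer.
      const tflite::Model* readonly_model = input_model->GetModel();
      return std::unique_ptr<tflite::ModelT>(readonly_model->UnPack());
    }
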
/external/tensorflow/tensorflow/compiler/mlir/lite/sparsity/
sparsify_model.cc:35 TfLiteStatus SparsifyModel(const tflite::ModelT& input_model, in SparsifyModel() argument
45 tflite::Model::Pack(input_builder, &input_model); in SparsifyModel()
sparsify_model.h:29 TfLiteStatus SparsifyModel(const tflite::ModelT& input_model,
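
A usage sketch for SparsifyModel(); the builder/error-reporter parameters and the mlir::lite namespace are assumptions inferred from the file's location and the matching quantizer APIs, not confirmed by the hits above:

    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h"
    #include "tensorflow/lite/schema/schema_generated.h"
    #include "tensorflow/lite/stderr_reporter.h"

    // Sketch: sparsify an unpacked model and serialize the result into `builder`.
    // Namespace and trailing parameters are assumptions (see note above).
    TfLiteStatus SparsifyUnpackedModel(const tflite::ModelT& input_model,
                                       flatbuffers::FlatBufferBuilder* builder) {
      tflite::StderrReporter error_reporter;
      return mlir::lite::SparsifyModel(input_model, builder, &error_reporter);
    }
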
/external/tensorflow/tensorflow/compiler/mlir/lite/quantization/lite/
quantize_model.cc:39 const tflite::ModelT& input_model, const tflite::TensorType& input_type, in QuantizeModel() argument
58 tflite::Model::Pack(input_builder, &input_model); in QuantizeModel()
quantize_model.h:32 const tflite::ModelT& input_model, const tflite::TensorType& input_type,
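
The quantize_model.cc hit shows the inverse of UnPack(): the mutable ModelT is packed back into a fresh flatbuffer before the MLIR quantizer runs. A minimal sketch of just that packing step:

    #include <string>

    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    // Sketch: serialize a mutable ModelT back into flatbuffer bytes.
    std::string PackModel(const tflite::ModelT& input_model) {
      flatbuffers::FlatBufferBuilder input_builder;
      flatbuffers::Offset<tflite::Model> packed =
          tflite::Model::Pack(input_builder, &input_model);
      tflite::FinishModelBuffer(input_builder, packed);
      return std::string(
          reinterpret_cast<const char*>(input_builder.GetBufferPointer()),
          input_builder.GetSize());
    }
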
/external/tensorflow/tensorflow/compiler/mlir/lite/
tf_to_tfl_flatbuffer.cc:136 const ::tflite::Model* input_model = ::tflite::GetModel(buffer); in ConvertTFExecutorToTFLOrFlatbuffer() local
146 if (::tflite::optimize::QuantizeWeights(&q_builder, input_model, in ConvertTFExecutorToTFLOrFlatbuffer()
/external/tensorflow/tensorflow/lite/tools/optimize/g3doc/
quantize_weights.md:64 ::tflite::Model* input_model = ...;
66 TfLiteStatus status = ::tflite::optimize::QuantizeWeights(&builder, input_model);
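
The quantize_weights.md snippet stops at the QuantizeWeights() call; assuming it returns kTfLiteOk, the quantized model bytes can then be read back out of the builder, e.g.:

    flatbuffers::FlatBufferBuilder builder;
    TfLiteStatus status =
        ::tflite::optimize::QuantizeWeights(&builder, input_model);
    if (status == kTfLiteOk) {
      // The quantized model now lives in the builder's buffer.
      const uint8_t* quantized_data = builder.GetBufferPointer();
      size_t quantized_size = builder.GetSize();
      // ... write quantized_data/quantized_size to disk or hand it to the interpreter.
    }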