/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_GRAPH_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_GRAPH_H_

#include <vector>
#include <queue>
#include <map>
#include <string>
#include <utility>
#include "proto/Model.pb.h"
#include "proto/NeuralNetwork.pb.h"
#include "include/api/kernel.h"
#include "src/litert/delegate/coreml/op/coreml_op.h"
#include "src/litert/delegate/coreml/coreml_executor_wrapper.h"

namespace mindspore::lite {
constexpr int kCoreMLVersion4 = 4;
class CoreMLGraph : public kernel::Kernel {
 public:
  CoreMLGraph(std::vector<CoreMLOp *> coreml_ops, const std::vector<mindspore::MSTensor> &inputs,
              const std::vector<mindspore::MSTensor> &outputs)
      : kernel::Kernel(inputs, outputs, nullptr, nullptr), coreml_ops_(std::move(coreml_ops)) {}

  ~CoreMLGraph() override;

  // Builds the CoreML model for this subgraph and prepares it for execution.
  int Init();

  int Prepare() override { return RET_OK; }

  int Execute() override;

  int ReSize() override {
    MS_LOG(ERROR) << "The CoreML delegate does not support resizing yet.";
    return RET_ERROR;
  }

  void set_input(mindspore::MSTensor in_tensor, int index) override;

  void set_output(mindspore::MSTensor out_tensor, int index) override;

  std::vector<CoreMLOp *> *GetOps() { return &coreml_ops_; }

  std::vector<mindspore::MSTensor *> *GetInsertTensors() { return &insert_tensors_; }

 protected:
  // Assembles the CoreML::Specification::Model proto from the ops in coreml_ops_.
  CoreML::Specification::Model *BuildMLModel();

  // Registers the graph inputs and outputs in the model description.
  int SetMLModelInOut(CoreML::Specification::Model *model);

  // Serializes the built model to disk and returns the saved path.
  std::string SaveMLModel();

  std::vector<CoreMLOp *> coreml_ops_{};
  std::vector<kernel::Kernel *> all_kernels_{};
  CoreML::Specification::Model *ml_model_ = nullptr;
  CoreMLExecutorWrapper *executor_wrapper_ = nullptr;
  std::vector<mindspore::MSTensor *> insert_tensors_;
};
}  // namespace mindspore::lite

#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_COREML_COREML_GRAPH_H_
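// Usage sketch: a minimal, illustrative flow of how a delegate might drive this kernel,
// assuming `coreml_ops`, `in_tensors`, and `out_tensors` have already been collected for one
// partitioned subgraph; error handling is abbreviated.
//
//   auto *coreml_graph =
//       new (std::nothrow) mindspore::lite::CoreMLGraph(std::move(coreml_ops), in_tensors, out_tensors);
//   if (coreml_graph == nullptr || coreml_graph->Init() != mindspore::lite::RET_OK) {
//     // Building or compiling the CoreML model failed; fall back to the default runtime.
//   } else {
//     coreml_graph->Execute();  // Runs inference through the compiled CoreML model.
//   }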