/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 
16 #ifndef OHOS_HDI_NNR_V1_0_PREPAREDMODELSERVICE_H
17 #define OHOS_HDI_NNR_V1_0_PREPAREDMODELSERVICE_H
18 
19 #include "v1_0/iprepared_model.h"
20 #include "include/api/data_type.h"
21 #include "include/api/context.h"
22 #include "include/api/types.h"
23 #include "include/api/model.h"
24 #include "mindspore_schema/model_generated.h"
25 #include "ashmem.h"
26 
27 namespace OHOS {
28 namespace HDI {
29 namespace Nnrt {
30 namespace V1_0 {
31 constexpr int DYNAMIC_SHAPE_FLAG = -1;
32 class PreparedModelService : public IPreparedModel {
33 public:
34     PreparedModelService() = default;
35 
36     virtual ~PreparedModelService();
37 
38     explicit PreparedModelService(std::shared_ptr<mindspore::Context> context);
39 
40     int32_t Compile(std::shared_ptr<mindspore::schema::MetaGraphT> graph);
41 
42     int32_t Compile(const void* modelBuffer, size_t length);
43 
44     int32_t ExportModelCache(std::vector<SharedBuffer>& modelCache) override;
45 
46     int32_t Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
47         std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough) override;
48 
49 private:
50     int32_t SetInputs(const std::vector<IOTensor>& inputs);
51     int32_t SetOutputs(const std::vector<IOTensor>& outputs);
52     int32_t GetMSInputsAndOutputs();
53     int32_t CompareTensor(const IOTensor& tensor, const mindspore::MSTensor& msTensor);
54     sptr<Ashmem> ParseBuffer(const SharedBuffer& buffer);
55     int32_t UpdateOutput(const std::vector<IOTensor>& outputs,
56         std::vector<std::vector<int32_t>>& outputsDims, std::vector<bool>& isOutputBufferEnough);
57     void ResetInputAndOutput();
58 
59 private:
60     std::shared_ptr<mindspore::schema::MetaGraphT> m_graph {nullptr};
61     std::shared_ptr<mindspore::Context> m_context {nullptr};
62     flatbuffers::FlatBufferBuilder m_builder;
63     std::shared_ptr<mindspore::Model> m_model {nullptr};
64     sptr<Ashmem> m_cacheBuffer {nullptr};
65     std::vector<sptr<Ashmem>> m_inputAshmems;
66     std::vector<mindspore::MSTensor> m_inputs;
67     std::vector<sptr<Ashmem>> m_outputAshmems;
68     std::vector<mindspore::MSTensor> m_outputs;
69     std::vector<std::vector<int64_t>> m_inputDims;
70     bool m_isDynamicShape {false};
71 };
72 } // V1_0
73 } // Nnrt
74 } // HDI
75 } // OHOS
76 
77 #endif // OHOS_HDI_NNR_V1_0_PREPAREDMODELSERVICE_H