/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef NEURAL_NETWORK_RUNTIME_COMPILATION_H
#define NEURAL_NETWORK_RUNTIME_COMPILATION_H

#include "inner_model.h"
#include "execution_plan.h"

#include "device.h"
#include "cpp_type.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
// Metadata recorded alongside a set of cached model files; modelCheckSum
// holds one checksum per cache file (see GetCrc16() below).
struct ModelCacheInfo {
    uint64_t fileNumber = 0;
    uint64_t version = 0;
    uint64_t deviceId = 0;
    std::vector<unsigned short> modelCheckSum;
};

// Compiles an InnerModel for a selected device, optionally caching the
// compiled model on disk; Build() must succeed before the execution plan
// and the input/output tensors are available.
class Compilation {
public:
    explicit Compilation(const InnerModel* innerModel);

    OH_NN_ReturnCode SetDevice(size_t deviceId);
    OH_NN_ReturnCode SetCacheDir(const std::string& cacheModelPath, uint32_t version);
    OH_NN_ReturnCode SetPerformance(OH_NN_PerformanceMode performance);
    OH_NN_ReturnCode SetPriority(OH_NN_Priority priority);
    OH_NN_ReturnCode SetEnableFp16(bool isFp16);

    OH_NN_ReturnCode Build();

    bool IsBuild() const;
    bool IsDynamicShape() const;
    std::vector<std::shared_ptr<NNTensor>> GetInputTensors() const;
    std::vector<std::shared_ptr<NNTensor>> GetOutputTensors() const;
    std::shared_ptr<ExecutionPlan> GetExecutionPlan() const;

private:
    std::shared_ptr<mindspore::lite::LiteGraph> m_liteGraph {nullptr};
    OH_NN_Priority m_priority {OH_NN_PRIORITY_NONE};
    OH_NN_PerformanceMode m_performance {OH_NN_PERFORMANCE_NONE};
    bool m_enableFp16 {false};
    std::shared_ptr<Device> m_device {nullptr};
    std::string m_cachePath;
    uint32_t m_version {0};
    size_t m_deviceId {0};
    bool m_isBuild {false};
    std::shared_ptr<ExecutionPlan> m_executionPlan {nullptr};
    std::vector<std::shared_ptr<NNTensor>> m_inputTensors;
    std::vector<std::shared_ptr<NNTensor>> m_outputTensors;
    void* m_metaGraph {nullptr};
    Buffer m_quantBuffer;
    std::string m_modelName;

private:
    // Helpers for generating, validating, and reloading the on-disk model
    // cache, plus the normal, cached, and offline build paths.
    OH_NN_ReturnCode GenerateCacheFiles(const std::vector<Buffer>& modelBuffer) const;
    OH_NN_ReturnCode GenerateCacheModel(size_t cacheNumber, std::unique_ptr<uint64_t[]>& cacheInfo,
        std::vector<Buffer> modelBuffer) const;
    OH_NN_ReturnCode GenerateCacheInfo(uint32_t cacheSize, std::unique_ptr<uint64_t[]>& cacheInfo) const;
    OH_NN_ReturnCode CheckCacheInfo(ModelCacheInfo& modelCacheInfo, const std::string& cacheInfoPath) const;
    OH_NN_ReturnCode ReadCacheModelFile(const std::string& file, Buffer& modelBuffer) const;
    OH_NN_ReturnCode RemoveCacheFiles(uint32_t fileNumber) const;
    unsigned short GetCrc16(const unsigned char* buffer, size_t length) const;
    OH_NN_ReturnCode CheckCacheModel(const ModelCacheInfo& modelCacheInfo,
        std::vector<Buffer>& modelBuffers) const;
    OH_NN_ReturnCode NormalBuild(std::shared_ptr<PreparedModel>& preparedModel);
    OH_NN_ReturnCode BuildCacheModel(std::shared_ptr<PreparedModel>& preparedModel);
    OH_NN_ReturnCode GenCacheBuild(std::shared_ptr<PreparedModel>& preparedModel);
    OH_NN_ReturnCode ReGenCacheBuild(uint32_t fileNumber, std::shared_ptr<PreparedModel>& preparedModel);
    OH_NN_ReturnCode LoadCacheBuild(std::shared_ptr<PreparedModel>& preparedModel, const ModelCacheInfo& cacheInfo);
    OH_NN_ReturnCode InnerBuild();
    OH_NN_ReturnCode GetCacheFileLength(std::ifstream& ifs, int& fsize) const;
    OH_NN_ReturnCode IsOfflineModel(bool& isOfflineModel) const;
    OH_NN_ReturnCode BuildOfflineModel(std::shared_ptr<PreparedModel>& preparedModel);
};
} // namespace NeuralNetworkRuntime
} // namespace OHOS

#endif // NEURAL_NETWORK_RUNTIME_COMPILATION_H
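
For orientation, the sketch below exercises the public API declared above. It is not part of the original file: the helper name, the cache path, and the way a populated InnerModel is obtained are illustrative assumptions.

#include "compilation.h"

using namespace OHOS::NeuralNetworkRuntime;

// Hypothetical helper: compile a constructed model for one device and cache
// the result. Only the Compilation API itself comes from the header above.
OH_NN_ReturnCode CompileForDevice(const InnerModel* innerModel, size_t deviceId)
{
    Compilation compilation(innerModel);

    // Configuration must precede Build(); each setter reports failure
    // through an OH_NN_ReturnCode.
    OH_NN_ReturnCode ret = compilation.SetDevice(deviceId);
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    ret = compilation.SetCacheDir("/data/nn_cache", 1);  // placeholder path and cache version
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    // Build() compiles the model, or reloads it from the cache directory when
    // a valid cache exists, and produces the ExecutionPlan used at run time.
    ret = compilation.Build();
    if (ret != OH_NN_SUCCESS) {
        return ret;
    }

    std::shared_ptr<ExecutionPlan> plan = compilation.GetExecutionPlan();
    (void)plan;  // handed to the execution stage in real code
    return OH_NN_SUCCESS;
}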