/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H
#define NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H

#include "refbase.h"
#include "hdi_interfaces.h"

#include "device.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
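// Adapter that exposes an HDI NNRt device (V1_0::INnrtDevice) through the runtime's Device interface.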
class HDIDevice : public Device {
public:
    explicit HDIDevice(OHOS::sptr<V1_0::INnrtDevice> device);

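    // Queries basic device information and which operations of a model the device supports.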
    OH_NN_ReturnCode GetDeviceName(std::string& name) override;
    OH_NN_ReturnCode GetVendorName(std::string& name) override;
    OH_NN_ReturnCode GetDeviceType(OH_NN_DeviceType& deviceType) override;
    OH_NN_ReturnCode GetDeviceStatus(DeviceStatus& status) override;
    OH_NN_ReturnCode GetSupportedOperation(std::shared_ptr<const mindspore::lite::LiteGraph> model,
                                           std::vector<bool>& ops) override;

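    // Queries optional capabilities: float16 precision, performance mode, priority,
    // dynamic input shapes and model caching.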
    OH_NN_ReturnCode IsFloat16PrecisionSupported(bool& isSupported) override;
    OH_NN_ReturnCode IsPerformanceModeSupported(bool& isSupported) override;
    OH_NN_ReturnCode IsPrioritySupported(bool& isSupported) override;
    OH_NN_ReturnCode IsDynamicInputSupported(bool& isSupported) override;
    OH_NN_ReturnCode IsModelCacheSupported(bool& isSupported) override;

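    // Compiles a LiteGraph model, or restores one from a model cache, into a PreparedModel
    // that can be executed on the device.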
    OH_NN_ReturnCode PrepareModel(std::shared_ptr<const mindspore::lite::LiteGraph> model,
                                  const ModelConfig& config,
                                  std::shared_ptr<PreparedModel>& preparedModel) override;
    OH_NN_ReturnCode PrepareModelFromModelCache(const std::vector<ModelBuffer>& modelCache,
                                                const ModelConfig& config,
                                                std::shared_ptr<PreparedModel>& preparedModel) override;

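    // Allocates and releases device shared-memory buffers.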
    void* AllocateBuffer(size_t length) override;
    OH_NN_ReturnCode ReleaseBuffer(const void* buffer) override;

private:
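    // Releases a shared memory buffer through the underlying HDI device.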
    OH_NN_ReturnCode ReleaseSharedBuffer(const V1_0::SharedBuffer& buffer);

private:
    // first: major version, second: minor version
    std::pair<uint32_t, uint32_t> m_hdiVersion;
    OHOS::sptr<V1_0::INnrtDevice> m_iDevice {nullptr};
};
} // namespace NeuralNetworkRuntime
} // namespace OHOS
#endif // NEURAL_NETWORK_RUNTIME_HDI_DEVICE_H