/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vector>
#include <v1_0/nnrt_types.h>
#include <v1_0/innrt_device.h>
#include <v1_0/iprepared_model.h>

#include "gtest/gtest.h"
#include "mindir.h"
#include "mindir_lite_graph.h"

#include "interfaces/kits/c/neural_network_runtime.h"
#include "common/hdi_nnrt_test_utils.h"
#include "common/hdi_nnrt_test.h"

using namespace std;
using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime;
using namespace OHOS::NeuralNetworkRuntime::Test;

namespace {

class DeviceTest : public HDINNRtTest {};

} // namespace

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceInfo_0100
 * @tc.desc: Obtain the device name
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceInfo_0100, Function | MediumTest | Level2)
{
    std::string deviceName;
    auto hdiRet = device_->GetDeviceName(deviceName);
    ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
    std::cout << "deviceName:" << deviceName << std::endl;
    ASSERT_FALSE(deviceName.empty());
}

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceInfo_0200
 * @tc.desc: Obtain the chip vendor name
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceInfo_0200, Function | MediumTest | Level2)
{
    std::string vendorName;
    auto hdiRet = device_->GetVendorName(vendorName);
    ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
    std::cout << "vendorName:" << vendorName << std::endl;
    ASSERT_FALSE(vendorName.empty());
}

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceInfo_0300
 * @tc.desc: Obtain the device type
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceInfo_0300, Function | MediumTest | Level1)
{
    V1_0::DeviceType deviceType;
    auto hdiRet = device_->GetDeviceType(deviceType);
    ASSERT_EQ(HDF_SUCCESS, hdiRet);

    ASSERT_TRUE(deviceType == V1_0::DeviceType::OTHER || deviceType == V1_0::DeviceType::GPU ||
                deviceType == V1_0::DeviceType::CPU || deviceType == V1_0::DeviceType::ACCELERATOR)
        << deviceType;
}

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceInfo_0400
 * @tc.desc: Obtain the device status
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceInfo_0400, Function | MediumTest | Level1)
{
    V1_0::DeviceStatus deviceStatus;
    auto hdiRet = device_->GetDeviceStatus(deviceStatus);
    ASSERT_EQ(HDF_SUCCESS, hdiRet);
    ASSERT_TRUE(deviceStatus == V1_0::DeviceStatus::AVAILABLE || deviceStatus == V1_0::DeviceStatus::BUSY ||
                deviceStatus == V1_0::DeviceStatus::OFFLINE || deviceStatus == V1_0::DeviceStatus::UNKNOWN)
        << deviceStatus;
}
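/**
 * Illustrative helper, a minimal sketch and not part of the original suite:
 * it combines the four device-information queries exercised above into one
 * dump. It assumes device_ is an OHOS::sptr<V1_0::INnrtDevice> proxy, as the
 * fixture suggests; the helper name is hypothetical.
 */
namespace {
[[maybe_unused]] void DumpDeviceInfo(const OHOS::sptr<V1_0::INnrtDevice> &device)
{
    std::string deviceName;
    if (device->GetDeviceName(deviceName) == HDF_SUCCESS) {
        std::cout << "deviceName:" << deviceName << std::endl;
    }
    std::string vendorName;
    if (device->GetVendorName(vendorName) == HDF_SUCCESS) {
        std::cout << "vendorName:" << vendorName << std::endl;
    }
    V1_0::DeviceType deviceType;
    if (device->GetDeviceType(deviceType) == HDF_SUCCESS) {
        std::cout << "deviceType:" << static_cast<int32_t>(deviceType) << std::endl;
    }
    V1_0::DeviceStatus deviceStatus;
    if (device->GetDeviceStatus(deviceStatus) == HDF_SUCCESS) {
        std::cout << "deviceStatus:" << static_cast<int32_t>(deviceStatus) << std::endl;
    }
}
} // namespace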
/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceConfig_0100
 * @tc.desc: Query whether the device supports running Float32 models at Float16 precision
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceConfig_0100, Function | MediumTest | Level2)
{
    bool isSupportedFp16;
    auto hdiRet = device_->IsFloat16PrecisionSupported(isSupportedFp16);
    ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
}

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceConfig_0200
 * @tc.desc: Query whether the device supports performance preference settings
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceConfig_0200, Function | MediumTest | Level2)
{
    bool isSupportedPerformance;
    auto hdiRet = device_->IsPerformanceModeSupported(isSupportedPerformance);
    ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
}

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceConfig_0300
 * @tc.desc: Query whether the device supports task priority settings
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceConfig_0300, Function | MediumTest | Level2)
{
    bool isSupportedPriority;
    auto hdiRet = device_->IsPrioritySupported(isSupportedPriority);
    ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
}

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceConfig_0400
 * @tc.desc: Query whether the device supports dynamic-shape (variable-size) inputs
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceConfig_0400, Function | MediumTest | Level2)
{
    bool isSupportedDynamicInput;
    auto hdiRet = device_->IsDynamicInputSupported(isSupportedDynamicInput);
    ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
}

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_DeviceConfig_0500
 * @tc.desc: Query whether model caching is supported
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_DeviceConfig_0500, Function | MediumTest | Level2)
{
    bool isSupportedCache;
    auto hdiRet = device_->IsModelCacheSupported(isSupportedCache);
    ASSERT_EQ(HDF_SUCCESS, hdiRet) << hdiRet;
}
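/**
 * Illustrative sketch, not part of the original suite: queries the five
 * optional capabilities covered by the DeviceConfig cases above in one pass.
 * Assumes the same V1_0::INnrtDevice proxy type; the struct and helper names
 * are hypothetical.
 */
namespace {
struct DeviceCapabilities {
    bool fp16 = false;
    bool performanceMode = false;
    bool priority = false;
    bool dynamicInput = false;
    bool modelCache = false;
};

[[maybe_unused]] DeviceCapabilities QueryDeviceCapabilities(const OHOS::sptr<V1_0::INnrtDevice> &device)
{
    DeviceCapabilities caps;
    // Each query returns HDF_SUCCESS on success; the output flag is only
    // meaningful when the call succeeds, so a stricter caller would check
    // every return code individually.
    device->IsFloat16PrecisionSupported(caps.fp16);
    device->IsPerformanceModeSupported(caps.performanceMode);
    device->IsPrioritySupported(caps.priority);
    device->IsDynamicInputSupported(caps.dynamicInput);
    device->IsModelCacheSupported(caps.modelCache);
    return caps;
}
} // namespace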
/**
 * @tc.number : SUB_AI_NNRt_Func_South_Device_ModelSupport_0100
 * @tc.name : Query operator support with an empty nodes list
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_ModelSupport_0100, Function | MediumTest | Level3)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V1_0::Model *iModel = nullptr;
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
    // set nodes to empty
    iModel->nodes = {};
    std::vector<bool> supportedOperations;
    EXPECT_EQ(HDF_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    EXPECT_TRUE(supportedOperations.empty());

    // release
    mindspore::lite::MindIR_Model_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Device_ModelSupport_0200
 * @tc.name : Query operator support when nodes contain NodeType -1
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_ModelSupport_0200, Function | MediumTest | Level3)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V1_0::Model *iModel = nullptr;
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
    // set the nodeType of every node to an invalid value (-1)
    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V1_0::NodeType>(-1);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(HDF_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    // every operator with an invalid nodeType should be reported as unsupported
    for (bool isSupported : supportedOperations) {
        EXPECT_FALSE(isSupported);
    }

    // release
    mindspore::lite::MindIR_Model_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.number : SUB_AI_NNRt_Func_South_Device_ModelSupport_0300
 * @tc.name : Query operator support when nodes contain NodeType 100000
 * @tc.desc : [C- SOFTWARE -0200]
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_ModelSupport_0300, Function | MediumTest | Level3)
{
    OH_NNModel *model = nullptr;
    HDICommon::BuildAddGraph(&model);
    ASSERT_NE(model, nullptr);

    V1_0::Model *iModel = nullptr;
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    ASSERT_EQ(OH_NN_SUCCESS, HDICommon::ConvertModel(device_, model, tensorBuffer, &iModel));
    // set the nodeType of every node to an out-of-range value (100000)
    for (auto &node : iModel->nodes) {
        node.nodeType = static_cast<V1_0::NodeType>(100000);
    }

    std::vector<bool> supportedOperations;
    EXPECT_EQ(HDF_SUCCESS, device_->GetSupportedOperation(*iModel, supportedOperations));
    // every operator with an out-of-range nodeType should be reported as unsupported
    for (bool isSupported : supportedOperations) {
        EXPECT_FALSE(isSupported);
    }

    // release
    mindspore::lite::MindIR_Model_Destroy(&iModel);
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}
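/**
 * Illustrative sketch, not part of the original suite: as the ModelSupport
 * cases above suggest, GetSupportedOperation reports one flag per entry in
 * iModel->nodes (an empty nodes list yields an empty vector), so a caller can
 * count unsupported operators before deciding whether to prepare the model on
 * this device. The helper name is hypothetical.
 */
namespace {
[[maybe_unused]] size_t CountUnsupportedOperations(const std::vector<bool> &supportedOperations)
{
    size_t unsupported = 0;
    for (bool isSupported : supportedOperations) {
        if (!isSupported) {
            ++unsupported;
        }
    }
    return unsupported;
}
} // namespace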
/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_Memory_0100
 * @tc.desc: Allocate shared memory with length 1
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_Memory_0100, Function | MediumTest | Level1)
{
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    size_t tensorSize = 1;
    auto hdiRet = device_->AllocateBuffer(tensorSize, tensorBuffer);
    EXPECT_TRUE(hdiRet == HDF_SUCCESS) << hdiRet;
    EXPECT_TRUE(tensorBuffer.fd != NNRT_INVALID_FD && tensorBuffer.bufferSize == tensorSize)
        << tensorBuffer.fd << " " << tensorBuffer.bufferSize;
    // release the buffer allocated above so the test does not leak shared memory
    if (tensorBuffer.fd != NNRT_INVALID_FD) {
        EXPECT_EQ(HDF_SUCCESS, device_->ReleaseBuffer(tensorBuffer));
    }
}

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_Memory_0200
 * @tc.desc: Allocate shared memory with length 0
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_Memory_0200, Function | MediumTest | Level3)
{
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    size_t tensorSize = 0;
    auto hdiRet = device_->AllocateBuffer(tensorSize, tensorBuffer);
    EXPECT_TRUE(hdiRet != HDF_SUCCESS) << hdiRet;
    EXPECT_TRUE(tensorBuffer.fd == NNRT_INVALID_FD) << tensorBuffer.fd;
}

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_Memory_0300
 * @tc.desc: Release shared memory with an empty buffer
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_Memory_0300, Function | MediumTest | Level2)
{
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    auto hdiRet = device_->ReleaseBuffer(tensorBuffer);
    EXPECT_TRUE(hdiRet != HDF_SUCCESS);
}

/**
 * @tc.name: SUB_AI_NNRt_Func_South_Device_Memory_0400
 * @tc.desc: Release shared memory; buffer is released successfully
 * @tc.type: FUNC
 */
HWTEST_F(DeviceTest, SUB_AI_NNRt_Func_South_Device_Memory_0400, Function | MediumTest | Level1)
{
    V1_0::SharedBuffer tensorBuffer{NNRT_INVALID_FD, 0, 0, 0};
    size_t tensorSize = 224;
    auto hdiRet = device_->AllocateBuffer(tensorSize, tensorBuffer);
    EXPECT_TRUE(hdiRet == HDF_SUCCESS && tensorBuffer.fd != NNRT_INVALID_FD && tensorBuffer.bufferSize == tensorSize)
        << tensorBuffer.fd << " " << tensorBuffer.bufferSize;
    hdiRet = device_->ReleaseBuffer(tensorBuffer);
    EXPECT_TRUE(hdiRet == HDF_SUCCESS);
}
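/**
 * Illustrative sketch, not part of the original suite and not verified
 * against a real driver: how a client might write tensor data into a buffer
 * obtained from AllocateBuffer. SharedBuffer carries a shared-memory fd plus
 * bufferSize (as the Memory cases above show), so standard mmap/munmap
 * applies. The includes below would normally sit at the top of the file; they
 * are kept here so the sketch stays self-contained. The helper name is
 * hypothetical.
 */
#include <sys/mman.h>
#include <cstring>

namespace {
[[maybe_unused]] bool WriteToSharedBuffer(const V1_0::SharedBuffer &buffer, const void *data, size_t length)
{
    if (buffer.fd == NNRT_INVALID_FD || length > buffer.bufferSize) {
        return false;
    }
    // map the shared memory into this process, copy the payload, then unmap
    void *addr = mmap(nullptr, buffer.bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, buffer.fd, 0);
    if (addr == MAP_FAILED) {
        return false;
    }
    std::memcpy(addr, data, length);
    return munmap(addr, buffer.bufferSize) == 0;
}
} // namespace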