1 /**
2 * Copyright 2022-2023 Huawei Technologies Co., Ltd
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #include <vector>
17 #include "common/common_test.h"
18 #include "include/registry/register_kernel_interface.h"
19 #include "include/registry/register_kernel.h"
20 #include "src/litert/cxx_api/kernel_executor/kernel_executor.h"
21 #include "ops/auto_generate/gen_lite_ops.h"
22 #include "ops/conv2d.h"
23 #include "ops/topk.h"
24 #include "ops/conv2d_transpose.h"
25 #include "ops/max_pool.h"
26 #include "ops/pad.h"
27 #include "ops/base_operator.h"
28
29 namespace mindspore {
// Test fixture for KernelExecutor: every test gets a fresh executor, a
// single-threaded CPU context, and a 12-element alternating-sign float buffer
// (see the constructor below).
class KernelExecutorTest : public mindspore::CommonTest {
 public:
  KernelExecutorTest();
  ~KernelExecutorTest() = default;

 protected:
  std::shared_ptr<mindspore::KernelExecutor> kernel_executor_;  // executor under test
  std::shared_ptr<mindspore::Context> context_;                 // CPU context, thread num = 1
  std::vector<float> input_data_;                               // shared input payload used by most tests
};
40
KernelExecutorTest()41 KernelExecutorTest::KernelExecutorTest() {
42 kernel_executor_ = std::make_shared<mindspore::KernelExecutor>();
43 context_ = std::make_shared<mindspore::Context>();
44 auto cpu_context = std::make_shared<mindspore::CPUDeviceInfo>();
45 context_->MutableDeviceInfo().push_back(cpu_context);
46 context_->SetThreadNum(1);
47
48 input_data_ = {-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12};
49 }
50
namespace {
// Shorthand for the REGISTER_CUSTOM_KERNEL macro invocation below.
const auto kFloat32 = DataType::kNumberTypeFloat32;
53 class CustomAddKernel : public kernel::Kernel {
54 public:
CustomAddKernel(const std::vector<MSTensor> & inputs,const std::vector<MSTensor> & outputs,const schema::Primitive * primitive,const mindspore::Context * ctx)55 CustomAddKernel(const std::vector<MSTensor> &inputs, const std::vector<MSTensor> &outputs,
56 const schema::Primitive *primitive, const mindspore::Context *ctx)
57 : Kernel(inputs, outputs, primitive, ctx) {}
58 ~CustomAddKernel() = default;
59
Prepare()60 int Prepare() override { return static_cast<int>(kSuccess); }
61
Execute()62 int Execute() override {
63 const float *in0 = static_cast<const float *>(inputs_[0].Data().get());
64 const float *in1 = static_cast<const float *>(inputs_[1].Data().get());
65 float *out = static_cast<float *>(outputs_[0].MutableData());
66 auto num = outputs_[0].ElementNum();
67 for (int i = 0; i < num; ++i) {
68 out[i] = in0[i] + in1[i];
69 }
70 return static_cast<int>(kSuccess);
71 }
ReSize()72 int ReSize() override { return static_cast<int>(kSuccess); }
73 };
74
// Factory handed to the kernel registry: builds a CustomAddKernel instance
// for each Custom_Add node.
std::shared_ptr<kernel::Kernel> CustomAddCreator(const std::vector<MSTensor> &inputs,
                                                 const std::vector<MSTensor> &outputs,
                                                 const schema::Primitive *primitive, const mindspore::Context *ctx) {
  return std::make_shared<CustomAddKernel>(inputs, outputs, primitive, ctx);
}
// Register the kernel: CPU arch, provider "Tutorial", float32, op type "Custom_Add".
REGISTER_CUSTOM_KERNEL(CPU, Tutorial, kFloat32, Custom_Add, CustomAddCreator)
81
82 class CustomAddInfer : public kernel::KernelInterface {
83 public:
84 CustomAddInfer() = default;
85 ~CustomAddInfer() = default;
Infer(std::vector<mindspore::MSTensor> * inputs,std::vector<mindspore::MSTensor> * outputs,const schema::Primitive * primitive)86 Status Infer(std::vector<mindspore::MSTensor> *inputs, std::vector<mindspore::MSTensor> *outputs,
87 const schema::Primitive *primitive) override {
88 (*outputs)[0].SetFormat((*inputs)[0].format());
89 (*outputs)[0].SetDataType((*inputs)[0].DataType());
90 (*outputs)[0].SetShape((*inputs)[0].Shape());
91 return kSuccess;
92 }
93 };
// Factory for the inference object above.
std::shared_ptr<kernel::KernelInterface> CustomAddInferCreator() { return std::make_shared<CustomAddInfer>(); }
// NOTE(review): the provider here is "CustomOpTurial" while the kernel above
// is registered under "Tutorial" — looks like a typo; confirm which provider
// name the registry keys the infer function on before unifying them.
REGISTER_CUSTOM_KERNEL_INTERFACE(CustomOpTurial, Custom_Add, CustomAddInferCreator)
96 } // namespace
97
// Build() argument validation: null op / empty inputs / null context must
// return the documented error statuses, and an unsupported op or dtype must
// be rejected even with otherwise valid arguments.
TEST_F(KernelExecutorTest, TestBuild) {
  auto op = std::make_shared<ops::Abs>();
  std::vector<mindspore::MSTensor> inputs_abs;
  // NOTE(review): shape {1, 3, 2, 3} implies 18 elements but only 12 floats
  // are supplied — presumably Build() does not validate payload size; confirm.
  mindspore::MSTensor tensor_abs("Abs", mindspore::DataType::kNumberTypeFloat32, {1, 3, 2, 3},
                                 reinterpret_cast<void *>(input_data_.data()), 12 * sizeof(float));
  inputs_abs.emplace_back(tensor_abs);
  // Null op -> kLiteNullptr; empty inputs -> kLiteError; null context -> kLiteNullptr.
  ASSERT_EQ(kernel_executor_->Build(nullptr, {}, nullptr), mindspore::kLiteNullptr);
  ASSERT_EQ(kernel_executor_->Build(op, {}, nullptr), mindspore::kLiteError);
  ASSERT_EQ(kernel_executor_->Build(op, inputs_abs, nullptr), mindspore::kLiteNullptr);
  ASSERT_EQ(kernel_executor_->Build(op, inputs_abs, context_), mindspore::kSuccess);

  // AddN with these arguments is expected to fail the build.
  auto addn = std::make_shared<ops::AddN>();
  ASSERT_EQ(kernel_executor_->Build(addn, inputs_abs, context_), mindspore::kLiteError);
  // Int8 and Float16 input dtypes are rejected.
  tensor_abs.SetDataType(mindspore::DataType::kNumberTypeInt8);
  ASSERT_EQ(kernel_executor_->Build(op, inputs_abs, context_), mindspore::kLiteError);
  tensor_abs.SetDataType(mindspore::DataType::kNumberTypeFloat16);
  ASSERT_EQ(kernel_executor_->Build(op, inputs_abs, context_), mindspore::kLiteError);
}
116
// ReSize() state machine: kLiteNullptr before any successful Build() (and
// after a failed one), kLiteError for an empty tensor list after a successful
// Build(), and kSuccess for a reshape that keeps the element count (12).
TEST_F(KernelExecutorTest, TestResize) {
  auto op = std::make_shared<ops::Abs>();
  std::vector<mindspore::MSTensor> inputs_abs;
  mindspore::MSTensor tensor_abs("Abs", mindspore::DataType::kNumberTypeFloat32, {1, 3, 2, 2},
                                 reinterpret_cast<void *>(input_data_.data()), 12 * sizeof(float));
  inputs_abs.emplace_back(tensor_abs);

  // Same 12 elements, different rank — a valid resize target.
  std::vector<mindspore::MSTensor> inputs_abs_resize;
  mindspore::MSTensor tensor_abs_resize("Abs", mindspore::DataType::kNumberTypeFloat32, {1, 4, 3},
                                        reinterpret_cast<void *>(input_data_.data()), 12 * sizeof(float));
  inputs_abs_resize.emplace_back(tensor_abs_resize);

  // Before Build(), and after a failed Build(), ReSize() reports kLiteNullptr.
  ASSERT_EQ(kernel_executor_->ReSize({}), mindspore::kLiteNullptr);
  kernel_executor_->Build(nullptr, {}, nullptr);
  ASSERT_EQ(kernel_executor_->ReSize({}), mindspore::kLiteNullptr);
  // After a successful Build(): empty inputs -> kLiteError, valid -> kSuccess.
  kernel_executor_->Build(op, inputs_abs, context_);
  ASSERT_EQ(kernel_executor_->ReSize({}), mindspore::kLiteError);
  ASSERT_EQ(kernel_executor_->ReSize(inputs_abs_resize), mindspore::kSuccess);
}
136
// Execute() validation: fails before a successful Build(), rejects a null
// output vector, computes |x| for the fixture input once built, and rejects
// inputs whose shape/format/dtype/count no longer match the built kernel.
TEST_F(KernelExecutorTest, TestExecute) {
  auto op = std::make_shared<ops::Abs>();
  std::vector<mindspore::MSTensor> inputs_abs;
  std::vector<mindspore::MSTensor> outputs_abs;
  mindspore::MSTensor tensor_abs("Abs", mindspore::DataType::kNumberTypeFloat32, {1, 3, 2, 2},
                                 reinterpret_cast<void *>(input_data_.data()), 12 * sizeof(float));
  inputs_abs.emplace_back(tensor_abs);

  // No successful Build() yet -> kLiteNullptr; a failed Build() doesn't help.
  ASSERT_EQ(kernel_executor_->Execute(inputs_abs, &outputs_abs), mindspore::kLiteNullptr);
  kernel_executor_->Build(nullptr, inputs_abs, nullptr);
  ASSERT_EQ(kernel_executor_->Execute(inputs_abs, &outputs_abs), mindspore::kLiteNullptr);

  kernel_executor_->Build(op, inputs_abs, context_);
  // Null output vector is rejected; a proper call yields |input_data_|.
  ASSERT_EQ(kernel_executor_->Execute(inputs_abs, nullptr), mindspore::kLiteNullptr);
  ASSERT_EQ(kernel_executor_->Execute(inputs_abs, &outputs_abs), mindspore::kSuccess);
  float correct[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs_abs[0].MutableData()), correct,
                                 outputs_abs[0].ElementNum(), 0.0001));

  // Mismatched inputs after Build(): different batch layout, different rank,
  // NCHW format, float16 dtype, and a wrong tensor count must all fail.
  // NOTE(review): mutating tensor_other after emplace_back appears to affect
  // the copy inside inputs_other — presumably MSTensor shares its underlying
  // impl; confirm.
  std::vector<mindspore::MSTensor> inputs_other;
  mindspore::MSTensor tensor_other("other", mindspore::DataType::kNumberTypeFloat32, {1, 3, 2, 2},
                                   reinterpret_cast<void *>(input_data_.data()), 12 * sizeof(float));
  inputs_other.emplace_back(tensor_other);
  tensor_other.SetShape({3, 1, 2, 2});
  ASSERT_EQ(kernel_executor_->Execute(inputs_other, &outputs_abs), mindspore::kLiteError);
  tensor_other.SetShape({1, 3, 4});
  ASSERT_EQ(kernel_executor_->Execute(inputs_other, &outputs_abs), mindspore::kLiteError);
  tensor_other.SetFormat(mindspore::NCHW);
  ASSERT_EQ(kernel_executor_->Execute(inputs_other, &outputs_abs), mindspore::kLiteError);
  tensor_other.SetDataType(mindspore::DataType::kNumberTypeFloat16);
  ASSERT_EQ(kernel_executor_->Execute(inputs_other, &outputs_abs), mindspore::kLiteError);
  inputs_other.emplace_back(tensor_abs);
  ASSERT_EQ(kernel_executor_->Execute(inputs_other, &outputs_abs), mindspore::kLiteError);
}
171
// End-to-end use of the custom "Custom_Add" op registered above: Build()
// fails until the op carries its attributes, then Execute() adds the two
// identical inputs element-wise (every fixture value doubles).
TEST_F(KernelExecutorTest, TestCustom) {
  auto op = std::make_shared<ops::Custom>();
  auto kernel_executor = std::make_shared<mindspore::KernelExecutor>();
  std::vector<mindspore::MSTensor> inputs;
  std::vector<mindspore::MSTensor> outputs;
  mindspore::MSTensor tensor("Custom", mindspore::DataType::kNumberTypeFloat32, {1, 3, 2, 2},
                             reinterpret_cast<void *>(input_data_.data()), 12 * sizeof(float));
  inputs.emplace_back(tensor);
  inputs.emplace_back(tensor);

  // Before Init(): output num 0 / default -> kLiteError; output num 1 without
  // attributes -> kLiteNotSupport.
  ASSERT_EQ(kernel_executor->Build(op, inputs, context_, 0), mindspore::kLiteError);
  ASSERT_EQ(kernel_executor->Build(op, inputs, context_), mindspore::kLiteError);
  ASSERT_EQ(kernel_executor->Build(op, inputs, context_, 1), mindspore::kLiteNotSupport);

  // Custom-op attributes are raw byte vectors keyed by name.
  std::map<std::string, std::vector<uint8_t>> custom_attrs;
  std::string input_num = std::to_string(2);
  std::vector<uint8_t> input_num_attr(input_num.begin(), input_num.end());
  custom_attrs["input_num"] = input_num_attr;
  std::string op_kind = "custom op";
  std::vector<uint8_t> op_kind_attr(op_kind.begin(), op_kind.end());
  custom_attrs["op_kind"] = op_kind_attr;
  // "Custom_Add" matches the type registered by REGISTER_CUSTOM_KERNEL above.
  op->Init("Custom_Add", custom_attrs);
  ASSERT_EQ(kernel_executor->Build(op, inputs, context_, 1), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor->Execute(inputs, &outputs), mindspore::kSuccess);
  float correct[] = {-2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0].MutableData()), correct, outputs[0].ElementNum(),
                                 0.0001));
}
200
// ReLU over the fixture buffer: negatives clamp to zero, positives pass through.
TEST_F(KernelExecutorTest, TestRelu) {
  auto relu_op = std::make_shared<ops::ReLU>();
  mindspore::MSTensor in_tensor("Relu", mindspore::DataType::kNumberTypeFloat32, {1, 3, 2, 2},
                                reinterpret_cast<void *>(input_data_.data()), 12 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{in_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(relu_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{0, 2, 0, 4, 0, 6, 0, 8, 0, 10, 0, 12};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
215
// Sigmoid over a small 1-D tensor; expected values are 1 / (1 + e^-x).
TEST_F(KernelExecutorTest, TestSigmoid) {
  auto sigmoid_op = std::make_shared<ops::Sigmoid>();
  std::vector<float> data{1, 2, 3, 4, 5};
  mindspore::MSTensor in_tensor("Sigmoid", mindspore::DataType::kNumberTypeFloat32, {5},
                                reinterpret_cast<void *>(data.data()), 5 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{in_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(sigmoid_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{0.731059, 0.88081, 0.952574, 0.982015, 0.993307};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
231
// Element-wise Add of the fixture buffer with itself: every value doubles.
TEST_F(KernelExecutorTest, TestAdd) {
  auto add_op = std::make_shared<ops::Add>();
  mindspore::MSTensor in_tensor("Add", mindspore::DataType::kNumberTypeFloat32, {1, 3, 2, 2},
                                reinterpret_cast<void *>(input_data_.data()), 12 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{in_tensor, in_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(add_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{-2, 4, -6, 8, -10, 12, -14, 16, -18, 20, -22, 24};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
247
// Argmax along the last axis of a 3x3 matrix: index of the max in each row.
TEST_F(KernelExecutorTest, TestArgMax) {
  auto argmax_op = std::make_shared<ops::Argmax>();
  argmax_op->set_axis(-1);
  std::vector<float> data{1, 20, 5, 67, 8, 9, 130, 24, 15};
  mindspore::MSTensor in_tensor("Argmax", mindspore::DataType::kNumberTypeFloat32, {3, 3},
                                reinterpret_cast<void *>(data.data()), 9 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{in_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(argmax_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<int32_t> expected{1, 0, 0};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<int32_t *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0));
}
264
// Argmin along the last axis of a 1-D tensor: index of the smallest value.
TEST_F(KernelExecutorTest, TestArgMin) {
  auto argmin_op = std::make_shared<ops::Argmin>();
  argmin_op->set_axis(-1);
  std::vector<float> data{2.0, 3.1, 1.2};
  mindspore::MSTensor in_tensor("input", mindspore::DataType::kNumberTypeFloat32, {3},
                                reinterpret_cast<void *>(data.data()), 3 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{in_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(argmin_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<int32_t> expected{2};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<int32_t *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0));
}
281
// 2x2 average pooling with stride 1 over a 1x3x4x3 NHWC tensor.
TEST_F(KernelExecutorTest, TestAvgPool) {
  auto pool_op = std::make_shared<ops::AvgPool>();
  pool_op->set_kernel_size({2, 2});
  pool_op->set_strides({1, 1});
  std::vector<float> data{0, 12, 24, 1, 13, 25, 2, 14, 26, 3, 15, 27, 4, 16, 28, 5, 17, 29,
                          6, 18, 30, 7, 19, 31, 8, 20, 32, 9, 21, 33, 10, 22, 34, 11, 23, 35};
  mindspore::MSTensor in_tensor("input", mindspore::DataType::kNumberTypeFloat32, {1, 3, 4, 3},
                                reinterpret_cast<void *>(data.data()), 36 * sizeof(float));
  in_tensor.SetFormat(mindspore::Format::NHWC);
  std::vector<mindspore::MSTensor> in_tensors{in_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(pool_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{2.5, 14.5, 26.5, 3.5, 15.5, 27.5, 4.5, 16.5, 28.5,
                              6.5, 18.5, 30.5, 7.5, 19.5, 31.5, 8.5, 20.5, 32.5};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
302
// BatchNorm in training mode with all-ones data/scale/bias/mean/variance:
// the output stays all ones.
TEST_F(KernelExecutorTest, TestBatchNorm) {
  auto bn_op = std::make_shared<ops::BatchNorm>();
  bn_op->set_is_training(true);
  std::vector<float> ones{1, 1, 1, 1};
  std::vector<mindspore::MSTensor> in_tensors;
  std::vector<mindspore::MSTensor> out_tensors;
  // Data tensor first, then scale, bias, mean and variance — all five share
  // the same all-ones backing buffer.
  in_tensors.emplace_back("input", mindspore::DataType::kNumberTypeFloat32, std::vector<int64_t>{1, 1, 2, 2},
                          reinterpret_cast<void *>(ones.data()), 4 * sizeof(float));
  for (int i = 0; i < 4; ++i) {
    in_tensors.emplace_back("input", mindspore::DataType::kNumberTypeFloat32, std::vector<int64_t>{2},
                            reinterpret_cast<void *>(ones.data()), 2 * sizeof(float));
  }

  ASSERT_EQ(kernel_executor_->Build(bn_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{1, 1, 1, 1};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
331
// Ceil rounds each element up to the nearest integer.
TEST_F(KernelExecutorTest, TestCeil) {
  auto ceil_op = std::make_shared<ops::Ceil>();
  std::vector<float> data{1.1, 2.5, -1.5};
  mindspore::MSTensor in_tensor("input", mindspore::DataType::kNumberTypeFloat32, {3},
                                reinterpret_cast<void *>(data.data()), 3 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{in_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(ceil_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{2, 3, -1};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
347
// Concat two 2x2 matrices along axis 1 into a 2x4 result.
TEST_F(KernelExecutorTest, TestConcat) {
  auto concat_op = std::make_shared<ops::Concat>();
  concat_op->set_axis(1);
  std::vector<float> data{0, 1, 2, 1};
  mindspore::MSTensor in_tensor("input", mindspore::DataType::kNumberTypeFloat32, {2, 2},
                                reinterpret_cast<void *>(data.data()), 4 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{in_tensor, in_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(concat_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{0, 1, 0, 1, 2, 1, 2, 1};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
365
// Conv2D 3x3 / stride 1 / no padding: spatial dims shrink 32 -> 30; only the
// output shape is checked.
TEST_F(KernelExecutorTest, TestConv2D) {
  constexpr int kBatch = 10;
  constexpr int kInHW = 32;
  constexpr int kOutHW = 30;
  constexpr int kChannel = 32;
  constexpr int kKernelSize = 3;
  auto conv_op = std::make_shared<ops::Conv2D>();
  conv_op->set_out_channel(kChannel);
  conv_op->set_kernel_size({kKernelSize, kKernelSize});
  conv_op->set_stride({1, 1});
  conv_op->set_dilation({1, 1});
  conv_op->set_group(1);
  // One shared all-ones buffer backs both the input and the weights.
  std::vector<float> data(kBatch * kInHW * kInHW * kChannel, 1);
  mindspore::MSTensor in_tensor("input", mindspore::DataType::kNumberTypeFloat32, {kBatch, kInHW, kInHW, kChannel},
                                reinterpret_cast<void *>(data.data()),
                                kBatch * kInHW * kInHW * kChannel * sizeof(float));
  mindspore::MSTensor weight_tensor("input", mindspore::DataType::kNumberTypeFloat32,
                                    {kChannel, kKernelSize, kKernelSize, kChannel},
                                    reinterpret_cast<void *>(data.data()),
                                    kChannel * kKernelSize * kKernelSize * kChannel * sizeof(float));
  in_tensor.SetFormat(mindspore::Format::NHWC);
  weight_tensor.SetFormat(mindspore::Format::NHWC);
  std::vector<mindspore::MSTensor> in_tensors{in_tensor, weight_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(conv_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);
  const std::vector<int64_t> expected_shape{kBatch, kOutHW, kOutHW, kChannel};
  ASSERT_EQ(out_tensors[0].Shape(), expected_shape);
}
397
// Conv2DTranspose 3x3 / stride 1: spatial dims grow 30 -> 32; only the output
// shape is checked.
TEST_F(KernelExecutorTest, TestConv2DTranspose) {
  constexpr int kBatch = 10;
  constexpr int kInHW = 30;
  constexpr int kOutHW = 32;
  constexpr int kChannel = 32;
  constexpr int kKernelSize = 3;
  auto deconv_op = std::make_shared<ops::Conv2DTranspose>();
  deconv_op->set_in_channel(kChannel);
  deconv_op->set_out_channel(kChannel);
  deconv_op->set_kernel_size({kKernelSize, kKernelSize});
  deconv_op->set_stride({1, 1});
  deconv_op->set_dilation({1, 1});
  deconv_op->set_group(1);
  // One shared all-ones buffer backs both the input and the weights.
  std::vector<float> data(kBatch * kInHW * kInHW * kChannel, 1);
  mindspore::MSTensor in_tensor("input", mindspore::DataType::kNumberTypeFloat32, {kBatch, kInHW, kInHW, kChannel},
                                reinterpret_cast<void *>(data.data()),
                                kBatch * kInHW * kInHW * kChannel * sizeof(float));
  mindspore::MSTensor weight_tensor("input", mindspore::DataType::kNumberTypeFloat32,
                                    {kChannel, kKernelSize, kKernelSize, kChannel},
                                    reinterpret_cast<void *>(data.data()),
                                    kChannel * kKernelSize * kKernelSize * kChannel * sizeof(float));
  in_tensor.SetFormat(mindspore::Format::NHWC);
  weight_tensor.SetFormat(mindspore::Format::NHWC);
  std::vector<mindspore::MSTensor> in_tensors{in_tensor, weight_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(deconv_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);
  const std::vector<int64_t> expected_shape{kBatch, kOutHW, kOutHW, kChannel};
  ASSERT_EQ(out_tensors[0].Shape(), expected_shape);
}
430
// Element-wise division of two 1-D tensors.
TEST_F(KernelExecutorTest, TestDiv) {
  auto div_op = std::make_shared<ops::Div>();
  std::vector<float> numerators{-4, 5, 6};
  std::vector<float> denominators{3, 2, 3};
  mindspore::MSTensor lhs("input", mindspore::DataType::kNumberTypeFloat32, {3},
                          reinterpret_cast<void *>(numerators.data()), 3 * sizeof(float));
  mindspore::MSTensor rhs("input", mindspore::DataType::kNumberTypeFloat32, {3},
                          reinterpret_cast<void *>(denominators.data()), 3 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{lhs, rhs};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(div_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{-1.33333, 2.5, 2};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
450
// Equal with broadcasting ({3} against {1}) builds and executes successfully;
// the output payload itself is not checked.
TEST_F(KernelExecutorTest, TestEqual) {
  auto equal_op = std::make_shared<ops::Equal>();
  std::vector<float> lhs_data{1, 2, 3};
  std::vector<float> rhs_data{2};
  mindspore::MSTensor lhs("input", mindspore::DataType::kNumberTypeFloat32, {3},
                          reinterpret_cast<void *>(lhs_data.data()), 3 * sizeof(float));
  mindspore::MSTensor rhs("input", mindspore::DataType::kNumberTypeFloat32, {1},
                          reinterpret_cast<void *>(rhs_data.data()), 1 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{lhs, rhs};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(equal_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);
}
467
// Flatten keeps the batch dimension and collapses the rest: {1,2,3,4} -> {1,24}.
TEST_F(KernelExecutorTest, TestFlatten) {
  auto flatten_op = std::make_shared<ops::Flatten>();
  std::vector<float> ones(24, 1);
  mindspore::MSTensor in_tensor("input", mindspore::DataType::kNumberTypeFloat32, {1, 2, 3, 4},
                                reinterpret_cast<void *>(ones.data()), 24 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{in_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(flatten_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);
  const std::vector<int64_t> expected_shape{1, 24};
  ASSERT_EQ(out_tensors[0].Shape(), expected_shape);
}
482
// Gather: the expected output is rows 0 and 2 of the 3x4 input matrix.
TEST_F(KernelExecutorTest, TestGather) {
  auto op = std::make_shared<ops::Gather>();
  std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  // Backing buffer shared by the indices tensor ({0, 2} used) and the axis
  // tensor ({0} used).
  // NOTE(review): both are declared float32 although gather indices/axis are
  // conventionally int32 — confirm this is intentional.
  std::vector<float> input_data2{0, 2, 4, 2, 6};
  std::vector<mindspore::MSTensor> inputs;
  std::vector<mindspore::MSTensor> outputs;
  mindspore::MSTensor input("input", mindspore::DataType::kNumberTypeFloat32, {3, 4},
                            reinterpret_cast<void *>(input_data.data()), 12 * sizeof(float));
  mindspore::MSTensor input2("input", mindspore::DataType::kNumberTypeFloat32, {2},
                             reinterpret_cast<void *>(input_data2.data()), 2 * sizeof(float));
  mindspore::MSTensor input3("input", mindspore::DataType::kNumberTypeFloat32, {1},
                             reinterpret_cast<void *>(input_data2.data()), 1 * sizeof(float));
  inputs.emplace_back(input);
  inputs.emplace_back(input2);
  inputs.emplace_back(input3);

  ASSERT_EQ(kernel_executor_->Build(op, inputs, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(inputs, &outputs), mindspore::kSuccess);
  // Rows 0 and 2 of the input matrix.
  float correct[] = {1, 2, 3, 4, 9, 10, 11, 12};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0].MutableData()), correct, outputs[0].ElementNum(),
                                 0.0001));
}
505
// GatherNd: indices {{0,0},{1,1}} pick elements [0][0] and [1][1] of a 2x3 input.
TEST_F(KernelExecutorTest, TestGatherNd) {
  auto gather_nd_op = std::make_shared<ops::GatherNd>();
  std::vector<float> values{-0.1, 0.3, 3.6, 0.4, 0.5, -3.2};
  std::vector<int32_t> indices{0, 0, 1, 1};
  mindspore::MSTensor value_tensor("input", mindspore::DataType::kNumberTypeFloat32, {2, 3},
                                   reinterpret_cast<void *>(values.data()), 6 * sizeof(float));
  mindspore::MSTensor index_tensor("input", mindspore::DataType::kNumberTypeInt32, {2, 2},
                                   reinterpret_cast<void *>(indices.data()), 4 * sizeof(int32_t));
  std::vector<mindspore::MSTensor> in_tensors{value_tensor, index_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(gather_nd_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{-0.1, 0.5};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
525
// MatMul of an all-ones {1,3} by an all-ones {3,4}: each output element is 3.
TEST_F(KernelExecutorTest, TestMatMul) {
  auto matmul_op = std::make_shared<ops::MatMul>();
  std::vector<float> ones(12, 1);
  mindspore::MSTensor lhs("input", mindspore::DataType::kNumberTypeFloat32, {1, 3},
                          reinterpret_cast<void *>(ones.data()), 3 * sizeof(float));
  mindspore::MSTensor rhs("input", mindspore::DataType::kNumberTypeFloat32, {3, 4},
                          reinterpret_cast<void *>(ones.data()), 12 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{lhs, rhs};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(matmul_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{3, 3, 3, 3};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
544
// Element-wise Maximum of two 1-D tensors.
TEST_F(KernelExecutorTest, TestMaximum) {
  auto max_op = std::make_shared<ops::Maximum>();
  std::vector<float> lhs_data{1, 5, 3};
  std::vector<float> rhs_data{4, 2, 6};
  mindspore::MSTensor lhs("input", mindspore::DataType::kNumberTypeFloat32, {3},
                          reinterpret_cast<void *>(lhs_data.data()), 3 * sizeof(float));
  mindspore::MSTensor rhs("input", mindspore::DataType::kNumberTypeFloat32, {3},
                          reinterpret_cast<void *>(rhs_data.data()), 3 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{lhs, rhs};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(max_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{4, 5, 6};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
564
// 2x2 max pooling with stride 1 over a 1x3x4x3 NHWC tensor.
TEST_F(KernelExecutorTest, TestMaxPool) {
  auto pool_op = std::make_shared<ops::MaxPool>();
  pool_op->set_kernel_size({2, 2});
  pool_op->set_strides({1, 1});
  std::vector<float> data{0, 12, 24, 1, 13, 25, 2, 14, 26, 3, 15, 27, 4, 16, 28, 5, 17, 29,
                          6, 18, 30, 7, 19, 31, 8, 20, 32, 9, 21, 33, 10, 22, 34, 11, 23, 35};
  mindspore::MSTensor in_tensor("input", mindspore::DataType::kNumberTypeFloat32, {1, 3, 4, 3},
                                reinterpret_cast<void *>(data.data()), 36 * sizeof(float));
  in_tensor.SetFormat(mindspore::Format::NHWC);
  std::vector<mindspore::MSTensor> in_tensors{in_tensor};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(pool_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{5, 17, 29, 6, 18, 30, 7, 19, 31, 9, 21, 33, 10, 22, 34, 11, 23, 35};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
584
// Element-wise Minimum of two 1-D tensors.
TEST_F(KernelExecutorTest, TestMinimum) {
  auto min_op = std::make_shared<ops::Minimum>();
  std::vector<float> lhs_data{1, 5, 3};
  std::vector<float> rhs_data{4, 2, 6};
  mindspore::MSTensor lhs("input", mindspore::DataType::kNumberTypeFloat32, {3},
                          reinterpret_cast<void *>(lhs_data.data()), 3 * sizeof(float));
  mindspore::MSTensor rhs("input", mindspore::DataType::kNumberTypeFloat32, {3},
                          reinterpret_cast<void *>(rhs_data.data()), 3 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{lhs, rhs};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(min_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{1, 2, 3};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
604
// Element-wise Mul of two 1-D tensors.
TEST_F(KernelExecutorTest, TestMul) {
  auto mul_op = std::make_shared<ops::Mul>();
  std::vector<float> lhs_data{1, 2, 3};
  std::vector<float> rhs_data{4, 5, 6};
  mindspore::MSTensor lhs("input", mindspore::DataType::kNumberTypeFloat32, {3},
                          reinterpret_cast<void *>(lhs_data.data()), 3 * sizeof(float));
  mindspore::MSTensor rhs("input", mindspore::DataType::kNumberTypeFloat32, {3},
                          reinterpret_cast<void *>(rhs_data.data()), 3 * sizeof(float));
  std::vector<mindspore::MSTensor> in_tensors{lhs, rhs};
  std::vector<mindspore::MSTensor> out_tensors;

  ASSERT_EQ(kernel_executor_->Build(mul_op, in_tensors, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(in_tensors, &out_tensors), mindspore::kSuccess);

  std::vector<float> expected{4, 10, 18};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(out_tensors[0].MutableData()), expected.data(),
                                 out_tensors[0].ElementNum(), 0.0001));
}
624
// Zero-pad a 2x3 float32 tensor; paddings {1, 2, 2, 1} mean dim0 gets (1 before, 2 after)
// and dim1 gets (2 before, 1 after), producing a 5x6 output.
TEST_F(KernelExecutorTest, TestPad) {
  auto op = std::make_shared<ops::Pad>();
  std::vector<float> input_data{-0.1, 0.3, 3.6, 0.4, 0.5, -3.2};
  std::vector<int32_t> input_data2{1, 2, 2, 1};
  std::vector<mindspore::MSTensor> inputs;
  std::vector<mindspore::MSTensor> outputs;
  // Derive the byte sizes from the backing vectors instead of hard-coding element counts.
  mindspore::MSTensor input("input", mindspore::DataType::kNumberTypeFloat32, {2, 3},
                            reinterpret_cast<void *>(input_data.data()), input_data.size() * sizeof(float));
  mindspore::MSTensor input2("input", mindspore::DataType::kNumberTypeInt32, {4},
                             reinterpret_cast<void *>(input_data2.data()), input_data2.size() * sizeof(int32_t));
  inputs.emplace_back(input);
  inputs.emplace_back(input2);

  ASSERT_EQ(kernel_executor_->Build(op, inputs, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(inputs, &outputs), mindspore::kSuccess);
  // Guard: indexing outputs[0] on an empty vector would be undefined behavior.
  ASSERT_EQ(outputs.empty(), false);
  float correct[] = {0, 0, 0, 0, 0, 0, 0, 0, -0.1, 0.3, 3.6, 0, 0, 0, 0.4,
                     0.5, -3.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0].MutableData()), correct, outputs[0].ElementNum(),
                                 0.0001));
}
645
// PReLU with per-channel weights on an NHWC tensor: negatives in channel c are
// scaled by weight[c], non-negatives pass through unchanged.
TEST_F(KernelExecutorTest, TestPReLU) {
  auto op = std::make_shared<ops::PReLU>();
  std::vector<float> input_data{-6, -4, -2, -5, -3, -1, 0, 2, 4, 1, 3, 5};
  std::vector<float> input_data2{0.1, 0.6, -0.3};
  std::vector<mindspore::MSTensor> inputs;
  std::vector<mindspore::MSTensor> outputs;
  // Derive the byte sizes from the backing vectors instead of hard-coding element counts.
  mindspore::MSTensor input("input", mindspore::DataType::kNumberTypeFloat32, {2, 1, 2, 3},
                            reinterpret_cast<void *>(input_data.data()), input_data.size() * sizeof(float));
  mindspore::MSTensor input2("input", mindspore::DataType::kNumberTypeFloat32, {3},
                             reinterpret_cast<void *>(input_data2.data()), input_data2.size() * sizeof(float));
  // Weights are per-channel, so the data layout must be declared as NHWC.
  input.SetFormat(mindspore::Format::NHWC);
  inputs.emplace_back(input);
  inputs.emplace_back(input2);

  ASSERT_EQ(kernel_executor_->Build(op, inputs, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(inputs, &outputs), mindspore::kSuccess);
  // Guard: indexing outputs[0] on an empty vector would be undefined behavior.
  ASSERT_EQ(outputs.empty(), false);
  float correct[] = {-0.6, -2.4, 0.6, -0.5, -1.8, 0.3, 0, 2, 4, 1, 3, 5};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0].MutableData()), correct, outputs[0].ElementNum(),
                                 0.0001));
}
666
// Reshape a 2x3 float32 tensor to 3x2: data is unchanged, only the shape differs.
TEST_F(KernelExecutorTest, TestReshape) {
  auto op = std::make_shared<ops::Reshape>();
  std::vector<float> input_data{-0.1, 0.3, 3.6, 0.4, 0.5, -3.2};
  std::vector<int32_t> input_data2{3, 2};
  std::vector<mindspore::MSTensor> inputs;
  std::vector<mindspore::MSTensor> outputs;
  // Derive the byte sizes from the backing vectors instead of hard-coding element counts.
  mindspore::MSTensor input("input", mindspore::DataType::kNumberTypeFloat32, {2, 3},
                            reinterpret_cast<void *>(input_data.data()), input_data.size() * sizeof(float));
  mindspore::MSTensor input2("input", mindspore::DataType::kNumberTypeInt32, {2},
                             reinterpret_cast<void *>(input_data2.data()), input_data2.size() * sizeof(int32_t));
  inputs.emplace_back(input);
  inputs.emplace_back(input2);

  ASSERT_EQ(kernel_executor_->Build(op, inputs, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(inputs, &outputs), mindspore::kSuccess);
  // Guard: indexing outputs[0] on an empty vector would be undefined behavior.
  ASSERT_EQ(outputs.empty(), false);
  float correct[] = {-0.1, 0.3, 3.6, 0.4, 0.5, -3.2};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0].MutableData()), correct, outputs[0].ElementNum(),
                                 0.0001));
  // The output shape must reflect the requested {3, 2} target.
  std::vector<int64_t> shape{3, 2};
  ASSERT_EQ(outputs[0].Shape(), shape);
}
688
// Softmax over the last axis of a 1x5 float32 tensor; outputs sum to 1.
TEST_F(KernelExecutorTest, TestSoftmax) {
  auto op = std::make_shared<ops::Softmax>();
  op->set_axis({-1});
  std::vector<float> input_data{1, 2, 3, 4, 5};
  std::vector<mindspore::MSTensor> inputs;
  std::vector<mindspore::MSTensor> outputs;
  // Derive the byte size from the backing vector instead of hard-coding the element count.
  mindspore::MSTensor input("input", mindspore::DataType::kNumberTypeFloat32, {1, 5},
                            reinterpret_cast<void *>(input_data.data()), input_data.size() * sizeof(float));
  inputs.emplace_back(input);

  ASSERT_EQ(kernel_executor_->Build(op, inputs, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(inputs, &outputs), mindspore::kSuccess);
  // Guard: indexing outputs[0] on an empty vector would be undefined behavior.
  ASSERT_EQ(outputs.empty(), false);
  float correct[] = {0.0116558, 0.0316853, 0.0861187, 0.234124, 0.636416};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0].MutableData()), correct, outputs[0].ElementNum(),
                                 0.0001));
}
705
// StridedSlice of a 3x2x3 tensor with begin {1,0,0}, end {2,1,3}, strides {1,1,1}:
// selects input[1:2, 0:1, 0:3], i.e. the three elements {3, 3, 3}.
TEST_F(KernelExecutorTest, TestStridedSlice) {
  auto op = std::make_shared<ops::StridedSlice>();
  std::vector<float> input_data{1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6};
  std::vector<int32_t> input_data2{1, 0, 0};
  std::vector<int32_t> input_data3{2, 1, 3};
  std::vector<int32_t> input_data4{1, 1, 1};
  std::vector<mindspore::MSTensor> inputs;
  std::vector<mindspore::MSTensor> outputs;
  // Derive the byte sizes from the backing vectors instead of hard-coding element counts.
  mindspore::MSTensor input("input", mindspore::DataType::kNumberTypeFloat32, {3, 2, 3},
                            reinterpret_cast<void *>(input_data.data()), input_data.size() * sizeof(float));
  mindspore::MSTensor input2("input", mindspore::DataType::kNumberTypeInt32, {3},
                             reinterpret_cast<void *>(input_data2.data()), input_data2.size() * sizeof(int32_t));
  mindspore::MSTensor input3("input", mindspore::DataType::kNumberTypeInt32, {3},
                             reinterpret_cast<void *>(input_data3.data()), input_data3.size() * sizeof(int32_t));
  mindspore::MSTensor input4("input", mindspore::DataType::kNumberTypeInt32, {3},
                             reinterpret_cast<void *>(input_data4.data()), input_data4.size() * sizeof(int32_t));
  inputs.emplace_back(input);
  inputs.emplace_back(input2);
  inputs.emplace_back(input3);
  inputs.emplace_back(input4);

  ASSERT_EQ(kernel_executor_->Build(op, inputs, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(inputs, &outputs), mindspore::kSuccess);
  // Guard: indexing outputs[0] on an empty vector would be undefined behavior.
  ASSERT_EQ(outputs.empty(), false);
  float correct[] = {3, 3, 3};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0].MutableData()), correct, outputs[0].ElementNum(),
                                 0.0001));
}
733
// TopK with k=3, sorted=true on {1..5}: the first output holds the top values
// in descending order {5, 4, 3}. Only the values output is checked here.
TEST_F(KernelExecutorTest, TestTopK) {
  auto op = std::make_shared<ops::TopK>();
  op->set_sorted(true);
  std::vector<float> input_data{1, 2, 3, 4, 5};
  std::vector<int32_t> input_data2{3};
  std::vector<mindspore::MSTensor> inputs;
  std::vector<mindspore::MSTensor> outputs;
  // Derive the byte sizes from the backing vectors instead of hard-coding element counts.
  mindspore::MSTensor input("input", mindspore::DataType::kNumberTypeFloat32, {5},
                            reinterpret_cast<void *>(input_data.data()), input_data.size() * sizeof(float));
  mindspore::MSTensor input2("input", mindspore::DataType::kNumberTypeInt32, {1},
                             reinterpret_cast<void *>(input_data2.data()), input_data2.size() * sizeof(int32_t));
  inputs.emplace_back(input);
  inputs.emplace_back(input2);

  ASSERT_EQ(kernel_executor_->Build(op, inputs, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(inputs, &outputs), mindspore::kSuccess);
  // Guard: indexing outputs[0] on an empty vector would be undefined behavior.
  ASSERT_EQ(outputs.empty(), false);
  float correct[] = {5, 4, 3};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0].MutableData()), correct, outputs[0].ElementNum(),
                                 0.0001));
}
754
// Transpose a 2x2x3 float32 tensor with permutation {0, 2, 1}, swapping the
// last two axes to produce a 2x3x2 result.
TEST_F(KernelExecutorTest, TestTranspose) {
  auto op = std::make_shared<ops::Transpose>();
  std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  std::vector<int32_t> input_data2{0, 2, 1};
  std::vector<mindspore::MSTensor> inputs;
  std::vector<mindspore::MSTensor> outputs;
  // Derive the byte sizes from the backing vectors instead of hard-coding element counts.
  mindspore::MSTensor input("input", mindspore::DataType::kNumberTypeFloat32, {2, 2, 3},
                            reinterpret_cast<void *>(input_data.data()), input_data.size() * sizeof(float));
  mindspore::MSTensor input2("input", mindspore::DataType::kNumberTypeInt32, {3},
                             reinterpret_cast<void *>(input_data2.data()), input_data2.size() * sizeof(int32_t));
  inputs.emplace_back(input);
  inputs.emplace_back(input2);

  ASSERT_EQ(kernel_executor_->Build(op, inputs, context_), mindspore::kSuccess);
  ASSERT_EQ(kernel_executor_->Execute(inputs, &outputs), mindspore::kSuccess);
  // Guard: indexing outputs[0] on an empty vector would be undefined behavior.
  ASSERT_EQ(outputs.empty(), false);
  float correct[] = {1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12};
  ASSERT_EQ(0, CompareOutputData(reinterpret_cast<float *>(outputs[0].MutableData()), correct, outputs[0].ElementNum(),
                                 0.0001));
}
774 } // namespace mindspore
775