/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/arm/fp32/unstack_fp32.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"

using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Unstack;

namespace mindspore::kernel {
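// Init: checks that exactly one input tensor and at least one output tensor are attached and that the
// operator parameter is valid; shape-dependent setup is deferred to ReSize() until shape inference is done.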
int UnstackCPUKernel::Init() {
  MS_CHECK_TRUE_RET(in_tensors_.size() == 1, RET_ERROR);
  MS_CHECK_TRUE_RET(out_tensors_.size() >= 1, RET_ERROR);
  CHECK_NULL_RETURN(in_tensors_.front());
  CHECK_NULL_RETURN(out_tensors_.front());
  CHECK_NULL_RETURN(op_parameter_);
  if (!InferShapeDone()) {
    return RET_OK;
  }
  return ReSize();
}

int UnstackCPUKernel::ReSize() {
  auto input = in_tensors_.at(0);
  size_t shape_size = input->shape().size();

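  // Flatten the input shape around the unstack axis: pre_dims_ is the product of the dimensions
  // before the axis, axis_dim_ is the size of the axis itself (the number of output tensors), and
  // after_dims_ is the product of the dimensions after the axis. A negative axis is normalized first.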
  auto para = reinterpret_cast<UnstackParameter *>(op_parameter_);
  para->pre_dims_ = 1;
  para->axis_dim_ = 1;
  para->after_dims_ = 1;
  if (para->axis_ < 0) {
    para->axis_ += static_cast<int>(shape_size);
  }

  for (size_t i = 0; i < shape_size; i++) {
    if (static_cast<int>(i) < para->axis_) {
      para->pre_dims_ *= input->DimensionSize(i);
    } else if (static_cast<int>(i) > para->axis_) {
      para->after_dims_ *= input->DimensionSize(i);
    } else {
      para->axis_dim_ = input->DimensionSize(i);
    }
  }
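  // (Re)allocate the scratch array holding one raw data pointer per output tensor; it is freed first
  // so that repeated ReSize() calls do not leak the previous allocation.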
  if (output_addr_array_ != nullptr) {
    free(output_addr_array_);
    output_addr_array_ = nullptr;
  }
  MS_CHECK_FALSE_MSG(SIZE_MUL_OVERFLOW(sizeof(void *), out_tensors_.size()), RET_ERROR, "mul overflow");
  output_addr_array_ = reinterpret_cast<void **>(malloc(sizeof(void *) * out_tensors_.size()));
  if (output_addr_array_ == nullptr) {
    MS_LOG(ERROR) << "Failed to malloc memory";
    return RET_ERROR;
  }
  return RET_OK;
}

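// Run: gathers the raw output buffers, then copies each slice of the float32 input along the
// configured axis into the corresponding output in a single Unstack pass.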
int UnstackCPUKernel::Run() {
  float *input = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData());
  CHECK_NULL_RETURN(input);
  size_t out_num = out_tensors_.size();
  for (size_t i = 0; i < out_num; i++) {
    output_addr_array_[i] = out_tensors_.at(i)->data();
    CHECK_NULL_RETURN(output_addr_array_[i]);
  }
  auto para = reinterpret_cast<UnstackParameter *>(op_parameter_);
  para->num_ = static_cast<int>(out_num);
  Unstack(input, output_addr_array_, para, sizeof(float));
  return RET_OK;
}

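// Register this class as the fp32 CPU kernel implementation of the Unstack primitive.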
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Unstack, LiteKernelCreator<UnstackCPUKernel>)
}  // namespace mindspore::kernel