/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/arm/fp32/log_softmax_fp32.h"
#include <cstring>
#include <vector>
#include "nnacl/fp32/log_softmax_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"

using mindspore::kernel::KERNEL_ARCH;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_LogSoftmax;

namespace mindspore::kernel {
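// Releases the scratch buffer allocated in ReSize().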
LogSoftmaxCPUKernel::~LogSoftmaxCPUKernel() {
  if (tmp_data_ != nullptr) {
    free(tmp_data_);
    tmp_data_ = nullptr;
  }
}

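// Initializes the base softmax kernel; buffer allocation is deferred to
// ReSize() until the input shape has been inferred.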
int LogSoftmaxCPUKernel::Init() {
  auto ret = SoftmaxBaseCPUKernel::Init();
  if (ret != RET_OK) {
    return ret;
  }

  if (!InferShapeDone()) {
    return RET_OK;
  }
  return ReSize();
}

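// Recomputes the plane sizes around the softmax axis and (re)allocates the
// scratch buffer used for intermediate results.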
int LogSoftmaxCPUKernel::ReSize() {
  auto ret = SoftmaxBaseCPUKernel::ReSize();
  if (ret != RET_OK) {
    return ret;
  }
  auto n_dim = softmax_param_->n_dim_;
  auto axis = softmax_param_->axis_;
  auto in_shape = in_tensors_.front()->shape();
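  // out_plane_size: product of the dimensions before the softmax axis;
  // in_plane_size: product of the dimensions after it.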
  int out_plane_size = 1;
  for (int i = 0; i < axis; ++i) {
    out_plane_size *= in_shape.at(i);
  }
  int in_plane_size = 1;
  for (int i = axis + 1; i < n_dim; i++) {
    in_plane_size *= in_shape.at(i);
  }
  in_plane_size_ = in_plane_size;
  out_plane_size_ = out_plane_size;
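  // When the axis is the last dimension (in_plane_size == 1), the parallel
  // path needs scratch space for the whole tensor; otherwise one plane of
  // out_plane_size * in_plane_size floats is enough.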
  auto tmp_data_size =
    in_plane_size == 1 ? out_plane_size * in_plane_size * in_shape.at(axis) : out_plane_size * in_plane_size;
  if (tmp_data_ != nullptr) {
    free(tmp_data_);
  }
  tmp_data_ = reinterpret_cast<float *>(malloc(tmp_data_size * sizeof(float)));
  if (tmp_data_ == nullptr) {
    MS_LOG(ERROR) << "malloc data for log_softmax fail!";
    return RET_ERROR;
  }
  return RET_OK;
}

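// Computes log-softmax over the last axis for the slice of outer planes
// assigned to this task.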
int LogSoftmaxCPUKernel::DoLogSoftmaxLastAxis(int task_id) {
  MS_CHECK_FALSE(op_parameter_->thread_num_ == 0, RET_ERROR);
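  // Split the outer planes evenly across threads; this task handles
  // planes [begin, end).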
  int unit = UP_DIV(out_plane_size_, op_parameter_->thread_num_);
  int begin = task_id * unit;
  int end = MSMIN(begin + unit, out_plane_size_);
  int channel = softmax_param_->input_shape_[softmax_param_->axis_];
  int offset = begin * channel;
  auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->data());
  CHECK_NULL_RETURN(input_ptr);
  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->data());
  CHECK_NULL_RETURN(output_ptr);
  LogSoftmaxLastAxis(input_ptr + offset, output_ptr + offset, tmp_data_ + offset, end - begin, channel);
  return RET_OK;
}

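// ParallelLaunch trampoline: forwards each task to the kernel instance
// (lhs_scale and rhs_scale are unused here).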
int LogSoftmaxLastAxisRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  auto kernel = reinterpret_cast<LogSoftmaxCPUKernel *>(cdata);
  CHECK_NULL_RETURN(kernel);
  auto ret = kernel->DoLogSoftmaxLastAxis(task_id);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "DoLogSoftmaxLastAxis error task_id: " << task_id << ", ret: " << ret;
  }
  return ret;
}

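// Dispatches to the multi-threaded last-axis path when possible, otherwise
// falls back to the generic single-pass implementation.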
int LogSoftmaxCPUKernel::Run() {
  int ret = RET_OK;
  if (in_plane_size_ == 1) {
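    // Softmax axis is the last dimension: parallelize over the outer planes.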
    ret = ParallelLaunch(this->ms_context_, LogSoftmaxLastAxisRun, this, op_parameter_->thread_num_);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "LogSoftmaxCPUKernel ParallelLaunch failed, ret: " << ret;
    }
  } else {
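    // Generic path: the axis is an inner dimension, handled in a single pass.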
    auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->data());
    CHECK_NULL_RETURN(input_ptr);
    auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->data());
    CHECK_NULL_RETURN(output_ptr);
    CHECK_NULL_RETURN(tmp_data_);
    LogSoftmax(input_ptr, output_ptr, tmp_data_, softmax_param_);
  }
  return ret;
}

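// Registers this kernel as the float32 LogSoftmax implementation on CPU.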
REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LogSoftmax, LiteKernelCreator<LogSoftmaxCPUKernel>)
}  // namespace mindspore::kernel