/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/runtime/kernel/arm/fp32/softmax_fp32.h"
#include <cstring>
#include <vector>
#include "nnacl/fp32/softmax_fp32.h"
#include "schema/model_generated.h"
#include "src/kernel_registry.h"
#include "include/errorcode.h"

using mindspore::kernel::KERNEL_ARCH;
using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Softmax;

namespace mindspore::kernel {
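// Init validates that at least one input and one output tensor are present,
// delegates common setup to the base class, and defers the shape-dependent
// work to ReSize() until shape inference has completed.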
int SoftmaxCPUKernel::Init() {
  CHECK_LESS_RETURN(in_tensors_.size(), 1);
  CHECK_LESS_RETURN(out_tensors_.size(), 1);
  auto ret = SoftmaxBaseCPUKernel::Init();
  if (ret != RET_OK) {
    return ret;
  }

  if (!InferShapeDone()) {
    return RET_OK;
  }
  return ReSize();
}

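// ReSize decomposes the input shape around the softmax axis. For a shape
// (d_0, ..., d_{n-1}) with softmax axis a:
//   out_plane_size_ = d_0 * ... * d_{a-1}     (rows outside the axis)
//   in_plane_size_  = d_{a+1} * ... * d_{n-1} (stride inside the axis)
// When in_plane_size_ > 1 the softmax is not along the innermost, contiguous
// dimension, so a scratch buffer (sum_data_) is allocated to hold the
// per-position exponential sums used by the generic path.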
int SoftmaxCPUKernel::ReSize() {
  auto ret = SoftmaxBaseCPUKernel::ReSize();
  if (ret != RET_OK) {
    return ret;
  }
  auto n_dim = softmax_param_->n_dim_;
  auto axis = softmax_param_->axis_;
  auto in_shape = in_tensors_.front()->shape();
  int out_plane_size = 1;
  for (int i = 0; i < axis; ++i) {
    out_plane_size *= in_shape.at(i);
  }
  int in_plane_size = 1;
  for (int i = axis + 1; i < n_dim; i++) {
    in_plane_size *= in_shape.at(i);
  }
  in_plane_size_ = in_plane_size;
  out_plane_size_ = out_plane_size;
  if (in_plane_size_ > 1) {
    // Reallocate the scratch buffer whenever the shape changes.
    if (sum_data_ != nullptr) {
      free(sum_data_);
    }
    CHECK_LESS_RETURN(MAX_MALLOC_SIZE, out_plane_size_ * in_plane_size_ * sizeof(float));
    sum_data_ = reinterpret_cast<float *>(malloc(out_plane_size * in_plane_size * sizeof(float)));
    if (sum_data_ == nullptr) {
      MS_LOG(ERROR) << "malloc data for softmax failed!";
      return RET_ERROR;
    }
  }
  return RET_OK;
}

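// Fast path for softmax along the last (contiguous) axis. The out_plane_size_
// rows are split evenly across threads: task `task_id` processes rows
// [task_id * unit, min((task_id + 1) * unit, out_plane_size_)), each of
// length `channel`, with overflow checks on the index arithmetic.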
int SoftmaxCPUKernel::DoSoftmaxLastAxis(int task_id) {
  int unit = UP_DIV(out_plane_size_, op_parameter_->thread_num_);
  if (INT_MUL_OVERFLOW(task_id, unit)) {
    MS_LOG(ERROR) << "int mul overflow.";
    return RET_ERROR;
  }
  int begin = task_id * unit;
  int end = MSMIN(begin + unit, out_plane_size_);
  int channel = softmax_param_->input_shape_[softmax_param_->axis_];
  if (INT_MUL_OVERFLOW(begin, channel)) {
    MS_LOG(ERROR) << "int mul overflow.";
    return RET_ERROR;
  }
  int offset = begin * channel;
  auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->MutableData());
  auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->MutableData());
  SoftmaxLastAxis(input_ptr + offset, output_ptr + offset, end - begin, channel);
  return RET_OK;
}

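// Trampoline passed to ParallelLaunch: unpacks the kernel from the opaque
// cdata pointer and forwards the task id. lhs_scale/rhs_scale belong to the
// ParallelLaunch callback signature and are unused here.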
int SoftmaxLastAxisRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) {
  CHECK_NULL_RETURN(cdata);
  auto kernel = reinterpret_cast<SoftmaxCPUKernel *>(cdata);
  auto ret = kernel->DoSoftmaxLastAxis(task_id);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "DoSoftmaxLastAxis error task_id: " << task_id << ", ret: " << ret;
  }
  return ret;
}

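// Run dispatches on the layout computed in ReSize(): if the softmax axis is
// the innermost dimension (in_plane_size_ == 1), the parallel last-axis
// kernel is launched; otherwise the generic nnacl Softmax runs on the
// calling thread with the preallocated sum buffer.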
int SoftmaxCPUKernel::Run() {
  int ret = RET_OK;
  if (in_plane_size_ == 1) {
    ret = ParallelLaunch(this->ms_context_, SoftmaxLastAxisRun, this, op_parameter_->thread_num_);
    if (ret != RET_OK) {
      MS_LOG(ERROR) << "SoftmaxCPUKernel ParallelLaunch failed, ret: " << ret;
    }
  } else {
    MS_ASSERT(sum_data_);
    MS_ASSERT(softmax_param_);
    auto input_ptr = reinterpret_cast<float *>(in_tensors_.at(kInputIndex)->data());
    MS_ASSERT(input_ptr);
    auto output_ptr = reinterpret_cast<float *>(out_tensors_.at(kOutputIndex)->data());
    MS_ASSERT(output_ptr);
    Softmax(input_ptr, output_ptr, sum_data_, softmax_param_);
  }
  return ret;
}

REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Softmax, LiteKernelCreator<SoftmaxCPUKernel>)
}  // namespace mindspore::kernel