• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright 2023 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "nnacl/kernel/log_softmax.h"
18 #include "nnacl/common_func.h"
19 #include "nnacl/kernel/default_kernel_base.h"
20 #include "nnacl/fp32/log_softmax_fp32.h"
21 #ifdef ENABLE_FP16
22 #include "nnacl/fp16/log_softmax_fp16.h"
23 #endif
24 
LogSoftmaxLastAxisRun(void * cdata,int task_id,float l,float r)25 int LogSoftmaxLastAxisRun(void *cdata, int task_id, float l, float r) {
26   LogSoftmaxStruct *log_softmax = (LogSoftmaxStruct *)cdata;
27   NNACL_CHECK_NULL_RETURN_ERR(log_softmax);
28 
29   TensorC *in = log_softmax->softmax_.base_.in_[FIRST_INPUT];
30   NNACL_CHECK_NULL_RETURN_ERR(in);
31   void *input_ptr = in->data_;
32   NNACL_CHECK_NULL_RETURN_ERR(input_ptr);
33   void *output_ptr = log_softmax->softmax_.base_.out_[OUTPUT_INDEX]->data_;
34   NNACL_CHECK_NULL_RETURN_ERR(output_ptr);
35   void *tmp_ptr = log_softmax->softmax_.sum_data_;
36   NNACL_CHECK_NULL_RETURN_ERR(tmp_ptr);
37 
38   int unit = UP_DIV(log_softmax->softmax_.out_plane_size_, log_softmax->softmax_.base_.thread_nr_);
39   int begin = task_id * unit;
40   int end = MSMIN(begin + unit, log_softmax->softmax_.out_plane_size_);
41   int channel = in->shape_[log_softmax->softmax_.axis_];
42   int offset = begin * channel;
43 
44 #ifdef ENABLE_FP16
45   if (log_softmax->softmax_.data_type_ == kNumberTypeFloat16) {
46     LogSoftmaxLastAxisFp16((const float16_t *)input_ptr + offset, (float16_t *)output_ptr + offset,
47                            (float16_t *)tmp_ptr + offset, end - begin, channel);
48     return NNACL_OK;
49   }
50 #endif
51   LogSoftmaxLastAxis((const float *)input_ptr + offset, (float *)output_ptr + offset, (float *)tmp_ptr + offset,
52                      end - begin, channel);
53   return NNACL_OK;
54 }
55 
LogSoftmaxResize(struct KernelBase * self)56 int LogSoftmaxResize(struct KernelBase *self) {
57   LogSoftmaxStruct *log_softmax = (LogSoftmaxStruct *)self;
58   NNACL_CHECK_NULL_RETURN_ERR(log_softmax);
59 
60   int ret = InitSoftmaxParam(&log_softmax->softmax_);
61   if (ret != NNACL_OK) {
62     return ret;
63   }
64 
65   if (log_softmax->softmax_.in_plane_size_ == 1 && log_softmax->softmax_.sum_data_ == NULL) {
66     TensorC *in = log_softmax->softmax_.base_.in_[FIRST_INPUT];
67     NNACL_CHECK_NULL_RETURN_ERR(in);
68     SoftmaxStruct *softmax = &log_softmax->softmax_;
69 
70     int sum_data_size = softmax->in_plane_size_ * softmax->out_plane_size_ * in->shape_[softmax->axis_];
71     softmax->sum_data_ = self->env_->Alloc(self->env_->allocator_, sum_data_size * DataTypeCSize(softmax->data_type_));
72     NNACL_MALLOC_CHECK_NULL_RETURN_ERR(softmax->sum_data_);
73   }
74   return NNACL_OK;
75 }
76 
LogSoftmaxCompute(struct KernelBase * self)77 int LogSoftmaxCompute(struct KernelBase *self) {
78   LogSoftmaxStruct *log_softmax = (LogSoftmaxStruct *)self;
79   NNACL_CHECK_NULL_RETURN_ERR(log_softmax);
80 
81   if (log_softmax->softmax_.in_plane_size_ == 1) {
82     return self->env_->ParallelLaunch(self->env_->thread_pool_, LogSoftmaxLastAxisRun, self, self->thread_nr_);
83   }
84 
85   TensorC *in = self->in_[FIRST_INPUT];
86   NNACL_CHECK_NULL_RETURN_ERR(in);
87   void *input_ptr = in->data_;
88   NNACL_CHECK_NULL_RETURN_ERR(input_ptr);
89   void *output_ptr = self->out_[OUTPUT_INDEX]->data_;
90   NNACL_CHECK_NULL_RETURN_ERR(output_ptr);
91   NNACL_CHECK_NULL_RETURN_ERR(log_softmax->softmax_.sum_data_);
92 
93 #ifdef ENABLE_FP16
94   if (log_softmax->softmax_.data_type_ == kNumberTypeFloat16) {
95     LogSoftmaxFp16((const float16_t *)input_ptr, (float16_t *)output_ptr, (float16_t *)log_softmax->softmax_.sum_data_,
96                    in->shape_, in->shape_size_, log_softmax->softmax_.axis_);
97     return NNACL_OK;
98   }
99 #endif
100   LogSoftmax((const float *)input_ptr, (float *)output_ptr, (float *)log_softmax->softmax_.sum_data_, in->shape_,
101              in->shape_size_, log_softmax->softmax_.axis_);
102   return NNACL_OK;
103 }
104 
CreateLogSoftmax(OpParameter * param,int data_type)105 KernelBase *CreateLogSoftmax(OpParameter *param, int data_type) {
106   LogSoftmaxStruct *log_softmax = (LogSoftmaxStruct *)malloc(sizeof(LogSoftmaxStruct));
107   NNACL_MALLOC_CHECK_NULL_RETURN_NULL(log_softmax);
108   memset(log_softmax, 0, sizeof(LogSoftmaxStruct));
109 
110   log_softmax->softmax_.sum_data_ = NULL;
111   log_softmax->softmax_.data_type_ = data_type;
112   log_softmax->softmax_.base_.Prepare = DefaultPrepare1In1Out;
113   log_softmax->softmax_.base_.Release = SoftmaxRelease;
114   log_softmax->softmax_.base_.Resize = LogSoftmaxResize;
115   log_softmax->softmax_.base_.Compute = LogSoftmaxCompute;
116   return (KernelBase *)log_softmax;
117 }
118 
/* Register the same factory for both element types; CreateLogSoftmax stores
 * the requested data type in the kernel, so one creator serves fp32 and fp16. */
REG_KERNEL_CREATOR(PrimType_LogSoftmax, kNumberTypeFloat32, CreateLogSoftmax)
REG_KERNEL_CREATOR(PrimType_LogSoftmax, kNumberTypeFloat16, CreateLogSoftmax)
121