/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/ICLKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLSoftmaxLayerKernel.h"
#include "src/core/helpers/SoftmaxHelpers.h"
#include "support/MemorySupport.h"

namespace arm_compute
{
template <bool IS_LOG>
CLSoftmaxLayerGeneric<IS_LOG>::CLSoftmaxLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)),
      _permute_input(),
      _permute_output(),
      _max_shift_exp_sum_kernel(support::cpp14::make_unique<CLLogits1DMaxShiftExpSumKernel>()),
      _norm_kernel(support::cpp14::make_unique<CLLogits1DNormKernel>()),
      _max(),
      _sum(),
      _tmp(),
      _input_permuted(),
      _output_permuted(),
      _needs_permute()
{
}

template <bool IS_LOG>
CLSoftmaxLayerGeneric<IS_LOG>::~CLSoftmaxLayerGeneric() = default;

template <bool IS_LOG>
void CLSoftmaxLayerGeneric<IS_LOG>::configure(const ICLTensor *input, ICLTensor *output, float beta, int32_t axis)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, output, beta, axis);
}

template <bool IS_LOG>
void CLSoftmaxLayerGeneric<IS_LOG>::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta, int32_t axis)
{
    // Perform validation step
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLSoftmaxLayerGeneric<IS_LOG>::validate(input->info(), output->info(), beta, axis));

    const size_t actual_axis = static_cast<size_t>(wrap_around(axis, static_cast<int32_t>(input->info()->num_dimensions())));

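    // The softmax kernels reduce along dimension 0 only; any other axis is handled by
    // permuting the input so that the requested axis becomes the innermost dimension,
    // and permuting the result back once the kernels have run.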
    _needs_permute              = actual_axis != 0;
    ICLTensor       *tmp_output = output;
    const ICLTensor *tmp_input  = _needs_permute ? &_input_permuted : input;
    if(_needs_permute)
    {
        _memory_group.manage(&_input_permuted);
        _memory_group.manage(&_output_permuted);
        _permute_input.configure(compile_context, input, &_input_permuted, softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis));
        tmp_output = &_output_permuted;
    }

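    // The shifted exponentials are staged in a temporary tensor; for quantized asymmetric
    // inputs this intermediate is widened to S32, otherwise it keeps the input data type.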
    // Create intermediate tensors
    DataType   tmp_data_type = is_data_type_quantized_asymmetric(tmp_input->info()->data_type()) ? DataType::S32 : tmp_input->info()->data_type();
    TensorInfo tensor_info_tmp(tmp_input->info()->clone()->set_data_type(tmp_data_type));
    _tmp.allocator()->init(tensor_info_tmp);
    TensorShape max_sum_shape = tmp_input->info()->tensor_shape();
    max_sum_shape.set(0, 1);
    _max.allocator()->init(tmp_input->info()->clone()->set_tensor_shape(max_sum_shape));
    _sum.allocator()->init(tmp_input->info()->clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type));

    // Set GPU target to kernels
    _max_shift_exp_sum_kernel->set_target(CLScheduler::get().target());

    // Manage intermediate buffers
    _memory_group.manage(&_tmp);
    _memory_group.manage(&_max);
    _memory_group.manage(&_sum);

    SoftmaxKernelInfo softmax_info;
    softmax_info.beta            = beta;
    softmax_info.is_log          = IS_LOG;
    softmax_info.input_data_type = tmp_input->info()->data_type();

    // Configure kernels
    _max_shift_exp_sum_kernel->configure(compile_context, tmp_input, &_max, &_tmp, &_sum, softmax_info);
    _norm_kernel->configure(compile_context, &_tmp, &_sum, tmp_output, softmax_info);

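    // Intermediate buffers are marked as managed before the kernels are configured and
    // only allocated afterwards, so the memory group can track their lifetimes.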
    // Allocate intermediate buffers
    _tmp.allocator()->allocate();
    _max.allocator()->allocate();
    _sum.allocator()->allocate();
    if(_needs_permute)
    {
        _permute_output.configure(compile_context, &_output_permuted, output, softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis));
        _input_permuted.allocator()->allocate();
        _output_permuted.allocator()->allocate();
    }
}

template <bool IS_LOG>
Status CLSoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, int32_t axis)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 4, "Only up to 4 dimensions are supported");
    ARM_COMPUTE_UNUSED(beta);
    ARM_COMPUTE_RETURN_ERROR_ON(axis < static_cast<int32_t>(-input->num_dimensions()) || static_cast<int32_t>(input->num_dimensions()) <= axis);

    const size_t actual_axis   = static_cast<size_t>(wrap_around(axis, static_cast<int32_t>(input->num_dimensions())));
    const bool   needs_permute = actual_axis != 0;
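    // When the softmax axis is not the innermost one, also validate the permutes that
    // reorder the input before the kernels and restore the output layout afterwards.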
    if(needs_permute)
    {
        const PermutationVector permutation_vector = softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis);
        const TensorShape       permuted_shape     = misc::shape_calculator::compute_permutation_output_shape(*input, permutation_vector);
        TensorInfo              input_permuted(input->clone()->set_tensor_shape(permuted_shape));
        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(input, &input_permuted, permutation_vector));
        TensorInfo output_permuted(output->clone()->set_tensor_shape(permuted_shape));
        ARM_COMPUTE_RETURN_ON_ERROR(CLPermute::validate(&output_permuted, output, permutation_vector));
    }

    // Create intermediate tensor info
    DataType   tmp_data_type = is_data_type_quantized_asymmetric(input->data_type()) ? DataType::S32 : input->data_type();
    TensorInfo tensor_info_tmp(input->clone()->set_data_type(tmp_data_type).set_is_resizable(true));

    TensorShape max_sum_shape = input->tensor_shape();
    max_sum_shape.set(0, 1);
    TensorInfo tensor_info_max(input->clone()->set_tensor_shape(max_sum_shape).set_is_resizable(true));
    TensorInfo tensor_info_sum(input->clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type).set_quantization_info(QuantizationInfo()).set_is_resizable(true));

    SoftmaxKernelInfo softmax_info;
    softmax_info.beta            = beta;
    softmax_info.is_log          = IS_LOG;
    softmax_info.input_data_type = input->data_type();

    ARM_COMPUTE_RETURN_ON_ERROR(CLLogits1DMaxShiftExpSumKernel::validate(input, &tensor_info_max, &tensor_info_tmp, &tensor_info_sum));
    ARM_COMPUTE_RETURN_ON_ERROR(CLLogits1DNormKernel::validate(&tensor_info_tmp, &tensor_info_sum, output, softmax_info));

    return Status{};
}

template <bool IS_LOG>
void CLSoftmaxLayerGeneric<IS_LOG>::run()
{
    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_needs_permute)
    {
        _permute_input.run();
    }

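    // The second argument of enqueue() controls whether the command queue is flushed;
    // the queue is flushed after the norm kernel only when no output permute follows.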
    CLScheduler::get().enqueue(*_max_shift_exp_sum_kernel, false);
    CLScheduler::get().enqueue(*_norm_kernel, !_needs_permute);

    if(_needs_permute)
    {
        _permute_output.run();
    }
}

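// Explicit instantiations: IS_LOG = false implements the regular softmax,
// IS_LOG = true the log-softmax variant.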
template class CLSoftmaxLayerGeneric<false>;
template class CLSoftmaxLayerGeneric<true>;

} // namespace arm_compute