/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/CL/kernels/CLSoftmaxLayerKernel.h"

#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace
{
/** Calculates softmax parameters from the quantized input scale and the exponent scaling factor, and returns them as build options.
 *
 * Prepares these build options:
 * -INPUT_BETA_MULTIPLIER, INPUT_BETA_LEFT_SHIFT - quantized representation of the beta multiplier.
 * -DIFF_MIN - threshold difference between the maximum value of the input data and the currently processed value;
 *             it defines whether the value will be taken into account or not.
 *
 * @param[in] input_scale Input scaling factor
 * @param[in] beta        Exponent scaling factor beta
 *
 * @return Build options carrying the computed softmax parameters
 */
CLBuildOptions prepare_quantized_softmax_build_options(float input_scale, float beta)
{
    // Number of integer bits in temporary fixed-point representation of current-to-max difference
    static const int scaled_diff_int_bits = 5;
    // Number of integer bits used in temporary fixed-point representation of exponent accumulator
    static const int exp_accumulation_in_bits = 12;

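    // Fold beta and the input scale into a single fixed-point multiplier, capped at the largest value a signed 32-bit integer can hold.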
    const double beta_multiplier = std::min(
                                       1.0 * beta * input_scale * (1 << (31 - scaled_diff_int_bits)),
                                       (1LL << 31) - 1.0);
    int input_beta_multiplier;
    int input_beta_left_shift;
    quantization::calculate_quantized_multiplier_greater_than_one(beta_multiplier, &input_beta_multiplier, &input_beta_left_shift);

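    // diff_min is the (negative) threshold: differences from the row maximum below it are not taken into account by the kernel (see DIFF_MIN above).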
    const double max_input_rescaled = 1.0 * ((1 << scaled_diff_int_bits) - 1) * (1LL << (31 - scaled_diff_int_bits)) / (1LL << input_beta_left_shift);
    const int    diff_min           = -1.f * std::floor(max_input_rescaled);

    CLBuildOptions build_opts;
    build_opts.add_option("-DSCALED_DIFF_INT_BITS=" + support::cpp11::to_string(scaled_diff_int_bits));
    build_opts.add_option("-DEXP_ACCUMULATION_INT_BITS=" + support::cpp11::to_string(exp_accumulation_in_bits));
    build_opts.add_option("-DINPUT_BETA_MULTIPLIER=" + support::cpp11::to_string(input_beta_multiplier));
    build_opts.add_option("-DINPUT_BETA_LEFT_SHIFT=" + support::cpp11::to_string(input_beta_left_shift));
    build_opts.add_option("-DDIFF_MIN=" + support::cpp11::to_string(diff_min));

    return build_opts;
}

Status validate_arguments_1DMaxShiftExpSum(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum)
{
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(max, sum, output);

    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, max);

    const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(input->data_type());

    // Checks performed when output is configured
    if(output->total_size() != 0)
    {
        if(is_quantized_asymmetric)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        }
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
    }

    // Checks performed when sum is configured
    if(sum->total_size() != 0)
    {
        if(is_quantized_asymmetric)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(sum, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(max, sum);
        }
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(max, sum);
    }

    return Status{};
}

Status validate_arguments_1DNorm(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S32, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(sum, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, sum);
    ARM_COMPUTE_RETURN_ERROR_ON(info.is_log && !is_data_type_float(info.input_data_type));

    // Note: output should always have a scale of 1/256 and offset 0
    const QuantizationInfo allowed_quantization_info = get_softmax_output_quantization_info(info.input_data_type, info.is_log);
    const bool             is_quantized_asymmetric   = is_data_type_quantized_asymmetric(info.input_data_type);

    // Checks performed when output is configured
    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
        if(!is_quantized_asymmetric)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
            ARM_COMPUTE_RETURN_ERROR_ON(output->quantization_info() != allowed_quantization_info);
        }
    }

    return Status{};
}
} // namespace

/**< Grid size (obtained through auto-tuning) */
const unsigned int CLLogits1DMaxShiftExpSumKernel::_grid_size = 64;
/**< Vector size in the serial case (obtained through auto-tuning) */
const unsigned int CLLogits1DMaxShiftExpSumKernel::_serial_vector_size = 8;
/**< Vector size in the parallel case (obtained through auto-tuning, enables the best memory access pattern for Bifrost) */
const unsigned int CLLogits1DMaxShiftExpSumKernel::_parallel_vector_size = 4;

CLLogits1DMaxShiftExpSumKernel::CLLogits1DMaxShiftExpSumKernel()
    : _input(nullptr), _max(nullptr), _output(nullptr), _sum(nullptr)
{
}

void CLLogits1DMaxShiftExpSumKernel::configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, max, output, sum, info);
}

void CLLogits1DMaxShiftExpSumKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, const SoftmaxKernelInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, max, sum, output);

    auto padding_info = get_padding_info({ input, max, output, sum });

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*sum->info(), input->info()->clone()->set_tensor_shape(max->info()->tensor_shape()));
    auto_init_if_empty(*output->info(), *input->info()->clone());

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_1DMaxShiftExpSum(input->info(), max->info(), output->info(), sum->info()));

    _input  = input;
    _max    = max;
    _output = output;
    _sum    = sum;

    const DataType                dt                 = input->info()->data_type();
    const UniformQuantizationInfo qinfo              = input->info()->quantization_info().uniform();
    const size_t                  reduction_dim_size = input->info()->dimension(0);
    const float                   beta               = info.beta;
    const auto                    is_signed_qasymm8  = is_data_type_quantized_asymmetric_signed(info.input_data_type);
    const int                     min_value          = is_signed_qasymm8 ? CL_SCHAR_MIN : 0;

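    // Choose the reduction strategy (serial vs parallel) and the vector size to use for this input width.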
    ParallelReductionInfo parallel_reduction_info = is_parallel_reduction(reduction_dim_size);
    const unsigned int    vector_size             = adjust_vec_size(std::get<1>(parallel_reduction_info), reduction_dim_size);

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dt));
    build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(min_value));
    build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
    build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(reduction_dim_size));
    build_opts.add_option("-DVECTOR_SIZE_LEFTOVER=" + support::cpp11::to_string(reduction_dim_size % vector_size));
    build_opts.add_option("-DLOG_VECTOR_SIZE=" + support::cpp11::to_string(lround(log2(vector_size))));
    build_opts.add_option_if((reduction_dim_size % vector_size) != 0, "-DNON_MULTIPLE_OF_VECTOR_SIZE");
    build_opts.add_option_if(is_signed_qasymm8, "-DQASYMM8_SIGNED");
    build_opts.add_option_if(is_data_type_float(dt) && (beta != 1.0f), "-DBETA=" + float_to_string_with_full_precision(beta));
    build_opts.add_option_if(is_data_type_float(dt) && info.is_log, "-DLOG_SOFTMAX");
    build_opts.add_option_if(is_data_type_float(dt), "-DMINVAL=" + ((dt == DataType::F16) ? std::string("-HALF_MAX") : std::string("-FLT_MAX")));
    build_opts.add_options_if(is_data_type_quantized_asymmetric(dt), prepare_quantized_softmax_build_options(qinfo.scale, beta).options());

    cl::NDRange lws_hint(cl::NullRange);
    std::string kernel_name = std::string("softmax_layer_max_shift_exp_sum_") + (is_data_type_quantized_asymmetric(dt) ? "quantized_" : "");

    // Configure parallel kernel if needed
    if(std::get<0>(parallel_reduction_info))
    {
        kernel_name += "parallel";
        bool is_grid_size_pow2 = (_grid_size != 0) && ((_grid_size & (_grid_size - 1)) == 0);
        build_opts.add_option_if(is_grid_size_pow2 && _grid_size <= 256, "-DGRID_SIZE=" + support::cpp11::to_string(_grid_size));

        // Handle boundary conditions.
        const unsigned int multiple_grid_size = (reduction_dim_size / vector_size) % _grid_size;
        build_opts.add_option_if((multiple_grid_size != 0) || ((reduction_dim_size % vector_size) != 0), "-DNON_MULTIPLE_OF_GRID_SIZE");
        // Setting _lws_hint in this way can also communicate grid_size to CLLogits1DMaxShiftExpSumKernel::run().
        // A single workgroup performs reduction in dimension 0 in the parallel case, hence lws[0]==gws[0].
        lws_hint = cl::NDRange(_grid_size);
    }
    else
    {
        kernel_name += "serial";
    }

    // Create kernel.
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Configure window
    Window win = calculate_max_window(*(input->info()), Steps(reduction_dim_size));
    ICLKernel::configure_internal(win, lws_hint);

    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Status CLLogits1DMaxShiftExpSumKernel::validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_1DMaxShiftExpSum(input, max, output, sum));
    return Status{};
}

CLLogits1DMaxShiftExpSumKernel::ParallelReductionInfo CLLogits1DMaxShiftExpSumKernel::is_parallel_reduction(size_t size)
{
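    // Reduce in parallel only when the row spans at least _grid_size * _serial_vector_size elements and the grid holds more than one work item.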
    bool         is_parallel_reduction = (size >= (_grid_size * _serial_vector_size)) && (_grid_size > 1);
    unsigned int vector_size           = is_parallel_reduction ? _parallel_vector_size : _serial_vector_size;
    return std::make_tuple(is_parallel_reduction, vector_size);
}

void CLLogits1DMaxShiftExpSumKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    // Collapse window in Z dimension
    Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);

    // Reconfigure window in case of parallel reduction
    ParallelReductionInfo parallel_reduction_info = is_parallel_reduction(_input->info()->dimension(0));
    if(std::get<0>(parallel_reduction_info))
    {
        // Launch grid_size parallel work items
        window_collapsed.set(Window::DimX, Window::Dimension(0, _grid_size, 1));
    }

    // Get slices
    Window slice = window_collapsed.first_slice_window_3D();
    do
    {
        unsigned int idx = 0;
        // Set inputs
        add_3D_tensor_argument(idx, _input, slice);
        add_3D_tensor_argument(idx, _max, slice);
        add_3D_tensor_argument(idx, _output, slice);
        add_3D_tensor_argument(idx, _sum, slice);
        enqueue(queue, *this, slice, lws_hint());
    }
    while(window_collapsed.slide_window_slice_3D(slice));
}

CLLogits1DNormKernel::CLLogits1DNormKernel()
    : _input(nullptr), _sum(nullptr), _output(nullptr)
{
}

void CLLogits1DNormKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, sum, output, info);
}

void CLLogits1DNormKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *sum, ICLTensor *output, const SoftmaxKernelInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, sum, output);

    auto padding_info = get_padding_info({ input, output, sum });

    // Note: output should always have a scale of 1/256 and offset 0
    const bool                    is_quantized_asymmetric   = is_data_type_quantized_asymmetric(info.input_data_type);
    const DataType                output_data_type          = info.input_data_type;
    const QuantizationInfo        allowed_quantization_info = get_softmax_output_quantization_info(info.input_data_type, info.is_log);
    const UniformQuantizationInfo qinfo                     = input->info()->quantization_info().uniform();

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output->info(),
                       input->info()->clone()->set_data_type(output_data_type).set_quantization_info(allowed_quantization_info));

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_1DNorm(input->info(), sum->info(), output->info(), info));

    _input  = input;
    _sum    = sum;
    _output = output;

    const auto         is_signed_qasymm8 = is_data_type_quantized_asymmetric_signed(info.input_data_type);
    const int          min_value         = is_signed_qasymm8 ? CL_SCHAR_MIN : 0;
    const unsigned int vector_size       = adjust_vec_size(16, input->info()->dimension(0));

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(info.input_data_type));
    build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(min_value));
    build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
    build_opts.add_option("-DVECTOR_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % vector_size));
    build_opts.add_option_if(is_data_type_quantized_asymmetric_signed(info.input_data_type), "-DQASYMM8_SIGNED");
    build_opts.add_options_if(is_quantized_asymmetric,
                              prepare_quantized_softmax_build_options(qinfo.scale, info.beta).options());
    build_opts.add_option_if(info.is_log, "-DLOG_SOFTMAX");

    // Create kernel
    std::string kernel_name = std::string("softmax_layer_norm") + (is_quantized_asymmetric ? "_quantized" : "");
    _kernel                 = create_kernel(compile_context, kernel_name, build_opts.options());

    // Configure window
    auto win = calculate_max_window(*(input->info()), Steps(vector_size));
    ICLKernel::configure_internal(win);

    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Status CLLogits1DNormKernel::validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_1DNorm(input, sum, output, info));

    return Status{};
}

void CLLogits1DNormKernel::run(const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
    Window slice            = window_collapsed.first_slice_window_3D();

    do
    {
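        // The sum tensor holds one accumulated value per row, so restrict its window to a single step along X.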
        Window sum_slice = slice;
        sum_slice.set(Window::DimX, Window::Dimension(0, 1, 1));

        unsigned int idx = 0;
        // Set inputs
        add_3D_tensor_argument(idx, _input, slice);
        add_3D_tensor_argument(idx, _sum, sum_slice);
        add_3D_tensor_argument(idx, _output, slice);
        enqueue(queue, *this, slice, lws_hint());
    }
    while(window_collapsed.slide_window_slice_3D(slice));
}
} // namespace arm_compute