/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/CL/kernels/CLStridedSliceKernel.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/utils/helpers/tensor_transform.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/core/utils/helpers/bit_ops.h"
#include "support/Cast.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
                          const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
                          int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);

    ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().num_dimensions() > 4);
    ARM_COMPUTE_RETURN_ERROR_ON(starts.num_dimensions() > input->num_dimensions());
    ARM_COMPUTE_RETURN_ERROR_ON(ends.num_dimensions() > input->num_dimensions());
    ARM_COMPUTE_RETURN_ERROR_ON(strides.num_dimensions() > input->num_dimensions());
    ARM_COMPUTE_RETURN_ERROR_ON(std::any_of(strides.cbegin(), strides.cbegin() + strides.num_dimensions(), [](int i)
    {
        return i == 0;
    }));

    // Get expected output shape
    const TensorShape exp_output_shape = arm_compute::misc::shape_calculator::compute_strided_slice_shape(*input,
                                                                                                           starts, ends, strides,
                                                                                                           begin_mask, end_mask, shrink_axis_mask);
    ARM_COMPUTE_RETURN_ERROR_ON(exp_output_shape.total_size() == 0);

    // Checks output if configured
    if(output->total_size() != 0)
    {
        const TensorInfo exp_output_info = output->clone()->set_tensor_shape(exp_output_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(output, &exp_output_info);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    return Status{};
}
} // namespace

CLStridedSliceKernel::CLStridedSliceKernel()
{
    _type = CLKernelType::ELEMENTWISE;
}

void CLStridedSliceKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output,
                                     const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
                                     int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    auto padding_info = get_padding_info({ input, output });
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask));

    const TensorShape &input_shape = input->tensor_shape();

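    // Resolve the begin/end/stride specification and the begin/end/shrink masks into absolute coordinates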
    Coordinates starts_abs;
    Coordinates ends_abs;
    Coordinates final_strides;
    std::tie(starts_abs, ends_abs, final_strides) = arm_compute::helpers::tensor_transform::calculate_strided_slice_coords(
                                                        input_shape,
                                                        starts, ends, strides,
                                                        begin_mask, end_mask, shrink_axis_mask);

    // Configure kernel window
    const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_strided_slice_shape(*input,
                                                                                                      starts, ends, strides,
                                                                                                      begin_mask, end_mask, shrink_axis_mask);
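    // Initialise the output tensor info with the inferred shape if it has not been configured yet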
    auto_init_if_empty(*output, input->clone()->set_tensor_shape(output_shape));
    Window win = calculate_max_window(*output, Steps());

    // Enable processing of multiple elements along x if stride_x is 1 and the output width is greater than the access vector size
    const int  vec_size_x     = 16 / input->element_size();
    const int  output_width_x = output->tensor_shape().x();
    const bool is_shrink_on_x = arm_compute::helpers::bit_ops::is_bit_set(shrink_axis_mask, 0);
    const bool multi_access_x = !is_shrink_on_x && (final_strides.x() == 1) && (output_width_x / vec_size_x > 0);

    // Update window if needed
    if(multi_access_x)
    {
        Window &updated_window = win;
        updated_window.set(Window::DimX,
                           Window::Dimension(updated_window.x().start(), ceil_to_multiple(updated_window.x().end(), vec_size_x), vec_size_x));
    }
    ICLKernel::configure_internal(win);

    // Create build options
    CLBuildOptions build_opts;
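    // The kernel only moves data, so an unsigned integer type of matching element size is sufficient as DATA_TYPE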
    build_opts.add_option("-DDATA_TYPE=" + get_cl_unsigned_type_from_element_size(data_size_from_type(input->data_type())));
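    // Pass the absolute start, the stride and the shrink flag of every dimension as compile-time defines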
    for(unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
    {
        const bool is_shrink = arm_compute::helpers::bit_ops::is_bit_set(shrink_axis_mask, i);
        build_opts.add_option("-DSTART_" + support::cpp11::to_string(i) + "=" + support::cpp11::to_string(starts_abs[i]));
        build_opts.add_option("-DSTRIDE_" + support::cpp11::to_string(i) + "=" + support::cpp11::to_string(final_strides[i]));
        build_opts.add_option_if(is_shrink, "-DSHRINK_" + support::cpp11::to_string(i));
    }
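    // When vectorised access along x is enabled, expose the vector size and the last x offset that still allows a full
    // vector read, so the OpenCL kernel can handle the leftover elements at the boundary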
    build_opts.add_option_if(multi_access_x, "-DLAST_ACCESSED_X=" + support::cpp11::to_string(std::max<int>(output_width_x - vec_size_x, 0)));
    build_opts.add_option_if(multi_access_x, "-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
    build_opts.add_option_if_else(input_shape.num_dimensions() > 2,
                                  "-DSRC_DEPTH=" + support::cpp11::to_string(input_shape.z()),
                                  "-DSRC_DEPTH=1");
    build_opts.add_option_if_else(output->num_dimensions() > 2,
                                  "-DDST_DEPTH=" + support::cpp11::to_string(output->tensor_shape().z()),
                                  "-DDST_DEPTH=1");

    // Create kernel
    _kernel = create_kernel(compile_context, "strided_slice", build_opts.options());

    // Set config_id for enabling LWS tuning
    _config_id = "strided_slice";
    _config_id += "_";
    _config_id += lower_string(string_from_data_type(input->data_type()));
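    // Append the tensor dimensions and slice parameters so each configuration gets a distinct tuning key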
    for(unsigned int i = 0; i < input_shape.num_dimensions(); ++i)
    {
        _config_id += "_";
        _config_id += support::cpp11::to_string(input->dimension(i));
        _config_id += "_";
        _config_id += support::cpp11::to_string(starts_abs[i]);
        _config_id += "_";
        _config_id += support::cpp11::to_string(ends_abs[i]);
        _config_id += "_";
        _config_id += support::cpp11::to_string(final_strides[i]);
    }
    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Status CLStridedSliceKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
                                      const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
                                      int32_t begin_mask, int32_t end_mask, int32_t shrink_axis_mask)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask));

    return Status{};
}

void CLStridedSliceKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
    auto       dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));

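    // Collapse the execution window where possible to reduce the number of slices that have to be enqueued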
    Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
    Window slice            = window_collapsed.first_slice_window_4D();

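    // For each 4D slice of the collapsed window, bind the source and destination tensors and enqueue the kernel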
    do
    {
        unsigned int idx = 0;
        add_4D_tensor_argument(idx, src, slice);
        add_4D_tensor_argument(idx, dst, slice);
        enqueue(queue, *this, slice, lws_hint());
    }
    while(window_collapsed.slide_window_slice_4D(slice));
}
} // namespace arm_compute