• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2016-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/NEON/INEKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <tuple>
41 
42 using namespace arm_compute;
43 using namespace arm_compute::misc::shape_calculator;
44 
45 namespace
46 {
validate_arguments(const ITensorInfo * input,const ITensorInfo * output)47 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
48 {
49     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
50     //Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
51     ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
52     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
53 
54     if(output->total_size() != 0)
55     {
56         TensorShape output_shape = input->tensor_shape();
57         output_shape.set(0, input->dimension(0) * 4);
58         output_shape.set(1, std::ceil(input->dimension(1) / 4.0f));
59         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
60         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
61         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
62     }
63 
64     return Status{};
65 }
66 } // namespace
67 
NEGEMMInterleave4x4Kernel()68 NEGEMMInterleave4x4Kernel::NEGEMMInterleave4x4Kernel()
69     : _func(nullptr)
70 {
71 }
72 
configure(const ITensor * input,ITensor * output)73 void NEGEMMInterleave4x4Kernel::configure(const ITensor *input, ITensor *output)
74 {
75     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
76 
77     // Output auto inizialitation if not yet initialized
78     auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(compute_interleaved_shape(*input->info())));
79 
80     // Perform validate step
81     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info()));
82 
83     _input  = input;
84     _output = output;
85 
86     switch(input->info()->element_size())
87     {
88         case 1:
89             _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint8_t>;
90             break;
91         case 2:
92             _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint16_t>;
93             break;
94         case 4:
95             _func = &NEGEMMInterleave4x4Kernel::gemm_interleave4x4<uint32_t>;
96             break;
97         default:
98             ARM_COMPUTE_ERROR_ON("Element size not supported");
99             break;
100     }
101 
102     Window win = calculate_max_window(*input->info(), Steps(1, 4));
103 
104     Coordinates coord;
105     coord.set_num_dimensions(output->info()->num_dimensions());
106     output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
107 
108     INEKernel::configure(win);
109 }
110 
validate(const ITensorInfo * input,const ITensorInfo * output)111 Status NEGEMMInterleave4x4Kernel::validate(const ITensorInfo *input, const ITensorInfo *output)
112 {
113     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
114 
115     return Status{};
116 }
117 
/** Interleave the input in 4x4 blocks: each group of four vertically adjacent
 * elements (one per source row) is written as four consecutive elements of a
 * single output row. Rows missing from a bottom partial block are zero-filled.
 *
 * @tparam ScalarType Unsigned integer type matching the tensor's element size.
 *
 * @param[in]  input  Input tensor to read from.
 * @param[out] output Output tensor holding the interleaved data.
 * @param[in]  window Execution window (Y step of 4, set up in configure()).
 */
template <typename ScalarType>
void NEGEMMInterleave4x4Kernel::gemm_interleave4x4(const ITensor *input, ITensor *output, const Window &window)
{
    // X is iterated manually inside the lambda, so capture its bounds here.
    const size_t window_start_x = window.x().start();
    const size_t window_end_x   = window.x().end();

    const size_t in_height = input->info()->dimension(1);
    const size_t in_stride = input->info()->strides_in_bytes()[1]; // bytes between consecutive input rows

    // Number of valid rows in the final, incomplete block of 4 (0 if height % 4 == 0).
    const size_t partial_y = in_height % 4;

    // Set window for the input tensor: collapse X to a single step so the
    // iterator advances per 4-row block; X is indexed explicitly below.
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    // Set window for the output tensor: one output row per four input rows,
    // hence Y is scaled by 0.25.
    Window win_out(window);
    win_out.set(Window::DimX, Window::Dimension(0, 1, 1));
    win_out.scale(Window::DimY, 0.25f);

    Iterator in(input, win);
    Iterator out(output, win_out);

    execute_window_loop(win, [&](const Coordinates & id)
    {
        if(id.y() + 4 <= static_cast<int>(in_height))
        {
            // Full 4-row block: gather column x from each of the four rows and
            // store the quad contiguously at output offset x * 4.
            for(size_t x = window_start_x; x < window_end_x; ++x)
            {
                const ScalarType data[4] =
                {
                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 0 * in_stride) + x),
                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 1 * in_stride) + x),
                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 2 * in_stride) + x),
                    *(reinterpret_cast<const ScalarType *>(in.ptr() + 3 * in_stride) + x),
                };
                std::memcpy(out.ptr() + x * 4 * sizeof(ScalarType), data, 4 * sizeof(ScalarType));
            }
        }
        else
        {
            // Bottom partial block: copy the partial_y valid rows and pad the
            // remaining lanes of each quad with zeros.
            for(size_t x = window_start_x; x < window_end_x; ++x)
            {
                ScalarType data[4] = { 0, 0, 0, 0 };

                for(size_t y = 0; y < partial_y; ++y)
                {
                    data[y] = *(reinterpret_cast<const ScalarType *>(in.ptr() + y * in_stride) + x);
                }

                std::memcpy(out.ptr() + x * 4 * sizeof(ScalarType), data, 4 * sizeof(ScalarType));
            }
        }
    },
    in, out);
}
174 
run(const Window & window,const ThreadInfo & info)175 void NEGEMMInterleave4x4Kernel::run(const Window &window, const ThreadInfo &info)
176 {
177     ARM_COMPUTE_UNUSED(info);
178     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
179     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
180     ARM_COMPUTE_ERROR_ON(_func == nullptr);
181     /*
182     *  This kernel puts the values in a 4x4 block of Matrix A on the same row (Interleaved values)
183     *         |a00 a01 a02 a03|
184     *         |a10 a11 a12 a13|
185     *         |a20 a21 a22 a23| = | a00 a10 a20 a30 || a01 a11 a21 a31 || a02 a12 a22 a32 || a03 a13 a23 a33 |
186     *         |a30 a31 a32 a33|
187     *
188     *         After this operation, the output matrix will have the following shape: [ height * 4, ceil(width / 4.0f) ]
189     */
190     (this->*_func)(_input, _output, window);
191 }
192