/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include <assert.h>
#include <stdint.h>

#include <algorithm>
#include <type_traits>

#include "arm_gemm.hpp"
#include "ndrange.hpp"
#include "utils.hpp"

#include "mergeresults.hpp"
#include "transform.hpp"

#ifdef CYCLE_PROFILING
#include "profiler.hpp"
#endif

namespace arm_gemm {

// Implementation of the GemmCommon abstract class.
template<typename strategy, typename To, typename Tr>
class GemmHybridQuantized : public GemmCommon<To, Tr> {
    typedef typename strategy::operand_type Toi;
    typedef typename strategy::result_type Tri;

    /* const properties set by constructor */
    const CPUInfo * const _ci;

    const unsigned int _Msize;
    const unsigned int _Nsize;
    const unsigned int _Ksize;

    const unsigned int _nbatches;
    const unsigned int _nmulti;

    /* Blocking info */
    const unsigned int _k_block;
    const unsigned int _n_block;
    const unsigned int _Mround;

    /* Pretransposed buffer. */
    const Toi *_B_transposed=nullptr;

    const NDRange<4> _window_range;

    Requantize32 _qp;
    int32_t *row_bias = nullptr;
    int32_t *col_bias = nullptr;

    void *working_space = nullptr;

    unsigned int _nthreads;

    unsigned int get_col_sum_size() const {
        return _Nsize * _nmulti * sizeof(int32_t);
    }

    static unsigned int compute_k_block(const GemmArgs &args) {
        // We don't support K blocking, as we only temporarily store 32-bit results.
        // Note: this early return is intentional; the blocking logic below is
        // currently unreachable, but is retained in case K blocking is supported later.
        return args._Ksize;

        if (args._cfg && args._cfg->inner_block_size) {
            return args._cfg->inner_block_size;
        }

        const unsigned int L1_size = args._ci->get_L1_cache_size();

        // k_block: Find out how much of the larger array can be loaded into half the cache.
        // This should account for associative caches.
        unsigned int k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));

        // Needs to be (at least a single) multiple of the K unroll level.
        k_block /= strategy::k_unroll();
        k_block = std::max(k_block, 1U) * strategy::k_unroll();

        // Now tune to presented problem size; this is how many blocks we need.
        unsigned int numk_blocks = iceildiv(args._Ksize, k_block);

        // So divide the space equally into that many blocks.
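        // Illustrative arithmetic (hypothetical numbers): with _Ksize=1000 and a
        // cache-derived k_block of 384, numk_blocks = iceildiv(1000, 384) = 3, so
        // the even split below gives k_block = iceildiv(1000, 3) = 334, which a
        // k_unroll of 4 would then round up to 336.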
        k_block = iceildiv(args._Ksize, numk_blocks);

        // And round UP to the K unroll level required.
        k_block = roundup(k_block, strategy::k_unroll());

        return k_block;
    }

    static unsigned int compute_n_block(const GemmArgs &args) {
        if (args._cfg && args._cfg->outer_block_size) {
            return args._cfg->outer_block_size;
        }

        const unsigned int k_block = compute_k_block(args);
        const unsigned int L2_size = args._ci->get_L2_cache_size();

        // n_block: Work out how many rows (of length k_block) will fit in the L2.
        // Don't allocate more than 90% of the L2, to allow for overheads, and subtract off the L1 contents.
        const unsigned int scaled_l2_size = (L2_size * 9) / 10;
        const unsigned int k_block_area = k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height());

        // .. if the L1 contents are bigger than the L2, just return a minimal size block.
        if (k_block_area > scaled_l2_size) {
            return strategy::out_width();
        }

        unsigned int n_block = (scaled_l2_size - k_block_area) / (sizeof(Toi) * k_block);

        // Needs to be (at least a single) multiple of the kernel output width.
        n_block /= strategy::out_width();
        n_block = std::max(n_block, 1u) * strategy::out_width();

        // And tune to the presented problem size.
        unsigned int numblocks = iceildiv(args._Nsize, n_block);
        n_block = iceildiv(args._Nsize, numblocks);
        n_block = roundup(n_block, strategy::out_width());

        assert(n_block > 0);

        return n_block;
    }

public:
    GemmHybridQuantized(GemmHybridQuantized &) = delete;
    GemmHybridQuantized & operator= (GemmHybridQuantized &) = delete;

    /* Constructor */
    GemmHybridQuantized(const GemmArgs &args, const Requantize32 &qp)
              : _ci(args._ci), _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
                _nbatches(args._nbatches), _nmulti(args._nmulti),
                _k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
                _Mround(roundup(args._Msize, strategy::out_height())),
                _window_range(iceildiv(args._Msize, strategy::out_height()), _nbatches, iceildiv(_Nsize, _n_block), _nmulti),
                _qp(qp), _nthreads(args._maxthreads) { }

    // Interface implementation - Compulsory functions
    ndrange_t get_window_size() const override {
        return { _window_range.total_size() };
    }

    // This kernel can always be dynamically scheduled.
    bool supports_dynamic_scheduling() const override {
        return true;
    }

    // Execute
    void execute(const ndcoord_t &work_range, const ndcoord_t &, int threadid) override {
#ifdef CYCLE_PROFILING
        profiler prof;
#endif
        strategy strat(_ci);

        uintptr_t working_int = reinterpret_cast<uintptr_t>(working_space);

        Tri *result_buffer = reinterpret_cast<Tri *>(working_int + (threadid * strategy::out_height() * _Nsize * sizeof(Tri)));

        /* Make sure we've been set up correctly. */
        assert(_B_transposed);
        static_assert(std::is_same<To, Toi>::value, "gemm_native: Operand types must be the same.");

        /* For now, each work item implies all the K for a given output
         * pixel (so we don't need to synchronize access to the output
         * array). So separate the loop over K blocks here.
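         *
         * Note: compute_k_block() currently returns the full _Ksize (no K
         * blocking), so this outer loop over K blocks executes exactly once.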
         */
        for (unsigned int k0=0; k0<_Ksize; k0+=_k_block) {
            unsigned int kmax   = std::min(k0 + _k_block, _Ksize);
            unsigned int kern_k = roundup(kmax-k0, strategy::k_unroll());

            auto p = _window_range.iterator(work_range.get_position(0), work_range.get_position_end(0));

            if (p.done()) {
                return;
            }

            do {
                const unsigned int m_start = p.dim(0) * strategy::out_height();
                const unsigned int m_end   = std::min((p.dim(0) + 1) * strategy::out_height(), _Msize);
                const unsigned int batch   = p.dim(1);
                const unsigned int n0      = p.dim(2) * _n_block;
                const unsigned int nmax    = std::min(n0 + _n_block, _Nsize);
                const unsigned int multi   = p.dim(3);

                int32_t local_row_sums[strategy::out_height()];

                const Toi *b_panel = _B_transposed +
                                     (multi * roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll())) +
                                     (k0 * roundup(_Nsize, strategy::out_width())) +
                                     (n0 * kern_k);

                {
#ifdef CYCLE_PROFILING
                    auto p = prof.ScopedProfiler(PROFILE_KERNEL, (m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width()));
#endif
                    strat.kernel(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + (m_start * this->_lda) + k0, this->_lda,
                                 b_panel,
                                 result_buffer, (nmax-n0),
                                 (m_end - m_start), (nmax - n0), kern_k,
                                 nullptr, Activation(), false);
                }

                {
#ifdef CYCLE_PROFILING
                    auto p = prof.ScopedProfiler(PROFILE_ROWSUMS, (m_end - m_start) * _Ksize);
#endif
                    compute_row_sums(_qp, _Ksize, (m_end - m_start),
                                     this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + (m_start * this->_lda), this->_lda,
                                     local_row_sums);
                }

                {
#ifdef CYCLE_PROFILING
                    auto p = prof.ScopedProfiler(PROFILE_QUANTIZE, (m_end - m_start) * _Nsize);
#endif

                    requantize_block_32(_qp, (nmax - n0), (m_end - m_start), result_buffer, (nmax - n0),
                                        this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc,
                                        local_row_sums, col_bias + (multi * _Nsize) + n0, n0);
                }
            } while (p.next_dim0());
        }
    }

    // Working space needed for intermediate result buffers.
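    // Each thread gets its own slab of strategy::out_height() rows of _Nsize
    // 32-bit accumulators, so threads never contend on an intermediate buffer.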
    size_t get_working_size() const override {
        return (_nthreads * strategy::out_height() * _Nsize * sizeof(Tri));
    }

    void set_working_space(void *buffer) override {
        working_space = buffer;
    }

    // Interface implementation - pretransposed B
    bool B_is_pretransposed() const override {
        return true;
    }

    bool B_pretranspose_required() const override {
        return (_B_transposed==nullptr);
    }

    size_t get_B_pretransposed_array_size() const override {
        return get_col_sum_size() + (roundup(_Nsize, strategy::out_width()) * roundup(_Ksize, strategy::k_unroll()) * _nmulti * sizeof(Toi));
    }

    void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        // The pretransposed buffer holds the column sums first, then the
        // transposed B panels themselves.
        col_bias = reinterpret_cast<int32_t *>(in_buffer);

        for (unsigned int i=0; i<_nmulti; i++) {
            compute_col_sums(_qp, _Nsize, _Ksize, B + (i * B_multi_stride), ldb, col_bias + (i * _Nsize), _Ksize, i, 0);
        }

        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        Toi *buffer = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
        _B_transposed = buffer;
        strategy strat(_ci);

        for (unsigned int multi=0; multi<_nmulti; multi++) {
            for (unsigned int k0=0; k0<_Ksize; k0+=_k_block) {
                const unsigned int kmax   = std::min(k0 + _k_block, _Ksize);
                const unsigned int k_size = roundup(kmax-k0, strategy::k_unroll());

                for (unsigned int x0=0; x0<_Nsize; x0+=_n_block) {
                    const unsigned int xmax = std::min(x0+_n_block, _Nsize);

                    const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size;

                    strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
                                              x0, xmax, k0, kmax);

                    buffer += size;
                }
            }
        }
    }

    void set_pretransposed_B_data(void *in_buffer) override {
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        _B_transposed = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
        col_bias = reinterpret_cast<int32_t *>(in_buffer);
    }

    void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override {
        _qp.bias = bias;
        _qp.bias_multi_stride = bias_multi_stride;
    }
};

} // namespace arm_gemm
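// Usage sketch (illustrative only; `allocate_aligned` and the per-thread
// window slicing are assumed helpers, not part of this header). A caller
// drives the pretransposed-B interface roughly like this:
//
//   GemmHybridQuantized<strategy, To, Tr> gemm(args, qp);
//   if (gemm.B_pretranspose_required()) {
//       void *b_buffer = allocate_aligned(gemm.get_B_pretransposed_array_size());
//       gemm.pretranspose_B_array(b_buffer, B, ldb, B_multi_stride);
//   }
//   gemm.set_working_space(allocate_aligned(gemm.get_working_size()));
//   // Then split gemm.get_window_size() across threads and call
//   // gemm.execute(sub_range, {}, threadid) on each.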