• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2017-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #include "tests/validation/Helpers.h"
25 
26 #include <algorithm>
27 #include <cmath>
28 
29 namespace arm_compute
30 {
31 namespace test
32 {
33 namespace validation
34 {
fill_mask_from_pattern(uint8_t * mask,int cols,int rows,MatrixPattern pattern)35 void fill_mask_from_pattern(uint8_t *mask, int cols, int rows, MatrixPattern pattern)
36 {
37     unsigned int                v = 0;
38     std::mt19937                gen(library->seed());
39     std::bernoulli_distribution dist(0.5);
40 
41     for(int r = 0; r < rows; ++r)
42     {
43         for(int c = 0; c < cols; ++c, ++v)
44         {
45             uint8_t val = 0;
46 
47             switch(pattern)
48             {
49                 case MatrixPattern::BOX:
50                     val = 255;
51                     break;
52                 case MatrixPattern::CROSS:
53                     val = ((r == (rows / 2)) || (c == (cols / 2))) ? 255 : 0;
54                     break;
55                 case MatrixPattern::DISK:
56                     val = (((r - rows / 2.0f + 0.5f) * (r - rows / 2.0f + 0.5f)) / ((rows / 2.0f) * (rows / 2.0f)) + ((c - cols / 2.0f + 0.5f) * (c - cols / 2.0f + 0.5f)) / ((cols / 2.0f) *
57                             (cols / 2.0f))) <= 1.0f ? 255 : 0;
58                     break;
59                 case MatrixPattern::OTHER:
60                     val = (dist(gen) ? 0 : 255);
61                     break;
62                 default:
63                     return;
64             }
65 
66             mask[v] = val;
67         }
68     }
69 
70     if(pattern == MatrixPattern::OTHER)
71     {
72         std::uniform_int_distribution<uint8_t> distribution_u8(0, ((cols * rows) - 1));
73         mask[distribution_u8(gen)] = 255;
74     }
75 }
76 
harris_corners_parameters()77 HarrisCornersParameters harris_corners_parameters()
78 {
79     HarrisCornersParameters params;
80 
81     std::mt19937                           gen(library->seed());
82     std::uniform_real_distribution<float>  threshold_dist(0.f, 0.001f);
83     std::uniform_real_distribution<float>  sensitivity(0.04f, 0.15f);
84     std::uniform_real_distribution<float>  euclidean_distance(0.f, 30.f);
85     std::uniform_int_distribution<uint8_t> int_dist(0, 255);
86 
87     params.threshold             = threshold_dist(gen);
88     params.sensitivity           = sensitivity(gen);
89     params.min_dist              = euclidean_distance(gen);
90     params.constant_border_value = int_dist(gen);
91 
92     return params;
93 }
94 
canny_edge_parameters()95 CannyEdgeParameters canny_edge_parameters()
96 {
97     CannyEdgeParameters params;
98 
99     std::mt19937                           gen(library->seed());
100     std::uniform_int_distribution<uint8_t> int_dist(0, 255);
101     std::uniform_int_distribution<uint8_t> threshold_dist(2, 255);
102 
103     params.constant_border_value = int_dist(gen);
104     params.upper_thresh          = threshold_dist(gen); // upper_threshold >= 2
105     threshold_dist               = std::uniform_int_distribution<uint8_t>(1, params.upper_thresh - 1);
106     params.lower_thresh          = threshold_dist(gen); // lower_threshold >= 1 && lower_threshold < upper_threshold
107 
108     return params;
109 }
110 
111 template <>
convert_from_asymmetric(const SimpleTensor<uint8_t> & src)112 SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint8_t> &src)
113 {
114     const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
115     SimpleTensor<float>            dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
116 #if defined(_OPENMP)
117     #pragma omp parallel for
118 #endif /* _OPENMP */
119     for(int i = 0; i < src.num_elements(); ++i)
120     {
121         dst[i] = dequantize_qasymm8(src[i], quantization_info);
122     }
123     return dst;
124 }
125 
126 template <>
convert_from_asymmetric(const SimpleTensor<int8_t> & src)127 SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<int8_t> &src)
128 {
129     const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
130     SimpleTensor<float>            dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
131 
132 #if defined(_OPENMP)
133     #pragma omp parallel for
134 #endif /* _OPENMP */
135     for(int i = 0; i < src.num_elements(); ++i)
136     {
137         dst[i] = dequantize_qasymm8_signed(src[i], quantization_info);
138     }
139     return dst;
140 }
141 
142 template <>
convert_from_asymmetric(const SimpleTensor<uint16_t> & src)143 SimpleTensor<float> convert_from_asymmetric(const SimpleTensor<uint16_t> &src)
144 {
145     const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
146     SimpleTensor<float>            dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
147 
148 #if defined(_OPENMP)
149     #pragma omp parallel for
150 #endif /* _OPENMP */
151     for(int i = 0; i < src.num_elements(); ++i)
152     {
153         dst[i] = dequantize_qasymm16(src[i], quantization_info);
154     }
155     return dst;
156 }
157 
158 template <>
convert_to_asymmetric(const SimpleTensor<float> & src,const QuantizationInfo & quantization_info)159 SimpleTensor<uint8_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
160 {
161     SimpleTensor<uint8_t>          dst{ src.shape(), DataType::QASYMM8, 1, quantization_info };
162     const UniformQuantizationInfo &qinfo = quantization_info.uniform();
163 
164 #if defined(_OPENMP)
165     #pragma omp parallel for
166 #endif /* _OPENMP */
167     for(int i = 0; i < src.num_elements(); ++i)
168     {
169         dst[i] = quantize_qasymm8(src[i], qinfo);
170     }
171     return dst;
172 }
173 
174 template <>
convert_to_asymmetric(const SimpleTensor<float> & src,const QuantizationInfo & quantization_info)175 SimpleTensor<int8_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
176 {
177     SimpleTensor<int8_t>           dst{ src.shape(), DataType::QASYMM8_SIGNED, 1, quantization_info };
178     const UniformQuantizationInfo &qinfo = quantization_info.uniform();
179 
180 #if defined(_OPENMP)
181     #pragma omp parallel for
182 #endif /* _OPENMP */
183     for(int i = 0; i < src.num_elements(); ++i)
184     {
185         dst[i] = quantize_qasymm8_signed(src[i], qinfo);
186     }
187     return dst;
188 }
189 
190 template <>
convert_to_asymmetric(const SimpleTensor<float> & src,const QuantizationInfo & quantization_info)191 SimpleTensor<uint16_t> convert_to_asymmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
192 {
193     SimpleTensor<uint16_t>         dst{ src.shape(), DataType::QASYMM16, 1, quantization_info };
194     const UniformQuantizationInfo &qinfo = quantization_info.uniform();
195 
196 #if defined(_OPENMP)
197     #pragma omp parallel for
198 #endif /* _OPENMP */
199     for(int i = 0; i < src.num_elements(); ++i)
200     {
201         dst[i] = quantize_qasymm16(src[i], qinfo);
202     }
203     return dst;
204 }
205 
206 template <>
convert_to_symmetric(const SimpleTensor<float> & src,const QuantizationInfo & quantization_info)207 SimpleTensor<int16_t> convert_to_symmetric(const SimpleTensor<float> &src, const QuantizationInfo &quantization_info)
208 {
209     SimpleTensor<int16_t>          dst{ src.shape(), DataType::QSYMM16, 1, quantization_info };
210     const UniformQuantizationInfo &qinfo = quantization_info.uniform();
211 
212 #if defined(_OPENMP)
213     #pragma omp parallel for
214 #endif /* _OPENMP */
215     for(int i = 0; i < src.num_elements(); ++i)
216     {
217         dst[i] = quantize_qsymm16(src[i], qinfo);
218     }
219     return dst;
220 }
221 
222 template <>
convert_from_symmetric(const SimpleTensor<int16_t> & src)223 SimpleTensor<float> convert_from_symmetric(const SimpleTensor<int16_t> &src)
224 {
225     const UniformQuantizationInfo &quantization_info = src.quantization_info().uniform();
226     SimpleTensor<float>            dst{ src.shape(), DataType::F32, 1, QuantizationInfo(), src.data_layout() };
227 
228 #if defined(_OPENMP)
229     #pragma omp parallel for
230 #endif /* _OPENMP */
231     for(int i = 0; i < src.num_elements(); ++i)
232     {
233         dst[i] = dequantize_qsymm16(src[i], quantization_info);
234     }
235     return dst;
236 }
237 
238 template <typename T>
matrix_multiply(const SimpleTensor<T> & a,const SimpleTensor<T> & b,SimpleTensor<T> & out)239 void matrix_multiply(const SimpleTensor<T> &a, const SimpleTensor<T> &b, SimpleTensor<T> &out)
240 {
241     ARM_COMPUTE_ERROR_ON(a.shape()[0] != b.shape()[1]);
242     ARM_COMPUTE_ERROR_ON(a.shape()[1] != out.shape()[1]);
243     ARM_COMPUTE_ERROR_ON(b.shape()[0] != out.shape()[0]);
244 
245     const int M = a.shape()[1]; // Rows
246     const int N = b.shape()[0]; // Cols
247     const int K = b.shape()[1];
248 
249 #if defined(_OPENMP)
250     #pragma omp parallel for collapse(2)
251 #endif /* _OPENMP */
252     for(int y = 0; y < M; ++y)
253     {
254         for(int x = 0; x < N; ++x)
255         {
256             float acc = 0.0f;
257             for(int k = 0; k < K; ++k)
258             {
259                 acc += a[y * K + k] * b[x + k * N];
260             }
261 
262             out[x + y * N] = acc;
263         }
264     }
265 }
266 
267 template <typename T>
transpose_matrix(const SimpleTensor<T> & in,SimpleTensor<T> & out)268 void transpose_matrix(const SimpleTensor<T> &in, SimpleTensor<T> &out)
269 {
270     ARM_COMPUTE_ERROR_ON((in.shape()[0] != out.shape()[1]) || (in.shape()[1] != out.shape()[0]));
271 
272     const int width  = in.shape()[0];
273     const int height = in.shape()[1];
274 
275 #if defined(_OPENMP)
276     #pragma omp parallel for collapse(2)
277 #endif /* _OPENMP */
278     for(int y = 0; y < height; ++y)
279     {
280         for(int x = 0; x < width; ++x)
281         {
282             const T val = in[x + y * width];
283 
284             out[x * height + y] = val;
285         }
286     }
287 }
288 
/** Copy a 2D tile from @p in into @p tile, anchored at @p coord.
 *
 * The tile may extend past the input's X/Y borders: out-of-bound positions are
 * left zero-filled, and only the overlapping region is copied row by row.
 * Dimensions >= 2 of @p coord must be strictly inside the input shape
 * (out-of-bound reads are only tolerated along X and Y).
 *
 * @param[in]  in    Source tensor.
 * @param[out] tile  Destination tile (at most 2 dimensions).
 * @param[in]  coord Top-left anchor of the tile in input coordinates; X/Y may
 *                   be negative or exceed the input extent.
 */
template <typename T>
void get_tile(const SimpleTensor<T> &in, SimpleTensor<T> &tile, const Coordinates &coord)
{
    ARM_COMPUTE_ERROR_ON(tile.shape().num_dimensions() > 2);

    const int w_tile = tile.shape()[0];
    const int h_tile = tile.shape()[1];

    // Fill the tile with zeros
    std::fill(tile.data() + 0, (tile.data() + (w_tile * h_tile)), static_cast<T>(0));

    // Check if with the dimensions greater than 2 we could have out-of-bound reads
    for(size_t d = 2; d < Coordinates::num_max_dimensions; ++d)
    {
        if(coord[d] < 0 || coord[d] >= static_cast<int>(in.shape()[d]))
        {
            ARM_COMPUTE_ERROR("coord[d] < 0 || coord[d] >= in.shape()[d] with d >= 2");
        }
    }

    // Since we could have out-of-bound reads along the X and Y dimensions,
    // we start calculating the input address with x = 0 and y = 0
    Coordinates start_coord = coord;
    start_coord[0]          = 0;
    start_coord[1]          = 0;

    // Get input and roi pointers
    auto in_ptr  = static_cast<const T *>(in(start_coord));
    auto roi_ptr = static_cast<T *>(tile.data());

    // Clip the copy window to the valid input region
    const int x_in_start = std::max(0, coord[0]);
    const int y_in_start = std::max(0, coord[1]);
    const int x_in_end   = std::min(static_cast<int>(in.shape()[0]), coord[0] + w_tile);
    const int y_in_end   = std::min(static_cast<int>(in.shape()[1]), coord[1] + h_tile);

    // Number of elements to copy per row
    const int n = x_in_end - x_in_start;

    // Starting coordinates for the ROI: a negative anchor shifts the copy
    // destination right/down inside the tile by the clipped amount
    const int x_tile_start = coord[0] > 0 ? 0 : std::abs(coord[0]);
    const int y_tile_start = coord[1] > 0 ? 0 : std::abs(coord[1]);

    // Update input pointer to the first valid (clipped) element
    in_ptr += x_in_start;
    in_ptr += (y_in_start * in.shape()[0]);

    // Update ROI pointer to the matching destination inside the tile
    roi_ptr += x_tile_start;
    roi_ptr += (y_tile_start * tile.shape()[0]);

    for(int y = y_in_start; y < y_in_end; ++y)
    {
        // Copy per row, then advance both pointers by one full row stride
        std::copy(in_ptr, in_ptr + n, roi_ptr);

        in_ptr += in.shape()[0];
        roi_ptr += tile.shape()[0];
    }
}
348 
349 template <typename T>
zeros(SimpleTensor<T> & in,const Coordinates & anchor,const TensorShape & shape)350 void zeros(SimpleTensor<T> &in, const Coordinates &anchor, const TensorShape &shape)
351 {
352     ARM_COMPUTE_ERROR_ON(anchor.num_dimensions() != shape.num_dimensions());
353     ARM_COMPUTE_ERROR_ON(in.shape().num_dimensions() > 2);
354     ARM_COMPUTE_ERROR_ON(shape.num_dimensions() > 2);
355 
356     // Check if with the dimensions greater than 2 we could have out-of-bound reads
357     for(size_t d = 0; d < Coordinates::num_max_dimensions; ++d)
358     {
359         if(anchor[d] < 0 || ((anchor[d] + shape[d]) > in.shape()[d]))
360         {
361             ARM_COMPUTE_ERROR("anchor[d] < 0 || (anchor[d] + shape[d]) > in.shape()[d]");
362         }
363     }
364 
365     // Get input pointer
366     auto in_ptr = static_cast<T *>(in(anchor[0] + anchor[1] * in.shape()[0]));
367 
368     const unsigned int n = in.shape()[0];
369 
370     for(unsigned int y = 0; y < shape[1]; ++y)
371     {
372         std::fill(in_ptr, in_ptr + shape[0], 0);
373         in_ptr += n;
374     }
375 }
376 
get_quantized_bounds(const QuantizationInfo & quant_info,float min,float max)377 std::pair<int, int> get_quantized_bounds(const QuantizationInfo &quant_info, float min, float max)
378 {
379     ARM_COMPUTE_ERROR_ON_MSG(min > max, "min must be lower equal than max");
380 
381     const int min_bound = quantize_qasymm8(min, quant_info.uniform());
382     const int max_bound = quantize_qasymm8(max, quant_info.uniform());
383     return std::pair<int, int> { min_bound, max_bound };
384 }
385 
get_quantized_qasymm8_signed_bounds(const QuantizationInfo & quant_info,float min,float max)386 std::pair<int, int> get_quantized_qasymm8_signed_bounds(const QuantizationInfo &quant_info, float min, float max)
387 {
388     ARM_COMPUTE_ERROR_ON_MSG(min > max, "min must be lower equal than max");
389 
390     const int min_bound = quantize_qasymm8_signed(min, quant_info.uniform());
391     const int max_bound = quantize_qasymm8_signed(max, quant_info.uniform());
392     return std::pair<int, int> { min_bound, max_bound };
393 }
394 
get_symm_quantized_per_channel_bounds(const QuantizationInfo & quant_info,float min,float max,size_t channel_id)395 std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id)
396 {
397     ARM_COMPUTE_ERROR_ON_MSG(min > max, "min must be lower equal than max");
398 
399     const int min_bound = quantize_qsymm8_per_channel(min, quant_info, channel_id);
400     const int max_bound = quantize_qsymm8_per_channel(max, quant_info, channel_id);
401     return std::pair<int, int> { min_bound, max_bound };
402 }
403 
// Explicit template instantiations for the element types exercised by the
// validation suite, so the definitions above can live in this .cpp file.
template void get_tile(const SimpleTensor<float> &in, SimpleTensor<float> &roi, const Coordinates &coord);
template void get_tile(const SimpleTensor<half> &in, SimpleTensor<half> &roi, const Coordinates &coord);
template void get_tile(const SimpleTensor<int> &in, SimpleTensor<int> &roi, const Coordinates &coord);
template void get_tile(const SimpleTensor<short> &in, SimpleTensor<short> &roi, const Coordinates &coord);
template void get_tile(const SimpleTensor<char> &in, SimpleTensor<char> &roi, const Coordinates &coord);
template void zeros(SimpleTensor<float> &in, const Coordinates &anchor, const TensorShape &shape);
template void zeros(SimpleTensor<half> &in, const Coordinates &anchor, const TensorShape &shape);
template void transpose_matrix(const SimpleTensor<float> &in, SimpleTensor<float> &out);
template void transpose_matrix(const SimpleTensor<half> &in, SimpleTensor<half> &out);
template void transpose_matrix(const SimpleTensor<int> &in, SimpleTensor<int> &out);
template void transpose_matrix(const SimpleTensor<short> &in, SimpleTensor<short> &out);
template void transpose_matrix(const SimpleTensor<char> &in, SimpleTensor<char> &out);
template void matrix_multiply(const SimpleTensor<float> &a, const SimpleTensor<float> &b, SimpleTensor<float> &out);
template void matrix_multiply(const SimpleTensor<half> &a, const SimpleTensor<half> &b, SimpleTensor<half> &out);
418 
419 } // namespace validation
420 } // namespace test
421 } // namespace arm_compute
422