• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2016-2020 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 
25 #include "arm_compute/core/Helpers.h"
26 
27 #include "arm_compute/core/Utils.h"
28 
29 #include <algorithm>
30 #include <cmath>
31 #include <cstdint>
32 #include <fstream>
33 #include <map>
34 #include <string>
35 
36 namespace arm_compute
37 {
read_file(const std::string & filename,bool binary)38 std::string read_file(const std::string &filename, bool binary)
39 {
40     std::string   out;
41     std::ifstream fs;
42 
43 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
44     try
45     {
46 #endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
47         fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
48         std::ios_base::openmode mode = std::ios::in;
49 
50         if(binary)
51         {
52             mode |= std::ios::binary;
53         }
54 
55         fs.open(filename, mode);
56 
57         // Go to the end of the file
58         fs.seekg(0, std::ios::end);
59         // Reserve the memory required to store the file's content
60         out.reserve(fs.tellg());
61         // Go back to the beginning of the file
62         fs.seekg(0, std::ios::beg);
63         // Copy the content of the file
64         out.assign(std::istreambuf_iterator<char>(fs), std::istreambuf_iterator<char>());
65 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
66     }
67     catch(const std::ifstream::failure &e)
68     {
69         ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", filename.c_str(), e.what());
70     }
71 #endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
72 
73     return out;
74 }
75 
string_from_format(Format format)76 const std::string &string_from_format(Format format)
77 {
78     static std::map<Format, const std::string> formats_map =
79     {
80         { Format::UNKNOWN, "UNKNOWN" },
81         { Format::U8, "U8" },
82         { Format::S16, "S16" },
83         { Format::U16, "U16" },
84         { Format::S32, "S32" },
85         { Format::U32, "U32" },
86         { Format::F16, "F16" },
87         { Format::F32, "F32" },
88         { Format::UV88, "UV88" },
89         { Format::RGB888, "RGB888" },
90         { Format::RGBA8888, "RGBA8888" },
91         { Format::YUV444, "YUV444" },
92         { Format::YUYV422, "YUYV422" },
93         { Format::NV12, "NV12" },
94         { Format::NV21, "NV21" },
95         { Format::IYUV, "IYUV" },
96         { Format::UYVY422, "UYVY422" }
97     };
98 
99     return formats_map[format];
100 }
101 
string_from_channel(Channel channel)102 const std::string &string_from_channel(Channel channel)
103 {
104     static std::map<Channel, const std::string> channels_map =
105     {
106         { Channel::UNKNOWN, "UNKNOWN" },
107         { Channel::R, "R" },
108         { Channel::G, "G" },
109         { Channel::B, "B" },
110         { Channel::A, "A" },
111         { Channel::Y, "Y" },
112         { Channel::U, "U" },
113         { Channel::V, "V" },
114         { Channel::C0, "C0" },
115         { Channel::C1, "C1" },
116         { Channel::C2, "C2" },
117         { Channel::C3, "C3" }
118     };
119 
120     return channels_map[channel];
121 }
122 
string_from_data_layout(DataLayout dl)123 const std::string &string_from_data_layout(DataLayout dl)
124 {
125     static std::map<DataLayout, const std::string> dl_map =
126     {
127         { DataLayout::UNKNOWN, "UNKNOWN" },
128         { DataLayout::NCHW, "NCHW" },
129         { DataLayout::NHWC, "NHWC" },
130     };
131 
132     return dl_map[dl];
133 }
134 
string_from_data_type(DataType dt)135 const std::string &string_from_data_type(DataType dt)
136 {
137     static std::map<DataType, const std::string> dt_map =
138     {
139         { DataType::UNKNOWN, "UNKNOWN" },
140         { DataType::S8, "S8" },
141         { DataType::U8, "U8" },
142         { DataType::S16, "S16" },
143         { DataType::U16, "U16" },
144         { DataType::S32, "S32" },
145         { DataType::U32, "U32" },
146         { DataType::S64, "S64" },
147         { DataType::U64, "U64" },
148         { DataType::F16, "F16" },
149         { DataType::F32, "F32" },
150         { DataType::F64, "F64" },
151         { DataType::SIZET, "SIZET" },
152         { DataType::QSYMM8, "QSYMM8" },
153         { DataType::QSYMM8_PER_CHANNEL, "QSYMM8_PER_CHANNEL" },
154         { DataType::QASYMM8, "QASYMM8" },
155         { DataType::QASYMM8_SIGNED, "QASYMM8_SIGNED" },
156         { DataType::QSYMM16, "QSYMM16" },
157         { DataType::QASYMM16, "QASYMM16" },
158     };
159 
160     return dt_map[dt];
161 }
162 
string_from_activation_func(ActivationLayerInfo::ActivationFunction act)163 const std::string &string_from_activation_func(ActivationLayerInfo::ActivationFunction act)
164 {
165     static std::map<ActivationLayerInfo::ActivationFunction, const std::string> act_map =
166     {
167         { ActivationLayerInfo::ActivationFunction::ABS, "ABS" },
168         { ActivationLayerInfo::ActivationFunction::LINEAR, "LINEAR" },
169         { ActivationLayerInfo::ActivationFunction::LOGISTIC, "LOGISTIC" },
170         { ActivationLayerInfo::ActivationFunction::RELU, "RELU" },
171         { ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, "BRELU" },
172         { ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, "LU_BRELU" },
173         { ActivationLayerInfo::ActivationFunction::LEAKY_RELU, "LRELU" },
174         { ActivationLayerInfo::ActivationFunction::SOFT_RELU, "SRELU" },
175         { ActivationLayerInfo::ActivationFunction::ELU, "ELU" },
176         { ActivationLayerInfo::ActivationFunction::SQRT, "SQRT" },
177         { ActivationLayerInfo::ActivationFunction::SQUARE, "SQUARE" },
178         { ActivationLayerInfo::ActivationFunction::TANH, "TANH" },
179         { ActivationLayerInfo::ActivationFunction::IDENTITY, "IDENTITY" },
180         { ActivationLayerInfo::ActivationFunction::HARD_SWISH, "HARD_SWISH" }
181 
182     };
183 
184     return act_map[act];
185 }
186 
string_from_matrix_pattern(MatrixPattern pattern)187 const std::string &string_from_matrix_pattern(MatrixPattern pattern)
188 {
189     static std::map<MatrixPattern, const std::string> pattern_map =
190     {
191         { MatrixPattern::BOX, "BOX" },
192         { MatrixPattern::CROSS, "CROSS" },
193         { MatrixPattern::DISK, "DISK" },
194         { MatrixPattern::OTHER, "OTHER" },
195     };
196 
197     return pattern_map[pattern];
198 }
199 
string_from_non_linear_filter_function(NonLinearFilterFunction function)200 const std::string &string_from_non_linear_filter_function(NonLinearFilterFunction function)
201 {
202     static std::map<NonLinearFilterFunction, const std::string> func_map =
203     {
204         { NonLinearFilterFunction::MAX, "MAX" },
205         { NonLinearFilterFunction::MEDIAN, "MEDIAN" },
206         { NonLinearFilterFunction::MIN, "MIN" },
207     };
208 
209     return func_map[function];
210 }
211 
string_from_interpolation_policy(InterpolationPolicy policy)212 const std::string &string_from_interpolation_policy(InterpolationPolicy policy)
213 {
214     static std::map<InterpolationPolicy, const std::string> interpolation_policy_map =
215     {
216         { InterpolationPolicy::AREA, "AREA" },
217         { InterpolationPolicy::BILINEAR, "BILINEAR" },
218         { InterpolationPolicy::NEAREST_NEIGHBOR, "NEAREST_NEIGHBOUR" },
219     };
220 
221     return interpolation_policy_map[policy];
222 }
223 
string_from_border_mode(BorderMode border_mode)224 const std::string &string_from_border_mode(BorderMode border_mode)
225 {
226     static std::map<BorderMode, const std::string> border_mode_map =
227     {
228         { BorderMode::UNDEFINED, "UNDEFINED" },
229         { BorderMode::CONSTANT, "CONSTANT" },
230         { BorderMode::REPLICATE, "REPLICATE" },
231     };
232 
233     return border_mode_map[border_mode];
234 }
235 
string_from_norm_type(NormType type)236 const std::string &string_from_norm_type(NormType type)
237 {
238     static std::map<NormType, const std::string> norm_type_map =
239     {
240         { NormType::IN_MAP_1D, "IN_MAP_1D" },
241         { NormType::IN_MAP_2D, "IN_MAP_2D" },
242         { NormType::CROSS_MAP, "CROSS_MAP" },
243     };
244 
245     return norm_type_map[type];
246 }
247 
string_from_pooling_type(PoolingType type)248 const std::string &string_from_pooling_type(PoolingType type)
249 {
250     static std::map<PoolingType, const std::string> pool_type_map =
251     {
252         { PoolingType::MAX, "MAX" },
253         { PoolingType::AVG, "AVG" },
254         { PoolingType::L2, "L2" },
255     };
256 
257     return pool_type_map[type];
258 }
259 
string_from_gemmlowp_output_stage(GEMMLowpOutputStageType output_stage)260 const std::string &string_from_gemmlowp_output_stage(GEMMLowpOutputStageType output_stage)
261 {
262     static std::map<GEMMLowpOutputStageType, const std::string> output_stage_map =
263     {
264         { GEMMLowpOutputStageType::NONE, "" },
265         { GEMMLowpOutputStageType::QUANTIZE_DOWN, "quantize_down" },
266         { GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT, "quantize_down_fixedpoint" },
267         { GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT, "quantize_down_float" }
268     };
269 
270     return output_stage_map[output_stage];
271 }
272 
string_from_pixel_value(const PixelValue & value,const DataType data_type)273 std::string string_from_pixel_value(const PixelValue &value, const DataType data_type)
274 {
275     std::stringstream ss;
276     std::string       converted_string;
277 
278     switch(data_type)
279     {
280         case DataType::U8:
281         case DataType::QASYMM8:
282             // Needs conversion to 32 bit, otherwise interpreted as ASCII values
283             ss << uint32_t(value.get<uint8_t>());
284             converted_string = ss.str();
285             break;
286         case DataType::S8:
287         case DataType::QASYMM8_SIGNED:
288         case DataType::QSYMM8_PER_CHANNEL:
289             // Needs conversion to 32 bit, otherwise interpreted as ASCII values
290             ss << int32_t(value.get<int8_t>());
291             converted_string = ss.str();
292             break;
293         case DataType::U16:
294         case DataType::QASYMM16:
295             ss << value.get<uint16_t>();
296             converted_string = ss.str();
297             break;
298         case DataType::S16:
299         case DataType::QSYMM16:
300             ss << value.get<int16_t>();
301             converted_string = ss.str();
302             break;
303         case DataType::U32:
304             ss << value.get<uint32_t>();
305             converted_string = ss.str();
306             break;
307         case DataType::S32:
308             ss << value.get<int32_t>();
309             converted_string = ss.str();
310             break;
311         case DataType::F32:
312             converted_string = float_to_string_with_full_precision(value.get<float>());
313             break;
314         case DataType::F16:
315             static_assert(sizeof(half) == 2, "Half must be 16 bit");
316             ss << value.get<half>();
317             converted_string = ss.str();
318             break;
319         default:
320             ARM_COMPUTE_ERROR("Not handled");
321     }
322 
323     return converted_string;
324 }
325 
data_type_from_name(const std::string & name)326 DataType data_type_from_name(const std::string &name)
327 {
328     static const std::map<std::string, DataType> data_types =
329     {
330         { "f16", DataType::F16 },
331         { "f32", DataType::F32 },
332         { "qasymm8", DataType::QASYMM8 },
333     };
334 
335 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
336     try
337     {
338 #endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
339         return data_types.at(utility::tolower(name));
340 
341 #ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
342     }
343     catch(const std::out_of_range &)
344     {
345         ARM_COMPUTE_ERROR_VAR("Invalid data type name: %s", name.c_str());
346     }
347 #endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
348 }
349 
/** Lower a given string.
 *
 * @param[in] val Given string to lower.
 *
 * @return The lowered string.
 */
std::string lower_string(const std::string &val)
{
    std::string res = val;
    // Cast through unsigned char before calling tolower: passing a negative
    // char value (possible for non-ASCII input when char is signed) is
    // undefined behaviour.
    std::transform(res.begin(), res.end(), res.begin(), [](unsigned char c)
    {
        return static_cast<char>(::tolower(c));
    });
    return res;
}
356 
calculate_same_pad(TensorShape input_shape,TensorShape weights_shape,PadStrideInfo conv_info,DataLayout data_layout,const Size2D & dilation,const DimensionRoundingType & rounding_type)357 PadStrideInfo calculate_same_pad(TensorShape input_shape, TensorShape weights_shape, PadStrideInfo conv_info, DataLayout data_layout, const Size2D &dilation,
358                                  const DimensionRoundingType &rounding_type)
359 {
360     const auto &strides = conv_info.stride();
361     ARM_COMPUTE_ERROR_ON_MSG((strides.first < 1 || strides.second < 1), "Stride values should be greater than or equal to 1.");
362 
363     const unsigned int width_idx     = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
364     const unsigned int height_idx    = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
365     const unsigned int in_width      = input_shape[width_idx];
366     const unsigned int in_height     = input_shape[height_idx];
367     const unsigned int kernel_width  = weights_shape[width_idx];
368     const unsigned int kernel_height = weights_shape[height_idx];
369 
370     // Calculate output dimensions
371     const auto         is_ceil    = static_cast<unsigned int>(rounding_type == DimensionRoundingType::CEIL);
372     const unsigned int out_width  = ((in_width - is_ceil) + strides.first - 1) / strides.first + is_ceil;
373     const unsigned int out_height = ((in_height - is_ceil) + strides.second - 1) / strides.second + is_ceil;
374 
375     // Calculate effective weights sizes
376     const int real_weight_width  = (kernel_width - 1) * dilation.x() + 1;
377     const int real_weight_height = (kernel_height - 1) * dilation.y() + 1;
378 
379     // Calculate total pad
380     const int pad_width  = std::max(0, static_cast<int>((out_width - 1) * strides.first + real_weight_width - in_width));
381     const int pad_height = std::max(0, static_cast<int>((out_height - 1) * strides.second + real_weight_height - in_height));
382 
383     // Calculate individual paddings
384     const unsigned int pad_left   = pad_width / 2;
385     const unsigned int pad_top    = pad_height / 2;
386     const unsigned int pad_right  = pad_width - pad_left;
387     const unsigned int pad_bottom = pad_height - pad_top;
388 
389     PadStrideInfo same_info(strides.first, strides.second, pad_left, pad_right, pad_top, pad_bottom, rounding_type);
390 
391     // Check for correctness of predicted output shape against the one calculated using the generated info
392     const auto out_dims = scaled_dimensions(in_width, in_height, kernel_width, kernel_height, same_info, dilation);
393     ARM_COMPUTE_ERROR_ON(out_dims.first != out_width || out_dims.second != out_height);
394     ARM_COMPUTE_UNUSED(out_dims);
395 
396     return same_info;
397 }
398 
deconvolution_output_dimensions(unsigned int in_width,unsigned int in_height,unsigned int kernel_width,unsigned int kernel_height,const PadStrideInfo & pad_stride_info)399 std::pair<unsigned int, unsigned int> deconvolution_output_dimensions(unsigned int in_width, unsigned int in_height,
400                                                                       unsigned int kernel_width, unsigned int kernel_height,
401                                                                       const PadStrideInfo &pad_stride_info)
402 {
403     const unsigned int pad_left   = pad_stride_info.pad_left();
404     const unsigned int pad_top    = pad_stride_info.pad_top();
405     const unsigned int pad_right  = pad_stride_info.pad_right();
406     const unsigned int pad_bottom = pad_stride_info.pad_bottom();
407     const unsigned int stride_x   = pad_stride_info.stride().first;
408     const unsigned int stride_y   = pad_stride_info.stride().second;
409 
410     ARM_COMPUTE_ERROR_ON(in_width < 1 || in_height < 1);
411     ARM_COMPUTE_ERROR_ON(((in_width - 1) * stride_x + kernel_width) < (pad_left + pad_right));
412     ARM_COMPUTE_ERROR_ON(((in_height - 1) * stride_y + kernel_height) < (pad_top + pad_bottom));
413     const int w = stride_x * (in_width - 1) + kernel_width - (pad_left + pad_right);
414     const int h = stride_y * (in_height - 1) + kernel_height - (pad_top + pad_bottom);
415 
416     return std::make_pair<unsigned int, unsigned int>(w, h);
417 }
418 
scaled_dimensions(int width,int height,int kernel_width,int kernel_height,const PadStrideInfo & pad_stride_info,const Size2D & dilation)419 std::pair<unsigned int, unsigned int> scaled_dimensions(int width, int height,
420                                                         int kernel_width, int kernel_height,
421                                                         const PadStrideInfo &pad_stride_info,
422                                                         const Size2D        &dilation)
423 {
424     const int dilation_x = dilation.x();
425     const int dilation_y = dilation.y();
426     const int pad_left   = pad_stride_info.pad_left();
427     const int pad_top    = pad_stride_info.pad_top();
428     const int pad_right  = pad_stride_info.pad_right();
429     const int pad_bottom = pad_stride_info.pad_bottom();
430     const int stride_x   = pad_stride_info.stride().first;
431     const int stride_y   = pad_stride_info.stride().second;
432     int       w          = 0;
433     int       h          = 0;
434     switch(pad_stride_info.round())
435     {
436         case DimensionRoundingType::FLOOR:
437             w = static_cast<int>(std::floor((static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) + 1));
438             h = static_cast<int>(std::floor((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) / stride_y) + 1));
439             break;
440         case DimensionRoundingType::CEIL:
441             w = static_cast<int>(std::ceil((static_cast<float>(width + pad_left + pad_right - (dilation_x * (kernel_width - 1) + 1)) / stride_x) + 1));
442             h = static_cast<int>(std::ceil((static_cast<float>(height + pad_top + pad_bottom - (dilation_y * (kernel_height - 1) + 1)) / stride_y) + 1));
443             break;
444         default:
445             ARM_COMPUTE_ERROR("Unsupported rounding type");
446     }
447 
448     w = std::max(1, w);
449     h = std::max(1, h);
450     return std::make_pair(static_cast<unsigned int>(w), static_cast<unsigned int>(h));
451 }
452 
needs_serialized_reduction(ReductionOperation op,DataType dt,unsigned int axis)453 bool needs_serialized_reduction(ReductionOperation op, DataType dt, unsigned int axis)
454 {
455     const bool is_min_max        = (op == ReductionOperation::MAX || op == ReductionOperation::MIN);
456     const bool is_quantized_type = is_data_type_quantized(dt);
457     const bool is_first_dim      = (axis == 0);
458 
459     return !is_first_dim || is_min_max || is_quantized_type;
460 }
461 
get_softmax_output_quantization_info(DataType input_type,bool is_log)462 QuantizationInfo get_softmax_output_quantization_info(DataType input_type, bool is_log)
463 {
464     // Note: Output quantization info for softmax should always have
465     // * Softmax with QASYMM8: scale = 1/256, offset = 0
466     // * Softmax with QASYMM8_SIGNED: scale = 1/256, offset = -128
467     // * LogSoftmax with QASYMM8: scale = 1/256, offset = 0
468     // * LogSoftmax with QASYMM8_SIGNED: scale = 16/256, offset = 127
469     if(is_data_type_quantized_asymmetric_signed(input_type))
470     {
471         if(is_log)
472         {
473             return QuantizationInfo(16.f / 256, 127);
474         }
475         else
476         {
477             return QuantizationInfo(1.f / 256, -128);
478         }
479     }
480     return QuantizationInfo(1.f / 256, 0);
481 }
482 
get_quantized_activation_min_max(ActivationLayerInfo act_info,DataType data_type,UniformQuantizationInfo oq_info)483 std::pair<int32_t, int32_t> get_quantized_activation_min_max(ActivationLayerInfo act_info, DataType data_type, UniformQuantizationInfo oq_info)
484 {
485     const bool is_qasymm8_signed = is_data_type_quantized_asymmetric_signed(data_type);
486     const auto a                 = act_info.a();
487     const auto b                 = act_info.b();
488     const int  a_int             = is_qasymm8_signed ? quantize_qasymm8_signed(a, oq_info) : quantize_qasymm8(a, oq_info);
489     const int  b_int             = is_qasymm8_signed ? quantize_qasymm8_signed(b, oq_info) : quantize_qasymm8(b, oq_info);
490     const auto type_max_value    = std::get<1>(get_min_max(data_type)).get<int32_t>();
491 
492     const int32_t min_activation = act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU ? oq_info.offset : b_int;
493     const int32_t max_activation = act_info.activation() == ActivationLayerInfo::ActivationFunction::RELU ? type_max_value : a_int;
494 
495     return std::make_pair(min_activation, max_activation);
496 }
497 
get_padding_info(std::initializer_list<const ITensor * > tensors)498 std::unordered_map<const ITensorInfo *, PaddingSize> get_padding_info(std::initializer_list<const ITensor *> tensors)
499 {
500     std::unordered_map<const ITensorInfo *, PaddingSize> res;
501 
502     for(const ITensor *tensor : tensors)
503     {
504         if(tensor)
505         {
506             res.insert({ tensor->info(), tensor->info()->padding() });
507         }
508     }
509 
510     return res;
511 }
512 
get_padding_info(std::initializer_list<const ITensorInfo * > infos)513 std::unordered_map<const ITensorInfo *, PaddingSize> get_padding_info(std::initializer_list<const ITensorInfo *> infos)
514 {
515     std::unordered_map<const ITensorInfo *, PaddingSize> res;
516 
517     for(const ITensorInfo *info : infos)
518     {
519         if(info)
520         {
521             res.insert({ info, info->padding() });
522         }
523     }
524 
525     return res;
526 }
527 
has_padding_changed(const std::unordered_map<const ITensorInfo *,PaddingSize> & padding_map)528 bool has_padding_changed(const std::unordered_map<const ITensorInfo *, PaddingSize> &padding_map)
529 {
530     return std::find_if(padding_map.begin(), padding_map.end(), [](const std::pair<const ITensorInfo *, PaddingSize> &padding_info)
531     {
532         return (padding_info.first->padding() != padding_info.second);
533     })
534     != padding_map.end();
535 }
536 
537 #ifdef ARM_COMPUTE_ASSERTS_ENABLED
print_consecutive_elements(std::ostream & s,DataType dt,const uint8_t * ptr,unsigned int n,int stream_width,const std::string & element_delim)538 void print_consecutive_elements(std::ostream &s, DataType dt, const uint8_t *ptr, unsigned int n, int stream_width, const std::string &element_delim)
539 {
540     switch(dt)
541     {
542         case DataType::U8:
543         case DataType::QASYMM8:
544             print_consecutive_elements_impl<uint8_t>(s, ptr, n, stream_width, element_delim);
545             break;
546         case DataType::S8:
547         case DataType::QSYMM8:
548         case DataType::QASYMM8_SIGNED:
549         case DataType::QSYMM8_PER_CHANNEL:
550             print_consecutive_elements_impl<int8_t>(s, reinterpret_cast<const int8_t *>(ptr), n, stream_width, element_delim);
551             break;
552         case DataType::U16:
553         case DataType::QASYMM16:
554             print_consecutive_elements_impl<uint16_t>(s, reinterpret_cast<const uint16_t *>(ptr), n, stream_width, element_delim);
555             break;
556         case DataType::S16:
557         case DataType::QSYMM16:
558             print_consecutive_elements_impl<int16_t>(s, reinterpret_cast<const int16_t *>(ptr), n, stream_width, element_delim);
559             break;
560         case DataType::U32:
561             print_consecutive_elements_impl<uint32_t>(s, reinterpret_cast<const uint32_t *>(ptr), n, stream_width, element_delim);
562             break;
563         case DataType::S32:
564             print_consecutive_elements_impl<int32_t>(s, reinterpret_cast<const int32_t *>(ptr), n, stream_width, element_delim);
565             break;
566         case DataType::BFLOAT16:
567             print_consecutive_elements_impl<bfloat16>(s, reinterpret_cast<const bfloat16 *>(ptr), n, stream_width, element_delim);
568             break;
569         case DataType::F16:
570             print_consecutive_elements_impl<half>(s, reinterpret_cast<const half *>(ptr), n, stream_width, element_delim);
571             break;
572         case DataType::F32:
573             print_consecutive_elements_impl<float>(s, reinterpret_cast<const float *>(ptr), n, stream_width, element_delim);
574             break;
575         default:
576             ARM_COMPUTE_ERROR("Undefined element size for given data type");
577     }
578 }
579 
max_consecutive_elements_display_width(std::ostream & s,DataType dt,const uint8_t * ptr,unsigned int n)580 int max_consecutive_elements_display_width(std::ostream &s, DataType dt, const uint8_t *ptr, unsigned int n)
581 {
582     switch(dt)
583     {
584         case DataType::U8:
585         case DataType::QASYMM8:
586             return max_consecutive_elements_display_width_impl<uint8_t>(s, ptr, n);
587         case DataType::S8:
588         case DataType::QSYMM8:
589         case DataType::QASYMM8_SIGNED:
590         case DataType::QSYMM8_PER_CHANNEL:
591             return max_consecutive_elements_display_width_impl<int8_t>(s, reinterpret_cast<const int8_t *>(ptr), n);
592         case DataType::U16:
593         case DataType::QASYMM16:
594             return max_consecutive_elements_display_width_impl<uint16_t>(s, reinterpret_cast<const uint16_t *>(ptr), n);
595         case DataType::S16:
596         case DataType::QSYMM16:
597             return max_consecutive_elements_display_width_impl<int16_t>(s, reinterpret_cast<const int16_t *>(ptr), n);
598         case DataType::U32:
599             return max_consecutive_elements_display_width_impl<uint32_t>(s, reinterpret_cast<const uint32_t *>(ptr), n);
600         case DataType::S32:
601             return max_consecutive_elements_display_width_impl<int32_t>(s, reinterpret_cast<const int32_t *>(ptr), n);
602         case DataType::BFLOAT16:
603             return max_consecutive_elements_display_width_impl<bfloat16>(s, reinterpret_cast<const bfloat16 *>(ptr), n);
604         case DataType::F16:
605             return max_consecutive_elements_display_width_impl<half>(s, reinterpret_cast<const half *>(ptr), n);
606         case DataType::F32:
607             return max_consecutive_elements_display_width_impl<float>(s, reinterpret_cast<const float *>(ptr), n);
608         default:
609             ARM_COMPUTE_ERROR("Undefined element size for given data type");
610     }
611     return 0;
612 }
613 #endif /* ARM_COMPUTE_ASSERTS_ENABLED */
614 
615 } // namespace arm_compute