1 /*
2 * Copyright (c) 2017-2021 Arm Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25 #include "utils/GraphUtils.h"
26
27 #include "arm_compute/core/Helpers.h"
28 #include "arm_compute/core/Types.h"
29 #include "arm_compute/graph/Logger.h"
30 #include "arm_compute/runtime/SubTensor.h"
31
32 #pragma GCC diagnostic push
33 #pragma GCC diagnostic ignored "-Wunused-parameter"
34 #include "utils/ImageLoader.h"
35 #pragma GCC diagnostic pop
36 #include "utils/Utils.h"
37
38 #include <inttypes.h>
39 #include <iomanip>
40 #include <limits>
41
42 using namespace arm_compute::graph_utils;
43
namespace
{
/** Compute the shape a file-layout image maps to in @p data_layout, plus the
 *  permutation vector needed to rearrange the data accordingly.
 *
 * @param[in] shape       Tensor shape in the target layout.
 * @param[in] data_layout Data layout of the tensor.
 *
 * @return Pair of (permuted shape, permutation vector). The permutation vector
 *         stays empty when no permutation is needed (rank <= 2).
 */
std::pair<arm_compute::TensorShape, arm_compute::PermutationVector> compute_permutation_parameters(const arm_compute::TensorShape &shape,
                                                                                                   arm_compute::DataLayout data_layout)
{
    // Set permutation parameters if needed
    arm_compute::TensorShape       permuted_shape = shape;
    arm_compute::PermutationVector perm;
    // Permute only if num_dimensions greater than 2
    if(shape.num_dimensions() > 2)
    {
        perm = (data_layout == arm_compute::DataLayout::NHWC) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);

        // NOTE: perm_shape is the inverse of perm — it maps the target-layout shape back to the
        // source-layout ordering so callers can compare against the file's (W, H) dimensions.
        arm_compute::PermutationVector perm_shape = (data_layout == arm_compute::DataLayout::NCHW) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);
        arm_compute::permute(permuted_shape, perm_shape);
    }

    return std::make_pair(permuted_shape, perm);
}
} // namespace
64
TFPreproccessor(float min_range,float max_range)65 TFPreproccessor::TFPreproccessor(float min_range, float max_range)
66 : _min_range(min_range), _max_range(max_range)
67 {
68 }
preprocess(ITensor & tensor)69 void TFPreproccessor::preprocess(ITensor &tensor)
70 {
71 if(tensor.info()->data_type() == DataType::F32)
72 {
73 preprocess_typed<float>(tensor);
74 }
75 else if(tensor.info()->data_type() == DataType::F16)
76 {
77 preprocess_typed<half>(tensor);
78 }
79 else
80 {
81 ARM_COMPUTE_ERROR("NOT SUPPORTED!");
82 }
83 }
84
85 template <typename T>
preprocess_typed(ITensor & tensor)86 void TFPreproccessor::preprocess_typed(ITensor &tensor)
87 {
88 Window window;
89 window.use_tensor_dimensions(tensor.info()->tensor_shape());
90
91 const float range = _max_range - _min_range;
92 execute_window_loop(window, [&](const Coordinates & id)
93 {
94 const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id));
95 float res = value / 255.f; // Normalize to [0, 1]
96 res = res * range + _min_range; // Map to [min_range, max_range]
97 *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = res;
98 });
99 }
100
CaffePreproccessor(std::array<float,3> mean,bool bgr,float scale)101 CaffePreproccessor::CaffePreproccessor(std::array<float, 3> mean, bool bgr, float scale)
102 : _mean(mean), _bgr(bgr), _scale(scale)
103 {
104 if(_bgr)
105 {
106 std::swap(_mean[0], _mean[2]);
107 }
108 }
109
preprocess(ITensor & tensor)110 void CaffePreproccessor::preprocess(ITensor &tensor)
111 {
112 if(tensor.info()->data_type() == DataType::F32)
113 {
114 preprocess_typed<float>(tensor);
115 }
116 else if(tensor.info()->data_type() == DataType::F16)
117 {
118 preprocess_typed<half>(tensor);
119 }
120 else
121 {
122 ARM_COMPUTE_ERROR("NOT SUPPORTED!");
123 }
124 }
125
126 template <typename T>
preprocess_typed(ITensor & tensor)127 void CaffePreproccessor::preprocess_typed(ITensor &tensor)
128 {
129 Window window;
130 window.use_tensor_dimensions(tensor.info()->tensor_shape());
131 const int channel_idx = get_data_layout_dimension_index(tensor.info()->data_layout(), DataLayoutDimension::CHANNEL);
132
133 execute_window_loop(window, [&](const Coordinates & id)
134 {
135 const T value = *reinterpret_cast<T *>(tensor.ptr_to_element(id)) - T(_mean[id[channel_idx]]);
136 *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value * T(_scale);
137 });
138 }
139
PPMWriter(std::string name,unsigned int maximum)140 PPMWriter::PPMWriter(std::string name, unsigned int maximum)
141 : _name(std::move(name)), _iterator(0), _maximum(maximum)
142 {
143 }
144
access_tensor(ITensor & tensor)145 bool PPMWriter::access_tensor(ITensor &tensor)
146 {
147 std::stringstream ss;
148 ss << _name << _iterator << ".ppm";
149
150 arm_compute::utils::save_to_ppm(tensor, ss.str());
151
152 _iterator++;
153 if(_maximum == 0)
154 {
155 return true;
156 }
157 return _iterator < _maximum;
158 }
159
DummyAccessor(unsigned int maximum)160 DummyAccessor::DummyAccessor(unsigned int maximum)
161 : _iterator(0), _maximum(maximum)
162 {
163 }
164
access_tensor_data()165 bool DummyAccessor::access_tensor_data()
166 {
167 return false;
168 }
169
/** No-op tensor access that counts invocations.
 *
 * Returns true while the call count is below @c _maximum (0 means "never stop").
 * When the count reaches the maximum it wraps back to zero so the accessor can
 * be reused for another run.
 */
bool DummyAccessor::access_tensor(ITensor &tensor)
{
    ARM_COMPUTE_UNUSED(tensor);
    bool ret = _maximum == 0 || _iterator < _maximum;
    if(_iterator == _maximum)
    {
        // Wrap around for reuse
        _iterator = 0;
    }
    else
    {
        _iterator++;
    }
    return ret;
}
184
NumPyAccessor(std::string npy_path,TensorShape shape,DataType data_type,DataLayout data_layout,std::ostream & output_stream)185 NumPyAccessor::NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout, std::ostream &output_stream)
186 : _npy_tensor(), _filename(std::move(npy_path)), _output_stream(output_stream)
187 {
188 NumPyBinLoader loader(_filename, data_layout);
189
190 TensorInfo info(shape, 1, data_type);
191 info.set_data_layout(data_layout);
192
193 _npy_tensor.allocator()->init(info);
194 _npy_tensor.allocator()->allocate();
195
196 loader.access_tensor(_npy_tensor);
197 }
198
199 template <typename T>
access_numpy_tensor(ITensor & tensor,T tolerance)200 void NumPyAccessor::access_numpy_tensor(ITensor &tensor, T tolerance)
201 {
202 const int num_elements = tensor.info()->tensor_shape().total_size();
203 int num_mismatches = utils::compare_tensor<T>(tensor, _npy_tensor, tolerance);
204 float percentage_mismatches = static_cast<float>(num_mismatches) / num_elements;
205
206 _output_stream << "Results: " << 100.f - (percentage_mismatches * 100) << " % matches with the provided output[" << _filename << "]." << std::endl;
207 _output_stream << " " << num_elements - num_mismatches << " out of " << num_elements << " matches with the provided output[" << _filename << "]." << std::endl
208 << std::endl;
209 }
210
access_tensor(ITensor & tensor)211 bool NumPyAccessor::access_tensor(ITensor &tensor)
212 {
213 ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32, DataType::QASYMM8);
214 ARM_COMPUTE_ERROR_ON(_npy_tensor.info()->dimension(0) != tensor.info()->dimension(0));
215
216 switch(tensor.info()->data_type())
217 {
218 case DataType::QASYMM8:
219 access_numpy_tensor<qasymm8_t>(tensor, 0);
220 break;
221 case DataType::F32:
222 access_numpy_tensor<float>(tensor, 0.0001f);
223 break;
224 default:
225 ARM_COMPUTE_ERROR("NOT SUPPORTED!");
226 }
227
228 return false;
229 }
230
231 #ifdef ARM_COMPUTE_ASSERTS_ENABLED
PrintAccessor(std::ostream & output_stream,IOFormatInfo io_fmt)232 PrintAccessor::PrintAccessor(std::ostream &output_stream, IOFormatInfo io_fmt)
233 : _output_stream(output_stream), _io_fmt(io_fmt)
234 {
235 }
236
access_tensor(ITensor & tensor)237 bool PrintAccessor::access_tensor(ITensor &tensor)
238 {
239 tensor.print(_output_stream, _io_fmt);
240 return false;
241 }
242 #endif /* ARM_COMPUTE_ASSERTS_ENABLED */
243
SaveNumPyAccessor(std::string npy_name,const bool is_fortran)244 SaveNumPyAccessor::SaveNumPyAccessor(std::string npy_name, const bool is_fortran)
245 : _npy_name(std::move(npy_name)), _is_fortran(is_fortran)
246 {
247 }
248
access_tensor(ITensor & tensor)249 bool SaveNumPyAccessor::access_tensor(ITensor &tensor)
250 {
251 ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);
252
253 utils::save_to_npy(tensor, _npy_name, _is_fortran);
254
255 return false;
256 }
257
ImageAccessor(std::string filename,bool bgr,std::unique_ptr<IPreprocessor> preprocessor)258 ImageAccessor::ImageAccessor(std::string filename, bool bgr, std::unique_ptr<IPreprocessor> preprocessor)
259 : _already_loaded(false), _filename(std::move(filename)), _bgr(bgr), _preprocessor(std::move(preprocessor))
260 {
261 }
262
/** Fill @p tensor with the configured image on the first call.
 *
 * The flag toggles on every call, so the accessor returns true right after
 * a load and false on the call that follows it.
 */
bool ImageAccessor::access_tensor(ITensor &tensor)
{
    if(!_already_loaded)
    {
        auto image_loader = utils::ImageLoaderFactory::create(_filename);
        ARM_COMPUTE_EXIT_ON_MSG(image_loader == nullptr, "Unsupported image type");

        // Open image file
        image_loader->open(_filename);

        // Get permutated shape and permutation parameters
        TensorShape permuted_shape = tensor.info()->tensor_shape();
        arm_compute::PermutationVector perm;
        if(tensor.info()->data_layout() != DataLayout::NCHW)
        {
            std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(), tensor.info()->data_layout());
        }

        // The image's (width, height) must match the layout-adjusted tensor shape
        ARM_COMPUTE_EXIT_ON_MSG_VAR(image_loader->width() != permuted_shape.x() || image_loader->height() != permuted_shape.y(),
                                    "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu64 ",%" PRIu64 "].",
                                    image_loader->width(), image_loader->height(),
                                    static_cast<uint64_t>(permuted_shape.x()), static_cast<uint64_t>(permuted_shape.y()));

        // Fill the tensor with the PPM content (BGR)
        image_loader->fill_planar_tensor(tensor, _bgr);

        // Preprocess tensor
        if(_preprocessor)
        {
            _preprocessor->preprocess(tensor);
        }
    }

    _already_loaded = !_already_loaded;
    return _already_loaded;
}
299
/** Construct a validation input accessor.
 *
 * Reads @p image_list line by line; the first whitespace-separated token of each
 * line is an image file name. Only lines whose index lies in [start, end]
 * (inclusive) are kept.
 */
ValidationInputAccessor::ValidationInputAccessor(const std::string &image_list,
                                                 std::string        images_path,
                                                 std::unique_ptr<IPreprocessor> preprocessor,
                                                 bool               bgr,
                                                 unsigned int       start,
                                                 unsigned int       end,
                                                 std::ostream      &output_stream)
    : _path(std::move(images_path)), _images(), _preprocessor(std::move(preprocessor)), _bgr(bgr), _offset(0), _output_stream(output_stream)
{
    ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!");

    std::ifstream ifs;
    try
    {
        // Only hard stream failures throw; EOF ends the loop via getline's failbit
        ifs.exceptions(std::ifstream::badbit);
        ifs.open(image_list, std::ios::in | std::ios::binary);

        // Parse image names
        unsigned int counter = 0;
        for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter)
        {
            // Add image to process if withing range
            if(counter >= start)
            {
                std::stringstream linestream(line);
                std::string       image_name;

                linestream >> image_name;
                _images.emplace_back(std::move(image_name));
            }
        }
    }
    catch(const std::ifstream::failure &e)
    {
        ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", image_list.c_str(), e.what());
    }
}
337
/** Load the next validation JPEG into @p tensor.
 *
 * @return True while there are still images left to process, false once the
 *         list is exhausted (which stops the validation loop).
 */
bool ValidationInputAccessor::access_tensor(arm_compute::ITensor &tensor)
{
    bool ret = _offset < _images.size();
    if(ret)
    {
        utils::JPEGLoader jpeg;

        // Open JPEG file
        std::string image_name = _path + _images[_offset++];
        jpeg.open(image_name);
        _output_stream << "[" << _offset << "/" << _images.size() << "] Validating " << image_name << std::endl;

        // Get permutated shape and permutation parameters
        TensorShape permuted_shape = tensor.info()->tensor_shape();
        arm_compute::PermutationVector perm;
        if(tensor.info()->data_layout() != DataLayout::NCHW)
        {
            std::tie(permuted_shape, perm) = compute_permutation_parameters(tensor.info()->tensor_shape(),
                                                                            tensor.info()->data_layout());
        }

        // The JPEG's (width, height) must match the layout-adjusted tensor shape
        ARM_COMPUTE_EXIT_ON_MSG_VAR(jpeg.width() != permuted_shape.x() || jpeg.height() != permuted_shape.y(),
                                    "Failed to load image file: dimensions [%d,%d] not correct, expected [%" PRIu64 ",%" PRIu64 "].",
                                    jpeg.width(), jpeg.height(),
                                    static_cast<uint64_t>(permuted_shape.x()), static_cast<uint64_t>(permuted_shape.y()));

        // Fill the tensor with the JPEG content (BGR)
        jpeg.fill_planar_tensor(tensor, _bgr);

        // Preprocess tensor
        if(_preprocessor)
        {
            _preprocessor->preprocess(tensor);
        }
    }

    return ret;
}
376
/** Construct a validation output accessor.
 *
 * Reads @p image_list line by line; each line is "<image_name> <label>", and
 * the integer label is kept for lines whose index lies in [start, end]
 * (inclusive). The labels are later compared against network predictions.
 */
ValidationOutputAccessor::ValidationOutputAccessor(const std::string &image_list,
                                                   std::ostream      &output_stream,
                                                   unsigned int       start,
                                                   unsigned int       end)
    : _results(), _output_stream(output_stream), _offset(0), _positive_samples_top1(0), _positive_samples_top5(0)
{
    ARM_COMPUTE_EXIT_ON_MSG(start > end, "Invalid validation range!");

    std::ifstream ifs;
    try
    {
        // Only hard stream failures throw; EOF ends the loop via getline's failbit
        ifs.exceptions(std::ifstream::badbit);
        ifs.open(image_list, std::ios::in | std::ios::binary);

        // Parse image correctly classified labels
        unsigned int counter = 0;
        for(std::string line; !std::getline(ifs, line).fail() && counter <= end; ++counter)
        {
            // Add label if within range
            if(counter >= start)
            {
                std::stringstream linestream(line);
                std::string       image_name;
                int               result;

                linestream >> image_name >> result;
                _results.emplace_back(result);
            }
        }
    }
    catch(const std::ifstream::failure &e)
    {
        ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", image_list.c_str(), e.what());
    }
}
412
reset()413 void ValidationOutputAccessor::reset()
414 {
415 _offset = 0;
416 _positive_samples_top1 = 0;
417 _positive_samples_top5 = 0;
418 }
419
/** Score one prediction tensor against the expected label list.
 *
 * Extracts a descending ranking of class ids from @p tensor, updates the
 * top-1/top-5 counters for the current sample, and prints the accuracy report
 * once all expected results have been consumed.
 *
 * @return True while there are still expected results left, false once done.
 */
bool ValidationOutputAccessor::access_tensor(arm_compute::ITensor &tensor)
{
    bool ret = _offset < _results.size();
    if(ret)
    {
        // Get results
        std::vector<size_t> tensor_results;
        switch(tensor.info()->data_type())
        {
            case DataType::QASYMM8:
                tensor_results = access_predictions_tensor<uint8_t>(tensor);
                break;
            case DataType::F16:
                tensor_results = access_predictions_tensor<half>(tensor);
                break;
            case DataType::F32:
                tensor_results = access_predictions_tensor<float>(tensor);
                break;
            default:
                ARM_COMPUTE_ERROR("NOT SUPPORTED!");
        }

        // Check if tensor results are within top-n accuracy
        size_t correct_label = _results[_offset++];

        aggregate_sample(tensor_results, _positive_samples_top1, 1, correct_label);
        aggregate_sample(tensor_results, _positive_samples_top5, 5, correct_label);
    }

    // Report top_n accuracy
    if(_offset >= _results.size())
    {
        report_top_n(1, _results.size(), _positive_samples_top1);
        report_top_n(5, _results.size(), _positive_samples_top5);
    }

    return ret;
}
458
459 template <typename T>
access_predictions_tensor(arm_compute::ITensor & tensor)460 std::vector<size_t> ValidationOutputAccessor::access_predictions_tensor(arm_compute::ITensor &tensor)
461 {
462 // Get the predicted class
463 std::vector<size_t> index;
464
465 const auto output_net = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
466 const size_t num_classes = tensor.info()->dimension(0);
467
468 index.resize(num_classes);
469
470 // Sort results
471 std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
472 std::sort(std::begin(index), std::end(index),
473 [&](size_t a, size_t b)
474 {
475 return output_net[a] > output_net[b];
476 });
477
478 return index;
479 }
480
aggregate_sample(const std::vector<size_t> & res,size_t & positive_samples,size_t top_n,size_t correct_label)481 void ValidationOutputAccessor::aggregate_sample(const std::vector<size_t> &res, size_t &positive_samples, size_t top_n, size_t correct_label)
482 {
483 auto is_valid_label = [correct_label](size_t label)
484 {
485 return label == correct_label;
486 };
487
488 if(std::any_of(std::begin(res), std::begin(res) + top_n, is_valid_label))
489 {
490 ++positive_samples;
491 }
492 }
493
report_top_n(size_t top_n,size_t total_samples,size_t positive_samples)494 void ValidationOutputAccessor::report_top_n(size_t top_n, size_t total_samples, size_t positive_samples)
495 {
496 size_t negative_samples = total_samples - positive_samples;
497 float accuracy = positive_samples / static_cast<float>(total_samples);
498
499 _output_stream << "----------Top " << top_n << " accuracy ----------" << std::endl
500 << std::endl;
501 _output_stream << "Positive samples : " << positive_samples << std::endl;
502 _output_stream << "Negative samples : " << negative_samples << std::endl;
503 _output_stream << "Accuracy : " << accuracy << std::endl;
504 }
505
DetectionOutputAccessor(const std::string & labels_path,std::vector<TensorShape> & imgs_tensor_shapes,std::ostream & output_stream)506 DetectionOutputAccessor::DetectionOutputAccessor(const std::string &labels_path, std::vector<TensorShape> &imgs_tensor_shapes, std::ostream &output_stream)
507 : _labels(), _tensor_shapes(std::move(imgs_tensor_shapes)), _output_stream(output_stream)
508 {
509 _labels.clear();
510
511 std::ifstream ifs;
512
513 try
514 {
515 ifs.exceptions(std::ifstream::badbit);
516 ifs.open(labels_path, std::ios::in | std::ios::binary);
517
518 for(std::string line; !std::getline(ifs, line).fail();)
519 {
520 _labels.emplace_back(line);
521 }
522 }
523 catch(const std::ifstream::failure &e)
524 {
525 ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", labels_path.c_str(), e.what());
526 }
527 }
528
/** Print all detections contained in @p tensor.
 *
 * Each detection is a run of 7 values:
 * [image_id, label_id, confidence, xmin, ymin, xmax, ymax], with the box
 * coordinates normalized; they are scaled back up by the corresponding input
 * image shape before printing.
 */
template <typename T>
void DetectionOutputAccessor::access_predictions_tensor(ITensor &tensor)
{
    // Number of detections is given by the valid region, not the full tensor shape
    const size_t num_detection = tensor.info()->valid_region().shape.y();
    const auto   output_prt    = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());

    if(num_detection > 0)
    {
        _output_stream << "---------------------- Detections ----------------------" << std::endl
                       << std::endl;

        _output_stream << std::left << std::setprecision(4) << std::setw(8) << "Image | " << std::setw(8) << "Label | " << std::setw(12) << "Confidence | "
                       << "[ xmin, ymin, xmax, ymax ]" << std::endl;

        for(size_t i = 0; i < num_detection; ++i)
        {
            auto im = static_cast<const int>(output_prt[i * 7]);
            _output_stream << std::setw(8) << im << std::setw(8)
                           << _labels[output_prt[i * 7 + 1]] << std::setw(12) << output_prt[i * 7 + 2]
                           << " [" << (output_prt[i * 7 + 3] * _tensor_shapes[im].x())
                           << ", " << (output_prt[i * 7 + 4] * _tensor_shapes[im].y())
                           << ", " << (output_prt[i * 7 + 5] * _tensor_shapes[im].x())
                           << ", " << (output_prt[i * 7 + 6] * _tensor_shapes[im].y())
                           << "]" << std::endl;
        }
    }
    else
    {
        _output_stream << "No detection found." << std::endl;
    }
}
560
access_tensor(ITensor & tensor)561 bool DetectionOutputAccessor::access_tensor(ITensor &tensor)
562 {
563 ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);
564
565 switch(tensor.info()->data_type())
566 {
567 case DataType::F32:
568 access_predictions_tensor<float>(tensor);
569 break;
570 default:
571 ARM_COMPUTE_ERROR("NOT SUPPORTED!");
572 }
573
574 return false;
575 }
576
TopNPredictionsAccessor(const std::string & labels_path,size_t top_n,std::ostream & output_stream)577 TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, size_t top_n, std::ostream &output_stream)
578 : _labels(), _output_stream(output_stream), _top_n(top_n)
579 {
580 _labels.clear();
581
582 std::ifstream ifs;
583
584 try
585 {
586 ifs.exceptions(std::ifstream::badbit);
587 ifs.open(labels_path, std::ios::in | std::ios::binary);
588
589 for(std::string line; !std::getline(ifs, line).fail();)
590 {
591 _labels.emplace_back(line);
592 }
593 }
594 catch(const std::ifstream::failure &e)
595 {
596 ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", labels_path.c_str(), e.what());
597 }
598 }
599
600 template <typename T>
access_predictions_tensor(ITensor & tensor)601 void TopNPredictionsAccessor::access_predictions_tensor(ITensor &tensor)
602 {
603 // Get the predicted class
604 std::vector<T> classes_prob;
605 std::vector<size_t> index;
606
607 const auto output_net = reinterpret_cast<T *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
608 const size_t num_classes = tensor.info()->dimension(0);
609
610 classes_prob.resize(num_classes);
611 index.resize(num_classes);
612
613 std::copy(output_net, output_net + num_classes, classes_prob.begin());
614
615 // Sort results
616 std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
617 std::sort(std::begin(index), std::end(index),
618 [&](size_t a, size_t b)
619 {
620 return classes_prob[a] > classes_prob[b];
621 });
622
623 _output_stream << "---------- Top " << _top_n << " predictions ----------" << std::endl
624 << std::endl;
625 for(size_t i = 0; i < _top_n; ++i)
626 {
627 _output_stream << std::fixed << std::setprecision(4)
628 << +classes_prob[index.at(i)]
629 << " - [id = " << index.at(i) << "]"
630 << ", " << _labels[index.at(i)] << std::endl;
631 }
632 }
633
access_tensor(ITensor & tensor)634 bool TopNPredictionsAccessor::access_tensor(ITensor &tensor)
635 {
636 ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32, DataType::QASYMM8);
637 ARM_COMPUTE_ERROR_ON(_labels.size() != tensor.info()->dimension(0));
638
639 switch(tensor.info()->data_type())
640 {
641 case DataType::QASYMM8:
642 access_predictions_tensor<uint8_t>(tensor);
643 break;
644 case DataType::F32:
645 access_predictions_tensor<float>(tensor);
646 break;
647 default:
648 ARM_COMPUTE_ERROR("NOT SUPPORTED!");
649 }
650
651 return false;
652 }
653
RandomAccessor(PixelValue lower,PixelValue upper,std::random_device::result_type seed)654 RandomAccessor::RandomAccessor(PixelValue lower, PixelValue upper, std::random_device::result_type seed)
655 : _lower(lower), _upper(upper), _seed(seed)
656 {
657 }
658
/** Fill @p tensor with values drawn from @p distribution, seeded deterministically.
 *
 * Takes a fast linear pass over the raw buffer when the tensor is unpadded and
 * not a sub-tensor; otherwise iterates element-wise through an execution window
 * so padding bytes are skipped.
 */
template <typename T, typename D>
void RandomAccessor::fill(ITensor &tensor, D &&distribution)
{
    // Fixed seed -> reproducible fills across runs
    std::mt19937 gen(_seed);

    if(tensor.info()->padding().empty() && (dynamic_cast<SubTensor *>(&tensor) == nullptr))
    {
        for(size_t offset = 0; offset < tensor.info()->total_size(); offset += tensor.info()->element_size())
        {
            const auto value                                 = static_cast<T>(distribution(gen));
            *reinterpret_cast<T *>(tensor.buffer() + offset) = value;
        }
    }
    else
    {
        // If tensor has padding accessing tensor elements through execution window.
        Window window;
        window.use_tensor_dimensions(tensor.info()->tensor_shape());

        execute_window_loop(window, [&](const Coordinates & id)
        {
            const auto value                                  = static_cast<T>(distribution(gen));
            *reinterpret_cast<T *>(tensor.ptr_to_element(id)) = value;
        });
    }
}
685
/** Fill @p tensor with uniformly distributed random values matching its data type.
 *
 * Dispatches on the tensor's data type to pick the matching uniform
 * distribution over [_lower, _upper]; always returns true so the graph keeps
 * running with the freshly generated data.
 */
bool RandomAccessor::access_tensor(ITensor &tensor)
{
    switch(tensor.info()->data_type())
    {
        case DataType::QASYMM8:
        case DataType::U8:
        {
            std::uniform_int_distribution<uint8_t> distribution_u8(_lower.get<uint8_t>(), _upper.get<uint8_t>());
            fill<uint8_t>(tensor, distribution_u8);
            break;
        }
        case DataType::S8:
        {
            std::uniform_int_distribution<int8_t> distribution_s8(_lower.get<int8_t>(), _upper.get<int8_t>());
            fill<int8_t>(tensor, distribution_s8);
            break;
        }
        case DataType::U16:
        {
            std::uniform_int_distribution<uint16_t> distribution_u16(_lower.get<uint16_t>(), _upper.get<uint16_t>());
            fill<uint16_t>(tensor, distribution_u16);
            break;
        }
        case DataType::S16:
        {
            std::uniform_int_distribution<int16_t> distribution_s16(_lower.get<int16_t>(), _upper.get<int16_t>());
            fill<int16_t>(tensor, distribution_s16);
            break;
        }
        case DataType::U32:
        {
            std::uniform_int_distribution<uint32_t> distribution_u32(_lower.get<uint32_t>(), _upper.get<uint32_t>());
            fill<uint32_t>(tensor, distribution_u32);
            break;
        }
        case DataType::S32:
        {
            std::uniform_int_distribution<int32_t> distribution_s32(_lower.get<int32_t>(), _upper.get<int32_t>());
            fill<int32_t>(tensor, distribution_s32);
            break;
        }
        case DataType::U64:
        {
            std::uniform_int_distribution<uint64_t> distribution_u64(_lower.get<uint64_t>(), _upper.get<uint64_t>());
            fill<uint64_t>(tensor, distribution_u64);
            break;
        }
        case DataType::S64:
        {
            std::uniform_int_distribution<int64_t> distribution_s64(_lower.get<int64_t>(), _upper.get<int64_t>());
            fill<int64_t>(tensor, distribution_s64);
            break;
        }
        case DataType::F16:
        {
            // std::uniform_real_distribution is not defined for half, so a project helper is used
            arm_compute::utils::uniform_real_distribution_16bit<half> distribution_f16(_lower.get<float>(), _upper.get<float>());
            fill<half>(tensor, distribution_f16);
            break;
        }
        case DataType::F32:
        {
            std::uniform_real_distribution<float> distribution_f32(_lower.get<float>(), _upper.get<float>());
            fill<float>(tensor, distribution_f32);
            break;
        }
        case DataType::F64:
        {
            std::uniform_real_distribution<double> distribution_f64(_lower.get<double>(), _upper.get<double>());
            fill<double>(tensor, distribution_f64);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
    return true;
}
762
NumPyBinLoader(std::string filename,DataLayout file_layout)763 NumPyBinLoader::NumPyBinLoader(std::string filename, DataLayout file_layout)
764 : _already_loaded(false), _filename(std::move(filename)), _file_layout(file_layout)
765 {
766 }
767
access_tensor(ITensor & tensor)768 bool NumPyBinLoader::access_tensor(ITensor &tensor)
769 {
770 if(!_already_loaded)
771 {
772 utils::NPYLoader loader;
773 loader.open(_filename, _file_layout);
774 loader.fill_tensor(tensor);
775 }
776
777 _already_loaded = !_already_loaded;
778 return _already_loaded;
779 }
780